Python theano.tensor module: min() example source code

The following 41 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.min().

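As a quick orientation before the project snippets, here is a minimal, self-contained sketch of the call itself (the input data is made up): like numpy.min, T.min() reduces over all elements by default, over a given axis otherwise, and keepdims preserves the reduced axis as length 1.

import numpy as np
import theano
import theano.tensor as T

x = T.fmatrix('x')
# global minimum, row-wise minimum, and row-wise minimum with the
# reduced axis kept as a length-1 dimension
f = theano.function([x], [T.min(x),
                          T.min(x, axis=1),
                          T.min(x, axis=1, keepdims=True)])

data = np.array([[3., 1.], [2., 5.]], dtype='float32')
print(f(data))  # [array(1.0), array([1., 2.]), array([[1.], [2.]])]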
Project: face_detection    Author: chintak    | Project source | File source
def iou_loss(p, t):
    # print "pass"
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps_t1 - overlaps_t0
    bool_overlap = T.min(intersection, axis=1) > 0  # note: computed but never applied below
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = T.maximum(intersection, np.float32(0.))
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - T.minimum(
        T.exp(T.log(T.abs_(intersection)) -
              T.log(T.abs_(union) + np.float32(1e-5))),
        np.float32(1.)
    )
    # return loss
    return T.mean(loss)
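A minimal sketch of compiling and evaluating the symbolic loss above (it assumes iou_loss is defined in the same script, with numpy imported as np; boxes are encoded as rows of (x1, y1, x2, y2), and the coordinates below are made up):

import numpy as np
import theano
import theano.tensor as T

p, t = T.fmatrix('p'), T.fmatrix('t')  # (batch, 4) predicted / target boxes
f = theano.function([p, t], iou_loss(p, t))

pred = np.array([[0., 0., 2., 2.]], dtype='float32')
target = np.array([[1., 1., 3., 3.]], dtype='float32')
print(f(pred, target))  # IoU = 1/7, so the loss is roughly 1 - 1/7 = 0.857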
Project: face_detection    Author: chintak    | Project source | File source
def iou_loss_val(p, t):
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps = np.zeros_like(tp, dtype=np.float32)
    overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
    bool_overlap = np.min(intersection, axis=1) > 0  # note: computed but never applied below
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = np.maximum(intersection, 0.)
    # print "bool", bool_overlap
    # print "Int", intersection
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    # print "un", union
    loss = 1. - np.minimum(
        np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
        1.
    )
    # print loss
    return np.mean(loss)
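A quick numeric sanity check of the NumPy version (made-up boxes, assuming iou_loss_val is in scope): identical boxes should give a loss near 0, and a box covering half of another gives IoU = 1/3.

box = np.array([[0., 0., 2., 2.]], dtype=np.float32)
half = np.array([[1., 0., 3., 2.]], dtype=np.float32)
print(iou_loss_val(box, box))   # ~0: perfect overlap (IoU = 1)
print(iou_loss_val(box, half))  # ~0.667: IoU = 1/3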
Project: EAC-Net    Author: wiibrew    | Project source | File source
def LSTM_input(fls,data_size=BATCH_SIZE):
    random.shuffle(fls)
    fls=fls[:data_size]
    npdata_prepath='../DATA/EAC_feat/'
    lstm_data=np.zeros((data_size,24,2048))
    lstm_lb=np.zeros((data_size,12))
    for i,f in enumerate(fls):
        fname,flabel,fpos=f.split('->')
        lstm_lb[i,:]=np.array(patt.findall(flabel))

        for t in range(12):
            lstm_lb[i,t]=min(lstm_lb[i,t],1)

        img_name=fls[i].split('.')[0]
        ind_cur=dic[img_name]
        new_fls=gen_ind(img_name)
        for j,nf in enumerate(new_fls):  # renamed to avoid shadowing the outer loop variable `f`
            frame_array=np.load(npdata_prepath+nf.split('.')[0]+'.npy')
            lstm_data[i,j,:]=frame_array
    lstm_data=lstm_data.astype('float32')
    lstm_lb=lstm_lb.astype('float32')
    return lstm_data,lstm_lb

# listtrainpath='../DATA/BP4D_10fold/BP4D_SAD_trag_10fd2.txt'
# listtestpath='../DATA/BP4D_10fold/BP4D_SAD_ts_10fd2.txt'
Project: dl4nlp_in_theano    Author: luyaojie    | Project source | File source
def get_pooling_batch(hs, mask, pooling_method):
    """
    :param hs:   (batch, len, dim)
    :param mask: (batch, len)
    :param pooling_method:
    :return:
    """
    if pooling_method == 'max':
        add_v = ((1 - mask) * -BIG_INT)[:, :, None]
        return T.max(hs + add_v, axis=1)
    elif pooling_method == 'min':
        add_v = ((1 - mask) * BIG_INT)[:, :, None]
        return T.min(hs + add_v, axis=1)
    elif pooling_method in ['averaging', 'mean', 'average']:
        return T.sum(hs * mask[:, :, None], axis=1) / T.sum(mask, axis=1)[:, None]
    elif pooling_method == 'sum':
        return T.sum(hs * mask[:, :, None], axis=1)
    elif pooling_method in ['final', 'last']:
        return hs[:, -1, :]
    else:
        raise NotImplementedError('Not implemented pooling method: {}'.format(pooling_method))
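A minimal sketch of the masked min-pooling branch, assuming the function above is defined in the same script; BIG_INT is a module-level constant in the original project, and 1e9 is an assumed value here. Padded positions get +BIG_INT added so they can never win the minimum:

import numpy as np
import theano
import theano.tensor as T

BIG_INT = 1e9  # assumed value; the original project defines this at module level

hs = T.ftensor3('hs')     # (batch, len, dim)
mask = T.fmatrix('mask')  # (batch, len): 1 for real tokens, 0 for padding
f = theano.function([hs, mask], get_pooling_batch(hs, mask, 'min'))

h = np.array([[[1., 4.], [3., 2.], [9., 9.]]], dtype='float32')
m = np.array([[1., 1., 0.]], dtype='float32')  # last step is padding
print(f(h, m))  # [[1. 2.]] -- the padded [9., 9.] step is ignored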
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_reduce_broadcast_some_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, False, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)

            order = f.maker.fgraph.toposort()
            assert 1 == sum([isinstance(node.op, T.CAReduce)
                             for node in order])

            node = [node for node in order if isinstance(node.op,
                                                         tensor.CAReduce)][0]

            op = node.op
            assert isinstance(op, T.CAReduce)
            # -- the leading broadcastable dimension has been dropped
            #   by the local_reduce_broadcastable optimization
            #   now summation is over the original x's dimension 1.
            assert node.inputs[0].ndim == 2, node
            assert op.axis == (0,), op.axis
Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def get_output_for(self, input, **kwargs):
        # take the minimal working slice size, and use that one.
        if self.allow_negative:
            inp_low_zero = input - T.min(input, axis=1).dimshuffle(0, 'x')
        else:
            inp_low_zero = input
        return inp_low_zero / T.sum(inp_low_zero, axis=1).dimshuffle(0, 'x') * self.norm_sum
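When allow_negative is set, the layer shifts each row by its row minimum so negative inputs become valid, then rescales each row to sum to norm_sum. A plain-Theano sketch of that arithmetic, with norm_sum = 1 assumed:

import numpy as np
import theano
import theano.tensor as T

x = T.fmatrix('x')
norm_sum = 1.0                                     # assumed target sum
shifted = x - T.min(x, axis=1).dimshuffle(0, 'x')  # row minima become 0
normed = shifted / T.sum(shifted, axis=1).dimshuffle(0, 'x') * norm_sum

f = theano.function([x], normed)
print(f(np.array([[-1., 0., 3.]], dtype='float32')))  # [[0., 0.2, 0.8]]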
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def _get_hidden_layer_connectivity(self, layerIdx):
        layer_size = self._hidden_sizes[layerIdx]
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx-1]))

        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)
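The returned expression is a GPU-friendly categorical sampler: for a one-hot draw at position i over the reversed probabilities, sum(cumsum(one_hot)) equals K - i, which is the sampled index under the original ordering plus one. A NumPy sketch of the trick (the probabilities are made up):

import numpy as np

rng = np.random.RandomState(0)
p = np.array([0.2, 0.5, 0.3])
draws = rng.multinomial(1, p[::-1], size=100000)  # one-hot rows, reversed pvals
samples = draws.cumsum(axis=1).sum(axis=1)        # values in {1, ..., K}
print(np.bincount(samples)[1:] / 100000.)         # approximately [0.2 0.5 0.3]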
Project: keras    Author: GeekLiB    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: EAC-Net    Author: wiibrew    | Project source | File source
def get_f1_acc(outputs, y_labels):
    # threshold the sigmoid outputs at 0.5
    outputs_i = (outputs + 0.5).astype('int32')
    y_ilab = y_labels.astype('int32')
    gd_num = T.sum(y_ilab, axis=0)     # ground-truth positives per label
    pr_num = T.sum(outputs_i, axis=0)  # predicted positives per label

    # integer division: 1 only where prediction and label are both 1,
    # i.e. a true positive
    sum_ones = y_ilab + outputs_i
    pr_rtm = sum_ones // 2
    pr_rt = T.sum(pr_rtm, axis=0)      # true positives per label

    # prevent NaN from destroying the F1
    pr_rt = pr_rt.astype('float32')
    gd_num = gd_num.astype('float32')
    pr_num = pr_num.astype('float32')

    acc = pr_rt / outputs.shape[0]

    # A Python `if` on a symbolic comparison is always truthy, so the
    # original guards never fired correctly; T.switch evaluates the
    # condition inside the graph instead.
    zero_scale = T.zeros_like(T.min(pr_rt))
    gd_num = T.switch(T.eq(zero_scale, T.min(gd_num)), gd_num + 1., gd_num)
    pr_num = T.switch(T.eq(zero_scale, T.min(pr_num)), pr_num + 1., pr_num)
    pr_rt = T.switch(T.eq(zero_scale, T.min(pr_rt)), pr_rt + 0.01, pr_rt)

    recall = pr_rt / gd_num
    precision = pr_rt / pr_num
    f1 = 2 * recall * precision / (recall + precision)
    return acc, f1
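A minimal sketch of compiling the metric above, assuming the function is defined in the same script (the batch data is made up: two samples with 12 binary labels each):

import numpy as np
import theano
import theano.tensor as T

outputs = T.fmatrix('outputs')    # sigmoid scores in [0, 1]
y_labels = T.fmatrix('y_labels')  # binary ground truth
acc, f1 = get_f1_acc(outputs, y_labels)
f = theano.function([outputs, y_labels], [acc, f1])

scores = np.random.rand(2, 12).astype('float32')
labels = (np.random.rand(2, 12) > 0.5).astype('float32')
print(f(scores, labels))  # per-label accuracy and F1 vectors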
Project: keraflow    Author: ipod825    | Project source | File source
def min(self, x, axis=None, keepdims=False):
        return T.min(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: deeplift    Author: kundajelab    | Project source | File source
def min(x, axis):
    return T.min(x, axis=axis)
Project: keras-customized    Author: ambrite    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: reading-text-in-the-wild    Author: mathDR    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: SE16-Task6-Stance-Detection    Author: nestle1993    | Project source | File source
def predict_possible_label(self, new_data):
        next_layer_input = new_data
        for i,layer in enumerate(self.layers):
            if i<len(self.layers)-1:
                next_layer_input = self.activations[i](T.dot(next_layer_input,layer.W) + layer.b)
            else:
                p_y_given_x = T.nnet.softmax(T.dot(next_layer_input, layer.W) + layer.b)

                # T.max/T.min return symbolic variables; wrapping them in
                # np.array would yield a 0-d object array, so use them directly
                y_max = T.max(p_y_given_x,axis=1)
                y_min = T.min(p_y_given_x,axis=1)
                y_sub = abs(y_max - y_min)
                y_pred_2label = y_sub
        return y_pred_2label
Project: Theano-NN_Starter    Author: nightinwhite    | Project source | File source
def step(self, t, s_p, c_p, X):
        #x_t = X[:,t]
        #X = T.matrix()
        if len(self.input_shape) == 3:
            x_t = X[:, t]
        else:
            x_t = X[:, t:t+1]
        x_t = x_t/(1.0+(T.max(x_t)-T.min(x_t)))
        #x_t = X[:,t+self.input_shape[1]-self.hidden_dim+1:t+self.input_shape[1]+1]
        #x_t = x_t*self.E
        #test = T.dot(x_t, self.U)
        res_s = T.dot(x_t, self.U) + T.dot(s_p, self.W) + self.b#[index,channel,hidden_dim]
        i = T.nnet.hard_sigmoid(res_s[:, 0, :])# (index,hidden_dim)
        f = T.nnet.hard_sigmoid(res_s[:, 1, :])#(index,hidden_dim)
        o = T.nnet.hard_sigmoid(res_s[:, 2, :])#(index,hidden_dim)
        g = T.tanh(res_s[:, 3, :])#(index,hidden_dim)
        # i = T.nnet.hard_sigmoid(T.dot(x_t, self.U[0])+T.dot(s_p,self.W[0])+self.b[0])#(index,hidden_dim)
        # f = T.nnet.hard_sigmoid(T.dot(x_t, self.U[1])+T.dot(s_p,self.W[1])+self.b[1])#(index,hidden_dim)
        # o = T.nnet.hard_sigmoid(T.dot(x_t, self.U[2])+T.dot(s_p,self.W[2])+self.b[2])#(index,hidden_dim)
        # g = T.tanh(T.dot(x_t, self.U[3])+T.dot(s_p,self.W[3])+self.b[3])#(index,hidden_dim)
        c_t = c_p*f + g*i#(index,hidden_dim)
        s_t = T.tanh(c_t)*o#(index,hidden_dim)
        # o_t = T.dot(s_t, self.V)#(index,1)
        # o_t = o_t+self.c[0]
        o_t = s_t
        #return o_t
        # o_t = T.cast(o_t,"float32")
        # s_t = T.cast(s_t,"float32")
        # c_t = T.cast(c_t, "float32")
        return [o_t, s_t, c_t]
        #return [o_t,s_t,c_t]
Project: uct_atari    Author: 5vision    | Project source | File source
def value_softmax(values, a_probs, norm=True, norm_coeff=10):
    val_max = T.max(values, axis=1, keepdims=True)
    if norm:
        val_min = T.min(values, axis=1, keepdims=True)
        values = 0.5 + (values - val_min) / 2. / (val_max - val_min + 1e-8)
    else:
        values = (values - val_max)
    values /= norm_coeff
    targets = T.nnet.softmax(values)
    return T.mean(T.nnet.categorical_crossentropy(a_probs, targets), axis=-1)
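A sketch of evaluating the target construction above, assuming the function is defined in the same script (the action values and policy probabilities are made up). Note the argument order of T.nnet.categorical_crossentropy: the predicted distribution a_probs comes first, the softmaxed value targets second:

import numpy as np
import theano
import theano.tensor as T

values = T.fmatrix('values')    # (batch, n_actions) action values
a_probs = T.fmatrix('a_probs')  # (batch, n_actions) policy probabilities
f = theano.function([values, a_probs], value_softmax(values, a_probs))

v = np.array([[1., 2., 5.]], dtype='float32')
pr = np.array([[0.2, 0.3, 0.5]], dtype='float32')
print(f(v, pr))  # scalar cross-entropy between the policy and value softmax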
Project: dl4nlp_in_theano    Author: luyaojie    | Project source | File source
def get_pooling(hs, pooling_method):
    if pooling_method == 'max':
        return T.max(hs, axis=0)
    elif pooling_method == 'min':
        return T.min(hs, axis=0)
    elif pooling_method in ['averaging', 'mean', 'average']:
        return T.mean(hs, axis=0)
    elif pooling_method == 'sum':
        return T.sum(hs, axis=0)
    elif pooling_method in ['final', 'last']:
        return hs[-1]
    else:
        raise NotImplementedError('Not implemented pooling method: {}'.format(pooling_method))
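The unbatched variant pools over the leading time axis; a quick check of the min branch, assuming get_pooling is in scope (the data is made up):

import numpy as np
import theano
import theano.tensor as T

hs = T.fmatrix('hs')  # (len, dim)
f = theano.function([hs], get_pooling(hs, 'min'))
print(f(np.array([[1., 5.], [3., 2.]], dtype='float32')))  # [1. 2.]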
Project: dl4nlp_in_theano    Author: luyaojie    | Project source | File source
def __init__(self, verbose=True):
        super(MinPoolingLayer, self).__init__(pooling='min', verbose=verbose)
Project: keras    Author: NVIDIA    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: keras_superpixel_pooling    Author: parag2489    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def speed_fusion(self, shared_fn=shared, gpu=False, s=None):
        """
        param type s: a slice object
        param s: a slice to apply to the case to execute. If None, exec all case.
        """

        # shp = (3000, 3000)
        shp = (1000, 1000)
        nb_repeat = 50
#        linker=gof.CLinker
#        linker=gof.OpWiseCLinker

        mode1 = copy.copy(compile.get_default_mode())
        mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')
        # TODO: clinker is much faster... but uses too much memory.
        # Possible cause: there is no deletion of intermediate values when
        # we don't keep the fct.
        # More plausible cause: we keep a link to the output data?
        # Follow-up: CLinker does the same... second cause?
        mode2 = copy.copy(compile.get_default_mode())
        mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')
        print("test with linker", str(mode1.linker))
        times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        print("times1 with local_elemwise_fusion")
        print(times1, times1.min(), times1.max(), times1.sum())
        print("times2 without local_elemwise_fusion")
        print(times2, times2.min(), times2.max(), times2.sum())
        d = times2 / times1

        print("times2/times1")
        print(d)
        print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \
            "mean", d.mean(), "std", d.std())
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_reduce_broadcast_all_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x)], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_reduce_broadcast_all_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_reduce_broadcast_some_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x, axis=[0, 2])], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_optimization_min(self):
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()

        for axis in [0, 1, -1]:
            f = function([n], tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)

            # test variant with neg to make sure we optimize correctly
            f = function([n], tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)  # max
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)

            f = function([n], -tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)  # max
            f(data)

            f = function([n], -tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # max
            f(data)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector('nan_guard')
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function(
                [guard_input], T.min(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function(
                [guard_input], T.max(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function(
                [guard_input], T.max(T.abs_(guard_input)),
                mode='FAST_RUN'
                )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
Project: statestream    Author: VolkerFischer    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: InnerOuterRNN    Author: Chemoinformatics    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: crayimage    Author: yandexdataschool    | Project source | File source
def _margin_score(self, weights):
    y0 = 1 - self.y
    y1 = self.y

    mu0 = T.min(y0 * weights + y1, axis=1)
    mu1 = T.max(y1 * weights, axis=1)

    return mu0 - mu1
Project: logicnn    Author: ZhitingHu    | Project source | File source
def distribution_helper(self, w, X, F, conds):
        nx = X.shape[0]
        distr = T.alloc(1.0, nx, self.K)
        distr,_ = theano.scan( 
            lambda c,x,f,d: ifelse(T.eq(c,1.), self.distribution_helper_helper(x,f), d),
            sequences=[conds, X, F, distr])
        distr,_ = theano.scan(
            lambda d: -w*(T.min(d,keepdims=True)-d), # relative value w.r.t the minimum
            sequences=distr)
        return distr
Project: logicnn    Author: ZhitingHu    | Project source | File source
def value_single(self, x, y, f):
        ret = T.mean([T.min([1.-y+f[2],1.]), T.min([1.-f[2]+y,1.])])
        ret = T.cast(ret, dtype=theano.config.floatX)
        return T.cast(ifelse(T.eq(self.condition_single(x,f),1.), ret, 1.),
                      dtype=theano.config.floatX)
Project: odin_old    Author: trungnt13    | Project source | File source
def min(x, axis=None, keepdims=False):
    return T.min(x, axis=axis, keepdims=keepdims)
Project: NCRF-AE    Author: cosmozhang    | Project source | File source
def forward_z(self, unary_potentials, interaction_potentials, viterbi = False):

        def inner_function(unary, alpha_tm1_max, alpha_tm1_min, alpha_tm1, interaction):
            # interaction is a (classes by classes) matrix
            # unary is a (classes) vector
            alpha_tm1_max = alpha_tm1_max.dimshuffle(0, 'x')
            alpha_tm1_min = alpha_tm1_min.dimshuffle(0, 'x')
            alpha_tm1 = alpha_tm1.dimshuffle(0, 'x')
            unary = unary.dimshuffle('x', 0)

            out1 = T.max( alpha_tm1_max + unary + interaction, axis = 0)
            out2 = T.min( alpha_tm1_min + unary + interaction, axis = 0)
            out3 = theano_logsumexp( alpha_tm1 + unary + interaction, axis = 0)
            out_argmax = T.argmax( alpha_tm1 + unary + interaction, axis = 0)

            return [out1, out2, out3, out_argmax]

        assert unary_potentials.ndim == 2 #timesteps, classes
        assert interaction_potentials.ndim == 2 #classes+2, classes+2

        initial = unary_potentials[0]

        [alpha_max, alpha_min, alpha, argmax_preds], _ = theano.scan(fn = inner_function,
                                sequences = [unary_potentials[1:]],
                                outputs_info = [initial, initial, initial, None],
                                non_sequences = interaction_potentials)

        def return_seq(trace_at_t, label_idx):

            return trace_at_t[label_idx]

        bestseq, _ = theano.scan(fn = return_seq,
                                sequences = argmax_preds[::-1],
                                outputs_info = T.argmax(alpha[-1]))

        pred_seq = T.concatenate([bestseq[::-1], [T.argmax(alpha[-1])]], axis = 0)

        adplr = T.exp(T.max(alpha_max[-1], axis = 0) - theano_logsumexp(alpha[-1], axis=0)) - T.exp(T.min(alpha_min[-1], axis = 0) - theano_logsumexp(alpha[-1], axis=0))

        if viterbi:
            return T.max(alpha_max[-1], axis = 0), pred_seq, adplr
        else:
            return theano_logsumexp(alpha[-1], axis=0), alpha[:-1, 0:-2], adplr
Project: EAC-Net    Author: wiibrew    | Project source | File source
def imdata(fls,data_size=BATCH_SIZE):
    datablob=np.ndarray((data_size,4,IM_SIZE,IM_SIZE))
    datalb=np.zeros((data_size,1,1,12))
    dataps=np.zeros((data_size,10,4))
    n=len(fls)
    random.shuffle(fls)
    fls=fls[:data_size]

    im224=np.zeros((4,IM_SIZE,IM_SIZE))
    for i,f in enumerate(fls):
        fname,flabel,fpos=f.split('->')
        pre_path='/home/wei/DATA/BP4D_FACE/'
        imi=cv2.imread(pre_path+fname)
        if imi is None:  # `== None` on an ndarray compares elementwise; use `is None`
            # print fname
            fname,flabel,fpos=fls[0].split('->')
            imi=cv2.imread(pre_path+fname)
        # cv2 reads an image as an HxWx3 array in BGR channel order
        if imi is None: continue
        # imi=get_face.one_big_face(imi)
        for t in range(3):
            im224[t,:,:]=cv2.resize(imi[:,:,t],(IM_SIZE,IM_SIZE))
        shape_str=fpos[1:-2]
        np_shape=np.array([float(t) for t in shape_str.split(',')])
        imshape=np.reshape(np_shape,(68,2))
        feat_map=get_attention_map_dlib.get_map(imshape,imi.shape[0],imi.shape[1])
        feat_map224=cv2.resize(feat_map,(224,224))
        im224[3,:,:]=feat_map224
        datablob[i,:,:,:]=im224

        #then the label 
        datalb[i,0,0,:]=np.array(patt.findall(flabel))
        for t in range(12):
            datalb[i,0,0,t]=min(datalb[i,0,0,t],1)

        dataps[i,:,:]=get_attention_map_dlib.get_au_tg_dlib(imshape,imi.shape[0],imi.shape[1])

    datablob=datablob.astype('float32')
    datalb=datalb.astype('float32')
    dataps=dataps.astype('float32')
    dataps/=100
    dataps*=28
    dataps=dataps.astype('int32')
    # print dataps[0,:,:]
    return datablob,datalb,dataps
Project: tsNET    Author: HanKruiger    | Project source | File source
def find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, verbose=0):
    X = T.fmatrix('X')
    sigma = T.fvector('sigma')

    target = np.log(perplexity)

    P = T.maximum(p_ij_conditional_var(X, sigma), epsilon)

    entropy = -T.sum(P * T.log(P), axis=1)

    # Setting update for binary search interval
    sigmin_shared = theano.shared(np.full(N, np.sqrt(epsilon), dtype=floath))
    sigmax_shared = theano.shared(np.full(N, np.inf, dtype=floath))

    sigmin = T.fvector('sigmin')
    sigmax = T.fvector('sigmax')

    upmin = T.switch(T.lt(entropy, target), sigma, sigmin)
    upmax = T.switch(T.gt(entropy, target), sigma, sigmax)

    givens = {X: X_shared, sigma: sigma_shared, sigmin: sigmin_shared,
              sigmax: sigmax_shared}
    updates = [(sigmin_shared, upmin), (sigmax_shared, upmax)]

    update_intervals = theano.function([], entropy, givens=givens, updates=updates)

    # Setting update for sigma according to search interval
    upsigma = T.switch(T.isinf(sigmax), sigma * 2, (sigmin + sigmax) / 2.)

    givens = {sigma: sigma_shared, sigmin: sigmin_shared,
              sigmax: sigmax_shared}
    updates = [(sigma_shared, upsigma)]

    update_sigma = theano.function([], sigma, givens=givens, updates=updates)

    for i in range(sigma_iters):
        e = update_intervals()
        update_sigma()
        if verbose:
            print('Finding sigmas... Iteration {0}/{1}: Perplexities in [{2:.4f}, {3:.4f}].'.format(i + 1, sigma_iters, np.exp(e.min()), np.exp(e.max())), end='\r')
        if np.any(np.isnan(np.exp(e))):
            raise SigmaTooLowException('Invalid sigmas. The perplexity is probably too low.')
    if verbose:
        print('\nDone. Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(e.min()), np.exp(e.max())))


# Perform momentum-based gradient descent on the cost function with the given
# parameters. Return the vertex coordinates and per-vertex cost.
Project: SE16-Task6-Stance-Detection    Author: nestle1993    | Project source | File source
def __init__(self, input, n_in, n_out, W=None, b=None):
        """ Initialize the parameters of the logistic regression

    :type input: theano.tensor.TensorType
    :param input: symbolic variable that describes the input of the
    architecture (one minibatch)

    :type n_in: int
    :param n_in: number of input units, the dimension of the space in
    which the datapoints lie

    :type n_out: int
    :param n_out: number of output units, the dimension of the space in
    which the labels lie

    """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            self.W = theano.shared(
                    value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
                    name='W')
        else:
            self.W = W

        # initialize the biases b as a vector of n_out 0s
        if b is None:
            self.b = theano.shared(
                    value=numpy.zeros((n_out,), dtype=theano.config.floatX),
                    name='b')
        else:
            self.b = b

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # T.max/T.min return symbolic variables; wrapping them in np.array
        # would yield a 0-d object array, so use them directly
        y_max = T.max(self.p_y_given_x,axis=1)
        y_min = T.min(self.p_y_given_x,axis=1)
        y_sub = abs(y_max - y_min)
        self.y_pred_2label = y_sub

        # parameters of the model
        self.params = [self.W, self.b]
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def contains_nan(arr, node=None, var=None):
    """
    Test whether a numpy.ndarray contains any `np.nan` values.

    Parameters
    ----------
    arr : np.ndarray or output of any Theano op
    node : None or an Apply instance.
        If arr is the output of a Theano op, the node associated to it.
    var : The Theano symbolic variable.

    Returns
    -------
    contains_nan : bool
        `True` if the array contains any `np.nan` values, `False` otherwise.

    Notes
    -----
    Tests for the presence of `np.nan`'s using `np.isnan(np.min(ndarray))`.
    This approach is faster and more memory efficient than the obvious
    alternative, calling `np.any(np.isnan(ndarray))`, which requires the
    construction of a boolean array with the same shape as the input array.

    """
    # This should be a whitelist instead of a blacklist
    if isinstance(arr, theano.gof.type._cdata_type):
        return False
    elif isinstance(arr, np.random.mtrand.RandomState):
        return False
    elif var and getattr(var.tag, 'is_rng', False):
        return False
    elif isinstance(arr, slice):
        return False
    elif arr.size == 0:
        return False
    elif cuda.cuda_available and isinstance(arr, cuda.CudaNdarray):
        if (node and hasattr(theano.sandbox, 'rng_mrg') and
            isinstance(
                node.op,
                # It store ints in float container
                theano.sandbox.rng_mrg.GPU_mrg_uniform)):
            return False
        else:
            compile_gpu_func(True, False, False)
            return np.isnan(f_gpumin(arr.reshape(arr.size)))
    elif pygpu_available and isinstance(arr, GpuArray):
        return np.isnan(f_gpua_min(arr.reshape(arr.size)))

    return np.isnan(np.min(arr))
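The np.isnan(np.min(...)) idiom the docstring describes works because min propagates NaN without allocating an intermediate boolean array. A quick NumPy check:

import numpy as np

a = np.arange(6, dtype=np.float64)
b = a.copy()
b[3] = np.nan
print(np.isnan(np.min(a)), np.isnan(np.min(b)))  # False True
# np.any(np.isnan(...)) gives the same answers, but first materializes
# a boolean array of the same shape as the input
print(np.any(np.isnan(a)), np.any(np.isnan(b)))  # False True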
Project: crayimage    Author: yandexdataschool    | Project source | File source
def __init__(self, kernel, n_iter = 10, intermediate_loss_coefs=None):
    self.kernel = kernel

    self.X = theano.shared(
      np.zeros(shape=(0, 0, 0), dtype='float32')
    )

    self.weights_initial = theano.shared(
      np.ones(shape=(0, 0), dtype='float32')
    )

    self.y = theano.shared(
      np.ones(shape=(0, 0), dtype='float32')
    )

    scores = []
    intermediate_weights = []

    for i in range(n_iter):
      weights = self.weights_initial if i == 0 else intermediate_weights[-1]
      weights_update = self._em_step(weights)
      intermediate_weights.append(weights_update)
      separation_score = self._fishers_score(weights_update)
      scores.append(separation_score)

    if intermediate_loss_coefs is not None:
      assert len(intermediate_loss_coefs) == n_iter
      full_score = Ssum([
        c * l for c, l in zip(intermediate_loss_coefs, scores)
      ]) / np.sum(intermediate_loss_coefs)
    else:
      full_score = scores[-1]

    min_score = T.min(full_score)
    mean_score = T.mean(full_score)

    params = self.kernel.params
    learning_rate = T.fscalar('learning rate')

    upd_max = lasagne.updates.adadelta(-min_score, params, learning_rate=learning_rate)
    upd_mean = lasagne.updates.adadelta(-mean_score, params, learning_rate=learning_rate)

    self.train_max = theano.function([learning_rate], full_score, updates=upd_max)
    self.train_mean = theano.function([learning_rate], full_score, updates=upd_mean)

    self.get_weights = theano.function([], intermediate_weights[-1])
Project: Precise-CTC    Author: Michlong    | Project source | File source
def ctc_path_probability(scorematrix, queryseq, blank):
    """
    Compute path probability based on CTC algorithm, only forward pass is used.
    Batch not supported, for batch version, refer to the CTC class above
    Speed much slower than the numba & cython version (51.5min vs ~3.9min on word_correction_CTC experiment)
    :param scorematrix: (C+1, T), indexed as scorematrix[class, time] below
    :param queryseq:    (L, 1)
    :param blank:       scalar, blank symbol
    :return: (NLL, alphas), NLL > 0 (smaller is better, = -log(p(l|x)); alphas is the forward variable)
    """

    def update_s(s, alphas, scorematrix, queryseq, blank, t):
        l = (s - 1) // 2
        alphas = ifelse(tensor.eq(s % 2, 0),
                        ifelse(tensor.eq(s, 0),
                               tensor.set_subtensor(alphas[s, t], alphas[s, t - 1] * scorematrix[blank, t]),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1]) * scorematrix[blank, t]),
                               name='for_blank_symbol'),
                        ifelse(tensor.or_(tensor.eq(s, 1), tensor.eq(queryseq[l], queryseq[l - 1])),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1]) * scorematrix[
                                                        queryseq[l], t]),
                               tensor.set_subtensor(alphas[s, t],
                                                    (alphas[s, t - 1] + alphas[s - 1, t - 1] + alphas[s - 2, t - 1]) *
                                                    scorematrix[queryseq[l], t]),
                               name='for_same_label_twice'))
        return alphas

    def update_t(t, LLForward, alphas, scorematrix, queryseq, blank, T, L2):
        start = tensor.max([0, L2 - 2 * (T - t)])
        end = tensor.min([2 * t + 2, L2])
        s = tensor.arange(start, end)
        results, _ = theano.scan(fn=update_s, sequences=[s], non_sequences=[scorematrix, queryseq, blank, t],
                                 outputs_info=[alphas], name='scan_along_s')
        alphas = results[-1]
        c = tensor.sum(alphas[start:end, t])
        c = tensor.max([1e-15, c])
        alphas = tensor.set_subtensor(alphas[start:end, t], alphas[start:end, t] / c)
        LLForward += tensor.log(c)
        return LLForward, alphas

    L = queryseq.shape[0]                                                 # Length of label sequence
    L2 = 2 * L + 1                                                        # Length of label sequence padded with blanks
    T = scorematrix.shape[1]                                              # time length
    alphas = tensor.zeros((L2, T))
    # Initialize alphas and forward pass
    alphas = tensor.set_subtensor(alphas[[0, 1], 0], scorematrix[[blank, queryseq[0]], 0])
    c = tensor.sum(alphas[:, 0])
    alphas = tensor.set_subtensor(alphas[:, 0], alphas[:, 0] / c)
    LLForward = tensor.log(c)
    t = tensor.arange(1, T)
    results, _ = theano.scan(fn=update_t, sequences=[t], non_sequences=[scorematrix, queryseq, blank, T, L2],
                             outputs_info=[LLForward, alphas], name='scan_along_t')
    NLL, alphas = ifelse(tensor.gt(T, 1), (-results[0][-1], results[1][-1]), (-LLForward, alphas))
    return NLL, alphas