Python theano.tensor module: le() code examples

The following 17 code examples, collected from open-source Python projects, illustrate how to use theano.tensor.le().
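Before the project snippets, a minimal self-contained sketch of what tensor.le() does: it builds a symbolic elementwise "less than or equal" comparison that evaluates to an int8 tensor of 0s and 1s (the variable names here are illustrative, not taken from any of the projects below):

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
f = theano.function([x, y], T.le(x, y))

# elementwise x <= y, returned as int8 0/1 values
print(f([1.0, 2.0, 3.0], [2.0, 2.0, 2.0]))  # => [1 1 0]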

Project: gated_word_char_rlm    Author: nyu-dl    | Project source | File source
def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a gate layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string, layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """      
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),  
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)   
    return eval(activ)(X)
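The pretrain_mode switch above is the interesting use of tensor.le(): modes <= 1 select a single input stream, and anything else falls through to the gated mixture. Below is a minimal sketch of just that switch, with a fixed 0.5 gate standing in for the learned G (toy shapes; all names and values here are made up):

import numpy
import theano
from theano import tensor
from theano.ifelse import ifelse

pretrain_mode = theano.shared(numpy.float32(0.))  # 0. = word only
X_word = tensor.tensor3('X_word')
X_char = tensor.tensor3('X_char')

X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
           ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
           0.5 * X_word + 0.5 * X_char)  # stand-in for the learned gate G

f = theano.function([X_word, X_char], X)
w = numpy.ones((2, 3, 4), dtype=theano.config.floatX)
c = numpy.zeros((2, 3, 4), dtype=theano.config.floatX)
print(f(w, c).sum())  # 24.0: the word stream is selected while the mode is 0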
Project: gated_word_char_rlm    Author: nyu-dl    | Project source | File source
def concat_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """ 
    compute the forward pass for a concat layer

    Parameters
    ----------
    tparams        : OrderedDict of theano shared variables, {parameter name: value}
    X_word         : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char         : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options        : dictionary, {hyperparameter: value}
    prefix         : string, layer name
    pretrain_mode  : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ          : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X              : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)

    """
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               tensor.dot(tensor.concatenate([X_word, X_char], axis=2), tparams[p_name(prefix, 'W')]) + tparams[p_name(prefix, 'b')]) 
    return eval(activ)(X)
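For the third branch it is worth checking shapes: concatenating along the feature axis doubles the last dimension, and W maps it back down. A plain numpy sketch with toy dimensions (all sizes made up):

import numpy as np

steps, batch, d = 2, 3, 4
X_word = np.ones((steps, batch, d), dtype='float32')
X_char = np.ones((steps, batch, d), dtype='float32')
W = np.ones((2 * d, d), dtype='float32')  # maps the concatenated 2*d features back to d

out = np.concatenate([X_word, X_char], axis=2).dot(W)
print(out.shape)  # (2, 3, 4): same layout as either input stream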
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
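Outside the test harness you can watch this rewrite happen yourself: compile T.le(x, x) with the same optimization enabled and the graph collapses to a constant. A sketch using standard Theano debugging helpers:

import theano
import theano.tensor as T
from theano import config

x = T.scalar('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')
f = theano.function([x], T.le(x, x), mode=mode)

theano.printing.debugprint(f)  # the compiled graph is just the constant 1
print(f(3.0))                  # 1, regardless of the input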
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the GPU.
    """

    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:

        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Project: keras    Author: GeekLiB    | Project source | File source
def lesser_equal(x, y):
    return T.le(x, y)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def lesser_equal(x, y):
    return T.le(x, y)
Project: NNBuilder    Author: aeloyq    | Project source | File source
def le(self, l, r):
    return T.le(l, r)
Project: keras-customized    Author: ambrite    | Project source | File source
def lesser_equal(x, y):
    return T.le(x, y)
Project: keras    Author: NVIDIA    | Project source | File source
def lesser_equal(x, y):
    return T.le(x, y)
Project: nature_methods_multicut_pipeline    Author: ilastik    | Project source | File source
def sylu(gain=10, spread=0.1):
    return lambda x: switch(T.ge(x, (1 / spread)), gain, 0) + \
                     switch(T.and_(T.gt((1 / spread), x), T.gt(x, -(1 / spread))), gain * spread * x, 0) + \
                     switch(T.le(x, -(1 / spread)), -gain, 0)
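A quick numeric check of the three pieces, assuming sylu and the module's switch import are in scope: the unit saturates at -gain and +gain outside [-1/spread, 1/spread] and is linear with slope gain*spread inside:

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
f = theano.function([x], sylu(gain=10, spread=0.1)(x))

v = numpy.array([-20.0, 0.0, 5.0, 20.0], dtype=theano.config.floatX)
print(f(v))  # => [-10.   0.   5.  10.]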


Project: keras_superpixel_pooling    Author: parag2489    | Project source | File source
def less_equal(x, y):
    return T.le(x, y)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_shape_le_0(self):

        for dtype1 in ['float32', 'float64']:
            x = theano.tensor.matrix('x', dtype=dtype1)
            z0 = theano.tensor.switch(theano.tensor.le(x.shape[0], 0), 0, x.shape[0])
            f0 = theano.function([x], z0, mode=self.mode)
            assert isinstance(f0.maker.fgraph.toposort()[0].op, Shape_i)

            z1 = theano.tensor.switch(theano.tensor.le(x.shape[1], 0), 0, x.shape[1])
            f1 = theano.function([x], z1, mode=self.mode)
            assert isinstance(f1.maker.fgraph.toposort()[0].op, Shape_i)

            vx = numpy.random.randn(0,5).astype(dtype1)
            assert f0(vx) == 0
            assert f1(vx) == 5
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise(self):
        # float Ops
        mats = theano.tensor.matrices('cabxy')
        c, a, b, x, y = mats
        s1 = T.switch(c, a, b)
        s2 = T.switch(c, x, y)
        for op in (T.add, T.sub, T.mul, T.true_div, T.int_div, T.floor_div,
                   T.minimum, T.maximum, T.gt, T.lt, T.ge, T.le, T.eq, T.neq,
                   T.pow):
            g = optimize(FunctionGraph(mats, [op(s1, s2)]))
            assert str(g).count('Switch') == 1
        # integer Ops
        mats = theano.tensor.imatrices('cabxy')
        c, a, b, x, y = mats
        s1 = T.switch(c, a, b)
        s2 = T.switch(c, x, y)
        for op in (T.and_, T.or_, T.xor,
                   T.bitwise_and, T.bitwise_or, T.bitwise_xor):
            g = optimize(FunctionGraph(mats, [op(s1, s2)]))
            assert str(g).count('Switch') == 1
        # add/mul with more than two inputs
        u, v = theano.tensor.matrices('uv')
        s3 = T.switch(c, u, v)
        for op in (T.add, T.mul):
            g = optimize(FunctionGraph(mats + [u, v], [op(s1, s2, s3)]))
            assert str(g).count('Switch') == 1
Project: InnerOuterRNN    Author: Chemoinformatics    | Project source | File source
def lesser_equal(x, y):
    return T.le(x, y)
Project: odin_old    Author: trungnt13    | Project source | File source
def le(a, b):
    """a <= b"""
    return T.le(a, b)
Project: Theano-NN_Starter    Author: nightinwhite    | Project source | File source
def CTC_B(self, A):
        # CTC collapse function B: remove repeated labels, then remove
        # blanks (blank_num == 0). Scanning backwards keeps the indices of
        # unvisited elements stable while items are deleted in place, and
        # preserves repeats that were separated by a blank.
        blank_num = 0
        i = len(A) - 1
        while i != 0:
            j = i - 1
            if A[i] != blank_num and A[j] == A[i]:
                del A[i]   # repeated label
            elif A[i] == blank_num:
                del A[i]   # blank
            i -= 1
        if A[0] == blank_num:
            del A[0]
        return A
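As the snippet stands (a plain def with an unused self), the collapse can be exercised directly; two sanity checks with blank_num == 0:

print(CTC_B(None, [0, 1, 1, 0, 2, 2, 0]))  # => [1, 2]: repeats and blanks removed
print(CTC_B(None, [1, 0, 1]))              # => [1, 1]: repeats split by a blank survive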

    # def CTC_LOSS(self):
    #     T_ctc = self.output_shape[1]  # number of time steps
    #     L = self.Y.shape[1]*2+1  # length of the blank-extended label sequence (2*len(y)+1)
    #
    #     def each_loss(index,T_ctc,L):
    #         o = self.output[index]
    #         y = self.Y[index]
    #         blank_num = 0
    #         def extend_y(i,y):
    #             return T.switch(T.eq(i%2, 0), blank_num, y[(i-1)//2])
    #         y,_ = theano.scan(extend_y,sequences=[T.arange(L)],non_sequences = [y])
    #         # y is extended to length 2*len(y)+1 by interleaving blank_num
    #         temp_vector = T.zeros(self.output_shape[1]*2+1)
    #         alpha0 = T.concatenate([[o[0][y[0]]], [o[0][y[1]]], T.zeros_like(temp_vector[:L-2])],axis = 0)
    #         # alpha0: forward variables at the first time step
    #         def to_T(t,alpha_pre,o,y,T_ctc,L):  # forward recursion over time steps
    #             alpha_e = 1 + 2*t
    #             alpha_b = L - 2*T_ctc+2*t
    #             def set_alpha_value(i,alpha_t,alpha_pre,t,o,y):  # fill in each reachable alpha value at step t
    #                 iff = T.cast(0,dtype = "float32")
    #                 ift = (alpha_pre[i] + T.gt(i, 0) * alpha_pre[i - 1] + (T.gt(i, 1) * T.eq(i % 2, 1)) * alpha_pre[i - 2]) * o[t][y[i]]
    #                 ans = theano.ifelse.ifelse(T.eq(alpha_t[i],1),ift,iff)
    #                 return ans
    #
    #             temp_vector = T.zeros(self.output_shape[1]*2+1)
    #             alpha_v = T.ones_like(temp_vector[:(T.switch(T.gt(alpha_e, L - 1), L - 1, alpha_e) - T.switch(T.gt(alpha_b, 0), alpha_b, 0))+1])
    #             alpha_t = theano.ifelse.ifelse(T.gt(alpha_b, 0), T.concatenate([T.zeros_like(temp_vector[:alpha_b]), alpha_v]), alpha_v)
    #             alpha_t = theano.ifelse.ifelse(T.ge(alpha_e, L - 1), alpha_t, T.concatenate([alpha_t,T.zeros_like(temp_vector[:L-1-alpha_e])]))
    #             alpha_t = theano.scan(set_alpha_value,
    #                                   sequences=[T.arange(alpha_t.shape[0])],
    #                                   non_sequences=[alpha_t,alpha_pre,t,o,y])
    #             return alpha_t
    #         alphas,_ = theano.scan(to_T,sequences=[T.arange(1,T_ctc)],
    #                                outputs_info = [alpha0],
    #                                non_sequences = [o,y,T_ctc,L])
    #         loss = alphas[-1][-1]+alphas[-1][-2]
    #         loss = T.switch(T.le(loss, 1e-40), 1e-40, loss)
    #         loss = -T.log(loss)
    #         return loss
    #
    #     CTC_LOSSs,_ = theano.scan(each_loss,
    #                               sequences=[T.arange(self.output_shape[0])],
    #                               non_sequences = [T_ctc,L])
    #     self.ctc_loss = theano.function([self.X,self.Y],CTC_LOSSs)
    #     return CTC_LOSSs
Project: Theano-NN_Starter    Author: nightinwhite    | Project source | File source
def CTC_LOSS(self):
        outpts = self.output
        inpts = self.Y
        def each_loss(outpt, inpt):
            # inpt: label sequence (padded); blank has index 26
            blank = 26
            y_nblank = T.neq(inpt, blank)
            n = T.dot(y_nblank, y_nblank)  # number of non-blank labels
            N = 2 * n + 1  # length of the label sequence with blanks interleaved
            labels = inpt[:N]
            labels2 = T.concatenate((labels, [blank, blank]))
            sec_diag = T.neq(labels2[:-2], labels2[2:]) * T.eq(labels2[1:-1], blank)
            recurrence_relation = \
                T.eye(N) + \
                T.eye(N, k=1) + \
                T.eye(N, k=2) * sec_diag.dimshuffle((0, 'x'))

            pred_y = outpt[:, labels]

            fwd_pbblts, _ = theano.scan(
                lambda curr, accum: T.switch(T.eq(curr*T.dot(accum, recurrence_relation), 0.0),
                                             T.dot(accum, recurrence_relation)
                                             , curr*T.dot(accum, recurrence_relation)),
                sequences=[pred_y],
                outputs_info=[T.eye(N)[0]]
            )
            #return fwd_pbblts
            #likelihood = fwd_pbblts[0, 0]
            likelihood = fwd_pbblts[-1, -1] + fwd_pbblts[-1, -2]
            #likelihood = T.switch(T.lt(likelihood, 1e-35), 1e-35, likelihood)
            #loss = -T.log(T.cast(likelihood, "float32"))
            #loss = 10 * (likelihood - 1) * (likelihood - 100)
            loss = (T.le(likelihood, 1.0) * (10 * (likelihood - 1) * (likelihood - 100))) \
                + (T.gt(likelihood, 1.0) * (-T.log(T.cast(likelihood, "float32"))))
            return loss
            #return pred_y

        ctc_losss, _ = theano.scan(each_loss,
                                   sequences=[outpts, inpts],
                                   )
        self.ctc_loss = theano.function([self.X, self.Y], ctc_losss)

        return ctc_losss
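The heart of this loss is recurrence_relation: entry (i, j) is 1 when CTC state j is reachable from state i in one time step (stay, advance by one, or skip a blank that sits between two different labels). A plain numpy sketch for the blank-extended label sequence [blank, 7, blank, 9, blank] with blank = 26 (toy values, not taken from the class above):

import numpy as np

blank = 26
labels = np.array([26, 7, 26, 9, 26])  # blank-extended labels, N = 5
N = len(labels)
labels2 = np.concatenate((labels, [blank, blank]))
sec_diag = (labels2[:-2] != labels2[2:]) * (labels2[1:-1] == blank)

rr = np.eye(N) + np.eye(N, k=1) + np.eye(N, k=2) * sec_diag[:, None]
print(rr.astype(int))
# [[1 1 0 0 0]     the only skip allowed is state 1 -> state 3,
#  [0 1 1 1 0]     i.e. from label 7 over the blank to label 9,
#  [0 0 1 1 0]     because the two labels differ
#  [0 0 0 1 1]
#  [0 0 0 0 1]]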