Python theano.tensor module: sin() code examples

The following 29 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.sin().
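
Before the project excerpts, here is a minimal, self-contained sketch of the basic pattern they all share: build a symbolic expression with theano.tensor.sin() and compile it with theano.function(). The variable names are illustrative, not taken from any project below.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')                    # symbolic double-precision vector
y = T.sin(x)                          # elementwise sine, built symbolically
f = theano.function([x], y)           # compile the graph into a callable

print(f(np.array([0.0, np.pi / 2])))  # -> approx. [0. 1.]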

Project: EUNN-theano | Author: iguanaus
def times_diag(input, n_hidden, diag, swap_re_im):
    # input is an I x 2n_hidden matrix, where I is the number
    # of training examples
    # diag is an n_hidden-dimensional real vector, which creates
    # the 2n_hidden x 2n_hidden complex diagonal matrix using
    # e^{j*diag} = cos(diag) + j*sin(diag)
    d = T.concatenate([diag, -diag])  # d is 2n_hidden-dimensional

    Re = T.cos(d).dimshuffle('x',0)
    Im = T.sin(d).dimshuffle('x',0)

    input_times_Re = input * Re
    input_times_Im = input * Im

    output = input_times_Re + input_times_Im[:, swap_re_im]

    return output
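
The real-composite trick described in the comments can be checked outside Theano. Below is a NumPy sketch (not from the project; it assumes swap_re_im is the index permutation that swaps the real and imaginary halves, as in the uRNN codebases) verifying that the computation matches a true complex multiplication by e^{j*diag}:

import numpy as np

n_hidden = 3
rng = np.random.RandomState(0)
theta = rng.uniform(-np.pi, np.pi, n_hidden)   # plays the role of 'diag'
z = rng.randn(2, n_hidden) + 1j * rng.randn(2, n_hidden)

# real-composite layout used above: [Re(z) | Im(z)]
x = np.concatenate([z.real, z.imag], axis=1)
swap_re_im = np.concatenate([np.arange(n_hidden, 2 * n_hidden),
                             np.arange(n_hidden)])

d = np.concatenate([theta, -theta])
out = x * np.cos(d) + (x * np.sin(d))[:, swap_re_im]

expected = z * np.exp(1j * theta)              # true complex product
assert np.allclose(out[:, :n_hidden], expected.real)
assert np.allclose(out[:, n_hidden:], expected.imag)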
Project: Theano-Deep-learning | Author: GeekLiB
def test_no_leak_many_graphs():
        # Verify no memory leaks when creating and deleting a lot of functions

        # This isn't really a unit test; you have to run it and watch top to
        # see if there's a leak
        for i in xrange(10000):
            x = tensor.vector()
            z = x
            for d in range(10):
                z = tensor.sin(-z + 1)

            f = function([x], z, mode=Mode(optimizer=None, linker='cvm'))
            if not i % 100:
                print(gc.collect())
            sys.stdout.flush()

            gc.collect()
            if 1:
                f([2.0])
                f([3.0])
                f([4.0])
                f([5.0])
Project: urnn | Author: stwisdom
def times_diag(input, n_hidden, diag, swap_re_im):
    # input is an I x 2n_hidden matrix, where I is the number
    # of training examples
    # diag is an n_hidden-dimensional real vector, which creates
    # the 2n_hidden x 2n_hidden complex diagonal matrix using
    # e^{j*diag} = cos(diag) + j*sin(diag)
    d = T.concatenate([diag, -diag])  # d is 2n_hidden-dimensional

    Re = T.cos(d).dimshuffle('x',0)
    Im = T.sin(d).dimshuffle('x',0)

    input_times_Re = input * Re
    input_times_Im = input * Im

    output = input_times_Re + input_times_Im[:, swap_re_im]

    return output
Project: keras | Author: GeekLiB
def sin(x):
    return T.sin(x)
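
This one-liner is Keras backend plumbing: keras.backend dispatches sin to whichever backend is active. A hedged usage sketch, assuming a Theano-backed Keras install where K.placeholder, K.function and K.sin are the public backend API:

import numpy as np
from keras import backend as K

x = K.placeholder(shape=(None,))           # symbolic input
f = K.function([x], [K.sin(x)])            # compiles via the active backend
print(f([np.array([0.0, np.pi / 2])])[0])  # -> approx. [0. 1.]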
Project: sesame-paste-noodle | Author: aissehust
def forward(self, inputtensor):
        inputimage = inputtensor[0]
        return (T.sin(inputimage),)
Project: sesame-paste-noodle | Author: aissehust
def forward(self, inputtensor):
        inputimage = inputtensor[0]
        return (T.concatenate([T.sin(inputimage), T.cos(inputimage)], axis=1),)
Project: sesame-paste-noodle | Author: aissehust
def forward(self, inputtensor):
        inputimage = inputtensor[0]
        return (T.sin(self.a[0]*inputimage),)
Project: keraflow | Author: ipod825
def sin(self, x):
        return T.sin(x)
Project: deep-learning-keras-projects | Author: jasmeetsb
def sin(x):
    return T.sin(x)
Project: lddmm-ot | Author: jeanfeydy
def _dirac_truncated_rfft(self, point) :
        """
        Returns the truncated real FFT of a Dirac at position 'point',
        as a (2+1)-d array of size "K.shape//2+1" + (4,).
        See real_fft._irfft_2d to understand the format of the output.
        The code may seem quite convoluted, but hey, it's not my fault
        that Theano forces us to use real-valued FFTs...
        """
        su, di = self._phase_shifts(point)
        re_re = T.cos(di) + T.cos(su) # 2 cos(a)cos(b) = cos(a-b) + cos(a+b)
        re_im = T.sin(su) + T.sin(di) # 2 sin(a)cos(b) = sin(a+b) + sin(a-b)
        im_re = T.sin(su) - T.sin(di) # 2 cos(a)sin(b) = sin(a+b) - sin(a-b)
        im_im = T.cos(di) - T.cos(su) # 2 sin(a)sin(b) = cos(a-b) - cos(a+b)
        return .5 * T.stack([re_re, re_im, im_re, im_im], axis=2) # Don't forget the .5 !
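
The .5 factor undoes the doubling in the product-to-sum identities the comments rely on. A quick NumPy check of all four (here a and b stand for the two phase components; that su and di are their sum and difference is an assumption about what _phase_shifts returns):

import numpy as np

a, b = 0.7, -1.3                     # arbitrary phases
su, di = a + b, a - b                # 'sum' and 'difference' phase shifts
assert np.isclose(np.cos(di) + np.cos(su), 2 * np.cos(a) * np.cos(b))
assert np.isclose(np.sin(su) + np.sin(di), 2 * np.sin(a) * np.cos(b))
assert np.isclose(np.sin(su) - np.sin(di), 2 * np.cos(a) * np.sin(b))
assert np.isclose(np.cos(di) - np.cos(su), 2 * np.sin(a) * np.sin(b))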
Project: opt-mmd | Author: dougalsutherland
def get_output_for(self, input, **kwargs):
        X = input / T.exp(self.log_sigma)
        f = T.exp(-.5 * T.sum(X ** 2, axis=1))[:, np.newaxis]
        angles = T.dot(X, self.freqs.T)
        return T.concatenate([T.sin(angles) * f, T.cos(angles) * f], axis=1)
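
This layer computes random Fourier features: projecting onto random frequencies and taking sin/cos of the angles yields features whose inner products approximate a shift-invariant kernel. The same map in plain NumPy, where freqs and log_sigma are hypothetical stand-ins for the layer's parameters:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(5, 3)
freqs = rng.randn(16, 3)                 # one random frequency per row
log_sigma = 0.0                          # bandwidth parameter

Xs = X / np.exp(log_sigma)
f = np.exp(-.5 * np.sum(Xs ** 2, axis=1))[:, np.newaxis]
angles = Xs.dot(freqs.T)
feats = np.concatenate([np.sin(angles) * f, np.cos(angles) * f], axis=1)
print(feats.shape)                       # (5, 32)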
Project: bbho | Author: DarkElement75
def __init__(self, lengthscale, v):
        covariance_function.__init__(self, lengthscale, v)
        self.f = theano.function([x1, x2], 
                    T.exp(-1.0 * T.sin(5.0 * np.pi * T.sum(T.sqr(x1-x2)))),
                allow_input_downcast=True)
Project: keras-customized | Author: ambrite
def sin(x):
    return T.sin(x)
Project: theanomodels | Author: clinicalml
def nlinear_trans(z, fxn_params = {}, ns=None): 
    return 2*T.sin(z)+z
Project: theanomodels | Author: clinicalml
def nlinear_trans_learn(z, fxn_params = {}, ns = None):
    assert z.ndim == 3,'expecting 3d'
    z_1 = z[:,:,[0]]
    z_2 = z[:,:,[1]]
    f_1 = 0.2*z_1+T.tanh(fxn_params['alpha']*z_2)
    f_2 = 0.2*z_2+T.sin(fxn_params['beta']*z_1)
    return T.concatenate([f_1,f_2],axis=2)
Project: Theano-NN_Starter | Author: nightinwhite
def get_output(self):
        if len(self.input_shape) == 2:
            output, _ = theano.scan(self.CTC_reshape, sequences=[T.sin(self.input)])#self.input->(batch_size, T_len)
        else:
            output, _ = theano.scan(lambda x: T.nnet.softmax(x), sequences=[self.input])
            #output, _ = theano.scan(lambda x, w, c: T.nnet.softmax(T.dot(x, w)+c), sequences=[self.input],
                                 # non_sequences=[self.S_W, self.S_C])
            #output = T.nnet.softmax(T.dot(self.input, self.S_W)+self.S_C)#self.input->(batch_size, T_len, hidden_dim)
        return output
Project: taxi | Author: xuguanggen
def hdist(y_pred,y_true):
    pred_lon = y_pred[:,0] * deg2rad
    pred_lat = y_pred[:,1] * deg2rad
    true_lon = y_true[:,0] * deg2rad
    true_lat = y_true[:,1] * deg2rad
    dlon = abs(pred_lon - true_lon)
    dlat = abs(pred_lat - true_lat)
    a1 = T.sin(dlat/2)**2 + T.cos(pred_lat) * T.cos(true_lat) * (T.sin(dlon/2)**2)  # haversine: sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
    d = T.arctan2(T.sqrt(a1),T.sqrt(const(1)-a1))
    #d = T.arcsin(T.sqrt(a1))
    hd = const(2000) * rearth * d
    #return T.switch(T.eq(hd,float('nan')),(y_pred - y_true).norm(2,axis=1),hd)
    return hd
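
const and rearth come from elsewhere in the project; assuming rearth is the Earth radius in kilometres, the haversine formula above can be sanity-checked in NumPy on a known pair of cities:

import numpy as np

deg2rad = np.pi / 180.0
rearth = 6371.0                          # Earth radius in km (assumed)
# London (-0.13, 51.51) to Paris (2.35, 48.85), (lon, lat) in degrees
lon1, lat1 = -0.13 * deg2rad, 51.51 * deg2rad
lon2, lat2 = 2.35 * deg2rad, 48.85 * deg2rad
dlon, dlat = abs(lon1 - lon2), abs(lat1 - lat2)
a1 = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
d = 2 * rearth * np.arctan2(np.sqrt(a1), np.sqrt(1 - a1))
print(d)                                 # roughly 344 km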
Project: keras | Author: NVIDIA
def sin(x):
    return T.sin(x)
Project: keras_superpixel_pooling | Author: parag2489
def sin(x):
    return T.sin(x)
Project: Theano-Deep-learning | Author: GeekLiB
def sin(x):
    """
    Elemwise sine of `x`.

    """
    # see decorator for function body
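
The empty body is intentional: sin is generated by an elemwise-building decorator, which is also what gives Theano its symbolic derivative (cos). A minimal sketch:

import theano
import theano.tensor as T

x = T.dscalar('x')
g = T.grad(T.sin(x), x)        # symbolically differentiates to cos(x)
fg = theano.function([x], g)
print(fg(0.0))                 # -> 1.0, i.e. cos(0)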
Project: Theano-Deep-learning | Author: GeekLiB
def test_fusion_35inputs(self):
        # Make sure a fused graph with more than 35 inputs does not segfault
        # or error.
        inpts = vectors(['i%i' % i for i in xrange(35)])
        # Make an elemwise graph looking like:
        # sin(i34 + sin(i33 + sin(... i1 + sin(i0) ...)))
        out = tensor.sin(inpts[0])
        for idx in xrange(1, 35):
            out = tensor.sin(inpts[idx] + out)

        f = function(inpts, out)
        # Test it on some dummy values
        f(*[list(range(i, 4 + i)) for i in xrange(35)])
Project: Theano-Deep-learning | Author: GeekLiB
def test_no_leak_many_call_nonlazy():
        # Verify no memory leaks when calling a function a lot of times

        # This isn't really a unit test; you have to run it and watch top to
        # see if there's a leak.

        def build_graph(x, depth=5):
            z = x
            for d in range(depth):
                z = tensor.sin(-z + 1)
            return z

        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a,
                           mode=Mode(optimizer=None,
                                     linker=linker()))
            inp = numpy.random.rand(1000000)
            for i in xrange(500):
                f_a(inp)
        print(1)
        time_linker('vmLinker_C',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
        print(2)
        time_linker('vmLinker',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
Project: Theano-Deep-learning | Author: GeekLiB
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)

    c = tensor.join(0, a_prime, b_prime)

    d = c[:-1]

    f = theano.function([], d, mode=mode_with_gpu)

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)

    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]

    assert numpy.allclose(numpy.asarray(f()), concat)
Project: InnerOuterRNN | Author: Chemoinformatics
def sin(x):
    return T.sin(x)
Project: State-Frequency-Memory-Recurrent-Neural-Networks | Author: hhkunming
def SFM(tparams, x, omega, opts):
    nsteps = x.shape[0]

    def _recurrence(x_, t_, Re_s_, Im_s_, z_):
        f_ste = T.nnet.sigmoid(T.dot(tparams['W_ste'], z_)+T.dot(tparams['V_ste'], x_)+tparams['b_ste'])
        f_fre = T.nnet.sigmoid(T.dot(tparams['W_fre'], z_)+T.dot(tparams['V_fre'], x_)+tparams['b_fre'])
        f = T.outer(f_ste, f_fre)

        g = T.nnet.sigmoid(T.dot(tparams['W_g'], z_)+T.dot(tparams['V_g'], x_)+tparams['b_g'])
        i = T.tanh(T.dot(tparams['W_i'], z_)+T.dot(tparams['V_i'], x_)+tparams['b_i'])

        Re_s = f*Re_s_+T.outer(g*i, T.cos(omega*t_))
        Im_s = f*Im_s_+T.outer(g*i, T.sin(omega*t_))

        A = T.sqrt(Re_s**2+Im_s**2)

        def __feq(U_o, W_o, V_o, b_o, W_z, b_z, A_k, z_k):
            o = T.nnet.sigmoid(T.dot(U_o, A_k)+T.dot(W_o, z_)+T.dot(V_o, x_)+b_o)
            zz = z_k+o*T.tanh(T.dot(W_z, A_k)+b_z)
            return zz

        res, upd = theano.scan(__feq, sequences=[tparams['U_o'], tparams['W_o'], tparams['V_o'], tparams['b_o'], tparams['W_z'], tparams['b_z'], A.transpose()],
                                        outputs_info=[T.zeros_like(z_)], name='__feq', n_steps=omega.shape[0])
        return Re_s, Im_s, res[-1]

    rval, updates = theano.scan(_recurrence,
                                    sequences=[x, (T.arange(nsteps)+1)/nsteps],
                                    outputs_info=[T.zeros((opts['dim'], opts['dim_feq'])), T.zeros((opts['dim'], opts['dim_feq'])), T.zeros((opts['dim_pitch'],))],
                                    name='MFO_SFM',
                                    n_steps=nsteps)
    return rval[2]
Project: State-Frequency-Memory-Recurrent-Neural-Networks | Author: hhkunming
def Adaptive_SFM(tparams, x, omega, opts):
    nsteps = x.shape[0]

    def _recurrence(x_, t_, omg_, Re_s_, Im_s_, z_):
        f_ste = T.nnet.sigmoid(T.dot(tparams['W_ste'], z_)+T.dot(tparams['V_ste'], x_)+tparams['b_ste'])
        f_fre = T.nnet.sigmoid(T.dot(tparams['W_fre'], z_)+T.dot(tparams['V_fre'], x_)+tparams['b_fre'])
        f = T.outer(f_ste, f_fre)

        g = T.nnet.sigmoid(T.dot(tparams['W_g'], z_)+T.dot(tparams['V_g'], x_)+tparams['b_g'])
        i = T.tanh(T.dot(tparams['W_i'], z_)+T.dot(tparams['V_i'], x_)+tparams['b_i'])

        omg = T.dot(tparams['W_omg'], z_)+T.dot(tparams['V_omg'], x_)+tparams['b_omg']

        Re_s = f*Re_s_+T.outer(g*i, T.cos(omg_*t_))
        Im_s = f*Im_s_+T.outer(g*i, T.sin(omg_*t_))

        A = T.sqrt(Re_s**2+Im_s**2)

        def __feq(U_o, W_o, V_o, b_o, W_z, b_z, A_k, z_k):
            o = T.nnet.sigmoid(T.dot(U_o, A_k)+T.dot(W_o, z_)+T.dot(V_o, x_)+b_o)
            zz = z_k+o*T.tanh(T.dot(W_z, A_k)+b_z)
            return zz

        res, upd = theano.scan(__feq, sequences=[tparams['U_o'], tparams['W_o'], tparams['V_o'], tparams['b_o'], tparams['W_z'], tparams['b_z'], A.transpose()],
                                        outputs_info=[T.zeros_like(z_)], name='__feq', n_steps=omega.shape[0])
        return omg, Re_s, Im_s, res[-1]

    rval, updates = theano.scan(_recurrence,
                                    sequences=[x, (T.arange(nsteps)+1)/nsteps],
                                    outputs_info=[T.ones(omega.shape)*omega, T.zeros((opts['dim'], opts['dim_feq'])), T.zeros((opts['dim'], opts['dim_feq'])), T.zeros((opts['dim_pitch'],))],
                                    name='MFO_SFM',
                                    n_steps=nsteps)
    return rval[3]
Project: State-Frequency-Memory-stock-prediction | Author: z331565360
def step(self, x, states):
        p_tm1 = states[0]
        h_tm1 = states[1]
        S_re_tm1 = states[2]
        S_im_tm1 = states[3]
        time_tm1 = states[4]
        B_U = states[5]
        B_W = states[6]
        frequency = states[7]

        x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
        x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
        x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
        x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
        x_o = K.dot(x * B_W[0], self.W_o) + self.b_o

        i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))

        ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
        fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))

        ste = K.reshape(ste, (-1, self.hidden_dim, 1))
        fre = K.reshape(fre, (-1, 1, self.freq_dim))
        f = ste * fre

        c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))

        time = time_tm1 + 1

        omega = K.cast_to_floatx(2*np.pi)* time * frequency
        re = T.cos(omega)
        im = T.sin(omega)

        c = K.reshape(c, (-1, self.hidden_dim, 1))

        S_re = f * S_re_tm1 + c * re
        S_im = f * S_im_tm1 + c * im

        A = K.square(S_re) + K.square(S_im)

        A = K.reshape(A, (-1, self.freq_dim))
        A_a = K.dot(A * B_U[0], self.U_a)
        A_a = K.reshape(A_a, (-1, self.hidden_dim))
        a = self.activation(A_a + self.b_a)

        o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))

        h = o * a
        p = K.dot(h, self.W_p) + self.b_p

        return p, [p, h, S_re, S_im, time]
Project: State-Frequency-Memory-stock-prediction | Author: z331565360
def step(self, x, states):
        p_tm1 = states[0]
        h_tm1 = states[1]
        S_re_tm1 = states[2]
        S_im_tm1 = states[3]
        time_tm1 = states[4]
        B_U = states[5]
        B_W = states[6]
        frequency = states[7]

        x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
        x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
        x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
        x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
        x_o = K.dot(x * B_W[0], self.W_o) + self.b_o

        i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))

        ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
        fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))

        ste = K.reshape(ste, (-1, self.hidden_dim, 1))
        fre = K.reshape(fre, (-1, 1, self.freq_dim))
        f = ste * fre

        c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))

        time = time_tm1 + 1

        omega = K.cast_to_floatx(2*np.pi)* time * frequency
        re = T.cos(omega)
        im = T.sin(omega)

        c = K.reshape(c, (-1, self.hidden_dim, 1))

        S_re = f * S_re_tm1 + c * re
        S_im = f * S_im_tm1 + c * im

        A = K.square(S_re) + K.square(S_im)

        A = K.reshape(A, (-1, self.freq_dim))
        A_a = K.dot(A * B_U[0], self.U_a)
        A_a = K.reshape(A_a, (-1, self.hidden_dim))
        a = self.activation(A_a + self.b_a)

        o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))

        h = o * a
        p = K.dot(h, self.W_p) + self.b_p

        return p, [p, h, S_re, S_im, time]
Project: SCFGP | Author: MaxInGaussian
def build_theano_models(self, algo, algo_params):
        epsilon = 1e-6
        kl = lambda mu, sig: sig+mu**2-TT.log(sig)
        X, y = TT.dmatrices('X', 'y')
        params = TT.dvector('params')
        a, b, c, l_F, F, l_FC, FC = self.unpack_params(params)
        sig2_n, sig_f = TT.exp(2*a), TT.exp(b)
        l_FF = TT.dot(X, l_F)+l_FC
        FF = TT.concatenate((l_FF, TT.dot(X, F)+FC), 1)
        Phi = TT.concatenate((TT.cos(FF), TT.sin(FF)), 1)
        Phi = sig_f*TT.sqrt(2./self.M)*Phi
        noise = TT.log(1+TT.exp(c))
        PhiTPhi = TT.dot(Phi.T, Phi)
        A = PhiTPhi+(sig2_n+epsilon)*TT.identity_like(PhiTPhi)
        L = Tlin.cholesky(A)
        Li = Tlin.matrix_inverse(L)
        PhiTy = Phi.T.dot(y)
        beta = TT.dot(Li, PhiTy)
        alpha = TT.dot(Li.T, beta)
        mu_f = TT.dot(Phi, alpha)
        var_f = (TT.dot(Phi, Li.T)**2).sum(1)[:, None]
        dsp = noise*(var_f+1)
        mu_l = TT.sum(TT.mean(l_F, axis=1))
        sig_l = TT.sum(TT.std(l_F, axis=1))
        mu_w = TT.sum(TT.mean(F, axis=1))
        sig_w = TT.sum(TT.std(F, axis=1))
        hermgauss = np.polynomial.hermite.hermgauss(30)
        herm_x = Ts(hermgauss[0])[None, None, :]
        herm_w = Ts(hermgauss[1]/np.sqrt(np.pi))[None, None, :]
        herm_f = TT.sqrt(2*var_f[:, :, None])*herm_x+mu_f[:, :, None]
        nlk = (0.5*herm_f**2.-y[:, :, None]*herm_f)/dsp[:, :, None]+0.5*(
            TT.log(2*np.pi*dsp[:, :, None])+y[:, :, None]**2/dsp[:, :, None])
        enll = herm_w*nlk
        nlml = 2*TT.log(TT.diagonal(L)).sum()+2*enll.sum()+1./sig2_n*(
            (y**2).sum()-(beta**2).sum())+2*(X.shape[0]-self.M)*a
        penalty = (kl(mu_w, sig_w)*self.M+kl(mu_l, sig_l)*self.S)/(self.S+self.M)
        cost = (nlml+penalty)/X.shape[0]
        grads = TT.grad(cost, params)
        updates = getattr(OPT, algo)(self.params, grads, **algo_params)
        updates = getattr(OPT, 'apply_nesterov_momentum')(updates, momentum=0.9)
        train_inputs = [X, y]
        train_outputs = [cost, alpha, Li]
        self.train_func = Tf(train_inputs, train_outputs,
            givens=[(params, self.params)])
        self.train_iter_func = Tf(train_inputs, train_outputs,
            givens=[(params, self.params)], updates=updates)
        Xs, Li, alpha = TT.dmatrices('Xs', 'Li', 'alpha')
        l_FFs = TT.dot(Xs, l_F)+l_FC
        FFs = TT.concatenate((l_FFs, TT.dot(Xs, F)+FC), 1)
        Phis = TT.concatenate((TT.cos(FFs), TT.sin(FFs)), 1)
        Phis = sig_f*TT.sqrt(2./self.M)*Phis
        mu_pred = TT.dot(Phis, alpha)
        std_pred = (noise*(1+(TT.dot(Phis, Li.T)**2).sum(1)))**0.5
        pred_inputs = [Xs, alpha, Li]
        pred_outputs = [mu_pred, std_pred]
        self.pred_func = Tf(pred_inputs, pred_outputs,
            givens=[(params, self.params)])