Python theano.tensor module — identity_like() example source code

The following 5 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.identity_like().

Project: dcnn    Author: jcatw    | project source | file source
def get_output_for(self, inputs, **kwargs):
        """
        Compute the diffusion convolution of the inputs and aggregate
        it into a single graph-level feature row.
        """

        adjacency, features = inputs[0], inputs[1]

        # Degree-normalize the adjacency matrix (the +1.0 keeps the
        # division well-defined when a column degree is zero).
        adjacency = adjacency / (T.sum(adjacency, 0) + 1.0)

        # Accumulate the hop powers [I, A, A^2, ..., A^num_hops].
        current = T.identity_like(adjacency)
        hop_powers = [current]
        for _ in range(self.parameters.num_hops):
            current = adjacency.dot(current)
            hop_powers.append(current)
        stacked_powers = T.stack(hop_powers)

        # Diffuse the node features across hops, then apply the weights.
        diffused = T.dot(stacked_powers, features)
        weighted = diffused * self.W

        # Average over the node axis and flatten hops x features into
        # one row for the whole graph.
        return T.reshape(
            self.nonlinearity(T.mean(weighted, 1)),
            (1, (self.parameters.num_hops + 1) * self.num_features)
        )
Project: dcnn    Author: jcatw    | project source | file source
def get_output_for(self, inputs, **kwargs):
        """
        Compute the diffusion convolution of the inputs, producing one
        flattened hops-by-features row per node.
        """

        adjacency, features = inputs[0], inputs[1]

        # Degree-normalize the adjacency matrix (the +1.0 keeps the
        # division well-defined when a column degree is zero).
        adjacency = adjacency / (T.sum(adjacency, 0) + 1.0)

        # Accumulate the hop powers [I, A, A^2, ..., A^num_hops].
        current = T.identity_like(adjacency)
        hop_powers = [current]
        for _ in range(self.parameters.num_hops):
            current = adjacency.dot(current)
            hop_powers.append(current)
        stacked_powers = T.stack(hop_powers)

        # Diffuse the node features across hops, then apply the weights.
        diffused = T.dot(stacked_powers, features)
        weighted = diffused * self.W

        # Put the node axis first, then flatten hops x features so each
        # node gets one output row.
        activated = self.nonlinearity(weighted)
        return T.reshape(
            activated.transpose((1, 0, 2)),
            (adjacency.shape[0], (self.parameters.num_hops + 1) * self.num_features)
        )
Project: dcnn    Author: jcatw    | project source | file source
def get_output_for(self, inputs, **kwargs):
        """
        Compute the diffusion convolution of the inputs and reduce it to
        one value per hop by averaging over nodes and then features.
        """

        adjacency, features = inputs[0], inputs[1]

        # Degree-normalize the adjacency matrix (the +1.0 keeps the
        # division well-defined when a column degree is zero).
        adjacency = adjacency / (T.sum(adjacency, 0) + 1.0)

        # Accumulate the hop powers [I, A, A^2, ..., A^num_hops].
        current = T.identity_like(adjacency)
        hop_powers = [current]
        for _ in range(self.parameters.num_hops):
            current = adjacency.dot(current)
            hop_powers.append(current)
        stacked_powers = T.stack(hop_powers)

        # Diffuse the node features across hops, then apply the weights.
        diffused = T.dot(stacked_powers, features)
        weighted = diffused * self.W

        # Mean over nodes, regroup as (1, hops, features), then mean
        # over the feature axis before the nonlinearity.
        per_hop = T.reshape(
            T.mean(weighted, 1),
            (1, (self.parameters.num_hops + 1), self.num_features)
        )
        return self.nonlinearity(T.mean(per_hop, 2))
Project: dcnn    Author: jcatw    | project source | file source
def get_output_for(self, incoming):
        """
        Build the stacked power series of the (degree-normalized)
        adjacency matrix, with the node axis moved to the front.
        """
        # Degree-normalize the adjacency matrix.
        # TODO: instead of adding 1.0, set 0.0 to 1.0
        normalized = incoming / (T.sum(incoming, 0) + 1.0)

        # Accumulate the hop powers [I, A, A^2, ..., A^num_hops].
        current = T.identity_like(normalized)
        hop_powers = [current]
        for _ in range(self.parameters.num_hops):
            current = normalized.dot(current)
            hop_powers.append(current)

        # (hops, nodes, nodes) -> (nodes, hops, nodes)
        return T.stack(hop_powers).dimshuffle([1, 0, 2])
Project: SCFGP    Author: MaxInGaussian    | project source | file source
def build_theano_models(self, algo: str, algo_params: dict):
        """
        Build and compile the Theano training and prediction functions.

        Constructs the symbolic graph for what appears to be a
        sparse-spectrum / Fourier-feature Gaussian-process-style model
        (NOTE(review): inferred from the structure — cos/sin feature map,
        Cholesky solve, Gauss-Hermite likelihood quadrature; confirm
        against the SCFGP project docs). Compiles three functions onto
        self: `train_func` (evaluate only), `train_iter_func` (evaluate
        and apply optimizer updates), and `pred_func`.

        Parameters
        ----------
        algo : str
            Name of an optimizer routine looked up on `OPT` via getattr.
        algo_params : dict
            Keyword arguments forwarded to that optimizer routine.
        """
        epsilon = 1e-6  # jitter added to the Gram matrix before Cholesky
        # KL-style penalty on a distribution's (mean, spread) moments.
        kl = lambda mu, sig: sig+mu**2-TT.log(sig)
        X, y = TT.dmatrices('X', 'y')
        params = TT.dvector('params')
        # Unpack the flat parameter vector into named pieces
        # (semantics defined by self.unpack_params, elsewhere in the file).
        a, b, c, l_F, F, l_FC, FC = self.unpack_params(params)
        # Noise variance and signal amplitude, parameterized on log scale.
        sig2_n, sig_f = TT.exp(2*a), TT.exp(b)
        l_FF = TT.dot(X, l_F)+l_FC
        FF = TT.concatenate((l_FF, TT.dot(X, F)+FC), 1)
        # Fourier feature map [cos(FF), sin(FF)], scaled by sqrt(2/M).
        Phi = TT.concatenate((TT.cos(FF), TT.sin(FF)), 1)
        Phi = sig_f*TT.sqrt(2./self.M)*Phi
        noise = TT.log(1+TT.exp(c))  # softplus keeps the noise term positive
        PhiTPhi = TT.dot(Phi.T, Phi)
        # A = Phi^T Phi + (sig2_n + epsilon) I
        A = PhiTPhi+(sig2_n+epsilon)*TT.identity_like(PhiTPhi)
        L = Tlin.cholesky(A)
        Li = Tlin.matrix_inverse(L)
        PhiTy = Phi.T.dot(y)
        # Solve for the feature-space weights: alpha = A^{-1} Phi^T y.
        beta = TT.dot(Li, PhiTy)
        alpha = TT.dot(Li.T, beta)
        mu_f = TT.dot(Phi, alpha)  # model mean at the training inputs
        var_f = (TT.dot(Phi, Li.T)**2).sum(1)[:, None]  # per-point variance term
        dsp = noise*(var_f+1)  # dispersion used in the likelihood below
        # Moments of the projection matrices, fed into the penalty term.
        mu_l = TT.sum(TT.mean(l_F, axis=1))
        sig_l = TT.sum(TT.std(l_F, axis=1))
        mu_w = TT.sum(TT.mean(F, axis=1))
        sig_w = TT.sum(TT.std(F, axis=1))
        # 30-point Gauss-Hermite quadrature nodes/weights for integrating
        # the negative log-likelihood over the Gaussian on f.
        hermgauss = np.polynomial.hermite.hermgauss(30)
        herm_x = Ts(hermgauss[0])[None, None, :]
        herm_w = Ts(hermgauss[1]/np.sqrt(np.pi))[None, None, :]
        # Quadrature points of f: mu + sqrt(2*var) * x_i.
        herm_f = TT.sqrt(2*var_f[:, :, None])*herm_x+mu_f[:, :, None]
        # Gaussian negative log-likelihood evaluated at the quadrature points.
        nlk = (0.5*herm_f**2.-y[:, :, None]*herm_f)/dsp[:, :, None]+0.5*(
            TT.log(2*np.pi*dsp[:, :, None])+y[:, :, None]**2/dsp[:, :, None])
        enll = herm_w*nlk  # quadrature-weighted expected NLL terms
        # Negative log marginal likelihood (up to additive constants).
        nlml = 2*TT.log(TT.diagonal(L)).sum()+2*enll.sum()+1./sig2_n*(
            (y**2).sum()-(beta**2).sum())+2*(X.shape[0]-self.M)*a
        penelty = (kl(mu_w, sig_w)*self.M+kl(mu_l, sig_l)*self.S)/(self.S+self.M)
        cost = (nlml+penelty)/X.shape[0]  # per-datum objective
        grads = TT.grad(cost, params)
        # Optimizer update rules, with Nesterov momentum layered on top.
        updates = getattr(OPT, algo)(self.params, grads, **algo_params)
        updates = getattr(OPT, 'apply_nesterov_momentum')(updates, momentum=0.9)
        train_inputs = [X, y]
        train_outputs = [cost, alpha, Li]
        # Evaluation-only function vs. one full training iteration.
        self.train_func = Tf(train_inputs, train_outputs,
            givens=[(params, self.params)])
        self.train_iter_func = Tf(train_inputs, train_outputs,
            givens=[(params, self.params)], updates=updates)
        # Prediction graph: fresh symbolic inputs; alpha and Li are fed
        # in from a previous training run's outputs (note: these names
        # shadow the training-graph variables above).
        Xs, Li, alpha = TT.dmatrices('Xs', 'Li', 'alpha')
        l_FFs = TT.dot(Xs, l_F)+l_FC
        FFs = TT.concatenate((l_FFs, TT.dot(Xs, F)+FC), 1)
        Phis = TT.concatenate((TT.cos(FFs), TT.sin(FFs)), 1)
        Phis = sig_f*TT.sqrt(2./self.M)*Phis
        mu_pred = TT.dot(Phis, alpha)  # predictive mean
        # Predictive standard deviation from the same dispersion model.
        std_pred = (noise*(1+(TT.dot(Phis, Li.T)**2).sum(1)))**0.5
        pred_inputs = [Xs, alpha, Li]
        pred_outputs = [mu_pred, std_pred]
        self.pred_func = Tf(pred_inputs, pred_outputs,
            givens=[(params, self.params)])