Python torch module: eye() usage examples

The following 49 code examples, extracted from open-source Python projects, illustrate how to use torch.eye().
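
As a quick reference before the project examples, here is a minimal sketch of what torch.eye() itself produces, written against the modern PyTorch API (very old releases accept only the positional n and m arguments):

import torch

I = torch.eye(3)        # 3 x 3 identity matrix
R = torch.eye(2, 4)     # 2 x 4 matrix with ones on the main diagonal
print(I)
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])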

Project: pytorch-dist | Author: apaszke
def test_inverse(self):
        M = torch.randn(5,5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
Project: tnt | Author: pytorch
def testClassErrorMeter(self):
        mtr = meter.ClassErrorMeter(topk=[1])
        output = torch.eye(3)
        if hasattr(torch, "arange"):
            target = torch.arange(0, 3)
        else:
            target = torch.range(0, 2)
        mtr.add(output, target)
        err = mtr.value()

        self.assertEqual(err, [0], "All should be correct")

        target[0] = 1
        target[1] = 0
        target[2] = 0
        mtr.add(output, target)
        err = mtr.value()
        self.assertEqual(err, [50.0], "Half should be correct")
Project: pyro | Author: uber
def __init__(self, z_dim, transition_dim):
        super(GatedTransition, self).__init__()
        # initialize the six linear transformations used in the neural network
        self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_sig = nn.Linear(z_dim, z_dim)
        self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
        # modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
        self.lin_z_to_mu.weight.data = torch.eye(z_dim)
        self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
        # initialize the three non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
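
The two lines above make lin_z_to_mu start out as the identity function. A minimal standalone sketch of the same initialization (layer name and size are illustrative, not from the pyro source; written against modern PyTorch):

import torch
import torch.nn as nn

lin = nn.Linear(5, 5)
lin.weight.data = torch.eye(5)      # the weight starts as the identity matrix
lin.bias.data = torch.zeros(5)      # the bias starts at zero
z = torch.randn(2, 5)
assert torch.allclose(lin(z), z)    # at initialization the layer maps z to itself
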
Project: pyro | Author: uber
def torch_eye(n, m=None, out=None):
    """
    Like `torch.eye()`, but works with cuda tensors.
    """
    if m is None:
        m = n
    try:
        return torch.eye(n, m, out=out)
    except TypeError:
        # Only catch errors due to torch.eye() not being available for cuda tensors.
        module = torch.Tensor.__module__ if out is None else type(out).__module__
        if module != 'torch.cuda':
            raise
    Tensor = getattr(torch, torch.Tensor.__name__)
    cpu_out = Tensor(n, m)
    cuda_out = torch.eye(n, m, out=cpu_out).cuda()
    return cuda_out if out is None else out.copy_(cuda_out)
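
A hedged usage sketch for the helper above, assuming a CUDA machine and one of the old PyTorch releases this workaround targets (on modern PyTorch, torch.eye(n, device='cuda') makes the helper unnecessary):

out = torch.cuda.FloatTensor(4, 4)   # pre-allocated CUDA output tensor
identity = torch_eye(4, out=out)     # falls back to a CPU eye() copied onto the GPU
identity2 = torch_eye(3, 5)          # plain CPU call takes the fast path
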
Project: jack | Author: uclmr
def __init__(self, shared_resources: SharedResources):
        super(FastQAPyTorchModule, self).__init__()
        self._shared_resources = shared_resources
        input_size = shared_resources.config["repr_dim_input"]
        size = shared_resources.config["repr_dim"]
        self._size = size
        self._with_char_embeddings = self._shared_resources.config.get("with_char_embeddings", False)

        # modules & parameters
        if self._with_char_embeddings:
            self._conv_char_embedding = embedding.ConvCharEmbeddingModule(
                len(shared_resources.char_vocab), size)
            self._embedding_projection = nn.Linear(size + input_size, size)
            self._embedding_highway = Highway(size, 1)
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size + size))
            input_size = size
        else:
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size))

        self._bilstm = BiLSTM(input_size + 2, size)
        self._answer_layer = FastQAAnswerModule(shared_resources)

        # [size, 2 * size]
        self._question_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
        self._support_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
Project: open-reid | Author: Cysu
def test_forward_backward(self):
        import torch
        import torch.nn.functional as F
        from torch.autograd import Variable
        from reid.loss import OIMLoss
        criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
        criterion.lut = torch.eye(3)
        x = Variable(torch.randn(3, 3), requires_grad=True)
        y = Variable(torch.range(0, 2).long())
        loss = criterion(x, y)
        loss.backward()
        probs = F.softmax(x)
        grads = probs.data - torch.eye(3)
        abs_diff = torch.abs(grads - x.grad.data)
        self.assertEqual(torch.log(probs).diag().sum(), -loss)
        self.assertTrue(torch.max(abs_diff) < 1e-6)
Project: FewShotLearning | Author: gitabcworld
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
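
In the snippet above, torch.eye(hidden_size).repeat(1, 4) tiles the identity along the columns, one identity block per LSTM gate. A small sketch of the resulting layout (the size is illustrative):

import torch

h = 3
w = torch.eye(h).repeat(1, 4)                 # shape (3, 12): [I | I | I | I]
assert w.shape == (h, 4 * h)
assert torch.equal(w[:, :h], torch.eye(h))    # each h-column block is an identity
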
Project: e2e-model-learning | Author: locuslab
def __init__(self, params, eps=1e-2):
        super(SolveNewsvendor, self).__init__()
        k = len(params['d'])
        self.Q = Variable(torch.diag(torch.Tensor(
            [params['c_quad']] + [params['b_quad']]*k + [params['h_quad']]*k)) \
                .cuda())
        self.p = Variable(torch.Tensor(
            [params['c_lin']] + [params['b_lin']]*k + [params['h_lin']]*k) \
                .cuda())
        self.G = Variable(torch.cat([
            torch.cat([-torch.ones(k,1), -torch.eye(k), torch.zeros(k,k)], 1),
            torch.cat([torch.ones(k,1), torch.zeros(k,k), -torch.eye(k)], 1),
            -torch.eye(1 + 2*k)], 0).cuda())
        self.h = Variable(torch.Tensor(
            np.concatenate([-params['d'], params['d'], np.zeros(1+ 2*k)])).cuda())
        self.one = Variable(torch.Tensor([1])).cuda()
        self.eps_eye = eps * Variable(torch.eye(1 + 2*k).cuda()).unsqueeze(0)
Project: e2c-pytorch | Author: ethanluoyc
def forward(self, h, Q, u):
        batch_size = h.size()[0]
        v, r = self.trans(h).chunk(2, dim=1)
        v1 = v.unsqueeze(2)
        rT = r.unsqueeze(1)
        I = Variable(torch.eye(self.dim_z).repeat(batch_size, 1, 1))
        if rT.data.is_cuda:
            I = I.cuda()
        A = I.add(v1.bmm(rT))

        B = self.fc_B(h).view(-1, self.dim_z, self.dim_u)
        o = self.fc_o(h)

        # need to compute the parameters for distributions
        # as well as for the samples
        u = u.unsqueeze(2)

        d = A.bmm(Q.mu.unsqueeze(2)).add(B.bmm(u)).add(o).squeeze(2)
        sample = A.bmm(h.unsqueeze(2)).add(B.bmm(u)).add(o).squeeze(2)

        return sample, NormalDistribution(d, Q.sigma, Q.logsigma, v=v, r=r)
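
The repeat call above builds a batch of identity matrices so that the rank-one update A = I + v rᵀ runs as a single batched matrix multiply. A compact standalone sketch (batch and latent sizes are illustrative):

import torch

B, d = 4, 3
v = torch.randn(B, d, 1)             # column vectors
r = torch.randn(B, 1, d)             # row vectors
I = torch.eye(d).repeat(B, 1, 1)     # (B, d, d) stack of identity matrices
A = I + torch.bmm(v, r)              # batched rank-one update I + v r^T
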
Project: optnet | Author: locuslab
def forward(self, x):
        nBatch = x.size(0)

        x = self.fc1(x)

        L = self.M*self.L
        Q = L.mm(L.t()) + self.args.eps*Variable(torch.eye(self.nHidden)).cuda()
        Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
        G = self.G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
        h = self.G.mv(self.z0)+self.s0
        h = h.unsqueeze(0).expand(nBatch, self.nineq)
        e = Variable(torch.Tensor())
        x = QPFunction()(Q, x, G, h, e, e)
        x = x[:,:self.nFeatures]

        return x
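
The expression Q = L Lᵀ + eps * I above turns a possibly rank-deficient Gram matrix into a strictly positive definite one, as the QP solver requires. A standalone sketch of the trick (size and eps are illustrative; torch.linalg.eigvalsh needs a recent PyTorch):

import torch

n, eps = 4, 1e-4
L = torch.tril(torch.randn(n, n))
Q = L @ L.t() + eps * torch.eye(n)             # PSD Gram matrix nudged to strict positive definiteness
assert (torch.linalg.eigvalsh(Q) > 0).all()    # all eigenvalues are now positive
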
Project: optnet | Author: locuslab
def __init__(self, nFeatures, args):
        super().__init__()

        nHidden, neq, nineq = 2*nFeatures-1, 0, 2*nFeatures-2
        assert neq == 0

        # self.fc1 = nn.Linear(nFeatures, nHidden)
        self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())

        Q = 1e-8*torch.eye(nHidden)
        Q[:nFeatures,:nFeatures] = torch.eye(nFeatures)
        self.L = Variable(torch.potrf(Q))

        self.D = Parameter(0.3*torch.randn(nFeatures-1, nFeatures))
        # self.lam = Parameter(20.*torch.ones(1))
        self.h = Variable(torch.zeros(nineq))

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.neq = neq
        self.nineq = nineq
        self.args = args
Project: optnet | Author: locuslab
def forward(self, x):
        nBatch = x.size(0)

        L = self.M*self.L
        Q = L.mm(L.t()) + self.args.eps*Variable(torch.eye(self.nHidden)).cuda()
        Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
        nI = Variable(-torch.eye(self.nFeatures-1).type_as(Q.data))
        G = torch.cat((
              torch.cat(( self.D, nI), 1),
              torch.cat((-self.D, nI), 1)
        ))
        G = G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
        h = self.h.unsqueeze(0).expand(nBatch, self.nineq)
        e = Variable(torch.Tensor())
        # p = torch.cat((-x, self.lam.unsqueeze(0).expand(nBatch, self.nFeatures-1)), 1)
        p = torch.cat((-x, Parameter(13.*torch.ones(nBatch, self.nFeatures-1).cuda())), 1)
        x = QPFunction()(Q.double(), p.double(), G.double(), h.double(), e, e).float()
        x = x[:,:self.nFeatures]

        return x
Project: optnet | Author: locuslab
def forward(self, puzzles):
        nBatch = puzzles.size(0)

        x = puzzles.view(nBatch,-1)
        x = self.fc_in(x)

        e = Variable(torch.Tensor())

        h = self.G.mv(self.z)+self.s
        x = QPFunction(verbose=False)(
            self.Q, x, self.G, h, e, e,
        )

        x = self.fc_out(x)
        x = x.view_as(puzzles)
        return x


# if __name__=="__main__":
#     sudoku = SolveSudoku(2, 0.2)
#     puzzle = [[4, 0, 0, 0], [0,0,4,0], [0,2,0,0], [0,0,0,1]]
#     Y = Variable(torch.DoubleTensor(np.array([[np.array(np.eye(5,4,-1)[i,:]) for i in row] for row in puzzle])).cuda())
#     solution = sudoku(Y.unsqueeze(0))
#     print(solution.view(1,4,4,4))
Project: optnet | Author: locuslab
def forward(self, x):
        nBatch = x.size(0)

        x = F.max_pool2d(self.conv1(x), 2)
        x = F.max_pool2d(self.conv2(x), 2)
        x = x.view(nBatch, -1)

        L = self.M*self.L
        Q = L.mm(L.t()) + self.eps*Variable(torch.eye(self.nHidden)).cuda()
        Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
        G = self.G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
        z0 = self.qp_z0(x)
        s0 = self.qp_s0(x)
        h = z0.mm(self.G.t())+s0
        e = Variable(torch.Tensor())
        inputs = self.qp_o(x)
        x = QPFunction()(Q, inputs, G, h, e, e)
        x = x[:,:10]

        return F.log_softmax(x)
Project: optnet | Author: locuslab
def __init__(self, nFeatures, nHidden, nCls, neq, Qpenalty=0.1, eps=1e-4):
        super().__init__()

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.nCls = nCls

        self.fc1 = nn.Linear(nFeatures, nHidden)
        self.fc2 = nn.Linear(nHidden, nCls)

        self.Q = Variable(Qpenalty*torch.eye(nHidden).double().cuda())
        self.G = Variable(-torch.eye(nHidden).double().cuda())
        self.h = Variable(torch.zeros(nHidden).double().cuda())
        self.A = Parameter(torch.rand(neq,nHidden).double().cuda())
        self.b = Variable(torch.ones(self.A.size(0)).double().cuda())

        self.neq = neq
Project: pytorch | Author: tylergenter
def eye(tensor):
    """Fills the 2-dimensional input Tensor or Variable with the identity matrix. Preserves the identity of the inputs in
    Linear layers, where as many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional torch.Tensor or autograd.Variable

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.eye(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    if isinstance(tensor, Variable):
        eye(tensor.data)
        return tensor

    return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
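A short usage note for the initializer above: current PyTorch exposes it as nn.init.eye_ (trailing underscore, in-place), while the Variable branch only matters on the old releases this snippet was written for. A minimal sketch against the modern API:

import torch
import torch.nn as nn

layer = nn.Linear(5, 3)
nn.init.eye_(layer.weight)     # fills the 3 x 5 weight with ones on the main diagonal
print(layer.weight)
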
Project: pytorch | Author: tylergenter
def test_inverse(self):
        M = torch.randn(5, 5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
Project: benchmark | Author: pytorch
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
Project: pytorch-coriander | Author: hughperkins
def eye(tensor):
    """Fills the 2-dimensional input Tensor or Variable with the identity matrix. Preserves the identity of the inputs in
    Linear layers, where as many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional torch.Tensor or autograd.Variable

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.eye(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    if isinstance(tensor, Variable):
        eye(tensor.data)
        return tensor

    return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
Project: pytorch-coriander | Author: hughperkins
def test_inverse(self):
        M = torch.randn(5, 5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
Project: AGE | Author: DmitryUlyanov
def sample_entropy(samples):

    # Assume B x C input
    dist_mat = pairwise_euclidean(samples)

    # Get max and add it to diag
    m = dist_mat.max().detach()
    dist_mat_d = dist_mat + \
        Variable(torch.eye(dist_mat.size(0)) * (m.data[0] + 1)).cuda()

    entropy = (dist_mat_d.min(1)[0] + 1e-4).log().sum()

    entropy *= (samples.size(1) + 0.) / samples.size(0)

    return entropy
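
Adding (max + 1) times the identity above lifts the zero self-distances on the diagonal over every true distance, so the row-wise minimum picks each point's nearest neighbour rather than the point itself. A compact sketch of the same masking (modern PyTorch, CPU for simplicity; pairwise_euclidean is replaced by torch.cdist here):

import torch

x = torch.randn(5, 2)
d = torch.cdist(x, x)                        # pairwise distances, zeros on the diagonal
masked = d + (d.max() + 1) * torch.eye(5)    # self-distances now exceed every real distance
nn_dist = masked.min(dim=1).values           # nearest-neighbour distance per row
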
Project: vae_vpflows | Author: jmtomczak
def forward(self, L, z):
        '''
        :param L: batch_size (B) x latent_size^2 (L^2)
        :param z: batch_size (B) x latent_size (L)
        :return: z_new = L*z
        '''
        # L->tril(L)
        L_matrix = L.view( -1, self.args.z1_size, self.args.z1_size ) # resize to get B x L x L
        LTmask = torch.tril( torch.ones(self.args.z1_size, self.args.z1_size), k=-1 ) # lower-triangular mask matrix (1s in lower triangular part)
        I = Variable( torch.eye(self.args.z1_size, self.args.z1_size).expand(L_matrix.size(0), self.args.z1_size, self.args.z1_size) )
        if self.args.cuda:
            LTmask = LTmask.cuda()
            I = I.cuda()
        LTmask = Variable(LTmask)
        LTmask = LTmask.unsqueeze(0).expand( L_matrix.size(0), self.args.z1_size, self.args.z1_size ) # 1 x L x L -> B x L x L
        LT = torch.mul( L_matrix, LTmask ) + I # here we get a batch of lower-triangular matrices with ones on diagonal

        # z_new = L * z
        z_new = torch.bmm( LT , z.unsqueeze(2) ).squeeze(2) # B x L x L * B x L x 1 -> B x L

        return z_new
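
A standalone sketch of the masking step above, building a batch of lower-triangular matrices with ones on the diagonal (batch and latent sizes are illustrative; modern PyTorch spells the offset argument diagonal rather than k):

import torch

B, Lz = 2, 3
L_mat = torch.randn(B, Lz * Lz).view(B, Lz, Lz)
mask = torch.tril(torch.ones(Lz, Lz), diagonal=-1)   # strictly lower-triangular mask
I = torch.eye(Lz).expand(B, Lz, Lz)
LT = L_mat * mask + I                                # free entries below the diagonal, ones on it
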
Project: block | Author: bamos
def test_np():
    npr.seed(0)

    nx, nineq, neq = 4, 6, 7
    Q = npr.randn(nx, nx)
    G = npr.randn(nineq, nx)
    A = npr.randn(neq, nx)
    D = np.diag(npr.rand(nineq))

    K_ = np.bmat((
        (Q, np.zeros((nx, nineq)), G.T, A.T),
        (np.zeros((nineq, nx)), D, np.eye(nineq), np.zeros((nineq, neq))),
        (G, np.eye(nineq), np.zeros((nineq, nineq + neq))),
        (A, np.zeros((neq, nineq + nineq + neq)))
    ))

    K = block((
        (Q,   0, G.T, A.T),
        (0,   D, 'I',   0),
        (G, 'I',   0,   0),
        (A,   0,   0,   0)
    ))

    assert np.allclose(K_, K)
Project: pytorch | Author: ezyang
def eye(tensor):
    """Fills the 2-dimensional input Tensor or Variable with the identity
    matrix. Preserves the identity of the inputs in Linear layers, where as
    many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional torch.Tensor or autograd.Variable

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.eye(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    if isinstance(tensor, Variable):
        eye(tensor.data)
        return tensor

    return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
Project: pytorch | Author: ezyang
def test_inverse(self):
        M = torch.randn(5, 5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
Project: gpytorch | Author: jrg365
def test_computes_radial_basis_function_gradient():
    a = torch.Tensor([4, 2, 8]).view(3, 1)
    b = torch.Tensor([0, 2, 2]).view(3, 1)
    lengthscale = 2

    kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
    kernel.eval()
    param = Variable(torch.Tensor(3, 3).fill_(math.log(lengthscale)), requires_grad=True)
    diffs = Variable(a.expand(3, 3) - b.expand(3, 3).transpose(0, 1))
    actual_output = (-(diffs ** 2) / param.exp()).exp()
    actual_output.backward(torch.eye(3))
    actual_param_grad = param.grad.data.sum()

    output = kernel(Variable(a), Variable(b))
    output.backward(gradient=torch.eye(3))
    res = kernel.log_lengthscale.grad.data
    assert(torch.norm(res - actual_param_grad) < 1e-5)
Project: pytorch | Author: pytorch
def eye(tensor):
    """Fills the 2-dimensional input Tensor or Variable with the identity
    matrix. Preserves the identity of the inputs in Linear layers, where as
    many inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional torch.Tensor or autograd.Variable

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.eye(w)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    if isinstance(tensor, Variable):
        eye(tensor.data)
        return tensor

    return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
Project: pytorch | Author: pytorch
def test_inverse(self):
        M = torch.randn(5, 5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(M, out=MII)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
Project: OpenNMT-py | Author: OpenNMT
def forward(self, input):
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            lap = laplacian[b].masked_fill(
                Variable(torch.eye(input.size(1)).cuda().ne(0)), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # store roots on diagonal
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()

            factor = inv_laplacian.diag().unsqueeze(1)\
                                         .expand_as(input[b]).transpose(0, 1)
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            roots_output = input[b].diag().exp().mul(
                inv_laplacian.transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output
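
The masked_fill call above uses a torch.eye mask to zero the diagonal before the Laplacian is formed. A minimal sketch of that one step (modern PyTorch accepts a bool mask; the historical code used .ne(0) on a float eye):

import torch

A = torch.randn(4, 4).exp()
A_offdiag = A.masked_fill(torch.eye(4).bool(), 0)   # keep off-diagonal entries, zero the diagonal
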
Project: pytorch-dist | Author: apaszke
def test_eye(self):
        res1 = torch.eye(100, 100)
        res2 = torch.Tensor()
        torch.eye(100, 100, out=res2)
        self.assertEqual(res1, res2)
Project: torch-gel | Author: jayanthkoushik
def test_cd_newton(self):
        """Test the CD implementation with Newton internal solver."""
        # Compute the C_js and I_js
        Cs = [(A_j.t() @ A_j) / self.m for A_j in self.As]
        Is = [torch.eye(n_j) for n_j in self.ns]
        self._test_implementation(make_A_cd, gel_solve_cd,
                                  block_solve_fun=block_solve_newton,
                                  block_solve_kwargs={
                                      "ls_alpha": 0.01,
                                      "ls_beta": 0.9,
                                      "max_iters": 4,
                                      "tol": 1e-10
                                  }, max_cd_iters=None, rel_tol=1e-6, Cs=Cs,
                                  Is=Is)
Project: sef | Author: passalis
def _regularizer(self):
        if self.use_gpu:
            regularizer = torch.mm(self.A.transpose(0, 1), torch.mm(self.symbolic_kernel(self.X_kernel), self.A)) \
                          - Variable(torch.eye(self.A.size(1)).cuda())
        else:
            regularizer = torch.mm(self.A.transpose(0, 1), torch.mm(self.symbolic_kernel(self.X_kernel), self.A)) \
                          - Variable(torch.eye(self.A.size(1)))

        return 0.5 * torch.sum(regularizer ** 2) / (self.A.size(1) ** 2)
Project: sef | Author: passalis
def _regularizer(self):

        if self.use_gpu:
            regularizer = torch.mm(self.W.transpose(0, 1), self.W) - Variable(torch.eye(self.W.size(1)).cuda())
        else:
            regularizer = torch.mm(self.W.transpose(0, 1), self.W) - Variable(torch.eye(self.W.size(1)))
        return 0.5 * torch.sum(regularizer ** 2) / (self.W.size(1) ** 2)
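
Both _regularizer variants above penalise the deviation of a Gram matrix from the identity, pushing the learned projection towards orthonormal columns. A compact sketch of the second (linear) variant with illustrative sizes:

import torch

W = torch.randn(10, 4)
R = W.t() @ W - torch.eye(4)               # zero exactly when W has orthonormal columns
penalty = 0.5 * (R ** 2).sum() / (4 ** 2)
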
Project: allennlp | Author: allenai
def test_forward(self):
        # pylint: disable=protected-access
        similarity = MultiHeadedSimilarity(num_heads=3, tensor_1_dim=6)
        similarity._tensor_1_projection = Parameter(torch.eye(6))
        similarity._tensor_2_projection = Parameter(torch.eye(6))
        a_vectors = Variable(torch.FloatTensor([[[[1, 1, -1, -1, 0, 1], [-2, 5, 9, -1, 3, 4]]]]))
        b_vectors = Variable(torch.FloatTensor([[[[1, 1, 1, 0, 2, 5], [0, 1, -1, -7, 1, 2]]]]))
        result = similarity(a_vectors, b_vectors).data.numpy()
        assert result.shape == (1, 1, 2, 3)
        assert_almost_equal(result, [[[[2, -1, 5], [5, -2, 11]]]])
Project: FewShotLearning | Author: gitabcworld
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
Project: pmet | Author: bkj
def __init__(self, n_chars, n_classes, emb_dim=64, rec_hidden_dim=32, att_dim=30, att_channels=16):
        super(ACharacterLSTM, self).__init__()

        self.char_embs = nn.Embedding(n_chars, emb_dim, padding_idx=0)
        self.rnn = nn.LSTM(emb_dim, int(rec_hidden_dim / 2), bias=False, bidirectional=True)

        self.att1 = nn.Linear(rec_hidden_dim, att_dim, bias=False)
        self.att2 = nn.Linear(att_dim, att_channels, bias=False)

        self.fc1 = nn.Linear(att_channels * rec_hidden_dim, n_classes)

        self.I = Variable(torch.eye(att_channels)).cuda()
Project: e2c-pytorch | Author: ethanluoyc
def cov(self):
        """This should only be called when NormalDistribution represents one sample"""
        if self.v is not None and self.r is not None:
            assert self.v.dim() == 1
            dim = self.v.size(0)  # D, the length of v (v.dim() would always be 1 here)
            v = self.v.unsqueeze(1)  # D * 1 vector
            rt = self.r.unsqueeze(0)  # 1 * D vector
            A = torch.eye(dim) + v.mm(rt)
            return A.mm(torch.diag(self.sigma.pow(2)).mm(A.t()))
        else:
            return torch.diag(self.sigma.pow(2))
Project: optnet | Author: locuslab
def __init__(self, n, Qpenalty, trueInit=False):
        super().__init__()
        nx = (n**2)**3
        self.Q = Variable(Qpenalty*torch.eye(nx).double().cuda())
        self.G = Variable(-torch.eye(nx).double().cuda())
        self.h = Variable(torch.zeros(nx).double().cuda())
        t = get_sudoku_matrix(n)
        if trueInit:
            self.A = Parameter(torch.DoubleTensor(t).cuda())
        else:
            self.A = Parameter(torch.rand(t.shape).double().cuda())
        self.b = Variable(torch.ones(self.A.size(0)).double().cuda())
Project: optnet | Author: locuslab
def __init__(self, n, Qpenalty, nLatent, nineq, trueInit=False):
        super().__init__()
        nx = (n**2)**3
        self.fc_in = nn.Linear(nx, nLatent)
        self.Q = Variable(Qpenalty*torch.eye(nLatent).cuda())
        self.G = Parameter(torch.Tensor(nineq, nLatent).uniform_(-1,1).cuda())
        self.z = Parameter(torch.zeros(nLatent).cuda())
        self.s = Parameter(torch.ones(nineq).cuda())
        self.fc_out = nn.Linear(nLatent, nx)
Project: optnet | Author: locuslab
def __init__(self, nHidden, nCls=10, proj='softmax'):
        super(Lenet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
        self.fc1 = nn.Linear(50*4*4, nHidden)
        self.fc2 = nn.Linear(nHidden, nCls)

        self.proj = proj
        self.nCls = nCls

        if proj == 'simproj':
            self.Q = Variable(0.5*torch.eye(nCls).double().cuda())
            self.G = Variable(-torch.eye(nCls).double().cuda())
            self.h = Variable(-1e-5*torch.ones(nCls).double().cuda())
            self.A = Variable((torch.ones(1, nCls)).double().cuda())
            self.b = Variable(torch.Tensor([1.]).double().cuda())
            def projF(x):
                nBatch = x.size(0)
                Q = self.Q.unsqueeze(0).expand(nBatch, nCls, nCls)
                G = self.G.unsqueeze(0).expand(nBatch, nCls, nCls)
                h = self.h.unsqueeze(0).expand(nBatch, nCls)
                A = self.A.unsqueeze(0).expand(nBatch, 1, nCls)
                b = self.b.unsqueeze(0).expand(nBatch, 1)
                x = QPFunction()(Q, -x.double(), G, h, A, b).float()
                x = x.log()
                return x
            self.projF = projF
        else:
            self.projF = F.log_softmax
Project: optnet | Author: locuslab
def prof_instance(nz, neq, nineq, nIter, cuda):
    L = np.tril(npr.uniform(0,1, (nz,nz))) + np.eye(nz,nz)
    G = npr.randn(nineq,nz)
    A = npr.randn(neq,nz)
    z0 = npr.randn(nz)
    s0 = np.ones(nineq)
    p = npr.randn(nz)

    p, L, G, A, z0, s0 = [torch.Tensor(x) for x in [p, L, G, A, z0, s0]]
    Q = torch.mm(L, L.t())+0.001*torch.eye(nz).type_as(L)
    if cuda:
        p, L, Q, G, A, z0, s0 = [x.cuda() for x in [p, L, Q, G, A, z0, s0]]

    af = adact.AdactFunction()

    start = time.time()
    # One-time cost for numpy conversion.
    p_np, L_np, G_np, A_np, z0_np, s0_np = [adact.toNp(v) for v in [p, L, G, A, z0, s0]]
    cp = time.time()-start
    for i in range(nIter):
        start = time.time()
        zhat, nu, lam = af.forward_single_np(p_np, L_np, G_np, A_np, z0_np, s0_np)
        cp += time.time()-start

    b = torch.mv(A, z0) if neq > 0 else None
    h = torch.mv(G, z0)+s0
    L_Q, L_S, R = aip.pre_factor_kkt(Q, G, A, nineq, neq)
    pdipm = []
    for i in range(nIter):
        start = time.time()
        zhat_ip, nu_ip, lam_ip = aip.forward_single(p, Q, G, A, b, h, L_Q, L_S, R)
        pdipm.append(time.time()-start)
    return cp, np.sum(pdipm)
Project: pytorch | Author: tylergenter
def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
    """Unpacks the data and pivots from a batched LU factorization (btrifact) of a tensor.

    Returns a tuple indexed by:
      0: The pivots.
      1: The L tensor.
      2: The U tensor.

    Arguments:
        LU_data (Tensor): The packed LU factorization data.
        LU_pivots (Tensor): The packed LU factorization pivots.
        unpack_data (bool): Flag indicating if the data should be unpacked.
        unpack_pivots (bool): Flag indicating if the pivots should be unpacked.
    """

    nBatch, sz, _ = LU_data.size()

    if unpack_data:
        I_U = torch.triu(torch.ones(sz, sz)).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        I_L = 1 - I_U
        L = LU_data.new(LU_data.size()).zero_()
        U = LU_data.new(LU_data.size()).zero_()
        I_diag = torch.eye(sz).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        L[I_diag] = 1.0
        L[I_L] = LU_data[I_L]
        U[I_U] = LU_data[I_U]
    else:
        L = U = None

    if unpack_pivots:
        P = torch.eye(sz).type_as(LU_data).unsqueeze(0).repeat(nBatch, 1, 1)
        for i in range(nBatch):
            for j in range(sz):
                k = LU_pivots[i, j] - 1
                t = P[i, :, j].clone()
                P[i, :, j] = P[i, :, k]
                P[i, :, k] = t
    else:
        P = None

    return P, L, U
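
The pivot loop above converts LAPACK-style pivot indices into an explicit permutation matrix by swapping columns of torch.eye(sz). The same idea for a single, non-batched factorization, with hypothetical 1-based pivots:

import torch

sz = 3
pivots = [2, 3, 3]                 # hypothetical 1-based pivot indices
P = torch.eye(sz)
for j, piv in enumerate(pivots):
    k = piv - 1
    t = P[:, j].clone()            # swap columns j and k, as in the snippet above
    P[:, j] = P[:, k]
    P[:, k] = t
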
Project: pytorch | Author: tylergenter
def test_eye(self):
        for as_variable in [True, False]:
            input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5, as_variable=as_variable)
            init.eye(input_tensor)
            if as_variable:
                input_tensor = input_tensor.data

            # Check every single element
            for i in range(input_tensor.size(0)):
                for j in range(input_tensor.size(1)):
                    if i == j:
                        assert input_tensor[i][j] == 1
                    else:
                        assert input_tensor[i][j] == 0
Project: pytorch | Author: tylergenter
def test_eye_only_works_on_2d_inputs(self):
        for as_variable in [True, False]:
            for dims in [1, 3]:
                with self.assertRaises(ValueError):
                    tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3, as_variable=as_variable)
                    init.eye(tensor)
Project: pytorch | Author: tylergenter
def test_orthogonal(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
                    input_tensor = torch.zeros(tensor_size)
                    gain = 1.0

                    if as_variable:
                        input_tensor = Variable(input_tensor)

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.orthogonal(input_tensor, gain=gain)
                    else:
                        init.orthogonal(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
                    flattened_tensor = input_tensor.view(rows, cols)
                    if rows > cols:
                        self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                         torch.eye(cols) * gain ** 2, prec=1e-6)
                    else:
                        self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                         torch.eye(rows) * gain ** 2, prec=1e-6)
Project: benchmark | Author: pytorch
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
Project: PyTorchText | Author: chenyuntc
def __init__(self, opt ):
        super(MultiModelAll, self).__init__()
        self.model_name = 'MultiModelAll'
        self.opt=opt
        # self.char_models = []
        self.models = []
        self.word_embedding=nn.Embedding(411720,256)
        self.char_embedding=nn.Embedding(11973,256)
        self.word_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('char','word'))['vector']))
        self.char_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('word','char'))['vector']))

        for _name,_path in zip(opt.model_names, opt.model_paths):
            tmp_config = Config().parse(opt.state_dict(),print_=False)
            tmp_config.embedding_path=None
            _model = getattr(models,_name)(tmp_config)

            if _path is not None:
                _model.load(_path)

            # if _model.opt.type_=='char':
            _model.encoder=(self.char_embedding if _model.opt.type_=='char' else self.word_embedding)
            # else:
                # _model.encoder=self.word_embedding

            self.models.append(_model)



        self.models = nn.ModuleList(self.models)
        # self.word_models = nn.ModuleList(self.word_models)
        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        assert self.opt.loss=='bceloss'
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))
Project: PyTorchText | Author: chenyuntc
def __init__(self, opt ):
        super(MultiModel, self).__init__()
        self.model_name = 'MultiModel'
        self.opt=opt
        self.models = []
        for _name,_path in zip(opt.model_names, opt.model_paths):
            _model = getattr(models,_name)(Config().parse(opt.state_dict(),print_=False))
            if _path is not None:
                _model.load(_path)
            self.models.append(_model)
        self.models = nn.ModuleList(self.models)
        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))
Project: PyTorchText | Author: chenyuntc
def __init__(self, opt ):
        super(MultiModelAll, self).__init__()
        self.model_name = 'MultiModelAll'
        self.opt=opt
        # self.char_models = []
        self.models = []
        self.word_embedding=nn.Embedding(411720,256)
        self.char_embedding=nn.Embedding(11973,256)
        if opt.embedding_path:
            self.word_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('char','word'))['vector']))
            self.char_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('word','char'))['vector']))

        for _name,_path in zip(opt.model_names, opt.model_paths):
            tmp_config = Config().parse(opt.state_dict(),print_=False)
            tmp_config.embedding_path=None
            _model = getattr(models,_name)(tmp_config)
            # load the pre-trained weights when a checkpoint path is given
            if _path is not None:
                _model.load(_path)
            # share the char or word embedding layer according to each model's type
            _model.encoder=(self.char_embedding if _model.opt.type_=='char' else self.word_embedding)
            self.models.append(_model)
        self.models = nn.ModuleList(self.models)

        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        assert self.opt.loss=='bceloss'
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))