Python torch module: norm() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.norm(). The snippets span several early PyTorch releases, so some rely on legacy APIs (autograd Variable, the out-as-first-argument form of torch.norm, and pre-0.2 reductions that kept the reduced dimension).

Project: pytorch-dist | Author: apaszke
def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        # allocate the norm buffers on first call (they start as None)
        self._weightNorm = self._weightNorm or self.weight.new()
        self._inputNorm = self._inputNorm or self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        # legacy out-first signature: writes the row L2 norms of weight into self._weightNorm
        torch.norm(self._weightNorm, self.weight, 2, 1).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(self._inputNorm, input, 2, 1).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
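For readers on a recent PyTorch, a minimal functional sketch of the same cosine layer (assuming broadcasting and the keepdim argument, both available since 0.2):

import torch

def cosine_layer(input, weight, eps=1e-12):
    # weight: (outputSize, inputSize); input: (batchSize, inputSize)
    w_norm = torch.norm(weight, 2, 1, keepdim=True) + eps  # (outputSize, 1)
    x_norm = torch.norm(input, 2, 1, keepdim=True) + eps   # (batchSize, 1)
    return input.mm(weight.t()) / (x_norm * w_norm.t())    # (batchSize, outputSize)

x = torch.randn(8, 16)
w = torch.randn(4, 16)
print(cosine_layer(x, w).size())  # torch.Size([8, 4])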
Project: pytorch-coriander | Author: hughperkins
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero. Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
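A quick numeric check of the formula, assuming a recent PyTorch where sum and norm reduce the given dimension:

import torch

x1 = torch.randn(5, 8)
x2 = torch.randn(5, 8)
w12 = (x1 * x2).sum(dim=1)
w1 = x1.norm(2, dim=1)
w2 = x2.norm(2, dim=1)
print(w12 / (w1 * w2).clamp(min=1e-8))  # one similarity per row, each in [-1, 1]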
Project: pytorch-coriander | Author: hughperkins
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is flattened into a vector,
    i.e. :math:`\lVert v \rVert_p` is not a matrix norm.

    With default arguments normalizes over the second dimension with Euclidean norm.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation
        dim (int): the dimension to reduce
        eps (float): small value to avoid division by zero
    """
    return input / input.norm(p, dim, True).clamp(min=eps).expand_as(input)
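Usage sketch: the positional True above is keepdim, so with broadcasting the trailing expand_as is optional; normalizing the rows of a matrix:

import torch

t = torch.randn(3, 4)
t_unit = t / t.norm(2, dim=1, keepdim=True).clamp(min=1e-12)
print(t_unit.norm(2, dim=1))  # ~[1., 1., 1.]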
Project: pytorch | Author: ezyang
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix
    norm.

    With default arguments normalizes over the second dimension with Euclidean
    norm.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
    """
    return input / input.norm(p, dim, True).clamp(min=eps).expand_as(input)
Project: gpytorch | Author: jrg365
def test_computes_radial_basis_function_gradient():
    a = torch.Tensor([4, 2, 8]).view(3, 1)
    b = torch.Tensor([0, 2, 2]).view(3, 1)
    lengthscale = 2

    kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
    kernel.eval()
    param = Variable(torch.Tensor(3, 3).fill_(math.log(lengthscale)), requires_grad=True)
    diffs = Variable(a.expand(3, 3) - b.expand(3, 3).transpose(0, 1))
    actual_output = (-(diffs ** 2) / param.exp()).exp()
    actual_output.backward(torch.eye(3))
    actual_param_grad = param.grad.data.sum()

    output = kernel(Variable(a), Variable(b))
    output.backward(gradient=torch.eye(3))
    res = kernel.log_lengthscale.grad.data
    assert(torch.norm(res - actual_param_grad) < 1e-5)
Project: gpytorch | Author: jrg365
def test_inv_matmul():
    c_1 = Variable(torch.Tensor([4, 1, 1]), requires_grad=True)
    c_2 = Variable(torch.Tensor([4, 1, 1]), requires_grad=True)
    T_1 = Variable(torch.zeros(3, 3))
    for i in range(3):
        for j in range(3):
            T_1[i, j] = c_1[abs(i - j)]
    T_2 = gpytorch.lazy.ToeplitzLazyVariable(c_2)

    B = Variable(torch.randn(3, 4))

    res_1 = gpytorch.inv_matmul(T_1, B).sum()
    res_2 = gpytorch.inv_matmul(T_2, B).sum()

    res_1.backward()
    res_2.backward()

    assert(torch.norm(res_1.data - res_2.data) < 1e-4)
    assert(torch.norm(c_1.grad.data - c_2.grad.data) < 1e-4)
Project: gpytorch | Author: jrg365
def test_exact_posterior():
    train_mean = Variable(torch.randn(4))
    train_y = Variable(torch.randn(4))
    test_mean = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
    indices = Variable(torch.arange(0, 4).long().view(4, 1))
    values = Variable(torch.ones(4).view(4, 1))
    toeplitz_1 = InterpolatedLazyVariable(ToeplitzLazyVariable(c1_var), indices, values, indices, values)
    toeplitz_2 = InterpolatedLazyVariable(ToeplitzLazyVariable(c2_var), indices, values, indices, values)
    sum_lv = toeplitz_1 + toeplitz_2

    # Actual case
    actual = sum_lv.evaluate()

    # Test forward
    actual_alpha = gpytorch.posterior_strategy(actual).exact_posterior_alpha(train_mean, train_y)
    actual_mean = gpytorch.posterior_strategy(actual).exact_posterior_mean(test_mean, actual_alpha)
    sum_lv_alpha = sum_lv.posterior_strategy().exact_posterior_alpha(train_mean, train_y)
    sum_lv_mean = sum_lv.posterior_strategy().exact_posterior_mean(test_mean, sum_lv_alpha)
    assert(torch.norm(actual_mean.data - sum_lv_mean.data) < 1e-4)
Project: gpytorch | Author: jrg365
def test_gp_prior_and_likelihood():
    gp_model = ExactGPModel()
    gp_model.covar_module.initialize(log_lengthscale=0)  # This shouldn't really do anything now
    gp_model.mean_module.initialize(constant=1)  # Let's have a mean of 1
    gp_model.likelihood.initialize(log_noise=math.log(0.5))
    gp_model.eval()

    # Let's see how our model does, not conditioned on any data
    # The GP prior should predict a mean of 1 and a variance of 1.5
    # (kernel variance 1 plus observation noise 0.5)
    function_predictions = gp_model(train_x)
    assert(torch.norm(function_predictions.mean().data - 1) < 1e-5)
    assert(torch.norm(function_predictions.var().data - 1.5) < 1e-5)

    # The covariance between the furthest apart points should be 1/e
    least_covar = function_predictions.covar().data[0, -1]
    assert(math.fabs(least_covar - math.exp(-1)) < 1e-5)
Project: gpytorch | Author: jrg365
def test_backward_inv_mv():
    a = torch.Tensor([
        [5, -3, 0],
        [-3, 5, 0],
        [0, 0, 2],
    ])
    b = torch.ones(3, 3).fill_(2)
    c = torch.randn(3)
    actual_a_grad = -torch.ger(a.inverse().mul_(0.5).mv(torch.ones(3)), a.inverse().mul_(0.5).mv(c)) * 2 * 2
    actual_c_grad = (a.inverse() / 2).t().mv(torch.ones(3)) * 2

    a_var = Variable(a, requires_grad=True)
    c_var = Variable(c, requires_grad=True)
    out_var = a_var.mul(Variable(b))
    out_var = gpytorch.inv_matmul(out_var, c_var)
    out_var = out_var.sum() * 2
    out_var.backward()
    a_res = a_var.grad.data
    c_res = c_var.grad.data

    assert(torch.norm(actual_a_grad - a_res) < 1e-4)
    assert(torch.norm(actual_c_grad - c_res) < 1e-4)
Project: qpth | Author: locuslab
def get_grads(nBatch=1, nz=10, neq=1, nineq=3, Qscale=1.,
              Gscale=1., hscale=1., Ascale=1., bscale=1.):
    assert(nBatch == 1)
    npr.seed(1)
    L = np.random.randn(nz, nz)
    Q = Qscale * L.dot(L.T)
    G = Gscale * npr.randn(nineq, nz)
    # h = hscale*npr.randn(nineq)
    z0 = npr.randn(nz)
    s0 = npr.rand(nineq)
    h = G.dot(z0) + s0
    A = Ascale * npr.randn(neq, nz)
    # b = bscale*npr.randn(neq)
    b = A.dot(z0)

    p = npr.randn(nBatch, nz)
    # print(np.linalg.norm(p))
    truez = npr.randn(nBatch, nz)

    Q, p, G, h, A, b, truez = [x.astype(np.float64) for x in
                               [Q, p, G, h, A, b, truez]]
    _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p[0], G, h, A, b)

    grads = get_grads_torch(Q, p, G, h, A, b, truez)
    return [p[0], Q, G, h, A, b, truez], grads
Project: torchsample | Author: ncullen93
def th_matrixcorr(x, y):
    """
    return a correlation matrix between
    columns of x and columns of y.

    So, if X.size() == (1000,4) and Y.size() == (1000,5),
    then the result will be of size (4,5) with the
    (i,j) value equal to the pearsonr correlation coeff
    between column i in X and column j in Y
    """
    mean_x = th.mean(x, 0)
    mean_y = th.mean(y, 0)
    xm = x.sub(mean_x.expand_as(x))
    ym = y.sub(mean_y.expand_as(y))
    r_num = xm.t().mm(ym)
    r_den1 = th.norm(xm, 2, 0)
    r_den2 = th.norm(ym, 2, 0)
    r_den = r_den1.t().mm(r_den2)
    r_mat = r_num.div(r_den)
    return r_mat
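This snippet predates PyTorch 0.2, when reductions such as th.norm still kept the reduced dimension. A standalone sketch of the same computation with keepdim made explicit, checked against numpy (illustrative data):

import numpy as np
import torch

x = torch.randn(1000, 4)
y = torch.randn(1000, 5)
xm = x - x.mean(0, keepdim=True)
ym = y - y.mean(0, keepdim=True)
r_den = xm.norm(2, 0, keepdim=True).t().mm(ym.norm(2, 0, keepdim=True))
r = xm.t().mm(ym) / r_den
ref = np.corrcoef(x.numpy(), y.numpy(), rowvar=False)[:4, 4:]
print(np.allclose(r.numpy(), ref, atol=1e-4))  # True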
Project: fast-wavenet.pytorch | Author: dhpollack
def normalized_cross_correlation(self):
        w = self.weight.view(self.weight.size(0), -1)
        t_norm = torch.norm(w, p=2, dim=1)
        if self.in_channels == 1 and sum(self.kernel_size) == 1:
            ncc = w.squeeze() / torch.norm(self.t0_norm, p=2)
            ncc = ncc - self.start_ncc
            return ncc
        #mean = torch.mean(w, dim=1).unsqueeze(1).expand_as(w)
        mean = torch.mean(w, dim=1).unsqueeze(1) # 0.2 broadcasting
        t_factor = w - mean
        h_product = self.t0_factor * t_factor
        cov = torch.sum(h_product, dim=1) # (w.size(1) - 1)
        # had normalization code commented out
        denom = self.t0_norm * t_norm

        ncc = cov / denom
        ncc = ncc - self.start_ncc
        return ncc
Project: pytorch | Author: pytorch
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix
    norm.

    With default arguments normalizes over the second dimension with Euclidean
    norm.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
    """
    return input / input.norm(p, dim, True).clamp(min=eps).expand_as(input)
Project: pytorch-PersonReID | Author: huaijin-chen
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero. Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
Project: Rocket-Launching | Author: zhougr1993
def normalize(input, p=2, dim=1, eps=1e-12):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix
    norm.

    With default arguments normalizes over the second dimension with Euclidean
    norm.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation
        dim (int): the dimension to reduce
        eps (float): small value to avoid division by zero
    """
    return input / torch.norm(input, p, dim).clamp(min=eps).expand_as(input)
Project: restricted-boltzmann-machine-deep-belief-network-deep-boltzmann-machine-in-pytorch | Author: wmingwei
def joint_train(dbm, lr = 1e-3, epoch = 100, batch_size = 50, input_data = None,
                weight_decay = 0, k_positive = 10, k_negative = 10, alpha = [1e-1, 1e-1, 1]):
    u1 = nn.Parameter(torch.zeros(1))
    u2 = nn.Parameter(torch.zeros(1))
    # optimizer = optim.Adam(dbm.parameters(), lr = lr, weight_decay = weight_decay)
    optimizer = optim.SGD(dbm.parameters(), lr = lr, momentum = 0.5)
    train_set = torch.utils.data.dataset.TensorDataset(input_data, torch.zeros(input_data.size()[0]))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle=True)
    optimizer_u = optim.Adam([u1,u2], lr = lr/1000, weight_decay = weight_decay)
    for _ in range(epoch):
        print("training epoch %i with u1 = %.4f, u2 = %.4f"%(_, u1.data.numpy()[0], u2.data.numpy()[0]))
        for batch_idx, (data, target) in enumerate(train_loader):
            data = Variable(data)
            positive_phase, negative_phase= dbm(v_input = data, k_positive = k_positive, k_negative=k_negative, greedy = False)
            loss = (energy(dbm = dbm, layer = positive_phase)
                    - energy(dbm = dbm, layer = negative_phase)
                    + alpha[0] * torch.norm(torch.norm(dbm.W[0], 2, 1) - u1.repeat(dbm.W[0].size()[0], 1)) ** 2
                    + alpha[1] * torch.norm(torch.norm(dbm.W[1], 2, 1) - u2.repeat(dbm.W[1].size()[0], 1)) ** 2
                    + alpha[2] * (u1 - u2) ** 2)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            optimizer_u.step()
            optimizer_u.zero_grad()
Project: confusion | Author: abhimanyudubey
def PairwiseConfusion(features):
    batch_size = features.size(0)
    if batch_size % 2 != 0:
        raise Exception('Incorrect batch size provided')
    batch_left = features[:int(0.5*batch_size)]
    batch_right = features[int(0.5*batch_size):]
    loss = torch.norm((batch_left - batch_right).abs(), 2, 1).sum() / float(batch_size)

    return loss
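Usage sketch (illustrative shapes; note the .abs() is redundant under an L2 norm, since |x| and x have the same L2 norm):

import torch

features = torch.randn(8, 128)      # batch size must be even
print(PairwiseConfusion(features))  # scalar Euclidean confusion loss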
Project: pytorch-dist | Author: apaszke
def updateOutput(self, input):
        # lazy initialize buffers
        self._input = self._input or input.new()
        self._weight = self._weight or self.weight.new()
        self._expand = self._expand or self.output.new()
        self._expand2 = self._expand2 or self.output.new()
        self._repeat = self._repeat or self.output.new()
        self._repeat2 = self._repeat2 or self.output.new()

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        # y_j = || w_j - x || = || x - w_j ||
        assert input.dim() == 2

        batchSize = input.size(0)
        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # make the expanded tensor contiguous (requires lots of memory)
        self._repeat.resize_as_(self._expand).copy_(self._expand)

        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            # TODO: after adding new allocators this can be changed
            # requires lots of memory, but minimizes cudaMallocs and loops
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_(-1, self._repeat2)
        else:
            self._repeat.add_(-1, self._expand2)

        torch.norm(self.output, self._repeat, 2, 1)
        self.output.resize_(batchSize, outputSize)

        return self.output
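The buffer juggling above avoids repeated allocations in early torch; with broadcasting (PyTorch 0.2 and later) the same Euclidean layer reduces to a few lines. A sketch:

import torch

def euclidean_layer(input, weight):
    # input: (batchSize, inputSize); weight: (inputSize, outputSize)
    diff = input.unsqueeze(2) - weight.unsqueeze(0)  # (batchSize, inputSize, outputSize)
    return torch.norm(diff, 2, 1)                    # y[b, j] = || x_b - w_j ||

x = torch.randn(5, 3)
w = torch.randn(3, 7)
print(euclidean_layer(x, w).size())  # torch.Size([5, 7])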
Project: ParlAI | Author: facebookresearch
def normalize(data, p=2, dim=1, eps=1e-12):
    return data / torch.norm(data, p, dim).clamp(min=eps).expand_as(data)
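This one-liner relies on pre-0.2 behavior, where torch.norm kept the reduced dimension (otherwise expand_as fails); with a recent PyTorch the equivalent is a sketch like:

import torch

def normalize(data, p=2, dim=1, eps=1e-12):
    return data / torch.norm(data, p, dim, keepdim=True).clamp(min=eps)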
Project: pyro | Author: uber
def test_importance_guide(self):
        posterior = pyro.infer.Importance(self.model, guide=self.guide, num_samples=10000)
        marginal = pyro.infer.Marginal(posterior)
        posterior_samples = [marginal() for i in range(1000)]
        posterior_mean = torch.mean(torch.cat(posterior_samples))
        posterior_stddev = torch.std(torch.cat(posterior_samples), 0)
        self.assertEqual(0, torch.norm(posterior_mean - self.mu_mean).data[0],
                         prec=0.01)
        self.assertEqual(0, torch.norm(posterior_stddev - self.mu_stddev).data[0],
                         prec=0.1)
Project: pyro | Author: uber
def test_importance_prior(self):
        posterior = pyro.infer.Importance(self.model, guide=None, num_samples=10000)
        marginal = pyro.infer.Marginal(posterior)
        posterior_samples = [marginal() for i in range(1000)]
        posterior_mean = torch.mean(torch.cat(posterior_samples))
        posterior_stddev = torch.std(torch.cat(posterior_samples), 0)
        self.assertEqual(0, torch.norm(posterior_mean - self.mu_mean).data[0],
                         prec=0.01)
        self.assertEqual(0, torch.norm(posterior_stddev - self.mu_stddev).data[0],
                         prec=0.1)
Project: pyro | Author: uber
def eq(x, y, prec=1e-10):
    return (torch.norm(x - y).data[0] < prec)


# XXX name is a bit silly
Project: FlowNetPytorch | Author: ClementPinard
def EPE(input_flow, target_flow, sparse=False, mean=True):
    EPE_map = torch.norm(target_flow - input_flow, 2, 1)
    if sparse:
        EPE_map = EPE_map[target_flow != 0]
    if mean:
        return EPE_map.mean()
    else:
        return EPE_map.sum()
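Usage sketch on dense flow fields (illustrative shapes; the norm over dim 1 yields a per-pixel endpoint-error map):

import torch

pred = torch.randn(4, 2, 8, 8)    # (batch, 2, H, W) flow
target = torch.randn(4, 2, 8, 8)
print(EPE(pred, target))          # mean endpoint error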
Project: pmet | Author: bkj
def _penalty(self, A):
        return torch.norm(torch.mm(A, A.t()) - self.I) ** 2
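This is the orthogonality penalty ||A A^T - I||_F^2, commonly applied to self-attention weight matrices; a standalone sketch with hypothetical shapes:

import torch

A = torch.randn(5, 10)  # e.g. 5 attention hops over 10 positions
I = torch.eye(5)
penalty = torch.norm(A.mm(A.t()) - I) ** 2
print(penalty)          # zero only when the rows of A are orthonormal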
Project: densenet.pytorch | Author: bamos
def printnorm_f(self, input, output):
    print('{} norm: {}'.format(self.__class__.__name__, output.data.norm()))

# def printnorm_back(self, grad_input, grad_output):
    # import IPython, sys; IPython.embed(); sys.exit(-1)
    # print('{} grad_out norm: {}'.format(self.__class__.__name__, self.weight.grad.data.norm()))
Project: densenet.pytorch | Author: bamos
def printM(mods):
    for m in mods:
        if isinstance(m, legacy.nn.SpatialConvolution):
            print('Conv2d norm: {}'.format(torch.norm(m.output)))
        elif isinstance(m, legacy.nn.Linear):
            pass
        elif isinstance(m, legacy.nn.Concat) or \
             isinstance(m, legacy.nn.Sequential):
            printM(m.modules)

# printM(net_th.modules)
Project: densenet.pytorch | Author: bamos
def getM(mods):
    # collects gradient norms into a list l defined outside this snippet
    for m in mods:
        if isinstance(m, legacy.nn.SpatialConvolution):
            m.gradWeight[m.gradWeight.ne(m.gradWeight)] = 0  # zero out NaN gradients (NaN != NaN)
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Linear):
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Concat) or \
             isinstance(m, legacy.nn.Sequential):
            getM(m.modules)
Project: pytorch | Author: tylergenter
def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        torch.norm(self.weight, 2, 1, out=self._weightNorm).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
Project: pytorch | Author: tylergenter
def updateOutput(self, input):
        # lazy initialize buffers
        if self._input is None:
            self._input = input.new()
        if self._weight is None:
            self._weight = self.weight.new()
        if self._expand is None:
            self._expand = self.output.new()
        if self._expand2 is None:
            self._expand2 = self.output.new()
        if self._repeat is None:
            self._repeat = self.output.new()
        if self._repeat2 is None:
            self._repeat2 = self.output.new()

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        # y_j = || w_j - x || = || x - w_j ||
        assert input.dim() == 2

        batchSize = input.size(0)
        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # make the expanded tensor contiguous (requires lots of memory)
        self._repeat.resize_as_(self._expand).copy_(self._expand)

        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            # TODO: after adding new allocators this can be changed
            # requires lots of memory, but minimizes cudaMallocs and loops
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_(-1, self._repeat2)
        else:
            self._repeat.add_(-1, self._expand2)

        torch.norm(self._repeat, 2, 1, out=self.output)
        self.output.resize_(batchSize, outputSize)

        return self.output
Project: dong_iccv_2017 | Author: woozzu
def pairwise_ranking_loss(margin, x, v):
    zero = torch.zeros(1)
    diag_margin = margin * torch.eye(x.size(0))
    if not args.no_cuda:
        zero, diag_margin = zero.cuda(), diag_margin.cuda()
    zero, diag_margin = Variable(zero), Variable(diag_margin)

    x = x / torch.norm(x, 2, 1, keepdim=True)
    v = v / torch.norm(v, 2, 1, keepdim=True)
    prod = torch.matmul(x, v.transpose(0, 1))
    diag = torch.diag(prod)
    for_x = torch.max(zero, margin - torch.unsqueeze(diag, 1) + prod) - diag_margin
    for_v = torch.max(zero, margin - torch.unsqueeze(diag, 0) + prod) - diag_margin
    return (torch.sum(for_x) + torch.sum(for_v)) / x.size(0)
Project: pytorch-coriander | Author: hughperkins
def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1 and x2:

        .. math ::
            \Vert x \Vert _p := \left( \sum_{i=1}^n  \vert x_i \vert ^ p \right) ^ {1/p}

        Args:
            x1: first input tensor
            x2: second input tensor
            p: the norm degree. Default: 2

        Shape:
            - Input: :math:`(N, D)` where `D = vector dimension`
            - Output: :math:`(N, 1)`

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
Project: pytorch-coriander | Author: hughperkins
def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
Project: srnn-pytorch | Author: vvanirudh
def getMagnitudeAndDirection(*args):
    '''
    Gets the magnitude and direction of the vector corresponding to positions
    params:
    args: Can be a list of two positions or the two positions themselves (variable-length argument)
    '''
    if len(args) == 1:
        pos_list = args[0]
        pos_i = pos_list[0]
        pos_j = pos_list[1]

        vector = np.array(pos_i) - np.array(pos_j)
        magnitude = np.linalg.norm(vector)
        if abs(magnitude) > 1e-4:
            direction = vector / magnitude
        else:
            direction = vector
        return [magnitude] + direction.tolist()

    elif len(args) == 2:
        pos_i = args[0]
        pos_j = args[1]

        ret = torch.zeros(3)
        vector = pos_i - pos_j
        magnitude = torch.norm(vector)
        if abs(magnitude) > 1e-4:
            direction = vector / magnitude
        else:
            direction = vector

        ret[0] = magnitude
        ret[1:3] = direction
        return ret

    else:
        raise NotImplementedError('getMagnitudeAndDirection: Function signature incorrect')
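Usage sketch for the two-argument form (illustrative values; assumes torch is imported as in the snippet):

import torch

pos_i = torch.Tensor([3.0, 4.0])
pos_j = torch.Tensor([0.0, 0.0])
print(getMagnitudeAndDirection(pos_i, pos_j))  # tensor([5.0000, 0.6000, 0.8000])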
Project: srnn-pytorch | Author: vvanirudh
def get_mean_error(ret_nodes, nodes, assumedNodesPresent, trueNodesPresent):
    '''
    Computes average displacement error
    Parameters
    ==========

    ret_nodes : A tensor of shape pred_length x numNodes x 2
    Contains the predicted positions for the nodes

    nodes : A tensor of shape pred_length x numNodes x 2
    Contains the true positions for the nodes

    assumedNodesPresent : A list of the nodeIDs assumed to be present

    trueNodesPresent : A list of lists, of size pred_length
    Each list contains the nodeIDs of the nodes actually present at that time-step

    Returns
    =======

    Error : Mean euclidean distance between predicted trajectory and the true trajectory
    '''
    pred_length = ret_nodes.size()[0]
    error = torch.zeros(pred_length).cuda()
    counter = 0

    for tstep in range(pred_length):
        counter = 0
        for nodeID in assumedNodesPresent:

            if nodeID not in trueNodesPresent[tstep]:
                continue

            pred_pos = ret_nodes[tstep, nodeID, :]
            true_pos = nodes[tstep, nodeID, :]

            error[tstep] += torch.norm(pred_pos - true_pos, p=2)
            counter += 1

        if counter != 0:
            error[tstep] = error[tstep] / counter

    return torch.mean(error)
Project: srnn-pytorch | Author: vvanirudh
def get_final_error(ret_nodes, nodes, assumedNodesPresent, trueNodesPresent):
    '''
    Computes final displacement error
    Parameters
    ==========

    ret_nodes : A tensor of shape pred_length x numNodes x 2
    Contains the predicted positions for the nodes

    nodes : A tensor of shape pred_length x numNodes x 2
    Contains the true positions for the nodes

    assumedNodesPresent : A list of the nodeIDs assumed to be present

    trueNodesPresent : A list of lists, of size pred_length
    Each list contains the nodeIDs of the nodes actually present at that time-step

    Returns
    =======

    Error : Mean final euclidean distance between predicted trajectory and the true trajectory
    '''
    pred_length = ret_nodes.size()[0]
    error = 0
    counter = 0

    # Last time-step
    tstep = pred_length - 1
    for nodeID in assumedNodesPresent:

        if nodeID not in trueNodesPresent[tstep]:
            continue

        pred_pos = ret_nodes[tstep, nodeID, :]
        true_pos = nodes[tstep, nodeID, :]

        error += torch.norm(pred_pos - true_pos, p=2)
        counter += 1

    if counter != 0:
        error = error / counter

    return error
Project: pytorch-geometric-gan | Author: lim0606
def weight_proj_l2norm(param):
    norm = torch.norm(param.data, p=2) + 1e-8
    coeff = min(opt.wproj_upper, 1.0/norm)
    param.data.mul_(coeff)

# custom weights initialization called on netG and netD
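A standalone sketch of the same in-place rescaling as weight_proj_l2norm, with a local constant standing in for the global opt.wproj_upper used above:

import torch

wproj_upper = 1.0  # hypothetical stand-in for opt.wproj_upper
param = torch.randn(64, 64)
norm = torch.norm(param, p=2) + 1e-8
param.mul_(min(wproj_upper, 1.0 / norm))
print(torch.norm(param, p=2))  # at most 1.0 for this setting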
Project: PoseNet | Author: bellatoris
def pose_loss(input, target):
    x = torch.norm(input-target, dim=1)
    x = torch.mean(x)

    return x
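Usage sketch (illustrative shapes: a batch of predicted and ground-truth 3-D translations):

import torch

pred = torch.randn(8, 3)
target = torch.randn(8, 3)
print(pose_loss(pred, target))  # mean L2 distance over the batch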
Project: PoseNet | Author: bellatoris
def rotation_error(input, target):
    x1 = torch.norm(input, dim=1)
    x2 = torch.norm(target, dim=1)

    x1 = torch.div(input, torch.stack((x1, x1, x1, x1), dim=1))
    x2 = torch.div(target, torch.stack((x2, x2, x2, x2), dim=1))
    d = torch.abs(torch.sum(x1 * x2, dim=1))
    theta = 2 * torch.acos(d) * 180/math.pi
    theta = torch.mean(theta)

    return theta
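Usage sketch: the function normalizes each 4-vector to a unit quaternion and returns the mean angular error in degrees (assumes math is imported; production code may want to clamp d before acos, since rounding can push it past 1):

import torch

q1 = torch.randn(8, 4)
q2 = torch.randn(8, 4)
print(rotation_error(q1, q2))  # mean angle in degrees, in [0, 180]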
Project: PoseNet | Author: bellatoris
def pose_loss(input, target):
    """Gets l2 loss between input and target"""
    x = torch.norm(input-target, dim=1)
    x = torch.mean(x)

    return x
Project: PoseNet | Author: bellatoris
def rotation_error(input, target):
    """Gets cosine distance between input and target """
    x1 = torch.norm(input, dim=1)
    x2 = torch.norm(target, dim=1)

    x1 = torch.div(input, torch.stack((x1, x1, x1, x1), dim=1))
    x2 = torch.div(target, torch.stack((x2, x2, x2, x2), dim=1))
    d = torch.abs(torch.sum(x1 * x2, dim=1))
    theta = 2 * torch.acos(d) * 180/math.pi
    theta = torch.mean(theta)

    return theta
Project: PoseNet | Author: bellatoris
def forward(self, inpt):
        batch_size = self.batch_size
        f0 = self.features(inpt[:, 0])
        f0 = f0.view(batch_size, -1)

        f1 = self.features(inpt[:, 1])
        f1 = f1.view(batch_size, -1)

        # f2 = self.features(inpt[:, 2])
        # f2 = f2.view(batch_size, -1)
        #
        # f3 = self.features(inpt[:, 3])
        # f3 = f3.view(batch_size, -1)
        #
        # f4 = self.features(inpt[:, 4])
        # f4 = f4.view(batch_size, -1)
        #
        # f = torch.stack((f0, f1, f2, f3, f4), dim=0).view(self.seq_length, batch_size, -1)

        f = torch.cat((f0, f1), dim=1)

        # _, hn = self.rnn(f, self.hidden)
        # hn = hn[self.gru_layer - 1].view(batch_size, -1)
        # hn = self.relu(hn)
        # hn = self.dropout(hn)
        # hn = self.regressor(hn)
        hn = self.regressor(f)

        trans = self.trans_regressor(hn)

        # trans_norm = torch.norm(trans, dim=1)
        # trans = torch.div(trans, torch.cat((trans_norm, trans_norm, trans_norm), dim=1))

        scale = self.scale_regressor(hn)
        rotation = self.rotation_regressor(hn)

        return trans, scale, rotation
Project: pytorch | Author: ezyang
def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1 and x2:

    .. math ::
        \Vert x \Vert _p := \left( \sum_{i=1}^n  \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
Project: pytorch | Author: ezyang
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.cosine_similarity(input1, input2)
        >>> print(output)
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
Project: pytorch | Author: ezyang
def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
Project: gpytorch | Author: jrg365
def test_computes_radial_basis_function():
    a = torch.Tensor([4, 2, 8]).view(3, 1)
    b = torch.Tensor([0, 2, 2]).view(3, 1)
    lengthscale = 2

    kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
    kernel.eval()
    actual = torch.Tensor([
        [16, 4, 4],
        [4, 0, 0],
        [64, 36, 36],
    ]).mul_(-1).div_(lengthscale).exp()

    res = kernel(Variable(a), Variable(b)).data
    assert(torch.norm(res - actual) < 1e-5)
Project: gpytorch | Author: jrg365
def test_inv_matmul():
    mat = torch.randn(4, 4)
    res = make_mul_lazy_var()[0].inv_matmul(Variable(mat))
    assert torch.norm(res.data - (t1_t2_t3_eval + added_diag.diag()).inverse().matmul(mat)) < 1e-3
Project: gpytorch | Author: jrg365
def test_matmul_deterministic():
    mat = torch.randn(4, 4)
    res = make_mul_lazy_var()[0].matmul(Variable(mat))
    assert torch.norm(res.data - (t1_t2_t3_eval + added_diag.diag()).matmul(mat)) < 1e-3
Project: gpytorch | Author: jrg365
def test_matmul_approx():
    class KissGPModel(gpytorch.GridInducingPointModule):
        def __init__(self):
            super(KissGPModel, self).__init__(grid_size=300, grid_bounds=[(0, 1)])
            self.mean_module = ConstantMean(constant_bounds=(-1, 1))
            covar_module = RBFKernel(log_lengthscale_bounds=(-100, 100))
            covar_module.log_lengthscale.data = torch.FloatTensor([-2])
            self.covar_module = covar_module

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return GaussianRandomVariable(mean_x, covar_x)

    model = KissGPModel()

    n = 100
    d = 4

    lazy_var_list = []
    lazy_var_eval_list = []

    for i in range(d):
        x = Variable(torch.rand(n))
        y = Variable(torch.rand(n))
        model.condition(x, y)
        toeplitz_var = model(x).covar()
        lazy_var_list.append(toeplitz_var)
        lazy_var_eval_list.append(toeplitz_var.evaluate().data)

    mul_lazy_var = MulLazyVariable(*lazy_var_list, matmul_mode='approximate', max_iter=30)
    mul_lazy_var_eval = torch.ones(n, n)
    for i in range(d):
        mul_lazy_var_eval *= (lazy_var_eval_list[i].matmul(torch.eye(lazy_var_eval_list[i].size()[0])))

    vec = torch.randn(n)

    actual = mul_lazy_var_eval.matmul(vec)
    res = mul_lazy_var.matmul(Variable(vec)).data

    assert torch.norm(actual - res) / torch.norm(actual) < 1e-2
Project: gpytorch | Author: jrg365
def test_trace_log_det_quad_form():
    mu_diffs_var = Variable(torch.arange(1, 5, 1))
    chol_covar_1_var = Variable(torch.eye(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    diag_var = Variable(torch.Tensor([1]), requires_grad=True)
    diag_var_expand = diag_var.expand(4)
    toeplitz_1 = ToeplitzLazyVariable(c1_var).evaluate()
    kronecker_product = KroneckerProductLazyVariable(c2_var).evaluate()
    toeplitz_2 = ToeplitzLazyVariable(c3_var).evaluate()
    actual = toeplitz_1 * kronecker_product * toeplitz_2 + diag_var_expand.diag()

    # Actual case
    mul_lv, diag = make_mul_lazy_var()
    t1, t2, t3 = mul_lv.lazy_vars

    # Test forward
    tldqf_res = mul_lv.trace_log_det_quad_form(mu_diffs_var, chol_covar_1_var)
    tldqf_actual = gpytorch._trace_logdet_quad_form_factory_class()(mu_diffs_var, chol_covar_1_var, actual)
    assert(math.fabs(tldqf_res.data.squeeze()[0] - tldqf_actual.data.squeeze()[0]) < 1.5)

    # Test backwards
    tldqf_res.backward()
    tldqf_actual.backward()
    assert((c1_var.grad.data - t1.column.grad.data).abs().norm() / c1_var.grad.data.abs().norm() < 1e-1)
    assert((c2_var.grad.data - t2.columns.grad.data).abs().norm() / c2_var.grad.data.abs().norm() < 1e-1)
    assert((c3_var.grad.data - t3.column.grad.data).abs().norm() / c3_var.grad.data.abs().norm() < 1e-1)
    assert((diag_var.grad.data - diag.grad.data).abs().norm() / diag_var.grad.data.abs().norm() < 1e-1)
Project: gpytorch | Author: jrg365
def test_getitem():
    res = make_mul_lazy_var()[0][1, 1]
    assert torch.norm(res.evaluate().data - (t1_t2_t3_eval + torch.ones(4))[1, 1]) < 1e-3