Python torch.autograd module: gradcheck() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.autograd.gradcheck().
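
Before the per-project samples, here is a minimal usage sketch (a hypothetical my_op example, not taken from any project below): gradcheck() estimates the Jacobian by finite differences and compares it against the analytical gradients computed by autograd, so double-precision inputs with requires_grad=True are recommended.

# Minimal gradcheck() usage sketch (hypothetical example, not from the projects below).
import torch
from torch.autograd import Variable, gradcheck

def my_op(x, y):
    # Any differentiable function of the inputs works here.
    return (x * y + x.sin()).sum()

# Double precision keeps the finite-difference estimate accurate.
x = Variable(torch.randn(3, 4).double(), requires_grad=True)
y = Variable(torch.randn(3, 4).double(), requires_grad=True)

# Returns True when analytical and numerical Jacobians agree within eps/atol.
assert gradcheck(my_op, (x, y), eps=1e-6, atol=1e-4)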

Project: nnmnkwii    Author: r9y9    | Project source | File source
def test_mlpg_gradcheck():
    # MLPG is performed dimension by dimension, so static_dim 1 is enough;
    # 2 is used just in case.
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = Variable(torch.rand(T, static_dim * len(windows)),
                         requires_grad=True)
        inputs = (means,)

        # Unit variances case
        variances = torch.ones(static_dim * len(windows)
                               ).expand(T, static_dim * len(windows))

        assert gradcheck(MLPG(variances, windows),
                         inputs, eps=1e-3, atol=1e-3)

        # Rand variances case
        variances = torch.rand(static_dim * len(windows)
                               ).expand(T, static_dim * len(windows))

        assert gradcheck(MLPG(variances, windows),
                         inputs, eps=1e-3, atol=1e-3)
Project: pytorch    Author: ezyang    | Project source | File source
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
    # Call the assert method rather than returning a bool, since it's nicer
    # to know whether the failure came from gradcheck or gradgradcheck.
    test_case.assertTrue(gradcheck(apply_fn, inputs))
    dummy_out = apply_fn(*inputs)

    def randn_match_cpu_gpu(x):
        a = torch.randn(x.size())
        if x.is_cuda:
            a = a.cuda(x.get_device())
        return a

    if isinstance(dummy_out, tuple):
        grad_y = tuple(Variable(randn_match_cpu_gpu(x), requires_grad=x.requires_grad)
                       for x in dummy_out if isinstance(x, Variable))
    else:
        grad_y = (Variable(randn_match_cpu_gpu(dummy_out), requires_grad=dummy_out.requires_grad),)

    test_case.assertTrue(gradgradcheck(apply_fn, inputs, grad_y,))
Project: pytorch    Author: pytorch    | Project source | File source
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
    # Call the assert method rather than returning a bool, since it's nicer
    # to know whether the failure came from gradcheck or gradgradcheck.
    test_case.assertTrue(gradcheck(apply_fn, inputs))
    dummy_out = apply_fn(*inputs)

    def randn_match_cpu_gpu(x):
        a = torch.randn(x.size())
        if x.is_cuda:
            a = a.cuda(x.get_device())
        return a

    if isinstance(dummy_out, tuple):
        grad_y = tuple(Variable(randn_match_cpu_gpu(x), requires_grad=x.requires_grad)
                       for x in dummy_out if isinstance(x, Variable))
    else:
        grad_y = (Variable(randn_match_cpu_gpu(dummy_out), requires_grad=dummy_out.requires_grad),)

    test_case.assertTrue(gradgradcheck(apply_fn, inputs, grad_y,))
Project: pytorch    Author: pytorch    | Project source | File source
def test_cosine_similarity(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y), (input1, input2)))

        input1 = Variable(torch.randn(4, 5, 6), requires_grad=True)
        input2 = Variable(torch.randn(4, 5, 6), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=0), (input1, input2)))
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=-1), (input1, input2)))

        # Check cosine_similarity input/output shapes
        input_size = (1, 3, 2, 1)
        expected_size = (1, 2, 1)
        input1 = Variable(torch.randn(input_size), requires_grad=True)
        input2 = Variable(torch.randn(input_size), requires_grad=True)
        self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)
Project: pyinn    Author: szagoruyko    | Project source | File source
def test_conv2d_depthwise(self):
        n = 6
        x = Variable(torch.randn(1,n,5,5).double().cuda(), requires_grad=True)
        w = Variable(torch.randn(n,1,3,3).double().cuda(), requires_grad=True)
        y_fast = P.conv2d_depthwise(x, w, padding=1)
        y_ref = F.conv2d(x, w, padding=1, groups=n)
        go = torch.randn(y_fast.size()).double().cuda()

        self.assertLess((y_fast - y_ref).data.abs().max(), 1e-9)

        x.requires_grad = True
        w.requires_grad = True
        y_fast.backward(go)
        gx_fast = x.grad.data.clone()
        gw_fast = w.grad.data.clone()

        x.grad.data.zero_()
        w.grad.data.zero_()
        y_ref.backward(go)
        gx_ref = x.grad.data.clone()
        gw_ref = w.grad.data.clone()

        self.assertTrue(gradcheck(partial(P.conv2d_depthwise, padding=1), (x, w,)))
Project: nnmnkwii    Author: r9y9    | Project source | File source
def test_modspec_gradcheck():
    static_dim = 12
    T = 16
    torch.manual_seed(1234)
    inputs = (Variable(torch.rand(T, static_dim), requires_grad=True),)
    n = 16

    for norm in [None, "ortho"]:
        assert gradcheck(ModSpec(n=n, norm=norm), inputs, eps=1e-4, atol=1e-4)
Project: nnmnkwii    Author: r9y9    | Project source | File source
def test_modspec_gradcheck_large_n():
    static_dim = 12
    T = 16
    torch.manual_seed(1234)
    inputs = (Variable(torch.rand(T, static_dim), requires_grad=True),)

    for n in [16, 32]:
        for norm in [None, "ortho"]:
            assert gradcheck(ModSpec(n=n, norm=norm),
                             inputs, eps=1e-4, atol=1e-4)
Project: ParlAI    Author: facebookresearch    | Project source | File source
def test_label_smoothing(self):
        input = Variable(torch.randn(3, 5), requires_grad=True)
        idx = torch.rand(3) * 4
        target = Variable(idx.long())
        criterion = LabelSmoothedNLLLoss()
        self.assertTrue(gradcheck(
            lambda x, y: criterion.apply(x, y, 0.1, 2, None), (input, target)
        ))
        weights = torch.ones(5)
        weights[2] = 0
        self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, weights), (input, target)))
        self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, None), (input, target)))
Project: fairseq-py    Author: facebookresearch    | Project source | File source
def test_label_smoothing(self):
        input = Variable(torch.randn(3, 5), requires_grad=True)
        idx = torch.rand(3) * 4
        target = Variable(idx.long())
        criterion = LabelSmoothedNLLLoss()
        self.assertTrue(gradcheck(
            lambda x, y: criterion.apply(x, y, 0.1, 2, None), (input, target)
        ))
        weights = torch.ones(5)
        weights[2] = 0
        self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, weights), (input, target)))
        self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, None), (input, target)))
Project: pytorch    Author: tylergenter    | Project source | File source
def test_pad(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))

        inputs = Variable(torch.randn(1, 2, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))
Project: pytorch    Author: tylergenter    | Project source | File source
def test_pairwise_distance(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
Project: pytorch    Author: tylergenter    | Project source | File source
def test_triplet_margin_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3), (input1, input2, input3)))
Project: pytorch    Author: tylergenter    | Project source | File source
def test_triplet_margin_swap_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3, swap=True), (input1, input2, input3)))
Project: pytorch    Author: tylergenter    | Project source | File source
def test_bilinear(self):
        module = nn.Bilinear(10, 10, 8)
        module2 = legacy.Bilinear(10, 10, 8)

        module2.weight.copy_(module.weight.data)
        module2.bias.copy_(module.bias.data)

        input1 = torch.randn(4, 10)
        input2 = torch.randn(4, 10)

        output = module(Variable(input1), Variable(input2))
        output2 = module2.forward([input1, input2])

        input1_1 = Variable(input1, requires_grad=True)
        input2_1 = Variable(input2, requires_grad=True)

        output3 = module(input1_1, input2_1)
        grad = torch.randn(*output3.size())
        output3.backward(grad)
        gi1 = input1_1.grad.data.clone()
        gi2 = input2_1.grad.data.clone()

        self.assertEqual(output.data, output2)
        # TODO: this assertion is incorrect, fix needed
        # self.assertEqual([gi1, gi2], output3)

        self.assertTrue(gradcheck(lambda x1, x2: F.bilinear(x1, x2, module.weight, module.bias), (input1_1, input2_1)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_pad(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))

        inputs = Variable(torch.randn(1, 2, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_normalize(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_pairwise_distance(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_triplet_margin_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3), (input1, input2, input3)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_triplet_margin_swap_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3, swap=True), (input1, input2, input3)))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_bilinear(self):
        module = nn.Bilinear(10, 10, 8)
        module2 = legacy.Bilinear(10, 10, 8)

        module2.weight.copy_(module.weight.data)
        module2.bias.copy_(module.bias.data)

        input1 = torch.randn(4, 10)
        input2 = torch.randn(4, 10)

        output = module(Variable(input1), Variable(input2))
        output2 = module2.forward([input1, input2])

        input1_1 = Variable(input1, requires_grad=True)
        input2_1 = Variable(input2, requires_grad=True)

        output3 = module(input1_1, input2_1)
        grad = torch.randn(*output3.size())
        output3.backward(grad)
        gi1 = input1_1.grad.data.clone()
        gi2 = input2_1.grad.data.clone()

        self.assertEqual(output.data, output2)
        # TODO: this assertion is incorrect, fix needed
        # self.assertEqual([gi1, gi2], output3)

        self.assertTrue(gradcheck(lambda x1, x2: F.bilinear(x1, x2, module.weight, module.bias), (input1_1, input2_1)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_pad(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,))
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,))
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))

        inputs = Variable(torch.randn(1, 2, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_normalize(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_pairwise_distance(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_triplet_margin_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3), (input1, input2, input3)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_cosine_similarity(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y), (input1, input2)))

        input1 = Variable(torch.randn(4, 5, 6), requires_grad=True)
        input2 = Variable(torch.randn(4, 5, 6), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=0), (input1, input2)))
        self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=-1), (input1, input2)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_affine_grid(self):
        # test known input on CPU
        input = Variable(torch.arange(1, 7).view(1, 2, 3))
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]))
        groundtruth = torch.Tensor(
            [[[0, -3], [2, 5]], [[4, 7], [6, 15]]]).view(1, 2, 2, 2)
        self.assertEqual(output.data, groundtruth)

        # do gradcheck
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        inp = Variable(torch.randn(N, 2, 3), requires_grad=True)
        self.assertTrue(gradcheck(lambda inp: F.affine_grid(inp, sz), (inp,)))

        # test CPU against CUDA
        if TEST_CUDNN:
            input_cpu = Variable(torch.randn(N, 2, 3), requires_grad=True)
            out_cpu = F.affine_grid(input_cpu, sz)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = Variable(input_cpu.data.cuda(), requires_grad=True)
            out_cuda = F.affine_grid(input_gpu, sz)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
Project: pytorch    Author: ezyang    | Project source | File source
def test_upsamplingNearest1d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_upsamplingLinear1d(self):
        m = nn.Upsample(size=4, mode='linear')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='linear'), (input,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_upsamplingNearest2d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_upsamplingNearest3d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: ezyang    | Project source | File source
def test_upsamplingTrilinear3d(self):
        m = nn.Upsample(size=4, mode='trilinear')
        in_t = torch.ones(1, 1, 2, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='trilinear'), (input,)))
Project: pytorch    Author: ezyang    | Project source | File source
def run_grad_and_gradgrad_checks(test_case, test_name, apply_method, output_variable, input_variables):
    test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION))

    grad_y = generate_gradoutput(output_variable, non_contiguous=True)
    gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
    if gradgradcheck_precision_override is not None:
        atol = gradgradcheck_precision_override['atol']
        rtol = gradgradcheck_precision_override['rtol']
        test_case.assertTrue(gradgradcheck(apply_method, input_variables, grad_y, atol=atol, rtol=rtol))
    else:
        test_case.assertTrue(gradgradcheck(apply_method, input_variables, grad_y,))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def test_aggregate():
    B,N,K,D = 2,3,4,5
    A = Variable(torch.cuda.DoubleTensor(B,N,K).uniform_(-0.5,0.5), 
        requires_grad=True)
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (A, X, C)
    test = gradcheck(encoding.functions.aggregate, input, eps=1e-6, atol=1e-4)
    print('Testing aggregate(): {}'.format(test))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def test_scaledL2():
    B,N,K,D = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,N,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    C = Variable(torch.cuda.DoubleTensor(K,D).uniform_(-0.5,0.5), 
        requires_grad=True)
    S = Variable(torch.cuda.DoubleTensor(K).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X, C, S)
    test = gradcheck(encoding.functions.scaledL2, input, eps=1e-6, atol=1e-4)
    print('Testing scaledL2(): {}'.format(test))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def test_encoding():
    B,C,H,W,K = 2,3,4,5,6
    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X,)
    layer = encoding.nn.Encoding(C,K).double().cuda()
    test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
    print('Testing encoding(): {}'.format(test))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def test_sum_square():
    B,C,H,W = 2,3,4,5
    X = Variable(torch.cuda.DoubleTensor(B,C,H,W).uniform_(-0.5,0.5), 
        requires_grad=True)
    input = (X,)
    test = gradcheck(encoding.functions.sum_square, input, eps=1e-6, atol=1e-4)
    print('Testing sum_square(): {}'.format(test))
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def test_dilated_avgpool():
    X = Variable(torch.cuda.FloatTensor(1,3,75,75).uniform_(-0.5,0.5))
    input = (X,)
    layer = encoding.nn.DilatedAvgPool2d(kernel_size=2, stride=1, padding=0, dilation=2)
    test = gradcheck(layer, input, eps=1e-6, atol=1e-4)
    print('Testing dilatedavgpool2d(): {}'.format(test))
Project: pytorch    Author: pytorch    | Project source | File source
def test_pad(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,))
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,))
        _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))

        inputs = Variable(torch.randn(1, 2, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))
Project: pytorch    Author: pytorch    | Project source | File source
def test_normalize(self):
        inputs = Variable(torch.randn(1, 3, 4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
Project: pytorch    Author: pytorch    | Project source | File source
def test_elu_inplace_view(self):
        v = Variable(torch.Tensor([1.0, -1.0, 1.0, -1.0]), requires_grad=True)

        def func(root):
            x = root.clone()
            view = x.narrow(0, 1, 2)
            res = F.elu(view, inplace=True)
            self.assertIs(res, view)
            return x

        gradcheck(func, [v])
        gradgradcheck(func, [v])
Project: pytorch    Author: pytorch    | Project source | File source
def test_relu_inplace_view(self):
        v = Variable(torch.Tensor([1.0, -1.0, 1.0, -1.0]), requires_grad=True)

        def func(root):
            x = root.clone()
            view = x.narrow(0, 1, 2)
            res = F.relu(view, inplace=True)
            self.assertIs(res, view)
            return x

        gradcheck(func, [v])
        gradgradcheck(func, [v])
Project: pytorch    Author: pytorch    | Project source | File source
def test_hardtanh_inplace_gradgrad(self):
        v = Variable(torch.randn(8), requires_grad=True)

        def func(root):
            x = root.clone()
            return F.hardtanh(x, inplace=True)

        gradcheck(func, [v])
        gradgradcheck(func, [v])
Project: pytorch    Author: pytorch    | Project source | File source
def test_pairwise_distance(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
Project: pytorch    Author: pytorch    | Project source | File source
def test_triplet_margin_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3), (input1, input2, input3)))
Project: pytorch    Author: pytorch    | Project source | File source
def test_triplet_margin_swap_loss(self):
        input1 = Variable(torch.randn(4, 4), requires_grad=True)
        input2 = Variable(torch.randn(4, 4), requires_grad=True)
        input3 = Variable(torch.randn(4, 4), requires_grad=True)
        self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
            x1, x2, x3, swap=True), (input1, input2, input3)))
Project: pytorch    Author: pytorch    | Project source | File source
def test_upsamplingNearest1d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
Project: pytorch    Author: pytorch    | Project source | File source
def test_upsamplingLinear1d(self):
        m = nn.Upsample(size=4, mode='linear')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='linear'), (input,))
Project: pytorch    Author: pytorch    | Project source | File source
def test_upsamplingNearest2d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        self.assertEqual(
            F.upsample(input, 4, mode='nearest'),
            F.upsample(input, scale_factor=2, mode='nearest'))
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
        gradgradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
Project: pytorch    Author: pytorch    | Project source | File source
def test_upsamplingBilinear2d(self):
        m = nn.Upsample(size=4, mode='bilinear')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='bilinear'), [input])
Project: pytorch    Author: pytorch    | Project source | File source
def test_upsamplingNearest3d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])