Python torch module: randn() code examples

We extracted the following 50 code examples from open-source Python projects to show how torch.randn() is used in practice.
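As a quick orientation before the project excerpts, here is a minimal standalone sketch of the call itself (standard PyTorch API; the manual_seed call just makes the draws reproducible):

import torch

torch.manual_seed(0)            # fix the RNG so the samples are reproducible
x = torch.randn(2, 3)           # 2x3 tensor of i.i.d. draws from N(0, 1)
y = torch.randn(4)              # 1-D tensor of length 4
print(x.size(), y.size())       # torch.Size([2, 3]) torch.Size([4])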

Project: tensorboard    Author: dmlc
def make_sprite(label_img, save_path):
    import math
    import os
    import torch
    import torchvision
    # this ensures the sprite image has the correct dimensions, as described in
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))

    # augment images so that #images equals nrow*nrow
    label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)

    # Dirty fix: no pixels are appended by the make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
    xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
    if xx.size(2) == 33:
        sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
        sprite = sprite[:, 1:, 1:]
        torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
    else:
        torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)
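The padding arithmetic in make_sprite is easy to verify in isolation. A short sketch, assuming a batch of 10 images of shape 3x32x32:

import math
import torch

label_img = torch.rand(10, 3, 32, 32)            # assumed example batch
nrow = int(math.ceil(label_img.size(0) ** 0.5))  # 4, so the sprite grid is 4x4
filler = torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:])
label_img = torch.cat((label_img, filler * 255), 0)
print(nrow, label_img.size())                    # 4 torch.Size([16, 3, 32, 32])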
Project: pytorch-dist    Author: apaszke
def test_parallel_apply(self):
        l1 = nn.Linear(10, 5).float().cuda(0)
        l2 = nn.Linear(10, 5).float().cuda(1)
        i1 = Variable(torch.randn(2, 10).float().cuda(0))
        i2 = Variable(torch.randn(2, 10).float().cuda(1))
        expected1 = l1(i1).data
        expected2 = l2(i2).data
        inputs = (i1, i2)
        modules = (l1, l2)
        expected_outputs = (expected1, expected2)
        outputs = dp.parallel_apply(modules, inputs)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)

        inputs = (i1, Variable(i2.data.new()))
        expected_outputs = (expected1, expected2.new())
Project: pytorch-dist    Author: apaszke
def _test_preserve_sharing(self):
        def do_test():
            x = torch.randn(5, 5)
            data = [x.storage(), x.storage()[1:4], x, x[2], x[:,1]]
            q = mp.Queue()
            q.put(data)
            new_data = q.get()
            self.assertEqual(new_data, data, 0)
            storage_cdata = data[0]._cdata
            self.assertEqual(new_data[0]._cdata, storage_cdata)
            for t in new_data[2:]:
                self.assertEqual(t.storage()._cdata, storage_cdata)
            # TODO: enable after fixing #46
            # new_data[0].fill_(10)
            # self.assertEqual(new_data[1], new_data[0][1:4], 0)

        with leak_checker(self):
            do_test()
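What the assertions above check is that a tensor and views of it still alias a single storage after a queue round-trip. A minimal same-process illustration of that property (a sketch, not the project's test harness):

import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    x = torch.randn(5, 5)
    q = mp.Queue()
    q.put((x, x[2]))                  # a tensor and a view into its row 2
    y, row = q.get()
    y.fill_(10)                       # write through the shared storage
    print(torch.equal(row, y[2]))     # True: the view still aliases y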
Project: pytorch-dist    Author: apaszke
def test_serialization(self):
        x = torch.randn(5, 5).cuda()
        y = torch.IntTensor(2, 5).fill_(0).cuda()
        q = [x, y, x, y.storage()]
        with tempfile.NamedTemporaryFile() as f:
            torch.save(q, f)
            f.seek(0)
            q_copy = torch.load(f)
        self.assertEqual(q_copy, q, 0)
        q_copy[0].fill_(5)
        self.assertEqual(q_copy[0], q_copy[2], 0)
        self.assertTrue(isinstance(q_copy[0], torch.cuda.DoubleTensor))
        self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
        self.assertTrue(isinstance(q_copy[2], torch.cuda.DoubleTensor))
        self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
        q_copy[1].fill_(10)
        self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
Project: pytorch-dist    Author: apaszke
def _test_gather(self, dim):
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("only one GPU detected")
        x = torch.randn(2, 5).cuda(0)
        y = torch.randn(2, 5).cuda(1)
        result = comm.gather((x, y), dim)

        expected_size = list(x.size())
        expected_size[dim] += y.size(dim)
        expected_size = torch.Size(expected_size)
        self.assertEqual(result.get_device(), 0)
        self.assertEqual(result.size(), expected_size)

        index = [slice(None, None), slice(None, None)]
        index[dim] = slice(0, x.size(dim))
        self.assertEqual(result[tuple(index)], x)
        index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
        self.assertEqual(result[tuple(index)], y)
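Shape-wise, comm.gather is a cross-device torch.cat, so the size bookkeeping above can be checked on a single device:

import torch

x = torch.randn(2, 5)
y = torch.randn(2, 5)
for dim in (0, 1):
    result = torch.cat((x, y), dim)
    expected_size = list(x.size())
    expected_size[dim] += y.size(dim)
    assert result.size() == torch.Size(expected_size)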
Project: pytorch-dist    Author: apaszke
def test_Copy(self):
        input = torch.randn(3,4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch-dist    Author: apaszke
def test_Parallel(self):
        input = torch.randn(3, 4, 5)
        m = nn.Parallel(0, 2)
        m.add(nn.View(4, 5, 1))
        m.add(nn.View(4, 5, 1))
        m.add(nn.View(4, 5, 1))

        # Check that these don't raise errors
        m.__repr__()
        str(m)

        output = m.forward(input)
        output2 = input.transpose(0, 2).transpose(0, 1)
        self.assertEqual(output2, output)

        gradInput = m.backward(input, output2)
        self.assertEqual(gradInput, input)
Project: pytorch-dist    Author: apaszke
def test_ParallelTable(self):
        input = torch.randn(3, 4, 5)
        p = nn.ParallelTable()
        p.add(nn.View(4,5,1))
        p.add(nn.View(4,5,1))
        p.add(nn.View(4,5,1))
        m = nn.Sequential()
        m.add(nn.SplitTable(0))
        m.add(p)
        m.add(nn.JoinTable(2))

        # Check that these don't raise errors
        p.__repr__()
        str(p)

        output = m.forward(input)
        output2 = input.transpose(0,2).transpose(0,1)
        self.assertEqual(output2, output)

        gradInput = m.backward(input, output2)
        self.assertEqual(gradInput, input)
Project: pytorch-dist    Author: apaszke
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch-dist    Author: apaszke
def _testMath(self, torchfn, mathfn):
        size = (10, 5)
        # contiguous
        m1 = torch.randn(*size)
        res1 = torchfn(m1[4])
        res2 = res1.clone().zero_()
        for i, v in enumerate(m1[4]):
            res2[i] = mathfn(v)
        self.assertEqual(res1, res2)

        # non-contiguous
        m1 = torch.randn(*size)
        res1 = torchfn(m1[:,4])
        res2 = res1.clone().zero_()
        for i, v in enumerate(m1[:,4]):
            res2[i] = mathfn(v)
        self.assertEqual(res1, res2)
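The helper is driven with function pairs; torch.sin/math.sin is a representative (assumed) pair. A self-contained version of the contiguous case in current PyTorch:

import math
import torch

m1 = torch.randn(10, 5)
res1 = torch.sin(m1[4])
res2 = torch.tensor([math.sin(v) for v in m1[4].tolist()])
assert torch.allclose(res1, res2, atol=1e-6)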
Project: pytorch-dist    Author: apaszke
def test_csub(self):
        # with a tensor
        a = torch.randn(100,90)
        b = a.clone().normal_()

        res_add = torch.add(a, -1, b)
        res_csub = a.clone()
        res_csub.sub_(b)
        self.assertEqual(res_add, res_csub)

        # with a scalar
        a = torch.randn(100,100)

        scalar = 123.5
        res_add = torch.add(a, -scalar)
        res_csub = a.clone()
        res_csub.sub_(scalar)
        self.assertEqual(res_add, res_csub)
Project: pytorch-dist    Author: apaszke
def _test_cop(self, torchfn, mathfn):
        def reference_implementation(res2):
            for i, j in iter_indices(sm1):
                idx1d = i * sm1.size(0) + j
                res2[i,j] = mathfn(sm1[i,j], sm2[idx1d])
            return res2

        # contiguous
        m1 = torch.randn(10, 10, 10)
        m2 = torch.randn(10, 10 * 10)
        sm1 = m1[4]
        sm2 = m2[4]
        res1 = torchfn(sm1, sm2)
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)

        # non-contiguous
        m1 = torch.randn(10, 10, 10)
        m2 = torch.randn(10 * 10, 10 * 10)
        sm1 = m1[:,4]
        sm2 = m2[:,4]
        res1 = torchfn(sm1, sm2)
        res2 = reference_implementation(res1.clone())
        self.assertEqual(res1, res2)
Project: pytorch-dist    Author: apaszke
def test_inverse(self):
        M = torch.randn(5,5)
        MI = torch.inverse(M)
        E = torch.eye(5)
        self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
        self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
        self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

        MII = torch.Tensor(5, 5)
        torch.inverse(MII, M)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
        # second call, now that MII is transposed
        torch.inverse(MII, M)
        self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
        self.assertEqual(MII, MI, 0, 'inverse value in-place')
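Note the legacy out-first convention torch.inverse(MII, M); current PyTorch takes the destination as an out= keyword instead. The identity being asserted, in minimal modern form:

import torch

M = torch.randn(5, 5)
MI = torch.inverse(M)
assert torch.allclose(M @ MI, torch.eye(5), atol=1e-4)
assert torch.allclose(MI @ M, torch.eye(5), atol=1e-4)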
Project: pytorch-dist    Author: apaszke
def test_index_add(self):
        num_copy, num_dest = 3, 3
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].add_(src[i])
        self.assertEqual(dest, dest2)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = dest2[idx[i]] + src[i]
        self.assertEqual(dest, dest2)

    # Fill idx with valid indices.
Project: pytorch-dist    Author: apaszke
def test_masked_copy(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_copy_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_copy_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_copy_(mask, src)
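masked_copy_ was later renamed masked_scatter_, and bool masks replaced ByteTensor. The same check in current PyTorch looks like this:

import torch

dest = torch.randn(10)
src = torch.randn(3)
mask = torch.tensor([0, 0, 0, 0, 1, 0, 1, 0, 1, 0], dtype=torch.bool)
dest2 = dest.clone()
dest.masked_scatter_(mask, src)    # fills the True slots from src, in order
dest2[mask] = src                  # boolean indexing does the same thing
assert torch.equal(dest, dest2)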
Project: pytorch-dist    Author: apaszke
def test_serialization(self):
        a = [torch.randn(5, 5).float() for i in range(2)]
        b = [a[i % 2] for i in range(4)]
        b += [a[0].storage()]
        b += [a[0].storage()[1:4]]
        for use_name in (False, True):
            with tempfile.NamedTemporaryFile() as f:
                handle = f if not use_name else f.name
                torch.save(b, handle)
                f.seek(0)
                c = torch.load(handle)
            self.assertEqual(b, c, 0)
            self.assertTrue(isinstance(c[0], torch.FloatTensor))
            self.assertTrue(isinstance(c[1], torch.FloatTensor))
            self.assertTrue(isinstance(c[2], torch.FloatTensor))
            self.assertTrue(isinstance(c[3], torch.FloatTensor))
            self.assertTrue(isinstance(c[4], torch.FloatStorage))
            c[0].fill_(10)
            self.assertEqual(c[0], c[2], 0)
            self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), 0)
            c[1].fill_(20)
            self.assertEqual(c[1], c[3], 0)
            self.assertEqual(c[4][1:4], c[5], 0)
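The heart of this test is that torch.save deduplicates storages, so tensors that aliased each other before saving still alias after loading. A minimal check:

import io
import torch

a = torch.randn(5, 5)
buf = io.BytesIO()
torch.save([a, a], buf)            # the same tensor saved twice
buf.seek(0)
b = torch.load(buf)
b[0].fill_(10)
print(torch.equal(b[0], b[1]))     # True: they still share one storage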
Project: tnt    Author: pytorch
def testTensorDataset(self):
        # dict input
        data = {
            # 'input': torch.arange(0,8),
            'input': np.arange(0, 8),
            'target': np.arange(0, 8),
        }
        d = dataset.TensorDataset(data)
        self.assertEqual(len(d), 8)
        self.assertEqual(d[2], {'input': 2, 'target': 2})

        # tensor input
        a = torch.randn(8)
        d = dataset.TensorDataset(a)
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1])

        # list of tensors input
        d = dataset.TensorDataset([a])
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1][0])
Project: allennlp    Author: allenai
def test_wrapper_works_when_passed_state_with_zero_length_sequences(self):
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)
        tensor = torch.rand([5, 7, 3])
        mask = torch.ones(5, 7)
        mask[0, 3:] = 0
        mask[1, 4:] = 0
        mask[2, 0:] = 0
        mask[3, 6:] = 0

        # Initial states are of shape (num_layers * num_directions, batch_size, hidden_dim)
        initial_states = (Variable(torch.randn(6, 5, 7)),
                          Variable(torch.randn(6, 5, 7)))

        input_tensor = Variable(tensor)
        mask = Variable(mask)
        _ = encoder(input_tensor, mask, initial_states)
Project: pyro    Author: uber
def guide(data):
    w_mu = Variable(torch.randn(p, 1).type_as(data.data), requires_grad=True)
    w_log_sig = Variable((-3.0 * torch.ones(p, 1) + 0.05 * torch.randn(p, 1)).type_as(data.data), requires_grad=True)
    b_mu = Variable(torch.randn(1).type_as(data.data), requires_grad=True)
    b_log_sig = Variable((-3.0 * torch.ones(1) + 0.05 * torch.randn(1)).type_as(data.data), requires_grad=True)
    # register learnable params in the param store
    mw_param = pyro.param("guide_mean_weight", w_mu)
    sw_param = softplus(pyro.param("guide_log_sigma_weight", w_log_sig))
    mb_param = pyro.param("guide_mean_bias", b_mu)
    sb_param = softplus(pyro.param("guide_log_sigma_bias", b_log_sig))
    # gaussian guide distributions for w and b
    w_dist = Normal(mw_param, sw_param)
    b_dist = Normal(mb_param, sb_param)
    dists = {'linear.weight': w_dist, 'linear.bias': b_dist}
    # overloading the parameters in the module with random samples from the guide distributions
    lifted_module = pyro.random_module("module", regression_model, dists)
    # sample a regressor
    return lifted_module()


# instantiate optim and inference objects
Project: pyro    Author: uber
def _test_jacobian(self, input_dim, hidden_dim, multiplier):
        jacobian = torch.zeros(input_dim, input_dim)
        arn = AutoRegressiveNN(input_dim, hidden_dim, multiplier)

        def nonzero(x):
            return torch.sign(torch.abs(x))

        for output_index in range(multiplier):
            for j in range(input_dim):
                for k in range(input_dim):
                    x = Variable(torch.randn(1, input_dim))
                    epsilon_vector = torch.zeros(1, input_dim)
                    epsilon_vector[0, j] = self.epsilon
                    delta = (arn(x + Variable(epsilon_vector)) - arn(x)) / self.epsilon
                    jacobian[j, k] = float(delta[0, k + output_index * input_dim].data.cpu().numpy()[0])

            permutation = arn.get_permutation()
            permuted_jacobian = jacobian.clone()
            for j in range(input_dim):
                for k in range(input_dim):
                    permuted_jacobian[j, k] = jacobian[permutation[j], permutation[k]]

            lower_sum = torch.sum(torch.tril(nonzero(permuted_jacobian), diagonal=0))
            self.assertTrue(lower_sum == float(0.0))
Project: pyro    Author: uber
def test_batch_dim(batch_dim):

    data = Variable(torch.randn(4, 5, 7))

    def local_model(ixs, _xs):
        xs = _xs.view(-1, _xs.size(2))
        return pyro.sample("xs", dist.normal,
                           xs, Variable(torch.ones(xs.size())))

    def model():
        return pyro.map_data("md", data, local_model,
                             batch_size=1, batch_dim=batch_dim)

    tr = poutine.trace(model).get_trace()
    assert tr.nodes["xs"]["value"].size(0) == data.size(1 - batch_dim)
    assert tr.nodes["xs"]["value"].size(1) == data.size(2)
Project: pyro    Author: uber
def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.n_outer = 3
        self.n_inner = 3
        self.n_data = Variable(torch.Tensor([self.n_outer * self.n_inner]))
        self.data = []
        self.sum_data = ng_zeros(2)
        for _out in range(self.n_outer):
            data_in = []
            for _in in range(self.n_inner):
                data_in.append(Variable(torch.Tensor([-0.1, 0.3]) + torch.randn(2) / torch.sqrt(self.lam.data)))
                self.sum_data += data_in[-1]
            self.data.append(data_in)
        self.analytic_lam_n = self.lam0 + self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True

    # this tests rao-blackwellization in elbo for nested list map_datas
Project: pytorch-tutorial    Author: yunjey
def sample(self):

        # Load trained parameters 
        g_path = os.path.join(self.model_path, 'generator-%d.pkl' %(self.num_epochs))
        d_path = os.path.join(self.model_path, 'discriminator-%d.pkl' %(self.num_epochs))
        self.generator.load_state_dict(torch.load(g_path))
        self.discriminator.load_state_dict(torch.load(d_path))
        self.generator.eval()
        self.discriminator.eval()

        # Sample the images
        noise = self.to_variable(torch.randn(self.sample_size, self.z_dim))
        fake_images = self.generator(noise)
        sample_path = os.path.join(self.sample_path, 'fake_samples-final.png')
        torchvision.utils.save_image(self.denorm(fake_images.data), sample_path, nrow=12)

        print("Saved sampled images to '%s'" %sample_path)
Project: open-reid    Author: Cysu
def test_forward(self):
        import torch
        from torch.autograd import Variable
        from reid.models.inception import InceptionNet

        # model = Inception(num_classes=5, num_features=256, dropout=0.5)
        # x = Variable(torch.randn(10, 3, 144, 56), requires_grad=False)
        # y = model(x)
        # self.assertEquals(y.size(), (10, 5))

        model = InceptionNet(num_features=8, norm=True, dropout=0)
        x = Variable(torch.randn(10, 3, 144, 56), requires_grad=False)
        y = model(x)
        self.assertEquals(y.size(), (10, 8))
        self.assertEquals(y.norm(2, 1).max(), 1)
        self.assertEquals(y.norm(2, 1).min(), 1)
Project: open-reid    Author: Cysu
def test_forward_backward(self):
        import torch
        import torch.nn.functional as F
        from torch.autograd import Variable
        from reid.loss import OIMLoss
        criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
        criterion.lut = torch.eye(3)
        x = Variable(torch.randn(3, 3), requires_grad=True)
        y = Variable(torch.range(0, 2).long())
        loss = criterion(x, y)
        loss.backward()
        probs = F.softmax(x)
        grads = probs.data - torch.eye(3)
        abs_diff = torch.abs(grads - x.grad.data)
        self.assertEquals(torch.log(probs).diag().sum(), -loss)
        self.assertTrue(torch.max(abs_diff) < 1e-6)
Project: pytorch-cifar    Author: kuangliu
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Project: pytorch-cifar    Author: kuangliu
def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out

# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
Project: RNN-for-tagging    Author: SendongZhao
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        super(BiLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_ix
        self.tagset_size = len(tag_to_ix)

        self.word_embeds = nn.Embedding(vocab_size, embedding_dim, padding_idx = 0)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True)

        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

        # Matrix of transition parameters.  Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = nn.Parameter(
            torch.randn(self.tagset_size, self.tagset_size))

        self.hidden = self.init_hidden()
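torch.randn is the idiomatic way to give a free nn.Parameter a random starting point, as with the CRF transition matrix above. A standalone sketch (the tagset size of 5 is an assumed example value):

import torch
import torch.nn as nn

class Transitions(nn.Module):
    def __init__(self, tagset_size=5):
        super().__init__()
        # entry (i, j): score of transitioning *to* i *from* j
        self.transitions = nn.Parameter(torch.randn(tagset_size, tagset_size))

m = Transitions()
print(m.transitions.requires_grad)  # True: registered as learnable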
Project: R-net    Author: matthew-z
def __init__(self, question_size, passage_size, hidden_size, attn_size=None,
                 cell_type=nn.GRUCell, num_layers=1, dropout=0, residual=False, **kwargs):
        super().__init__()
        self.num_layers = num_layers
        if attn_size is None:
            attn_size = question_size

        # TODO: what is V_q? (section 3.4)
        v_q_size = question_size
        self.question_pooling = AttentionPooling(question_size,
                                                 v_q_size, attn_size=attn_size)
        self.passage_pooling = AttentionPooling(passage_size,
                                                question_size, attn_size=attn_size)

        self.V_q = nn.Parameter(torch.randn(1, 1, v_q_size), requires_grad=True)
        self.cell = StackedCell(question_size, question_size, num_layers=num_layers,
                                dropout=dropout, rnn_cell=cell_type, residual=residual, **kwargs)
Project: tensorboard-pytorch    Author: lanpa
def make_sprite(label_img, save_path):
    import math
    import os
    import torch
    import torchvision
    # this ensures the sprite image has the correct dimensions, as described in
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))

    # augment images so that #images equals nrow*nrow
    label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)

    # Dirty fix: no pixels are appended by the make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
    xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
    if xx.size(2) == 33:
        sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
        sprite = sprite[:, 1:, 1:]
        torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
    else:
        torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)
Project: sketchnet    Author: jtoy
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Project: optnet    Author: locuslab
def __init__(self, nFeatures, args):
        super().__init__()

        nHidden, neq, nineq = 2*nFeatures-1,0,2*nFeatures-2
        assert(neq==0)

        # self.fc1 = nn.Linear(nFeatures, nHidden)
        self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())

        Q = 1e-8*torch.eye(nHidden)
        Q[:nFeatures,:nFeatures] = torch.eye(nFeatures)
        self.L = Variable(torch.potrf(Q))

        self.D = Parameter(0.3*torch.randn(nFeatures-1, nFeatures))
        # self.lam = Parameter(20.*torch.ones(1))
        self.h = Variable(torch.zeros(nineq))

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.neq = neq
        self.nineq = nineq
        self.args = args
Project: pytorch    Author: tylergenter
def test_parallel_apply(self):
        l1 = nn.Linear(10, 5).float().cuda(0)
        l2 = nn.Linear(10, 5).float().cuda(1)
        i1 = Variable(torch.randn(2, 10).float().cuda(0))
        i2 = Variable(torch.randn(2, 10).float().cuda(1))
        expected1 = l1(i1).data
        expected2 = l2(i2).data
        inputs = ((i1,), (i2,))
        modules = (l1, l2)
        expected_outputs = (expected1, expected2)

        outputs = dp.parallel_apply(modules, inputs, None)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)

        inputs = (i1, Variable(i2.data.new()))
        expected_outputs = (expected1, expected2.new())
Project: pytorch    Author: tylergenter
def test_data_parallel_nested_output(self):
        def fn(input):
            return [input, (input.sin(), input.cos(), [input.add(1)]), input]

        class Net(nn.Module):
            def forward(self, input):
                return fn(input)

        i = Variable(torch.randn(2, 2).float().cuda(1))
        gpus = range(torch.cuda.device_count())
        output = dp.data_parallel(Net(), i, gpus)
        self.assertEqual(output, fn(i))
        self.assertIsInstance(output[0], Variable)
        self.assertIsInstance(output[1], tuple)
        self.assertIsInstance(output[1][0], Variable)
        self.assertIsInstance(output[1][1], Variable)
        self.assertIsInstance(output[1][2], list)
        self.assertIsInstance(output[1][2][0], Variable)
        self.assertIsInstance(output[2], Variable)
Project: pytorch    Author: tylergenter
def test_Conv2d_large_workspace(self):
        # These sizes require huge cuDNN workspaces. Make sure we choose a
        # reasonable algorithm that does not run out of memory
        sizes = [
            (1, 256, 109, 175),
            (1, 256, 80, 128),
            (1, 256, 120, 192),
        ]
        dtype = torch.cuda.FloatTensor

        def run_test(benchmark):
            torch.backends.cudnn.benchmark = benchmark
            conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).type(dtype)
            for size in sizes:
                x = torch.randn(size).type(dtype)
                out = conv(Variable(x, requires_grad=True))
                out.backward(torch.ones(out.size()).type(dtype))

        b = torch.backends.cudnn.benchmark
        try:
            run_test(benchmark=False)
            run_test(benchmark=True)
        finally:
            torch.backends.cudnn.benchmark = b
Project: pytorch    Author: tylergenter
def test_Conv2d_groups_nobias(self):
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False)
        i = Variable(torch.randn(2, 4, 6, 6), requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4)
        output.backward(grad_output)

        m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False)
        m1.weight.data.copy_(m.weight.data[:2])
        i1 = Variable(i.data[:, :2].contiguous(), requires_grad=True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())

        m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False)
        m2.weight.data.copy_(m.weight.data[2:])
        i2 = Variable(i.data[:, 2:].contiguous(), requires_grad=True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1))
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0))
Project: pytorch    Author: tylergenter
def test_container_copy(self):
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.linear = nn.Linear(4, 5)

            def forward(self, input):
                return self.linear(input)

        input = Variable(torch.randn(2, 4))

        model = Model()
        model_cp = deepcopy(model)
        self.assertEqual(model(input).data, model_cp(input).data)

        model_cp.linear.weight.data[:] = 2
        self.assertNotEqual(model(input).data, model_cp(input).data)
Project: pytorch    Author: tylergenter
def _function_test(self, cls):
        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = Variable(torch.randn(5, 5), requires_grad=True)
        result = cls.apply(x, 2, y)
        go = Variable(torch.ones(1), requires_grad=True)
        result.sum().backward(go)

        self.assertEqual(x.grad.data, y.data + torch.ones(5, 5))
        self.assertEqual(y.grad.data, x.data + torch.ones(5, 5) * 2)

        self.assertFalse(x.grad.volatile)
        self.assertFalse(y.grad.volatile)
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)

        return x, y
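The asserted gradients (x.grad = y + 1, y.grad = x + 2) pin down what cls must compute: f(x, s, y) = x + s*y + x*y gives df/dx = 1 + y and df/dy = s + x. A sketch of such a function in the modern autograd.Function API:

import torch
from torch.autograd import Function

class MyFunction(Function):
    @staticmethod
    def forward(ctx, tensor1, scalar, tensor2):
        ctx.scalar = scalar
        ctx.save_for_backward(tensor1, tensor2)
        return tensor1 + scalar * tensor2 + tensor1 * tensor2

    @staticmethod
    def backward(ctx, grad_output):
        t1, t2 = ctx.saved_tensors
        # None: no gradient flows to the plain-int scalar argument
        return grad_output * (1 + t2), None, grad_output * (ctx.scalar + t1)

x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
MyFunction.apply(x, 2, y).sum().backward()
assert torch.allclose(x.grad, y + 1) and torch.allclose(y.grad, x + 2)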
Project: pytorch    Author: tylergenter
def test_accumulate_grad(self):
        import sys

        grad_output = Variable(torch.ones(5, 5))
        for start_volatile, end_volatile in product((True, False), repeat=2):
            go1 = grad_output.data if start_volatile else grad_output
            go2 = grad_output.data if end_volatile else grad_output

            x = Variable(torch.randn(5, 5), requires_grad=True)
            y = x + 2
            y.backward(go1, retain_variables=True)
            x_grad = x.grad
            x_grad_clone = x.grad.data.clone()

            del x
            y.backward(go2)

            # That's the only case when we can accumulate in-place
            if start_volatile and end_volatile:
                expected_grad = x_grad_clone * 2
            else:
                expected_grad = x_grad_clone
            self.assertEqual(x_grad.data, expected_grad)
Project: pytorch    Author: tylergenter
def test_grad_nonleaf_many_outputs(self):
        # This checks an edge case for function callbacks
        # We want to capture two grads of a function, but can only
        # register a single callback.
        x = Variable(torch.randn(4, 2), requires_grad=True)
        a, b = x.chunk(2)

        def hook(*grads):
            hook_called[0] = True
        hook_called = [False]
        x.register_hook(hook)

        go = torch.randn(2, 2)
        grad_a, grad_b = torch.autograd.grad(
            (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)

        self.assertEqual(grad_a, go)
        self.assertEqual(grad_b, go * 2)
        self.assertFalse(hook_called[0])
        self.assertIsNone(x.grad)
Project: pytorch    Author: tylergenter
def _test_backward(self):
        v_t = torch.randn(5, 5)
        x_t = torch.randn(5, 5)
        y_t = torch.rand(5, 5) + 0.1
        z_t = torch.randn(5, 5)
        grad_output = torch.randn(5, 5)
        v = Variable(v_t, requires_grad=True)
        x = Variable(x_t, requires_grad=True)
        y = Variable(y_t, requires_grad=True)
        z = Variable(z_t, requires_grad=True)

        v.backward(grad_output)
        self.assertEqual(v.grad.data, grad_output)

        a = x + (y * z) + 4 * z ** 2 * x / y
        a.backward(grad_output)
        x_grad = 4 * z_t.pow(2) / y_t + 1
        y_grad = z_t - 4 * x_t * z_t.pow(2) / y_t.pow(2)
        z_grad = 8 * x_t * z_t / y_t + y_t
        self.assertEqual(x.grad.data, x_grad * grad_output)
        self.assertEqual(y.grad.data, y_grad * grad_output)
        self.assertEqual(z.grad.data, z_grad * grad_output)
Project: pytorch    Author: tylergenter
def test_multi_backward(self):
        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = Variable(torch.randn(5, 5), requires_grad=True)

        q = Variable(torch.randn(5, 5), requires_grad=True)

        a = Variable(torch.randn(5, 5), requires_grad=True)
        b = Variable(torch.randn(5, 5), requires_grad=True)

        q2 = q * 2
        z = x + y + q2
        c = a * b + q2
        grad_z = torch.randn(5, 5)
        grad_c = torch.randn(5, 5)
        torch.autograd.backward([z, c], [grad_z, grad_c])

        self.assertEqual(x.grad.data, grad_z)
        self.assertEqual(y.grad.data, grad_z)
        self.assertEqual(a.grad.data, grad_c * b.data)
        self.assertEqual(b.grad.data, grad_c * a.data)
        self.assertEqual(q.grad.data, (grad_c + grad_z) * 2)
Project: drl.pth    Author: seba-1511
def forward(self, x, *args, **kwargs):
        action = super(DiagonalGaussianPolicy, self).forward(x, *args, **kwargs)
        size = action.raw.size()
        std = self.logstd.exp().expand_as(action.raw)
        value = action.raw + std * V(th.randn(size))
        value = value.detach()
        action.value = value
#        action.logstd = self.logstd.clone()
        action.logstd = self.logstd
        action.prob = lambda: self._normal(value, action.raw, action.logstd)
        action.entropy = action.logstd + self.halflog2pie
        var = std.pow(2)
        action.compute_log_prob = lambda a: (- ((a - action.raw).pow(2) / (2.0 * var)) - self.halflog2pi - action.logstd).mean(1)
        action.log_prob = action.compute_log_prob(value)
        return action
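The action.raw + std * randn line is the reparameterization trick: a sample mu + sigma * eps with eps ~ N(0, 1) stays differentiable with respect to mu and sigma. In isolation:

import torch

mu = torch.zeros(3, requires_grad=True)
logstd = torch.zeros(3, requires_grad=True)
eps = torch.randn(3)                 # noise drawn outside the graph
sample = mu + logstd.exp() * eps     # gradients flow to mu and logstd
sample.sum().backward()
print(mu.grad, logstd.grad)          # ones(3) and eps (since sigma == 1 here)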
Project: YellowFin_Pytorch    Author: JianGoForIt
def test_resnext():
    net = ResNeXt29_2x64d()
    x = torch.randn(1,3,32,32)
    y = net(Variable(x))
    print(y.size())

# test_resnext()
Project: diracnets    Author: szagoruyko
def test_dirac_property1d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv1d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
Project: diracnets    Author: szagoruyko
def test_dirac_property2d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
Project: diracnets    Author: szagoruyko
def test_dirac_property3d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv3d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5, 5, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
Project: diracnets    Author: szagoruyko
def test_nonsquare(self):
        ni, no, k, pad = 8, 4, 3, 1
        module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        x = Variable(torch.randn(4, ni, 5, 5))
        y = module(x)
Project: diracnets    Author: szagoruyko
def test_cifar10(self):
        inputs = Variable(torch.randn(1,3,32,32))
        f, params, stats = define_diracnet(34, 1, 'CIFAR10')
        outputs = f(inputs, params, stats, mode=False)
        self.assertEqual(outputs.size(), torch.Size((1, 10)))
Project: RetinaNet    Author: c0nn3r
def test_focal_loss():
    loss = FocalLoss()

    input = Variable(torch.randn(3, 5), requires_grad=True)
    target = Variable(torch.LongTensor(3).random_(5))

    print(input)
    print(target)

    output = loss(input, target)
    print(output)
    output.backward()
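Focal loss down-weights well-classified examples: FL = -(1 - p_t)^gamma * log(p_t). A minimal sketch of such a criterion (gamma=2 is an assumed default; this is an illustration, not the RetinaNet repo's exact implementation):

import torch
import torch.nn.functional as F

def focal_loss(input, target, gamma=2.0):
    logp = F.log_softmax(input, dim=1)
    logp_t = logp.gather(1, target.unsqueeze(1)).squeeze(1)  # log p of true class
    p_t = logp_t.exp()
    return (-(1 - p_t) ** gamma * logp_t).mean()

input = torch.randn(3, 5, requires_grad=True)
target = torch.randint(0, 5, (3,))
loss = focal_loss(input, target)
loss.backward()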