Python torch module: normal() source-code examples

The following code examples, extracted from open-source Python projects, illustrate how to use torch.normal().

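Before the examples, a quick orientation: torch.normal() samples element-wise from separate normal distributions, and accepts a tensor mean with a tensor std, a tensor mean with a scalar std, or a scalar mean with a tensor std (the examples below exercise all three forms). A minimal sketch, with illustrative shapes:

import torch

mean = torch.zeros(2, 3)
std = torch.ones(2, 3)

a = torch.normal(mean, std)   # one draw per (mean, std) pair, shape (2, 3)
b = torch.normal(mean, 3.0)   # tensor mean, shared scalar std
c = torch.normal(2.0, std)    # shared scalar mean, tensor std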
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def forward(self, inputs, batch_size, hidden_cell=None):
        if hidden_cell is None:
            # then must init with zeros
            if use_cuda:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
            else:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
            hidden_cell = (hidden, cell)
        _, (hidden, cell) = self.lstm(inputs.float(), hidden_cell)
        # hidden is (2, batch_size, hidden_size); we want (batch_size, 2*hidden_size):
        hidden_forward, hidden_backward = torch.split(hidden, 1, 0)
        hidden_cat = torch.cat([hidden_forward.squeeze(0), hidden_backward.squeeze(0)], 1)
        # mu and sigma:
        mu = self.fc_mu(hidden_cat)
        sigma_hat = self.fc_sigma(hidden_cat)
        sigma = torch.exp(sigma_hat/2.)
        # N ~ N(0,1)
        z_size = mu.size()
        if use_cuda:
            N = Variable(torch.normal(torch.zeros(z_size), torch.ones(z_size)).cuda())
        else:
            N = Variable(torch.normal(torch.zeros(z_size), torch.ones(z_size)))
        z = mu + sigma*N
        # mu and sigma_hat are needed for LKL loss
        return z, mu, sigma_hat
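The encoder above uses the reparameterization trick: rather than sampling z ~ N(mu, sigma^2) directly, it draws N ~ N(0, 1) with torch.normal() and computes z = mu + sigma * N, which keeps z differentiable with respect to mu and sigma. A minimal standalone sketch of that step (shapes are illustrative):

import torch

mu = torch.zeros(4, 8)                                  # predicted mean
sigma = torch.exp(torch.zeros(4, 8) / 2.)               # predicted std from a log-variance
N = torch.normal(torch.zeros(4, 8), torch.ones(4, 8))   # N ~ N(0, 1)
z = mu + sigma * N                                      # z ~ N(mu, sigma^2)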
Project: pytorch    Author: tylergenter
def test_reinforce_check(self):
        x = Variable(torch.randn(5, 5), requires_grad=True)

        # these should be ok
        y = torch.normal(x)
        y.reinforce(torch.randn(5, 5))
        y = torch.normal(x)
        y.reinforce(2)

        # can't call reinforce on non-stochastic variables
        self.assertRaises(RuntimeError, lambda: x.reinforce(2))

        # can't call reinforce twice
        y = torch.normal(x)
        y.reinforce(2)
        self.assertRaises(RuntimeError, lambda: y.reinforce(2))

        # check type of reward
        y = torch.normal(x)
        self.assertRaises(TypeError, lambda: y.reinforce(torch.randn(5, 5).long()))

        # check size of reward
        y = torch.normal(x)
        self.assertRaises(ValueError, lambda: y.reinforce(torch.randn(4, 5)))
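This test exercises the stochastic-Variable API of early PyTorch (since removed in favor of torch.distributions): a sample produced by torch.normal() carries no ordinary gradient, so a reward must be attached with .reinforce() before backward(), and the gradient of the inputs is then estimated with REINFORCE. A minimal sketch of the pattern, using the same deprecated API the test covers:

import torch
from torch.autograd import Variable

x = Variable(torch.randn(5, 5), requires_grad=True)
y = torch.normal(x)              # stochastic sample centered on x
y.reinforce(torch.randn(5, 5))   # per-element reward, same shape as y
y.backward()                     # populates x.grad via the REINFORCE estimator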
Project: seqmod    Author: emanjavacas
def init_output_for(self, hidden):
        """
        Creates a variable to be concatenated with previous target
        embedding as input for the first rnn step. This is used
        for the first decoding step when using the input_feed flag.

        Returns:
        --------
        torch.Tensor(batch x hid_dim)
        """
        if self.cell.startswith('LSTM'):
            hidden = hidden[0]

        _, batch, hid_dim = hidden.size()

        output = torch.normal(hidden.data.new(batch, hid_dim).zero_(), 0.3)

        return Variable(output, volatile=not self.training)
Project: seqmod    Author: emanjavacas
def init_hidden_for(self, z):
        batch_size = z.size(0)
        size = (self.num_layers, batch_size, self.hid_dim)

        if self.train_init:
            h_0 = self.h_0.repeat(1, batch_size, 1)
        else:
            h_0 = z.data.new(*size).zero_()
            h_0 = Variable(h_0, volatile=not self.training)

        if self.train_init_add_jitter:
            std = 0.3           # TODO: dehardcode
            h_0 = h_0 + torch.normal(torch.zeros_like(h_0), std)

        if self.cell.startswith('LSTM'):
            c_0 = z.data.new(*size).zero_()
            c_0 = Variable(c_0, volatile=not self.training)
            return h_0, c_0
        else:
            return h_0
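The jitter above adds zero-mean Gaussian noise with a fixed standard deviation of 0.3 to the initial hidden state. The noise pattern on its own, with an illustrative shape:

import torch

h_0 = torch.zeros(2, 4, 8)
h_0 = h_0 + torch.normal(torch.zeros_like(h_0), 0.3)   # noise shaped like h_0, std 0.3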
Project: seqmod    Author: emanjavacas
def init_hidden_for(self, inp):
        batch_size = inp.size(1)
        size = (self.num_dirs * self.num_layers, batch_size, self.hid_dim)

        if self.train_init:
            h_0 = self.h_0.repeat(1, batch_size, 1)
        else:
            h_0 = inp.data.new(*size).zero_()
            h_0 = Variable(h_0, volatile=not self.training)

        if self.add_init_jitter:
            h_0 = h_0 + torch.normal(torch.zeros_like(h_0), 0.3)

        if self.cell.startswith('LSTM'):
            # compute memory cell
            c_0 = inp.data.new(*size).zero_()
            c_0 = Variable(c_0, volatile=not self.training)
            return h_0, c_0
        else:
            return h_0
Project: seqmod    Author: emanjavacas
def init_hidden_for(self, inp):
        size = (self.num_layers, inp.size(1), self.hid_dim)
        # create h_0
        if self.train_init:
            h_0 = self.h_0.repeat(1, inp.size(1), 1)
        else:
            h_0 = Variable(inp.data.new(*size).zero_(),
                           volatile=not self.training)
        # optionally add jitter
        if self.add_init_jitter:
            h_0 = h_0 + torch.normal(torch.zeros_like(h_0), 0.3)
        # return
        if self.cell.startswith('LSTM'):
            return h_0, torch.zeros_like(h_0)
        else:
            return h_0
Project: python-utils    Author: zhijian-liu
def standard_normal(*args):
    return torch.normal(torch.zeros(*args), torch.ones(*args)).cuda()
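This helper draws standard-normal samples of an arbitrary shape and assumes a CUDA device is available, since it calls .cuda() unconditionally. Hypothetical usage:

noise = standard_normal(5, 3)   # 5x3 tensor of N(0, 1) samples on the GPU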
Project: sru    Author: taolei87
def load_data(opt):
    with open('SQuAD/meta.msgpack', 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    embedding = torch.Tensor(meta['embedding'])
    opt['pretrained_words'] = True
    opt['vocab_size'] = embedding.size(0)
    opt['embedding_dim'] = embedding.size(1)
    if not opt['fix_embeddings']:
        embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)
    with open(args.data_file, 'rb') as f:
        data = msgpack.load(f, encoding='utf8')
    train_orig = pd.read_csv('SQuAD/train.csv')
    dev_orig = pd.read_csv('SQuAD/dev.csv')
    train = list(zip(
        data['trn_context_ids'],
        data['trn_context_features'],
        data['trn_context_tags'],
        data['trn_context_ents'],
        data['trn_question_ids'],
        train_orig['answer_start_token'].tolist(),
        train_orig['answer_end_token'].tolist(),
        data['trn_context_text'],
        data['trn_context_spans']
    ))
    dev = list(zip(
        data['dev_context_ids'],
        data['dev_context_features'],
        data['dev_context_tags'],
        data['dev_context_ents'],
        data['dev_question_ids'],
        data['dev_context_text'],
        data['dev_context_spans']
    ))
    dev_y = dev_orig['answers'].tolist()[:len(dev)]
    dev_y = [eval(y) for y in dev_y]
    return train, dev, dev_y, embedding, opt
Project: pytorch    Author: tylergenter
def test_multi_backward_stochastic(self):
        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = Variable(torch.randn(5, 5), requires_grad=True)

        z = x + y
        q = torch.normal(x)
        q.reinforce(torch.randn(5, 5))

        torch.autograd.backward([z, q], [torch.ones(5, 5), None])
Project: pytorch    Author: tylergenter
def test_stochastic(self):
        x = Variable(torch.rand(2, 10), requires_grad=True)
        stddevs = Variable(torch.rand(2, 10) * 5, requires_grad=True)
        y = (x * 2).clamp(0, 1)
        y = y / y.sum(1).expand_as(y)
        samples_multi = y.multinomial(5)
        samples_multi_flat = y[0].multinomial(5)
        samples_bernoulli = y.bernoulli()
        samples_norm = torch.normal(y)
        samples_norm_std = torch.normal(y, stddevs)
        z = samples_multi * 2 + 4
        z = z + samples_multi_flat.unsqueeze(0).expand_as(samples_multi)
        z = torch.cat([z, z], 1)
        z = z.double()
        z = z + samples_bernoulli + samples_norm + samples_norm_std
        last_sample = torch.normal(z, 4)
        z = last_sample + 2
        self.assertFalse(z.requires_grad)

        self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True))
        samples_multi.reinforce(torch.randn(2, 5))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True))
        samples_multi_flat.reinforce(torch.randn(5))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True))
        samples_bernoulli.reinforce(torch.randn(2, 10))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True))
        samples_norm.reinforce(torch.randn(2, 10))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_variables=True))
        samples_norm_std.reinforce(torch.randn(2, 10))
        # We don't have to specify rewards w.r.t. last_sample - it doesn't
        # require gradient

        last_sample.backward(retain_variables=True)
        z.backward()

        self.assertGreater(x.grad.data.abs().sum(), 0)
Project: pytorch    Author: tylergenter
def test_stochastic_require_grad(self):
        # This tests a DSD function sequence (D=deterministic, S=stochastic),
        # where all functions require grad.
        x = Variable(torch.randn(2, 10), requires_grad=True)
        y = Variable(torch.randn(2, 10), requires_grad=True)
        z = torch.normal(x + 2, 2)
        o = z + y
        z.reinforce(torch.randn(2, 10))
        o.sum().backward()
        self.assertEqual(y.grad.data, torch.ones(2, 10))
        self.assertGreater(x.grad.data.abs().sum(), 0)
Project: pytorch    Author: tylergenter
def test_stochastic_sequence(self):
        x = Variable(torch.rand(10).clamp_(0, 1), requires_grad=True)
        b = x.bernoulli()
        n1 = torch.normal(b, x)
        n2 = torch.normal(n1, 2)

        b.reinforce(torch.randn(10))
        n1.reinforce(torch.randn(10))
        n2.reinforce(torch.randn(10))

        n2.backward()

        self.assertGreater(x.grad.data.abs().sum(), 0)
Project: pytorch-coriander    Author: hughperkins
def test_stochastic(self):
        x = Variable(torch.rand(2, 10), requires_grad=True)
        stddevs = Variable(torch.rand(2, 10) * 5, requires_grad=True)
        y = (x * 2).clamp(0, 1)
        y = y / y.sum(1, True).expand_as(y)
        samples_multi = y.multinomial(5)
        samples_multi_flat = y[0].multinomial(5)
        samples_bernoulli = y.bernoulli()
        samples_norm = torch.normal(y)
        samples_norm_std = torch.normal(y, stddevs)
        z = samples_multi * 2 + 4
        z = z + samples_multi_flat.unsqueeze(0).expand_as(samples_multi)
        z = torch.cat([z, z], 1)
        z = z.double()
        z = z + samples_bernoulli + samples_norm + samples_norm_std
        last_sample = torch.normal(z, 4)
        z = last_sample + 2
        self.assertFalse(z.requires_grad)

        self.assertRaises(RuntimeError, lambda: z.backward(retain_graph=True))
        samples_multi.reinforce(torch.randn(2, 5))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_graph=True))
        samples_multi_flat.reinforce(torch.randn(5))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_graph=True))
        samples_bernoulli.reinforce(torch.randn(2, 10))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_graph=True))
        samples_norm.reinforce(torch.randn(2, 10))
        self.assertRaises(RuntimeError, lambda: z.backward(retain_graph=True))
        samples_norm_std.reinforce(torch.randn(2, 10))
        # We don't have to specify rewards w.r.t. last_sample - it doesn't
        # require gradient

        last_sample.backward(retain_graph=True)
        z.backward()

        self.assertGreater(x.grad.data.abs().sum(), 0)
Project: seqmod    Author: emanjavacas
def init_hidden_for(self, enc_hidden):
        """
        Creates a variable to be fed as init hidden step.

        Returns:
        --------
        torch.Tensor(num_layers x batch x hid_dim)
        """
        # unpack
        if self.cell.startswith('LSTM'):
            h_0, _ = enc_hidden
        else:
            h_0 = enc_hidden

        # compute h_0
        if self.train_init:
            h_0 = self.h_0.repeat(1, h_0.size(1), 1)
        else:
            if not self.reuse_hidden:
                h_0 = torch.zeros_like(h_0)

        if self.add_init_jitter:
            h_0 = h_0 + torch.normal(torch.zeros_like(h_0), 0.3)

        # pack
        if self.cell.startswith('LSTM'):
            return h_0, torch.zeros_like(h_0)
        else:
            return h_0
Project: pytorch    Author: pytorch
def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        return torch.normal(self.mean.expand(shape), self.std.expand(shape))
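This is the sampling path of torch.distributions.Normal: the requested sample_shape is prepended to the distribution's batch shape, mean and std are expanded to that shape, and torch.normal() draws element-wise. A short usage sketch:

import torch
from torch.distributions import Normal

d = Normal(torch.zeros(3), torch.ones(3))
x = d.sample()        # shape (3,): one draw per batch element
y = d.sample((5,))    # shape (5, 3): sample_shape prepended to the batch shape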
Project: pytorch    Author: pytorch
def _check_sampler_sampler(self, torch_dist, ref_dist, message, multivariate=False,
                               num_samples=10000, failure_rate=1e-3):
        # Checks that the .sample() method matches a reference function.
        torch_samples = torch_dist.sample_n(num_samples).squeeze()
        if isinstance(torch_samples, Variable):
            torch_samples = torch_samples.data
        torch_samples = torch_samples.cpu().numpy()
        ref_samples = ref_dist.rvs(num_samples)
        if multivariate:
            # Project onto a random axis.
            axis = np.random.normal(size=torch_samples.shape[-1])
            axis /= np.linalg.norm(axis)
            torch_samples = np.dot(torch_samples, axis)
            ref_samples = np.dot(ref_samples, axis)
        samples = [(x, +1) for x in torch_samples] + [(x, -1) for x in ref_samples]
        samples.sort()
        samples = np.array(samples)[:, 1]

        # Aggregate into bins filled with roughly zero-mean unit-variance RVs.
        num_bins = 10
        samples_per_bin = len(samples) // num_bins
        bins = samples.reshape((num_bins, samples_per_bin)).mean(axis=1)
        stddev = samples_per_bin ** -0.5
        threshold = stddev * scipy.special.erfinv(1 - 2 * failure_rate / num_bins)
        message = '{}.sample() is biased:\n{}'.format(message, bins)
        for bias in bins:
            self.assertLess(-threshold, bias, message)
            self.assertLess(bias, threshold, message)
Project: pytorch    Author: pytorch
def test_beta_log_prob(self):
        for _ in range(100):
            alpha = np.exp(np.random.normal())
            beta = np.exp(np.random.normal())
            dist = Beta(alpha, beta)
            x = dist.sample()
            actual_log_prob = dist.log_prob(x).sum()
            expected_log_prob = scipy.stats.beta.logpdf(x, alpha, beta)
            self.assertAlmostEqual(actual_log_prob, expected_log_prob, places=3)

    # This is a randomized test.
Project: pytorch    Author: pytorch
def test_normal_shape_scalar_params(self):
        normal = Normal(0, 1)
        self.assertEqual(normal._batch_shape, torch.Size())
        self.assertEqual(normal._event_shape, torch.Size())
        self.assertEqual(normal.sample().size(), torch.Size((1,)))
        self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, normal.log_prob, self.scalar_sample)
        self.assertEqual(normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(normal.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
Project: pytorch    Author: pytorch
def test_normal(self):
        q = torch.Tensor(100, 100)
        q.normal_()
        self.assertEqual(q.mean(), 0, 0.2)
        self.assertEqual(q.std(), 1, 0.2)

        q.normal_(2, 3)
        self.assertEqual(q.mean(), 2, 0.3)
        self.assertEqual(q.std(), 3, 0.3)

        mean = torch.Tensor(100, 100)
        std = torch.Tensor(100, 100)
        mean[:50] = 0
        mean[50:] = 1
        std[:, :50] = 4
        std[:, 50:] = 1

        r = torch.normal(mean)
        self.assertEqual(r[:50].mean(), 0, 0.2)
        self.assertEqual(r[50:].mean(), 1, 0.2)
        self.assertEqual(r.std(), 1, 0.2)

        r = torch.normal(mean, 3)
        self.assertEqual(r[:50].mean(), 0, 0.2)
        self.assertEqual(r[50:].mean(), 1, 0.2)
        self.assertEqual(r.std(), 3, 0.2)

        r = torch.normal(2, std)
        self.assertEqual(r.mean(), 2, 0.2)
        self.assertEqual(r[:, :50].std(), 4, 0.3)
        self.assertEqual(r[:, 50:].std(), 1, 0.2)

        r = torch.normal(mean, std)
        self.assertEqual(r[:50].mean(), 0, 0.2)
        self.assertEqual(r[50:].mean(), 1, 0.2)
        self.assertEqual(r[:, :50].std(), 4, 0.3)
        self.assertEqual(r[:, 50:].std(), 1, 0.2)
Project: aorun    Author: ramon-oliveira
def forward(self, X):
        X = super(ProbabilisticDense, self).forward(X)
        sigma_prior = math.exp(-3)
        W_eps = Variable(torch.zeros(self.input_dim, self.output_dim))
        W_eps = torch.normal(W_eps, std=sigma_prior)
        self.W = W = self.W_mu + torch.log1p(torch.exp(self.W_rho)) * W_eps
        b_eps = Variable(torch.zeros(self.output_dim))
        b_eps = torch.normal(b_eps, std=sigma_prior)
        self.b = b = self.b_mu + torch.log1p(torch.exp(self.b_rho)) * b_eps
        XW = X @ W
        return XW + b.expand_as(XW)
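This layer follows the Bayes-by-Backprop pattern: the weight standard deviation is parameterized as softplus(rho) = log(1 + exp(rho)), which stays positive while rho is unconstrained, and fresh noise perturbs the weight means on every forward pass. A minimal sketch of just the weight-sampling step (shapes and the prior std are illustrative):

import math
import torch

W_mu = torch.zeros(4, 2)     # learned weight means
W_rho = torch.zeros(4, 2)    # unconstrained; log1p(exp(W_rho)) is the weight std
eps = torch.normal(torch.zeros(4, 2), math.exp(-3))   # noise with the prior's std
W = W_mu + torch.log1p(torch.exp(W_rho)) * eps        # sampled weights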
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo
def vector_loader_modify(text_field_words):
    # load word2vec_raw
    path = 'word_embedding/glove.6B.300d.txt'
    words = []
    words_dict = {}
    file = open(path, 'rt', encoding='utf-8')
    lines = file.readlines()
    t = 300

    for line in lines:
        line_split = line.split(' ')
        word = line_split[0]
        nums = line_split[1:]
        nums = [float(e) for e in nums]
        # data.append(line_list)
        words.append(word)
        words_dict[word] = nums


    uniform = np.random.uniform(-0.1, 0.1, t).round(6).tolist()     # uniform distribution U(a, b)
    # match
    count_list2 = []
    count = 0
    dict_cat = []
    for word in text_field_words:
        if word in words_dict:
            count += 1
            dict_cat.append(words_dict[word])
        else:
            # a = torch.normal(mean=0.0, std=torch.arange(0.09, 0, -0.09))
            dict_cat.append(uniform)
            count += 1
            count_list2.append(count - 1)
    # count_data = len(text_field_words) - len(count_list2)

    # # modify uniform
    # sum = []
    # for j in range(t):
    #     sum_col = 0.0
    #     for i in range(len(dict_cat)):
    #         sum_col += dict_cat[i][j]
    #         sum_col = float(sum_col / count_data)
    #         sum_col = round(sum_col, 6)
    #     sum.append(sum_col)

    # sum_none = []
    # for i in range(t):
    #     sum_total = sum[i] / (len(sum) - len(count_list2))
    #     sum_total = round(sum_total, 6)
    #     sum_none.append(sum_total)
    # # print(sum_none)
    #
    # for i in range(len(count_list2)):
    #     dict_cat[count_list2[i]] = sum_none

    return dict_cat
Project: pytorch    Author: pytorch
def test_normal(self):
        mean = Variable(torch.randn(5, 5), requires_grad=True)
        std = Variable(torch.randn(5, 5).abs(), requires_grad=True)
        mean_1d = Variable(torch.randn(1), requires_grad=True)
        std_1d = Variable(torch.randn(1), requires_grad=True)
        mean_delta = torch.Tensor([1.0, 0.0])
        std_delta = torch.Tensor([1e-5, 1e-5])
        self.assertEqual(Normal(mean, std).sample().size(), (5, 5))
        self.assertEqual(Normal(mean, std).sample_n(7).size(), (7, 5, 5))
        self.assertEqual(Normal(mean_1d, std_1d).sample_n(1).size(), (1, 1))
        self.assertEqual(Normal(mean_1d, std_1d).sample().size(), (1,))
        self.assertEqual(Normal(0.2, .6).sample_n(1).size(), (1,))
        self.assertEqual(Normal(-0.7, 50.0).sample_n(1).size(), (1,))

        # sample check for extreme value of mean, std
        self._set_rng_seed(1)
        self.assertEqual(Normal(mean_delta, std_delta).sample(sample_shape=(1, 2)),
                         torch.Tensor([[[1.0, 0.0], [1.0, 0.0]]]),
                         prec=1e-4)

        self._gradcheck_log_prob(Normal, (mean, std))
        self._gradcheck_log_prob(Normal, (mean, 1.0))
        self._gradcheck_log_prob(Normal, (0.0, std))

        state = torch.get_rng_state()
        eps = torch.normal(torch.zeros_like(mean), torch.ones_like(std))
        torch.set_rng_state(state)
        z = Normal(mean, std).rsample()
        z.backward(torch.ones_like(z))
        self.assertEqual(mean.grad, torch.ones_like(mean))
        self.assertEqual(std.grad, eps)
        mean.grad.zero_()
        std.grad.zero_()
        self.assertEqual(z.size(), (5, 5))

        def ref_log_prob(idx, x, log_prob):
            m = mean.data.view(-1)[idx]
            s = std.data.view(-1)[idx]
            expected = (math.exp(-(x - m) ** 2 / (2 * s ** 2)) /
                        math.sqrt(2 * math.pi * s ** 2))
            self.assertAlmostEqual(log_prob, math.log(expected), places=3)

        self._check_log_prob(Normal(mean, std), ref_log_prob)

    # This is a randomized test.