Python torch module: zeros() example source code

From open-source Python projects, we have extracted the following 50 code examples to illustrate how to use torch.zeros().
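
Before the project examples, here is a minimal standalone sketch of the call itself (the shapes are arbitrary choices for illustration):

import torch

x = torch.zeros(2, 3)   # 2x3 tensor filled with zeros (default float dtype)
v = torch.zeros(5)      # 1-D tensor of five zeros
print(x.size())         # torch.Size([2, 3])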

Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                # place token j of example idx1 at position idx2
                main_matrix1[idx1, idx2] = j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
    return [main_matrix1_t, main_matrix2_t]

# An earlier, simpler variant kept for reference:
# def pad_batch(mini_batch):
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
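
For a quick sanity check, here is a toy invocation of pad_batch (the token-id lists are made up for illustration; this assumes the numpy/torch/Variable imports used by pad_batch are in scope):

mini_batch = [([3, 7, 2], [5, 1]),
              ([4, 9], [8, 6, 2, 1])]
m1, m2 = pad_batch(mini_batch)
print(m1.size())  # torch.Size([2, 3]) -> (batch, max_sent_len1)
print(m2.size())  # torch.Size([2, 4]) -> (batch, max_sent_len2)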
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def forward(self, x):
        outputs = []
        h_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())
        c_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())


        for i, input_t in enumerate(x.chunk(x.size(1), dim=1)):
            input_t = input_t.contiguous().view(input_t.size()[0], 1)
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            outputs += [c_t]
        outputs = torch.stack(outputs, 1).squeeze(2)
        shp = (outputs.size()[0], outputs.size()[1])
        out = outputs.contiguous().view(shp[0] * shp[1], self.hidden_size)
        out = self.fc(out)
        out = out.view(shp[0], shp[1], self.num_classes)

        return out
Project: python-utils    Author: zhijian-liu    | Project source | File source
def forward(self, inputs):
        # set up batch size
        batch_size = inputs.size(0)

        # compute hidden and cell
        hidden = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
        cell = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
        hidden_cell = (hidden, cell)

        # recurrent neural networks
        outputs, _ = self.rnn.forward(inputs, hidden_cell)
        outputs = outputs[:, -1, :].contiguous()

        # compute features by outputs
        features = self.feature.forward(outputs)
        return features
Project: python-utils    Author: zhijian-liu    | Project source | File source
def forward(self, inputs):
        # set up batch size
        batch_size = inputs.size(0)

        # compute hidden and cell
        hidden = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
        cell = Variable(torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).cuda())
        hidden_cell = (hidden, cell)

        # recurrent neural networks
        outputs, _ = self.rnn.forward(inputs, hidden_cell)
        outputs = outputs.contiguous().view(-1, self.hidden_size * 2)

        # compute classifications by outputs
        outputs = self.classifier.forward(outputs)
        outputs = F.softmax(outputs)
        outputs = outputs.view(batch_size, -1, self.num_classes)
        return outputs
Project: generals_a3c    Author: yilundu    | Project source | File source
def init_hidden(self, height, width):
        self.height = height
        self.width = width
        self.batch = height * width

        self.cell_state = Variable(
            torch.zeros(
                self.lstm_layer,
                self.batch,
                self.hidden_dim))
        self.hidden_state = Variable(
            torch.zeros(
                self.lstm_layer,
                self.batch,
                self.hidden_dim))

        if self.on_gpu:
            self.cell_state = self.cell_state.cuda()
            self.hidden_state = self.hidden_state.cuda()
Project: pytorch.rl.learning    Author: moskomule    | Project source | File source
def __init__(self, env_name, num_episodes, alpha, gamma, epsilon, policy, **kwargs):
        """
        Base class for RL methods that use a lookup table.
        :param env_name: name of the environment; currently only environments whose observation space and action space
        are both Discrete are supported. See https://github.com/openai/gym/wiki/Table-of-environments
        :param num_episodes: number of episodes for training
        :param alpha: learning rate
        :param gamma: discount factor
        :param epsilon: exploration rate for the epsilon-greedy policy
        :param policy: policy used to select actions
        :param kwargs: other arguments.
        """
        super(TableBase, self).__init__(env_name, num_episodes, alpha, gamma, policy, epsilon=epsilon, **kwargs)

        if not isinstance(self.env.action_space, gym.spaces.Discrete) or \
                not isinstance(self.env.observation_space, gym.spaces.Discrete):
            raise NotImplementedError("action_space and observation_space should be Discrete")

        self.obs_size = self.env.observation_space.n
        self.action_size = self.env.action_space.n
        self.q_table = torch.zeros(self.obs_size, self.action_size)
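
As a minimal illustration of the lookup-table pattern above (the sizes and the tabular Q-learning update below are a sketch, not this project's actual training loop):

import torch

obs_size, action_size = 16, 4
q_table = torch.zeros(obs_size, action_size)

state, next_state, reward = 3, 5, 1.0
alpha, gamma = 0.1, 0.99
action = int(q_table[state].argmax())  # greedy action for this state
q_table[state, action] += alpha * (reward + gamma * q_table[next_state].max()
                                   - q_table[state, action])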
Project: treelstm.pytorch    Author: dasguptar    | Project source | File source
def test(self, dataset):
        self.model.eval()
        total_loss = 0
        predictions = torch.zeros(len(dataset))
        indices = torch.arange(1, dataset.num_classes + 1)
        for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch)):
            ltree, lsent, rtree, rsent, label = dataset[idx]
            linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
            target = Var(map_label_to_target(label, dataset.num_classes), volatile=True)
            if self.args.cuda:
                linput, rinput = linput.cuda(), rinput.cuda()
                target = target.cuda()
            output = self.model(ltree, linput, rtree, rinput)
            loss = self.criterion(output, target)
            total_loss += loss.data[0]
            output = output.data.squeeze().cpu()
            predictions[idx] = torch.dot(indices, torch.exp(output))
        return total_loss / len(dataset), predictions
Project: pytorch-dist    Author: apaszke    | Project source | File source
def __init__(self, nOutput, eps=1e-5, momentum=0.1, affine=True):
        super(BatchNormalization, self).__init__()
        assert nOutput != 0

        self.affine = affine
        self.eps = eps
        self.train = True
        self.momentum = momentum
        self.running_mean = torch.zeros(nOutput)
        self.running_var = torch.ones(nOutput)

        self.save_mean = None
        self.save_std = None

        if self.affine:
            self.weight = torch.Tensor(nOutput)
            self.bias = torch.Tensor(nOutput)
            self.gradWeight = torch.Tensor(nOutput)
            self.gradBias = torch.Tensor(nOutput)
            self.reset()
        else:
            self.weight = None
            self.bias = None
            self.gradWeight = None
            self.gradBias = None
Project: pytorch-dist    Author: apaszke    | Project source | File source
def __init__(self, inputsize, outputsize, bias=True):
        super(PartialLinear, self).__init__()

        # define the layer as a small network:
        pt = ParallelTable()
        pt.add(Identity()).add(LookupTable(outputsize, inputsize))
        self.network = Sequential().add(pt).add(MM(False, True))
        if bias:
            self.bias     = torch.zeros(1, outputsize)
            self.gradBias = torch.zeros(1, outputsize)
        else:
            self.bias = self.gradBias = None

        # set partition:
        self.inputsize  = inputsize
        self.outputsize = outputsize
        self.allcolumns = torch.range(0, self.outputsize-1).long()
        self.resetPartition()
        self.addBuffer = None
        self.buffer = None
Project: pytorch-dist    Author: apaszke    | Project source | File source
def _test_sharing(self):
        def do_test():
            x = torch.zeros(5, 5)
            q = mp.Queue()
            e = mp.Event()
            data = [x, x[:, 1]]
            q.put(data)
            p = mp.Process(target=simple_fill, args=(q, e))
            lc.check_pid(p.pid)
            p.start()
            e.wait()
            self.assertTrue(data[0].eq(4).all())
            self.assertTrue(data[1].eq(4).all())
            p.join(1)
            self.assertFalse(p.is_alive())

        with leak_checker(self) as lc:
            do_test()
Project: pytorch-dist    Author: apaszke    | Project source | File source
def _test_pool(self):
        def do_test():
            p = mp.Pool(2)
            for proc in p._pool:
                lc.check_pid(proc.pid)

            buffers = (torch.zeros(2, 2) for i in range(4))
            results = p.map(simple_pool_fill, buffers, 1)
            for r in results:
                self.assertEqual(r, torch.ones(2, 2) * 5, 0)
            self.assertEqual(len(results), 4)

            p.close()
            p.join()

        with leak_checker(self) as lc:
            do_test()
Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_linspace(self):
        _from = random.random()
        to = _from + random.random()
        res1 = torch.linspace(_from, to, 137)
        res2 = torch.Tensor()
        torch.linspace(res2, _from, to, 137)
        self.assertEqual(res1, res2, 0)
        self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, 1))
        self.assertEqual(torch.linspace(0, 0, 1), torch.zeros(1), 0)

        # Check linspace for generating with start > end.
        self.assertEqual(torch.linspace(2, 0, 3), torch.Tensor((2, 1, 0)), 0)

        # Check linspace for non-contiguous tensors.
        x = torch.zeros(2, 3)
        y = torch.linspace(x.narrow(1, 1, 2), 0, 3, 4)
        self.assertEqual(x, torch.Tensor(((0, 0, 1), (0, 2, 3))), 0)
Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_logspace(self):
        _from = random.random()
        to = _from + random.random()
        res1 = torch.logspace(_from, to, 137)
        res2 = torch.Tensor()
        torch.logspace(res2, _from, to, 137)
        self.assertEqual(res1, res2, 0)
        self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, 1))
        self.assertEqual(torch.logspace(0, 0, 1), torch.ones(1), 0)

        # Check logspace_ for generating with start > end.
        self.assertEqual(torch.logspace(1, 0, 2), torch.Tensor((10, 1)), 0)

        # Check logspace_ for non-contiguous tensors.
        x = torch.zeros(2, 3)
        y = torch.logspace(x.narrow(1, 1, 2), 0, 3, 4)
        self.assertEqual(x, torch.Tensor(((0, 1, 10), (0, 100, 1000))), 0)
Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_newindex(self):
        reference = self._consecutive((3, 3, 3))
        # This relies on __index__() being correct - but we have separate tests for that
        def checkPartialAssign(index):
            reference = torch.zeros(3, 3, 3)
            reference[index] = self._consecutive((3, 3, 3))[index]
            self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], 0)
            reference[index] = 0
            self.assertEqual(reference, torch.zeros(3, 3, 3), 0)

        checkPartialAssign(0)
        checkPartialAssign(1)
        checkPartialAssign(2)
        checkPartialAssign((0, 1))
        checkPartialAssign((1, 2))
        checkPartialAssign((0, 2))

        with self.assertRaises(RuntimeError):
            reference[1, 1, 1, 1] = 1
        with self.assertRaises(RuntimeError):
            reference[1, 1, 1, (1, 1)] = 1
        with self.assertRaises(RuntimeError):
            reference[3, 3, 3, 3, 3, 3, 3, 3] = 1
Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_scatter(self):
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = torch.LongTensor().resize_(*idx_size)
        self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)
        src = torch.Tensor().resize_(*idx_size).normal_()

        actual = torch.zeros(m, n, o).scatter_(dim, idx, src)
        expected = torch.zeros(m, n, o)
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i,j,k]
                    expected[tuple(ii)] = src[i,j,k]
        self.assertEqual(actual, expected, 0)

        idx[0][0][0] = 34
        self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, src))
Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_scatterFill(self):
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        val = random.random()
        idx_size = [m, n, o]
        idx_size[dim] = elems_per_row
        idx = torch.LongTensor().resize_(*idx_size)
        self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)

        actual = torch.zeros(m, n, o).scatter_(dim, idx, val)
        expected = torch.zeros(m, n, o)
        for i in range(idx_size[0]):
            for j in range(idx_size[1]):
                for k in range(idx_size[2]):
                    ii = [i, j, k]
                    ii[dim] = idx[i,j,k]
                    expected[tuple(ii)] = val
        self.assertEqual(actual, expected, 0)

        idx[0][0][0] = 28
        self.assertRaises(RuntimeError, lambda: torch.zeros(m, n, o).scatter_(dim, idx, val))
Project: torch-gel    Author: jayanthkoushik    | Project source | File source
def make_A(As, ns, use_gpu=False):
    """Create the 3D tensor A as needed by gel_solve, given a list of feature
        matrices.

    Arguments:
        As: list of feature matrices, one per group (size mxn_j).
        ns: LongTensor of group sizes.
        use_gpu: move the final tensor to GPU.
    """
    A = torch.zeros(len(ns), ns.max(), As[0].size()[0])
    for j, n_j in enumerate(ns):
        # Fill A[j] with A_j.T
        A_j = As[j]
        A[j, :n_j, :] = A_j.t()
    if use_gpu:
        A = A.cuda()
    return A
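
A toy call to make_A with two groups (shapes invented for illustration; written against the same old-style API as the function, where LongTensor.max() behaves like a plain int):

import torch

m = 5                                        # rows in every feature matrix
As = [torch.randn(m, 2), torch.randn(m, 3)]  # one m x n_j matrix per group
ns = torch.LongTensor([2, 3])                # group sizes n_j
A = make_A(As, ns)
print(A.size())  # torch.Size([2, 3, 5]) -> (num_groups, max(ns), m)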
Project: tnt    Author: pytorch    | Project source | File source
def testAUCMeter(self):
        mtr = meter.AUCMeter()

        test_size = 1000
        mtr.add(torch.rand(test_size), torch.zeros(test_size))
        mtr.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))

        val, tpr, fpr = mtr.value()
        self.assertTrue(math.fabs(val - 0.5) < 0.1, msg="AUC Meter fails")

        mtr.reset()
        mtr.add(torch.Tensor(test_size).fill_(0), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.1), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.2), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.3), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.4), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(1),
                torch.Tensor(test_size).fill_(1))
        val, tpr, fpr = mtr.value()

        self.assertEqual(val, 1.0, msg="AUC Meter fails")
Project: allennlp    Author: allenai    | Project source | File source
def test_last_dim_softmax_does_softmax_on_last_dim(self):
        batch_size = 1
        length_1 = 5
        length_2 = 3
        num_options = 4
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1]
        options_tensor = Variable(torch.from_numpy(options_array))
        softmax_tensor = util.last_dim_softmax(options_tensor).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371],
                                    decimal=5)
Project: allennlp    Author: allenai    | Project source | File source
def test_last_dim_softmax_handles_mask_correctly(self):
        batch_size = 1
        length_1 = 4
        length_2 = 3
        num_options = 5
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1, 6]
        mask = Variable(torch.IntTensor([[1, 1, 1, 1, 0]]))
        options_tensor = Variable(torch.from_numpy(options_array).float())
        softmax_tensor = util.last_dim_softmax(options_tensor, mask).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371, 0.0],
                                    decimal=5)
Project: allennlp    Author: allenai    | Project source | File source
def test_remove_sentence_boundaries(self):
        tensor = Variable(torch.from_numpy(numpy.random.rand(3, 5, 7)))
        mask = Variable(torch.from_numpy(
                # The mask with two elements is to test the corner case
                # of an empty sequence, so here we are removing boundaries
                # from  "<S> </S>"
                numpy.array([[1, 1, 0, 0, 0],
                             [1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 0]]))).long()
        new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)

        expected_new_tensor = Variable(torch.zeros(3, 3, 7))
        expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
        expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
        assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())

        expected_new_mask = Variable(torch.from_numpy(
                numpy.array([[0, 0, 0],
                             [1, 1, 1],
                             [1, 1, 0]]))).long()
        assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
Project: allennlp    Author: allenai    | Project source | File source
def test_add_positional_features(self):
        # This is hard to test, so we check that we get the same result as the
        # original tensorflow implementation:
        # https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py#L270
        tensor2tensor_result = numpy.asarray([[0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 1.00000000e+00],
                                              [8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e+00],
                                              [9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e+00]])

        tensor = Variable(torch.zeros([2, 3, 4]))
        result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
        numpy.testing.assert_almost_equal(result[0].data.cpu().numpy(), tensor2tensor_result)
        numpy.testing.assert_almost_equal(result[1].data.cpu().numpy(), tensor2tensor_result)

        # Check case with odd number of dimensions.
        tensor2tensor_result = numpy.asarray([[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,
                                               1.00000000e+00, 1.00000000e+00, 0.00000000e+00],
                                              [8.41470957e-01, 9.99983307e-03, 9.99999902e-05, 5.40302277e-01,
                                               9.99949992e-01, 1.00000000e+00, 0.00000000e+00],
                                              [9.09297407e-01, 1.99986659e-02, 1.99999980e-04, -4.16146815e-01,
                                               9.99800026e-01, 1.00000000e+00, 0.00000000e+00]])

        tensor = Variable(torch.zeros([2, 3, 7]))
        result = util.add_positional_features(tensor, min_timescale=1.0, max_timescale=1.0e4)
        numpy.testing.assert_almost_equal(result[0].data.cpu().numpy(), tensor2tensor_result)
        numpy.testing.assert_almost_equal(result[1].data.cpu().numpy(), tensor2tensor_result)
Project: deep-text-corrector    Author: andabi    | Project source | File source
def forward(self, hidden, encoder_outputs):
        # hidden.size() = (B, H), encoder_outputs.size() = (B, S, H)
        batch_size, encoder_outputs_len, _ = encoder_outputs.size()

        # Create variable to store attention energies
        # attn_energies.size() = (B, S)
        attn_energies = Variable(torch.zeros((batch_size, encoder_outputs_len)))  # B x S
        if Config.use_cuda: attn_energies = attn_energies.cuda()

        # Calculate energies for each encoder output
        # attn_energies.size() = (B, S)
        for i in range(encoder_outputs_len):
            attn_energies[:, i] = self.score(hidden, encoder_outputs[:, i])
            # print attn_energies[:, i]

        # Normalize energies to weights in range 0 to 1
        return F.softmax(attn_energies)
Project: mss_pytorch    Author: Js-Mim    | Project source | File source
def forward(self, H_enc):
        if torch.has_cudnn:
            # Initialization of the hidden states
            h_t_dec = Variable(torch.zeros(self._B, self._gruout).cuda(), requires_grad=False)

            # Initialization of the decoder output
            H_j_dec = Variable(torch.zeros(self._B, self._T - (self._L * 2), self._gruout).cuda(), requires_grad=False)

        else:
            # Initialization of the hidden states
            h_t_dec = Variable(torch.zeros(self._B, self._gruout), requires_grad=False)

            # Initialization of the decoder output
            H_j_dec = Variable(torch.zeros(self._B, self._T - (self._L * 2), self._gruout), requires_grad=False)

        for ts in range(self._T - (self._L * 2)):
            # GRU Decoding
            h_t_dec = self.gruDec(H_enc[:, ts, :], h_t_dec)
            H_j_dec[:, ts, :] = h_t_dec

        return H_j_dec
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | Project source | File source
def make_batch(batch_size):
    batch_idx = np.random.choice(len(data),batch_size)
    batch_sequences = [data[idx] for idx in batch_idx]
    strokes = []
    lengths = []
    indice = 0
    for seq in batch_sequences:
        len_seq = len(seq[:,0])
        new_seq = np.zeros((Nmax,5))
        new_seq[:len_seq,:2] = seq[:,:2]
        new_seq[:len_seq-1,2] = 1-seq[:-1,2]
        new_seq[:len_seq,3] = seq[:,2]
        new_seq[(len_seq-1):,4] = 1
        new_seq[len_seq-1,2:4] = 0
        lengths.append(len(seq[:,0]))
        strokes.append(new_seq)
        indice += 1

    if use_cuda:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).cuda().float())
    else:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).float())
    return batch, lengths

################################ adaptive lr
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | Project source | File source
def forward(self, inputs, batch_size, hidden_cell=None):
        if hidden_cell is None:
            # then must init with zeros
            if use_cuda:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
            else:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
            hidden_cell = (hidden, cell)
        _, (hidden,cell) = self.lstm(inputs.float(), hidden_cell)
        # hidden is (2, batch_size, hidden_size), we want (batch_size, 2*hidden_size):
        hidden_forward, hidden_backward = torch.split(hidden,1,0)
        hidden_cat = torch.cat([hidden_forward.squeeze(0), hidden_backward.squeeze(0)],1)
        # mu and sigma:
        mu = self.fc_mu(hidden_cat)
        sigma_hat = self.fc_sigma(hidden_cat)
        sigma = torch.exp(sigma_hat/2.)
        # N ~ N(0,1)
        z_size = mu.size()
        if use_cuda:
            N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)).cuda())
        else:
            N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)))
        z = mu + sigma*N
        # mu and sigma_hat are needed for LKL loss
        return z, mu, sigma_hat
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | Project source | File source
def make_target(self, batch, lengths):
        if use_cuda:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1]).cuda()).unsqueeze(0)
        else:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1])).unsqueeze(0)
        batch = torch.cat([batch, eos], 0)
        mask = torch.zeros(Nmax+1, batch.size()[1])
        for indice,length in enumerate(lengths):
            mask[:length,indice] = 1
        if use_cuda:
            mask = Variable(mask.cuda()).detach()
        else:
            mask = Variable(mask).detach()
        dx = torch.stack([Variable(batch.data[:,:,0])]*hp.M,2).detach()
        dy = torch.stack([Variable(batch.data[:,:,1])]*hp.M,2).detach()
        p1 = Variable(batch.data[:,:,2]).detach()
        p2 = Variable(batch.data[:,:,3]).detach()
        p3 = Variable(batch.data[:,:,4]).detach()
        p = torch.stack([p1,p2,p3],2)
        return mask,dx,dy,p
Project: Video-Classification-Action-Recognition    Author: qijiezhao    | Project source | File source
def get_crop_ix(self,training_size):
        rescale_sizes=self.rescale_size
        crop_inds=[]
        for size_pair in rescale_sizes:
            mother_w,mother_h=size_pair
            crop_ix=np.zeros([5,4],dtype=np.int16)
            w_indices=(0,mother_w-training_size)
            h_indices=(0,mother_h-training_size)
            w_center = (mother_w - training_size) // 2  # integer division: these are pixel indices
            h_center = (mother_h - training_size) // 2
            crop_ix[4,:]=[w_center,h_center,training_size+w_center,training_size+h_center]
            cnt=0
            for i in w_indices:
                for j in h_indices:
                    crop_ix[cnt,:]=[i,j,i+training_size,j+training_size]
                    cnt+=1
            crop_inds.append(crop_ix)
        return crop_inds
Project: pyro    Author: uber    | Project source | File source
def classifier(self, xs):
        """
        classify an image (or a batch of images)

        :param xs: a batch of scaled vectors of pixels from an image
        :return: a batch of the corresponding class labels (as one-hots)
        """
        # use the trained model q(y|x) = categorical(alpha(x))
        # compute all class probabilities for the image(s)
        alpha = self.encoder_y.forward(xs)

        # get the index (digit) that corresponds to
        # the maximum predicted class probability
        res, ind = torch.topk(alpha, 1)

        # convert the digit(s) to one-hot tensor(s)
        ys = Variable(torch.zeros(alpha.size()))
        ys = ys.scatter_(1, ind, 1.0)
        return ys
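
The zeros-plus-scatter_ idiom above is the standard way to build one-hot rows; a minimal standalone sketch:

import torch

ind = torch.LongTensor([[2], [0]])            # one class index per row
ys = torch.zeros(2, 4).scatter_(1, ind, 1.0)
# ys: [[0, 0, 1, 0],
#      [1, 0, 0, 0]]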
Project: pyro    Author: uber    | Project source | File source
def model(data):
    # Create unit normal priors over the parameters
    mu = Variable(torch.zeros(p, 1)).type_as(data)
    sigma = Variable(torch.ones(p, 1)).type_as(data)
    bias_mu = Variable(torch.zeros(1)).type_as(data)
    bias_sigma = Variable(torch.ones(1)).type_as(data)
    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)
    priors = {'linear.weight': w_prior, 'linear.bias': b_prior}
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()

    with pyro.iarange("map", N, subsample=data):
        x_data = data[:, :-1]
        y_data = data[:, -1]
        # run the regressor forward conditioned on inputs
        prediction_mean = lifted_reg_model(x_data).squeeze()
        pyro.observe("obs", Normal(prediction_mean, Variable(torch.ones(data.size(0))).type_as(data)), y_data.squeeze())
Project: pyro    Author: uber    | Project source | File source
def __init__(self, z_dim, transition_dim):
        super(GatedTransition, self).__init__()
        # initialize the six linear transformations used in the neural network
        self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_sig = nn.Linear(z_dim, z_dim)
        self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
        # modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
        self.lin_z_to_mu.weight.data = torch.eye(z_dim)
        self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
        # initialize the three non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
Project: pyro    Author: uber    | Project source | File source
def setUp(self):

        pyro.clear_param_store()

        def model():
            mu = pyro.sample("mu", Normal(Variable(torch.zeros(1)),
                                          Variable(torch.ones(1))))
            xd = Normal(mu, Variable(torch.ones(1)), batch_size=50)
            pyro.observe("xs", xd, self.data)
            return mu

        def guide():
            return pyro.sample("mu", Normal(Variable(torch.zeros(1)),
                                            Variable(torch.ones(1))))

        # data
        self.data = Variable(torch.zeros(50, 1))
        self.mu_mean = Variable(torch.zeros(1))
        self.mu_stddev = torch.sqrt(Variable(torch.ones(1)) / 51.0)

        # model and guide
        self.model = model
        self.guide = guide
Project: pyro    Author: uber    | Project source | File source
def finite_difference(eval_loss, delta=0.1):
    """
    Computes a finite-difference approximation of the loss gradient for all parameters.
    """
    params = pyro.get_param_store().get_all_param_names()
    assert params, "no params found"
    grads = {name: Variable(torch.zeros(pyro.param(name).size())) for name in params}
    for name in sorted(params):
        value = pyro.param(name).data
        for index in itertools.product(*map(range, value.size())):
            center = value[index]
            value[index] = center + delta
            pos = eval_loss()
            value[index] = center - delta
            neg = eval_loss()
            value[index] = center
            grads[name][index] = (pos - neg) / (2 * delta)
    return grads
Project: pyro    Author: uber    | Project source | File source
def _test_jacobian(self, input_dim, hidden_dim, multiplier):
        jacobian = torch.zeros(input_dim, input_dim)
        arn = AutoRegressiveNN(input_dim, hidden_dim, multiplier)

        def nonzero(x):
            return torch.sign(torch.abs(x))

        for output_index in range(multiplier):
            for j in range(input_dim):
                for k in range(input_dim):
                    x = Variable(torch.randn(1, input_dim))
                    epsilon_vector = torch.zeros(1, input_dim)
                    epsilon_vector[0, j] = self.epsilon
                    delta = (arn(x + Variable(epsilon_vector)) - arn(x)) / self.epsilon
                    jacobian[j, k] = float(delta[0, k + output_index * input_dim].data.cpu().numpy()[0])

            permutation = arn.get_permutation()
            permuted_jacobian = jacobian.clone()
            for j in range(input_dim):
                for k in range(input_dim):
                    permuted_jacobian[j, k] = jacobian[permutation[j], permutation[k]]

            lower_sum = torch.sum(torch.tril(nonzero(permuted_jacobian), diagonal=0))
            self.assertTrue(lower_sum == float(0.0))
Project: pyro    Author: uber    | Project source | File source
def setUp(self):

        # Simple model with 1 continuous + 1 discrete + 1 continuous variable.
        def model():
            p = Variable(torch.Tensor([0.5]))
            mu = Variable(torch.zeros(1))
            sigma = Variable(torch.ones(1))

            x = pyro.sample("x", Normal(mu, sigma))  # Before the discrete variable.
            y = pyro.sample("y", Bernoulli(p))
            z = pyro.sample("z", Normal(mu, sigma))  # After the discrete variable.
            return dict(x=x, y=y, z=z)

        self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
        self.model = model
        self.queue = Queue()
        self.queue.put(poutine.Trace())
Project: pytorch-smoothgrad    Author: pkdn    | Project source | File source
def __call__(self, x, index=None):
        output = self.pretrained_model(x)

        if index is None:
            index = np.argmax(output.data.cpu().numpy())

        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        if self.cuda:
            one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
        else:
            one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
        one_hot = torch.sum(one_hot * output)

        one_hot.backward(retain_variables=True)

        grad = x.grad.data.cpu().numpy()
        grad = grad[0, :, :, :]

        return grad
Project: Dynamic-memory-networks-plus-Pytorch    Author: dandelin    | Project source | File source
def forward(self, facts, G):
        '''
        facts.size() -> (#batch, #sentence, #hidden = #embedding)
        fact.size() -> (#batch, #hidden = #embedding)
        G.size() -> (#batch, #sentence)
        g.size() -> (#batch, )
        C.size() -> (#batch, #hidden)
        '''
        batch_num, sen_num, embedding_size = facts.size()
        C = Variable(torch.zeros(self.hidden_size)).cuda()
        for sid in range(sen_num):
            fact = facts[:, sid, :]
            g = G[:, sid]
            if sid == 0:
                C = C.unsqueeze(0).expand_as(fact)
            C = self.AGRUCell(fact, C, g)
        return C
Project: Dynamic-memory-networks-plus-Pytorch    Author: dandelin    | Project source | File source
def forward(self, contexts, word_embedding):
        '''
        contexts.size() -> (#batch, #sentence, #token)
        word_embedding() -> (#batch, #sentence x #token, #embedding)
        position_encoding() -> (#batch, #sentence, #embedding)
        facts.size() -> (#batch, #sentence, #hidden = #embedding)
        '''
        batch_num, sen_num, token_num = contexts.size()

        contexts = contexts.view(batch_num, -1)
        contexts = word_embedding(contexts)

        contexts = contexts.view(batch_num, sen_num, token_num, -1)
        contexts = position_encoding(contexts)
        contexts = self.dropout(contexts)

        h0 = Variable(torch.zeros(2, batch_num, self.hidden_size).cuda())
        facts, hdn = self.gru(contexts, h0)
        facts = facts[:, :, :self.hidden_size] + facts[:, :, self.hidden_size:]
        return facts
Project: pytorch-caffe-darknet-convert    Author: marvis    | Project source | File source
def nms(boxes, nms_thresh):
    if len(boxes) == 0:
        return boxes

    det_confs = torch.zeros(len(boxes))
    for i in range(len(boxes)):
        det_confs[i] = 1 - boxes[i][4]

    _,sortIds = torch.sort(det_confs)
    out_boxes = []
    for i in range(len(boxes)):
        box_i = boxes[sortIds[i]]
        if box_i[4] > 0:
            out_boxes.append(box_i)
            for j in range(i+1, len(boxes)):
                box_j = boxes[sortIds[j]]
                if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
                    #print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
                    box_j[4] = 0
    return out_boxes
Project: faster_rcnn_pytorch    Author: longcw    | Project source | File source
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output
Project: treehopper    Author: tomekkorbak    | Project source | File source
def test(self, dataset):
        self.model.eval()
        self.embedding_model.eval()
        loss = 0
        accuracies = torch.zeros(len(dataset))

        output_trees = []
        outputs = []
        for idx in tqdm(range(len(dataset)), desc='Testing epoch  '+str(self.epoch)+''):
            tree, sent, label = dataset[idx]
            input = Var(sent, volatile=True)
            target = Var(torch.LongTensor([int(label)]), volatile=True)
            if self.args.cuda:
                input = input.cuda()
                target = target.cuda()
            emb = F.torch.unsqueeze(self.embedding_model(input),1)
            output, _, acc, tree = self.model(tree, emb)
            err = self.criterion(output, target)
            loss += err.data[0]
            accuracies[idx] = acc
            output_trees.append(tree)
            outputs.append(tree.output_softmax.data.numpy())
            # predictions[idx] = torch.dot(indices,torch.exp(output.data.cpu()))
        return loss/len(dataset), accuracies, outputs, output_trees
Project: treehopper    Author: tomekkorbak    | Project source | File source
def load_word_vectors(embeddings_path):
    if os.path.isfile(embeddings_path + '.pth') and \
            os.path.isfile(embeddings_path + '.vocab'):
        print('==> File found, loading to memory')
        vectors = torch.load(embeddings_path + '.pth')
        vocab = Vocab(filename=embeddings_path + '.vocab')
        return vocab, vectors
    if os.path.isfile(embeddings_path + '.model'):
        model = KeyedVectors.load(embeddings_path + ".model")
    if os.path.isfile(embeddings_path + '.vec'):
        model = FastText.load_word2vec_format(embeddings_path + '.vec')
    list_of_tokens = model.vocab.keys()
    vectors = torch.zeros(len(list_of_tokens), model.vector_size)
    with open(embeddings_path + '.vocab', 'w', encoding='utf-8') as f:
        for token in list_of_tokens:
            f.write(token+'\n')
    vocab = Vocab(filename=embeddings_path + '.vocab')
    for index, word in enumerate(list_of_tokens):
        vectors[index, :] = torch.from_numpy(model[word])
    return vocab, vectors
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def forward(self, x, prevState = None ):

        # dimensions
        if len(x.size()) == 2: x = x.unsqueeze(0)
        batch = x.size(0)
        steps = x.size(1)

        if prevState is None:
            prevState = {}
        hs = {}
        cs = {}
        for t in range(steps):
            # input at time step t
            xt = x[:, t, :]
            # previous h: last step's state, else the provided initial state,
            # else zeros (the hidden size used for the zero state is an
            # assumption; the original excerpt is truncated here)
            hp = hs.get(t - 1, prevState.get('h', torch.zeros(batch, self.hidden_size)))
        a = 0
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def preProc2(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)

    # Zero-filled output variable; the two conditions below fill it piecewise
    z = Variable(torch.zeros(x.size())).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    cond2 = torch.le(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.sign(x[cond1])
        z[cond1] = x1
    if (torch.sum(cond2) > 0).data.all():
        x2 = x[cond2]*expP
        z[cond2] = x2
    return z
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def __init__(self, num_features, max_length, eps=1e-5, momentum=0.1,
                 affine=True):
        """
        Most parts are copied from
        torch.nn.modules.batchnorm._BatchNorm.
        """

        super(SeparatedBatchNorm1d, self).__init__()
        self.num_features = num_features
        self.max_length = max_length
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        if self.affine:
            self.weight = nn.Parameter(torch.FloatTensor(num_features))
            self.bias = nn.Parameter(torch.FloatTensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        for i in range(max_length):
            self.register_buffer(
                'running_mean_{}'.format(i), torch.zeros(num_features))
            self.register_buffer(
                'running_var_{}'.format(i), torch.ones(num_features))
        self.reset_parameters()
Project: drl.pth    Author: seba-1511    | Project source | File source
def sample_lstm_state(args):
    if 'layer_sizes' in vars(args):
        hx = V(th.zeros(1, args.layer_sizes))
        cx = V(th.zeros(1, args.layer_sizes))
        return hx, cx
    else:
        return None
Project: drl.pth    Author: seba-1511    | Project source | File source
def __init__(self, model, action_size=1, init_value=0.0, *args, **kwargs):
        super(DiagonalGaussianPolicy, self).__init__(model, *args, **kwargs)
        self.init_value = init_value
        self.logstd = th.zeros((1, action_size)) + self.init_value
        self.logstd = P(self.logstd)
        self.halflog2pie = V(T([2 * pi * exp(1)])) * 0.5
        self.halflog2pi = V(T([2.0 * pi])) * 0.5
        self.pi = V(T([pi]))
Project: drl.pth    Author: seba-1511    | Project source | File source
def __init__(self, value=0.0):
        super(ConstantCritic, self).__init__()
        self.value = V(th.zeros(1, 1) + value)
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def torch_list_grad_norm(param_list):
    squared_sum = Variable(torch.zeros(1))
    for param in param_list:
        squared_sum += param.grad.norm()**2
    return squared_sum.sqrt()

# Use the nn package to define our model and loss function.
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def init_hidden(self):
        if self.bidirectional:
            if self.use_lstm:
                return [Variable(torch.zeros(2, self.batch_size, self.word_gru_hidden)),
                        Variable(torch.zeros(2, self.batch_size, self.word_gru_hidden))]
            else:
                return Variable(torch.zeros(2, self.batch_size, self.word_gru_hidden))
        else:
            if self.use_lstm:
                return [Variable(torch.zeros(1, self.batch_size, self.word_gru_hidden)),
                        Variable(torch.zeros(1, self.batch_size, self.word_gru_hidden))]
            else:
                return Variable(torch.zeros(1, self.batch_size, self.word_gru_hidden))