Python torch module: ByteTensor() usage examples

The following 50 code examples were extracted from open-source Python projects to illustrate how to use torch.ByteTensor().
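Before the project snippets, a minimal standalone sketch (not taken from any of the projects below) of what torch.ByteTensor() provides: an 8-bit unsigned integer tensor, most often used as a 0/1 mask in the pre-torch.BoolTensor API that all of these examples rely on.

import torch

# Uninitialized 8-bit tensor of shape (2, 3); contents are arbitrary until filled.
buf = torch.ByteTensor(2, 3)
buf.fill_(0)

# ByteTensor built from explicit 0/1 data -- the usual way to hand-craft a mask.
mask = torch.ByteTensor([[1, 0, 1], [0, 1, 0]])
print(mask.sum())            # 3 entries are set
print(buf.element_size())    # 1 byte per element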

Project: pytorch-dist    Author: apaszke
def test_streams(self):
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        self.assertEqual(torch.cuda.current_stream(), default_stream)
        self.assertNotEqual(default_stream, user_stream)
        self.assertEqual(default_stream.cuda_stream, 0)
        self.assertNotEqual(user_stream.cuda_stream, 0)
        with torch.cuda.stream(user_stream):
            self.assertEqual(torch.cuda.current_stream(), user_stream)
        self.assertTrue(user_stream.query())
        # copy a 10 MB tensor from CPU to GPU, which should take some time
        tensor1 = torch.ByteTensor(10000000).pin_memory()
        tensor2 = tensor1.cuda(async=True)
        self.assertFalse(default_stream.query())
        default_stream.synchronize()
        self.assertTrue(default_stream.query())
Project: pytorch-dist    Author: apaszke
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch-dist    Author: apaszke
def test_masked_copy(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_copy_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_copy_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_copy_(mask, src)
Project: pytorch    Author: tylergenter
def test_type_conversions(self):
        x = Variable(torch.randn(5, 5))
        self.assertIs(type(x.float().data), torch.FloatTensor)
        self.assertIs(type(x.int().data), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIs(type(x.float().cuda().data), torch.cuda.FloatTensor)
            self.assertIs(type(x.int().cuda().data), torch.cuda.IntTensor)
            self.assertIs(type(x.int().cuda().cpu().data), torch.IntTensor)
            if torch.cuda.device_count() > 2:
                x2 = x.float().cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            y = Variable(torch.randn(5, 5).type(t))
            self.assertIs(type(x.type_as(y).data), t)
Project: pytorch    Author: tylergenter
def test_streams(self):
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        self.assertEqual(torch.cuda.current_stream(), default_stream)
        self.assertNotEqual(default_stream, user_stream)
        self.assertEqual(default_stream.cuda_stream, 0)
        self.assertNotEqual(user_stream.cuda_stream, 0)
        with torch.cuda.stream(user_stream):
            self.assertEqual(torch.cuda.current_stream(), user_stream)
        self.assertTrue(user_stream.query())
        # copy a 10 MB tensor from CPU to GPU, which should take some time
        tensor1 = torch.ByteTensor(10000000).pin_memory()
        tensor2 = tensor1.cuda(async=True)
        self.assertFalse(default_stream.query())
        default_stream.synchronize()
        self.assertTrue(default_stream.query())
Project: pytorch    Author: tylergenter
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch    Author: tylergenter
def test_masked_copy(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_copy_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_copy_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_copy_(mask, src)
Project: pytorch    Author: tylergenter
def test_bernoulli(self):
        t = torch.ByteTensor(10, 10)

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

        p = 0.5
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        p = torch.rand(SIZE)
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        q = torch.rand(5, 5)
        self.assertTrue(isBinary(q.bernoulli()))
Project: rl    Author: Shmuma
def calc_loss_dqn(batch, net, tgt_net, gamma, cuda=False):
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    states_v = Variable(torch.from_numpy(states))
    next_states_v = Variable(torch.from_numpy(next_states), volatile=True)
    actions_v = Variable(torch.from_numpy(actions))
    rewards_v = Variable(torch.from_numpy(rewards))
    done_mask = torch.ByteTensor(dones)
    if cuda:
        states_v = states_v.cuda()
        next_states_v = next_states_v.cuda()
        actions_v = actions_v.cuda()
        rewards_v = rewards_v.cuda()
        done_mask = done_mask.cuda()

    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0
    next_state_values.volatile = False

    expected_state_action_values = next_state_values * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
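For orientation, the done-mask step from calc_loss_dqn above in isolation, as a minimal sketch with made-up values: a ByteTensor mask zeroes the bootstrapped next-state value of every transition that ended the episode.

import torch

# Standalone illustration of the done-mask trick (values are made up).
next_state_values = torch.Tensor([1.5, 0.7, 2.1, 0.3])
done_mask = torch.ByteTensor([0, 1, 0, 1])
next_state_values[done_mask] = 0.0
print(next_state_values)   # 1.5, 0.0, 2.1, 0.0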
Project: Seq2Seq-PyTorch    Author: MaximumEntropy
def forward(self, input, hidden, ctx, ctx_mask=None):
        """Propogate input through the layer."""
        h_0, c_0 = hidden
        h_1, c_1 = [], []
        for i, layer in enumerate(self.layers):
            if ctx_mask is not None:
                ctx_mask = torch.ByteTensor(
                    ctx_mask.data.cpu().numpy().astype(np.int32).tolist()
                ).cuda()
            output, (h_1_i, c_1_i) = layer(input, (h_0, c_0), ctx, ctx_mask)

            input = output

            # apply dropout only between stacked layers, not after the last one
            if i != len(self.layers) - 1:
                input = self.dropout(input)

            h_1 += [h_1_i]
            c_1 += [c_1_i]

        h_1 = torch.stack(h_1)
        c_1 = torch.stack(c_1)

        return input, (h_1, c_1)
Project: pytorch-coriander    Author: hughperkins
def test_type_conversions(self):
        x = Variable(torch.randn(5, 5))
        self.assertIs(type(x.float().data), torch.FloatTensor)
        self.assertIs(type(x.int().data), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIs(type(x.float().cuda().data), torch.cuda.FloatTensor)
            self.assertIs(type(x.int().cuda().data), torch.cuda.IntTensor)
            self.assertIs(type(x.int().cuda().cpu().data), torch.IntTensor)
            if torch.cuda.device_count() > 2:
                x2 = x.float().cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIs(type(x2.data), torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            for var in (True, False):
                y = torch.randn(5, 5).type(t)
                if var:
                    y = Variable(y)
                self.assertIs(type(x.type_as(y).data), t)
Project: pytorch-coriander    Author: hughperkins
def test_streams(self):
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        self.assertEqual(torch.cuda.current_stream(), default_stream)
        self.assertNotEqual(default_stream, user_stream)
        self.assertEqual(default_stream.cuda_stream, 0)
        self.assertNotEqual(user_stream.cuda_stream, 0)
        with torch.cuda.stream(user_stream):
            self.assertEqual(torch.cuda.current_stream(), user_stream)
        self.assertTrue(user_stream.query())
        # copy a 10 MB tensor from CPU to GPU, which should take some time
        tensor1 = torch.ByteTensor(10000000).pin_memory()
        tensor2 = tensor1.cuda(async=True)
        self.assertFalse(default_stream.query())
        default_stream.synchronize()
        self.assertTrue(default_stream.query())
Project: pytorch-coriander    Author: hughperkins
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch-coriander    Author: hughperkins
def test_masked_copy(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_copy_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_copy_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_copy_(mask, src)
Project: pytorch-coriander    Author: hughperkins
def test_bernoulli(self):
        t = torch.ByteTensor(10, 10)

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

        p = 0.5
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        p = torch.rand(SIZE)
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        q = torch.rand(5, 5)
        self.assertTrue(isBinary(q.bernoulli()))
Project: pytorch    Author: ezyang
def test_streams(self):
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        self.assertEqual(torch.cuda.current_stream(), default_stream)
        self.assertNotEqual(default_stream, user_stream)
        self.assertEqual(default_stream.cuda_stream, 0)
        self.assertNotEqual(user_stream.cuda_stream, 0)
        with torch.cuda.stream(user_stream):
            self.assertEqual(torch.cuda.current_stream(), user_stream)
        self.assertTrue(user_stream.query())
        # copy a 10 MB tensor from CPU to GPU, which should take some time
        tensor1 = torch.ByteTensor(10000000).pin_memory()
        tensor2 = tensor1.cuda(async=True)
        self.assertFalse(default_stream.query())
        default_stream.synchronize()
        self.assertTrue(default_stream.query())
Project: pytorch    Author: ezyang
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch    Author: ezyang
def test_masked_scatter(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_scatter_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_scatter_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_scatter_(mask, src)
Project: pytorch    Author: ezyang
def test_bernoulli(self):
        t = torch.ByteTensor(10, 10)

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

        p = 0.5
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        p = torch.rand(SIZE)
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        q = torch.rand(5, 5)
        self.assertTrue(isBinary(q.bernoulli()))
Project: pix2pix.pytorch    Author: taey16
def __call__(self, picA, picB):
    pics = [picA, picB]
    output = []
    for pic in pics: 
      if isinstance(pic, np.ndarray):
        # handle numpy array
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
      else:
        # handle PIL Image
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
          nchannel = 3
        else:
          nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # put it from HWC to CHW format
        # yikes, this transpose takes 80% of the loading time/CPU
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        img = img.float().div(255.)
      output.append(img)
    return output[0], output[1]
Project: pytorch-planet-amazon    Author: rwightman
def __call__(self, img):
        assert isinstance(img, np.ndarray)
        # handle numpy array
        if img.dtype == np.uint16:
            img = img.astype(np.int32)
            div = 2**16
        elif img.dtype == np.uint32:
            img = img.astype(np.int32)
            div = 2**32
        elif img.dtype == np.int32:
            div = 2**32
        else:
            div = 1.
        img = torch.from_numpy(img.transpose((2, 0, 1)))
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        elif isinstance(img, torch.IntTensor):
            return img.float().div(div)
        else:
            return img
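A standalone sketch of the uint16 branch above, using a made-up 2x2 RGB array, to show how the 16-bit channel values end up as floats in [0, 1):

import numpy as np
import torch

# Fake 2x2 RGB image with 16-bit channels (values are made up).
img = np.array([[[0, 32768, 65535]] * 2] * 2, dtype=np.uint16)
img = img.astype(np.int32)                           # uint16 branch: widen, div = 2**16
tensor = torch.from_numpy(img.transpose((2, 0, 1)))  # HWC -> CHW, an IntTensor
print(tensor.float().div(2**16).max())               # 65535/65536, just under 1.0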
Project: seqmod    Author: emanjavacas
def forward(self, dec_out, enc_outs, enc_att=None, mask=None):
        """
        Parameters:
        -----------

        - dec_out: torch.Tensor(batch_size x hid_dim)
        - enc_outs: torch.Tensor(seq_len x batch_size x hid_dim)
        - enc_att: (optional), torch.Tensor(seq_len x batch_size x att_dim)
        - mask: (optional), torch.ByteTensor(batch_size x seq_len)
        """
        # (batch x seq_len)
        weights = self.scorer(dec_out, enc_outs, enc_att=enc_att)

        if mask is not None:
            # weights = weights * mask.float()
            weights.data.masked_fill_(1 - mask.data, -float('inf'))

        weights = F.softmax(weights, dim=1)

        # (eq 7)
        context = weights.unsqueeze(1).bmm(enc_outs.transpose(0, 1)).squeeze(1)
        # (eq 5) linear out combining context and hidden
        context = F.tanh(self.linear_out(torch.cat([context, dec_out], 1)))

        return context, weights
Project: seqmod    Author: emanjavacas
def make_length_mask(lengths):
    """
    Compute binary length mask.

    lengths: Variable torch.LongTensor(batch) should be on the desired
        output device.

    Returns:
    --------

    mask: torch.ByteTensor(batch x seq_len)
    """
    maxlen, batch = lengths.data.max(), len(lengths)
    mask = torch.arange(0, maxlen, out=lengths.data.new()) \
                .repeat(batch, 1) \
                .lt(lengths.data.unsqueeze(1))
    return Variable(mask, volatile=lengths.volatile)
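A minimal usage sketch for make_length_mask, assuming the legacy (pre-0.4) Variable API that the snippet targets and a made-up batch of three sequence lengths:

import torch
from torch.autograd import Variable  # legacy API, as in the snippet above

lengths = Variable(torch.LongTensor([5, 3, 2]))
mask = make_length_mask(lengths)
print(mask)
# Expected (batch x seq_len) ByteTensor:
#  1 1 1 1 1
#  1 1 1 0 0
#  1 1 0 0 0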
Project: pytorch    Author: pytorch
def test_streams(self):
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        self.assertEqual(torch.cuda.current_stream(), default_stream)
        self.assertNotEqual(default_stream, user_stream)
        self.assertEqual(default_stream.cuda_stream, 0)
        self.assertNotEqual(user_stream.cuda_stream, 0)
        with torch.cuda.stream(user_stream):
            self.assertEqual(torch.cuda.current_stream(), user_stream)
        self.assertTrue(user_stream.query())
        # copy a 10 MB tensor from CPU to GPU, which should take some time
        tensor1 = torch.ByteTensor(10000000).pin_memory()
        tensor2 = tensor1.cuda(async=True)
        self.assertFalse(default_stream.query())
        default_stream.synchronize()
        self.assertTrue(default_stream.query())
Project: pytorch    Author: pytorch
def test_MaskedSelect(self):
        input = torch.randn(4, 5)
        mask = torch.ByteTensor(4, 5).bernoulli_()
        module = nn.MaskedSelect()
        out = module.forward([input, mask])
        self.assertEqual(input.masked_select(mask), out)

        gradOut = torch.Tensor((20, 80))
        input = torch.Tensor(((10, 20), (30, 40)))
        inTarget = torch.Tensor(((20, 0), (0, 80)))
        mask = torch.ByteTensor(((1, 0), (0, 1)))
        module = nn.MaskedSelect()
        module.forward([input, mask])
        gradIn = module.backward([input, mask], gradOut)
        self.assertEqual(inTarget, gradIn[0])

        # Check that these don't raise errors
        module.__repr__()
        str(module)
Project: pytorch    Author: pytorch
def _test_neg(self, cast):
        float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
        int_types = ['torch.IntTensor', 'torch.ShortTensor', 'torch.ByteTensor',
                     'torch.CharTensor']

        for t in float_types + int_types:
            if t in float_types:
                a = cast(torch.randn(100, 90).type(t))
            else:
                a = cast(torch.Tensor(100, 90).type(t).random_())
            zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()

            res_add = torch.add(zeros, -1, a)
            res_neg = a.clone()
            res_neg.neg_()
            self.assertEqual(res_neg, res_add)

            # test out of place as well
            res_neg_out_place = a.clone().neg()
            self.assertEqual(res_neg_out_place, res_add)

            # test via __neg__ operator
            res_neg_op = -a.clone()
            self.assertEqual(res_neg_op, res_add)
Project: pytorch    Author: pytorch
def test_masked_scatter(self):
        num_copy, num_dest = 3, 10
        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
        dest2 = dest.clone()
        dest.masked_scatter_(mask, src)
        j = 0
        for i in range(num_dest):
            if mask[i]:
                dest2[i] = src[j]
                j += 1
        self.assertEqual(dest, dest2, 0)

        # make source bigger than number of 1s in mask
        src = torch.randn(num_dest)
        dest.masked_scatter_(mask, src)

        # make src smaller. this should fail
        src = torch.randn(num_copy - 1)
        with self.assertRaises(RuntimeError):
            dest.masked_scatter_(mask, src)
Project: pytorch    Author: pytorch
def test_bernoulli(self):
        t = torch.ByteTensor(10, 10)

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

        p = 0.5
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        p = torch.rand(10, 10)
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        q = torch.rand(5, 5)
        self.assertTrue(isBinary(q.bernoulli()))
Project: pytorch    Author: pytorch
def test_bernoulli_variable(self):
        # TODO: remove once we merge Variable and Tensor
        t = torch.autograd.Variable(torch.ByteTensor(10, 10))

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

        p = 0.5
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        p = torch.autograd.Variable(torch.rand(10))
        t.bernoulli_(p)
        self.assertTrue(isBinary(t))

        q = torch.rand(5, 5)
        self.assertTrue(isBinary(q.bernoulli()))
Project: SeqGAN-PyTorch    Author: ZiJianZhao
def forward(self, prob, target):
        """
        Args:
            prob: (N, C) 
            target : (N, )
        """
        N = target.size(0)
        C = prob.size(1)
        weight = Variable(self.weight).view((1, -1))
        weight = weight.expand(N, C)  # (N, C)
        if prob.is_cuda:
            weight = weight.cuda()
        prob = weight * prob

        one_hot = torch.zeros((N, C))
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        one_hot.scatter_(1, target.data.view((-1,1)), 1)
        one_hot = one_hot.type(torch.ByteTensor)
        one_hot = Variable(one_hot)
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        loss = torch.masked_select(prob, one_hot)
        return -torch.sum(loss)
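The one-hot ByteTensor selection used above, shown in isolation with toy numbers (uint8 masks are the legacy convention; newer releases prefer torch.bool):

import torch

# Pick the probability of each target class via a one-hot ByteTensor mask (toy values).
prob = torch.Tensor([[0.1, 0.7, 0.2],
                     [0.3, 0.3, 0.4]])
target = torch.LongTensor([1, 2])
one_hot = torch.zeros(2, 3)
one_hot.scatter_(1, target.view(-1, 1), 1)
one_hot = one_hot.type(torch.ByteTensor)
print(torch.masked_select(prob, one_hot))   # 0.7, 0.4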
Project: SeqGAN-PyTorch    Author: ZiJianZhao
def forward(self, prob, target, reward):
        """
        Args:
            prob: (N, C), torch Variable 
            target : (N, ), torch Variable
            reward : (N, ), torch Variable
        """
        N = target.size(0)
        C = prob.size(1)
        one_hot = torch.zeros((N, C))
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        one_hot.scatter_(1, target.data.view((-1,1)), 1)
        one_hot = one_hot.type(torch.ByteTensor)
        one_hot = Variable(one_hot)
        if prob.is_cuda:
            one_hot = one_hot.cuda()
        loss = torch.masked_select(prob, one_hot)
        loss = loss * reward
        loss =  -torch.sum(loss)
        return loss
Project: triplet-network-pytorch    Author: andreasveit
def read_image_file(path):
    with open(path, 'rb') as f:
        data = f.read()
        assert get_int(data[:4]) == 2051
        length = get_int(data[4:8])
        num_rows = get_int(data[8:12])
        num_cols = get_int(data[12:16])
        images = []
        idx = 16
        for l in range(length):
            img = []
            images.append(img)
            for r in range(num_rows):
                row = []
                img.append(row)
                for c in range(num_cols):
                    row.append(parse_byte(data[idx]))
                    idx += 1
        assert len(images) == length
        return torch.ByteTensor(images).view(-1, 28, 28)
Project: OpenNMT-py    Author: OpenNMT
def test_masked_global_attention(self):
        source_lengths = torch.IntTensor([7, 3, 5, 2])
        illegal_weights_mask = torch.ByteTensor([
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 1, 1],
            [0, 0, 1, 1, 1, 1, 1]])

        batch_size = source_lengths.size(0)
        dim = 20

        context = Variable(torch.randn(batch_size, source_lengths.max(), dim))
        hidden = Variable(torch.randn(batch_size, dim))

        attn = onmt.modules.GlobalAttention(dim)

        _, alignments = attn(hidden, context, context_lengths=source_lengths)
        illegal_weights = alignments.masked_select(illegal_weights_mask)

        self.assertEqual(0.0, illegal_weights.data.sum())
Project: FCN    Author: zengxianyu
def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)

        for label in range(1, len(self.cmap)):
            mask = gray_image[0] == label

            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]

        return color_image
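The per-label masking idiom above, in isolation on a toy 1x2x2 label map (the colour value is made up):

import torch

# Paint every pixel whose label equals 1 in the red channel of a blank ByteTensor image.
gray_image = torch.LongTensor([[[0, 1], [1, 2]]])
color_image = torch.ByteTensor(3, 2, 2).fill_(0)
mask = gray_image[0] == 1          # (2, 2) mask; a ByteTensor on legacy releases
color_image[0][mask] = 255
print(color_image[0])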
Project: pytorch-dist    Author: apaszke
def _new_idx(self, input):
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            return torch.cuda.ByteTensor()
        else:
            return torch.ByteTensor()
Project: pytorch-dist    Author: apaszke
def type(self, type=None, tensorCache=None):
        if not type:
            return self._type

        self._idx = None
        super(CosineEmbeddingCriterion, self).type(type, tensorCache)
        # comparison operators behave differently from cuda/c implementations
        if type == 'torch.cuda.FloatTensor':
            self._idx = torch.cuda.ByteTensor()
        else:
            self._idx = torch.ByteTensor()

        return self
Project: pytorch-dist    Author: apaszke
def __init__(self):
        super(MaskedSelect, self).__init__()
        self._maskIndices = torch.LongTensor()
        self._maskIndexBuffer = torch.LongTensor()
        self._maskIndexBufferCPU = torch.FloatTensor()
        self._gradBuffer = torch.Tensor()
        self._gradMask = torch.ByteTensor()
Project: pytorch-dist    Author: apaszke
def test_numel(self):
        b = torch.ByteTensor(3, 100, 100)
        self.assertEqual(b.nelement(), 3*100*100)
        self.assertEqual(b.numel(), 3*100*100)
Project: pytorch-dist    Author: apaszke
def test_element_size(self):
        byte   =   torch.ByteStorage().element_size()
        char   =   torch.CharStorage().element_size()
        short  =  torch.ShortStorage().element_size()
        int    =    torch.IntStorage().element_size()
        long   =   torch.LongStorage().element_size()
        float  =  torch.FloatStorage().element_size()
        double = torch.DoubleStorage().element_size()

        self.assertEqual(byte,   torch.ByteTensor().element_size())
        self.assertEqual(char,   torch.CharTensor().element_size())
        self.assertEqual(short,  torch.ShortTensor().element_size())
        self.assertEqual(int,    torch.IntTensor().element_size())
        self.assertEqual(long,   torch.LongTensor().element_size())
        self.assertEqual(float,  torch.FloatTensor().element_size())
        self.assertEqual(double, torch.DoubleTensor().element_size())

        self.assertGreater(byte, 0)
        self.assertGreater(char, 0)
        self.assertGreater(short, 0)
        self.assertGreater(int, 0)
        self.assertGreater(long, 0)
        self.assertGreater(float, 0)
        self.assertGreater(double, 0)

        # These tests are portable, not necessarily strict for your system.
        self.assertEqual(byte, 1)
        self.assertEqual(char, 1)
        self.assertGreaterEqual(short, 2)
        self.assertGreaterEqual(int, 2)
        self.assertGreaterEqual(int, short)
        self.assertGreaterEqual(long, 4)
        self.assertGreaterEqual(long, int)
        self.assertGreaterEqual(double, float)
Project: allennlp    Author: allenai
def test_forward_does_correct_computation(self):
        encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
        input_tensor = Variable(
                torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]], [[.5, .3], [1.4, 1.1], [.3, .9]]]))
        mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0]]))
        encoder_output = encoder(input_tensor, mask)
        assert_almost_equal(encoder_output.data.numpy(),
                            numpy.asarray([[.7 + .1 + .3, .8 + 1.5 + .6], [.5 + 1.4, .3 + 1.1]]))
Project: allennlp    Author: allenai
def test_forward_does_correct_computation_with_average(self):
        encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
        input_tensor = Variable(
                torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]],
                                   [[.5, .3], [1.4, 1.1], [.3, .9]],
                                   [[.4, .3], [.4, .3], [1.4, 1.7]]]))
        mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]]))
        encoder_output = encoder(input_tensor, mask)
        assert_almost_equal(encoder_output.data.numpy(),
                            numpy.asarray([[(.7 + .1 + .3)/3, (.8 + 1.5 + .6)/3],
                                           [(.5 + 1.4)/2, (.3 + 1.1)/2],
                                           [0., 0.]]))
Project: allennlp    Author: allenai
def test_get_sequence_lengths_from_binary_mask(self):
        binary_mask = torch.ByteTensor([[1, 1, 1, 0, 0, 0],
                                        [1, 1, 0, 0, 0, 0],
                                        [1, 1, 1, 1, 1, 1],
                                        [1, 0, 0, 0, 0, 0]])
        lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
        numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
Project: allennlp    Author: allenai
def forward(self,
                inputs: torch.Tensor,
                tags: torch.Tensor,
                mask: torch.ByteTensor = None) -> torch.Tensor:
        """
        Computes the log likelihood.
        """
        # pylint: disable=arguments-differ
        if mask is None:
            mask = torch.autograd.Variable(torch.ones(*tags.size()).long())

        log_denominator = self._input_likelihood(inputs, mask)
        log_numerator = self._joint_likelihood(inputs, tags, mask)

        return torch.sum(log_numerator - log_denominator)
Project: inferno    Author: inferno-pytorch
def label_to_long_tensor(pic):
    label = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    label = label.view(pic.size[1], pic.size[0], 1)
    label = label.transpose(0, 1).transpose(0, 2).squeeze().contiguous().long()
    return label
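A minimal usage sketch for label_to_long_tensor, built on a tiny hypothetical 'L'-mode PIL image (shape and values are made up):

import numpy as np
from PIL import Image

# 2x3 single-channel label image; label_to_long_tensor should return a (2, 3) LongTensor.
pic = Image.fromarray(np.array([[0, 1, 2], [2, 1, 0]], dtype=np.uint8))
label = label_to_long_tensor(pic)
print(label.size())   # torch.Size([2, 3])
print(label)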
Project: inferno    Author: inferno-pytorch
def where(condition, if_true, if_false):
    """
    Torch equivalent of numpy.where.

    Parameters
    ----------
    condition : torch.ByteTensor or torch.cuda.ByteTensor or torch.autograd.Variable
        Condition to check.
    if_true : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
        Output value if condition is true.
    if_false: torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
        Output value if condition is false

    Returns
    -------
    torch.Tensor

    Raises
    ------
    AssertionError
        if if_true and if_false are not both variables or both tensors.
    AssertionError
        if if_true and if_false don't have the same datatype.
    """
    if isinstance(if_true, Variable) or isinstance(if_false, Variable):
        assert isinstance(condition, Variable), \
            "Condition must be a variable if either if_true or if_false is a variable."
        assert isinstance(if_true, Variable) and isinstance(if_false, Variable), \
            "Both if_true and if_false must be variables if either is one."
        assert if_true.data.type() == if_false.data.type(), \
            "Type mismatch: {} and {}".format(if_true.data.type(), if_false.data.type())
    else:
        assert not isinstance(condition, Variable), \
            "Condition must not be a variable because neither if_true nor if_false is one."
        # noinspection PyArgumentList
        assert if_true.type() == if_false.type(), \
            "Type mismatch: {} and {}".format(if_true.data.type(), if_false.data.type())
    casted_condition = condition.type_as(if_true)
    output = casted_condition * if_true + (1 - casted_condition) * if_false
    return output
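A quick usage sketch for where with made-up tensors: values come from if_true wherever the ByteTensor condition is 1, and from if_false elsewhere.

import torch

a = torch.Tensor([1.0, 2.0, 3.0, 4.0])
b = torch.Tensor([10.0, 20.0, 30.0, 40.0])
cond = torch.ByteTensor([1, 0, 1, 0])
print(where(cond, a, b))   # 1, 20, 3, 40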
Project: pytorch-caffe-darknet-convert    Author: marvis
def image2torch(img):
    width = img.width
    height = img.height
    img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
    img = img.view(1, 3, height, width)
    img = img.float().div(255.0)
    return img
Project: clevr-iep    Author: facebookresearch
def reinforce_sample(self, x, max_length=30, temperature=1.0, argmax=False):
    N, T = x.size(0), max_length
    encoded = self.encoder(x)
    y = torch.LongTensor(N, T).fill_(self.NULL)
    done = torch.ByteTensor(N).fill_(0)
    cur_input = Variable(x.data.new(N, 1).fill_(self.START))
    h, c = None, None
    self.multinomial_outputs = []
    self.multinomial_probs = []
    for t in range(T):
      # logprobs is N x 1 x V
      logprobs, h, c = self.decoder(encoded, cur_input, h0=h, c0=c)
      logprobs = logprobs / temperature
      probs = F.softmax(logprobs.view(N, -1)) # Now N x V
      if argmax:
        _, cur_output = probs.max(1)
      else:
        cur_output = probs.multinomial() # Now N x 1
      self.multinomial_outputs.append(cur_output)
      self.multinomial_probs.append(probs)
      cur_output_data = cur_output.data.cpu()
      not_done = logical_not(done)
      y[:, t][not_done] = cur_output_data[not_done]
      done = logical_or(done, cur_output_data.cpu() == self.END)
      cur_input = cur_output
      if done.sum() == N:
        break
    return Variable(y.type_as(x.data))
Project: action-detection    Author: yjxiong
def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # handle numpy array
            img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
        else:
            # handle PIL Image
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
            img = img.view(pic.size[1], pic.size[0], len(pic.mode))
            # put it from HWC to CHW format
            # yikes, this transpose takes 80% of the loading time/CPU
            img = img.transpose(0, 1).transpose(0, 2).contiguous()
        return img.float().div(255) if self.div else img.float()
Project: pytorch    Author: tylergenter
def type(self, type=None, tensorCache=None):
        if not type:
            return self._type

        self._idx = None
        super(CosineEmbeddingCriterion, self).type(type, tensorCache)
        # comparison operators behave differently from cuda/c implementations
        if type == 'torch.cuda.FloatTensor':
            self._idx = torch.cuda.ByteTensor()
        else:
            self._idx = torch.ByteTensor()

        return self
Project: pytorch    Author: tylergenter
def __init__(self):
        super(MaskedSelect, self).__init__()
        self._maskIndices = torch.LongTensor()
        self._maskIndexBuffer = torch.LongTensor()
        self._maskIndexBufferCPU = torch.FloatTensor()
        self._gradBuffer = torch.Tensor()
        self._gradMask = torch.ByteTensor()