Python torch module, Size() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.Size().
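
As a quick orientation before the project snippets (a minimal sketch, not taken from any project below): torch.Size is a subclass of tuple, so it can be built from any iterable, indexed, and compared against plain tuples.

import torch

s = torch.Size([2, 3, 4])      # construct from a list
t = torch.zeros(2, 3, 4)
print(t.size() == s)           # True: size() returns a torch.Size
print(t.size() == (2, 3, 4))   # True: compares equal to a plain tuple
print(s[0], len(s))            # 2 3: ordinary tuple indexing and length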

Project: faster-rcnn.pytorch    Author: jwyang    | Project source | File source
def _affine_grid_gen(rois, input_size, grid_size):

    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid
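
A hypothetical call site for the function above (the ROI values, input size, and grid size are invented for illustration; Variable and F = torch.nn.functional are assumed to be imported, and each ROI row is a (batch_idx, x1, y1, x2, y2) box on a stride-16 feature map):

rois = Variable(torch.Tensor([[0, 0, 0, 159, 159]]))   # one ROI
grid = _affine_grid_gen(rois, input_size=(600, 800), grid_size=7)
print(grid.size())  # torch.Size([1, 7, 7, 2])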
Project: pytorch-dist    Author: apaszke    | Project source | File source
def view(self, *args):
        dst = self.new()
        if len(args) == 1 and isinstance(args[0], torch.Size):
            sizes = args[0]
        else:
            sizes = torch.Size(args)
        sizes = _infer_sizes(sizes, self.nelement())
        numel = reduce(lambda a, b: a * b, sizes) if len(sizes) > 0 else 0  # reduce: functools.reduce on Python 3

        if numel != self.nelement():
            def format_size(size):
                return 'x'.join(str(v) for v in size) if len(size) > 0 else '0'
            raise ValueError(
                "view of size '{0}' is invalid for input of size '{1}'"
                .format(format_size(sizes), format_size(self.size())))
        if not self.is_contiguous():
            raise ValueError("input should be contiguous")
        if self.storage() is not None:
            dst.set_(self.storage(), self.storage_offset(), sizes)
        return dst
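
Both calling conventions accepted above survive in the public tensor API; a short sketch:

x = torch.arange(0, 12)          # 12 elements
y = x.view(torch.Size([3, 4]))   # explicit torch.Size argument
z = x.view(3, -1)                # varargs, with one dimension inferred
print(y.size(), z.size())        # torch.Size([3, 4]) torch.Size([3, 4])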
Project: pytorch-dist    Author: apaszke    | Project source | File source
def __init__(self, *args):
        super(CMul, self).__init__()

        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.weight = torch.Tensor(self.size)
        self.gradWeight = torch.Tensor(self.size)
        self.output.resize_(self.size)
        self.reset()

        self._output = None
        self._weight = None
        self._expand = None
        self._repeat = None
        self._gradOutput = None
        self._gradInput = None
        self._input = None
        self._gradWeight = None
        self._sum = None
Project: pytorch-dist    Author: apaszke    | Project source | File source
def resetSize(self, *args):
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.numElements = 1
        inferdim = False
        for i in range(len(self.size)):
            szi = self.size[i]
            if szi >= 0:
                self.numElements = self.numElements * self.size[i]
            else:
                assert szi == -1
                assert not inferdim
                inferdim = True

        return self
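
The same bookkeeping written standalone for clarity (torch.Size is a tuple, so it happily stores a -1 placeholder to be inferred later):

size = torch.Size([2, -1, 4])
num_elements = 1
infer_dim = None
for i, szi in enumerate(size):
    if szi >= 0:
        num_elements *= szi
    else:
        assert szi == -1 and infer_dim is None   # at most one inferred dimension
        infer_dim = i
print(num_elements, infer_dim)   # 8 1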
Project: pytorch-dist    Author: apaszke    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
        self.size = torch.Size(size)
        self.output.resize_(self.size)

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            self.output.narrow(self.dimension, offset, currentOutput.size(self.dimension)).copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
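
The resize_/narrow/copy_ loop above is the manual form of torch.cat along self.dimension; a minimal sketch of the same pattern on bare tensors:

a = torch.ones(2, 3)
b = torch.zeros(2, 5)
size = list(a.size())
size[1] += b.size(1)
out = a.new(torch.Size(size))                   # (2, 8), uninitialized
out.narrow(1, 0, a.size(1)).copy_(a)            # first window <- a
out.narrow(1, a.size(1), b.size(1)).copy_(b)    # second window <- b
print(out.equal(torch.cat([a, b], 1)))          # True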
Project: pytorch-dist    Author: apaszke    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
                for dim in range(len(self.size)):
                    if dim != self.dimension:
                        # take the maximum size (shouldn't change anything for batch dim)
                        size[dim] = max(size[dim], currentOutput.size(dim))

        self.size = torch.Size(size)
        self.output.resize_(self.size).zero_()  # zero for padding

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            outputWindow = self.windowNarrow(self.output, currentOutput, offset)
            outputWindow.copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
Project: pytorch-dist    Author: apaszke    | Project source | File source
def __init__(self, dim=1):
        super(MixtureTable, self).__init__()
        self.dim = dim
        self.size = torch.Size()
        self.size2 = torch.Size()
        self.batchSize = 0
        self.backwardSetup = False
        self.gradInput = []

        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expertView2 = None
        self._expert2 = None
        self.table = False
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | Project source | File source
def forward(self, x):
        one_layer = self.embed(x)  # (N,W,D) #  torch.Size([64, 43, 300])
        # one_layer = self.dropout(one_layer)
        one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  #  torch.Size([64, 1, 43, 300])
        # one layer
        one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1] # torch.Size([64, 100, 36])
        # two layer
        two_layer = [F.relu(conv(one_layer.unsqueeze(1))).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
        print("two_layer {}".format(two_layer[0].size()))
        # pooling
        output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]   #  torch.Size([64, 100]) torch.Size([64, 100])
        output = torch.cat(output, 1)  # torch.Size([64, 300])
        # dropout
        output = self.dropout(output)
        # linear
        output = self.fc1(F.relu(output))
        logit = self.fc2(F.relu(output))
        return logit
Project: pytorch    Author: tylergenter    | Project source | File source
def make_tensor_reader(typename):
    python_class = get_python_class(typename)

    def read_tensor(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Tensor.c#L1243
        ndim = reader.read_int()

        # read size:
        size = torch.LongStorage(reader.read_long_array(ndim))
        # read stride:
        stride = torch.LongStorage(reader.read_long_array(ndim))
        # storage offset:
        storage_offset = reader.read_long() - 1
        # read storage:
        storage = reader.read()

        if storage is None or ndim == 0 or len(size) == 0 or len(stride) == 0:
            # empty torch tensor
            return python_class()

        return python_class().set_(storage, storage_offset, torch.Size(size), tuple(stride))
    return read_tensor
Project: pytorch    Author: tylergenter    | Project source | File source
def __init__(self, *args):
        super(CMul, self).__init__()

        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.weight = torch.Tensor(self.size)
        self.gradWeight = torch.Tensor(self.size)
        self.output.resize_(self.size)
        self.reset()

        self._output = None
        self._weight = None
        self._expand = None
        self._repeat = None
        self._gradOutput = None
        self._gradInput = None
        self._input = None
        self._gradWeight = None
        self._sum = None
Project: pytorch    Author: tylergenter    | Project source | File source
def resetSize(self, *args):
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.numElements = 1
        inferdim = False
        for i in range(len(self.size)):
            szi = self.size[i]
            if szi >= 0:
                self.numElements = self.numElements * self.size[i]
            else:
                assert szi == -1
                assert not inferdim
                inferdim = True

        return self
Project: pytorch    Author: tylergenter    | Project source | File source
def updateOutput(self, input):
        outputSize = list(input.size())
        outputSize[self.dim] += abs(self.pad)
        self.outputSize = torch.Size(outputSize)
        dim = self.dim

        self.output.resize_(self.outputSize)
        self.output.fill_(self.value)
        index = self.index
        pad = self.pad
        if pad > 0:
            index = input.size(dim) - index
        else:
            pad = -pad

        if index == 0:
            self.output.narrow(dim, pad, input.size(dim)).copy_(input)
        elif index == input.size(dim):
            self.output.narrow(dim, 0, input.size(dim)).copy_(input)
        else:
            self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))
            self.output.narrow(dim, index + pad, input.size(dim) -
                               index).copy_(input.narrow(dim, index, input.size(dim) - index))

        return self.output
Project: pytorch    Author: tylergenter    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
        self.outputSize = torch.Size(size)
        self.output.resize_(self.outputSize)

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            self.output.narrow(self.dimension, offset, currentOutput.size(self.dimension)).copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
Project: pytorch    Author: tylergenter    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
                for dim in range(len(self.outputSize)):
                    if dim != self.dimension:
                        # take the maximum size (shouldn't change anything for batch dim)
                        size[dim] = max(size[dim], currentOutput.size(dim))

        self.outputSize = torch.Size(size)
        self.output.resize_(self.outputSize).zero_()  # zero for padding

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            outputWindow = self.windowNarrow(self.output, currentOutput, offset)
            outputWindow.copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
Project: pytorch    Author: tylergenter    | Project source | File source
def updateOutput(self, input):
        dim = self._getPositiveDimension(input)

        for i in range(len(input)):
            currentOutput = input[i]
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[dim] += currentOutput.size(dim)

        self.size = torch.Size(size)
        self.output.resize_(self.size)

        # TODO: use cat?
        offset = 0
        for i in range(len(input)):
            currentOutput = input[i]
            self.output.narrow(dim, offset, currentOutput.size(dim)).copy_(currentOutput)
            offset += currentOutput.size(dim)

        return self.output
Project: pytorch    Author: tylergenter    | Project source | File source
def __init__(self, dim=1):
        super(MixtureTable, self).__init__()
        self.dim = dim
        self.size = torch.Size()
        self.size2 = torch.Size()
        self.batchSize = 0
        self.backwardSetup = False
        self.gradInput = []

        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expertView2 = None
        self._expert2 = None
        self.table = False
Project: pytorch    Author: tylergenter    | Project source | File source
def create_input(call_args, requires_grad=True):
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
            return arg
        elif isinstance(arg, tuple) and not isinstance(arg[0], Variable):
            return Variable(torch.randn(*arg).double(), requires_grad=requires_grad)
        elif torch.is_tensor(arg):
            if isinstance(arg, torch.FloatTensor):
                return Variable(arg.double(), requires_grad=requires_grad)
            else:
                return Variable(arg, requires_grad=requires_grad)
        else:
            return arg
    return tuple(map_arg(arg) for arg in call_args)
Project: pytorch    Author: tylergenter    | Project source | File source
def test_sequential_batch(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=False)
        batch_size = loader.batch_size
        for i, sample in enumerate(loader):
            idx = i * batch_size
            self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
            self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})

            t = sample['a_tensor']
            self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
            self.assertTrue((t[0] == idx).all())
            self.assertTrue((t[1] == idx + 1).all())

            n = sample['another_dict']['a_number']
            self.assertEqual(n.size(), torch.Size([batch_size]))
            self.assertEqual(n[0], idx)
            self.assertEqual(n[1], idx + 1)
Project: pytorch    Author: tylergenter    | Project source | File source
def _test_sparse_mask_hybrid_fixed(self):
        i = self.IndexTensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.ValueTensor([[1, 2], [2, 3], [3, 4], [4, 5]])
        # TODO: This is also testing that, if coalesce is a no-op,
        # the indices don't get permuted. I don't know if we actually
        # want to give this invariant.
        x = self.SparseTensor(i, v, torch.Size([5, 4, 2])).coalesce()
        dense = self.ValueTensor([
            [[1, 3], [2, 2], [3, 3], [4, 2]],
            [[5, 7], [6, 7], [7, 9], [8, 9]],
            [[9, 2], [10, 4], [11, 1], [12, 3]],
            [[13, 5], [14, 1], [15, 1], [16, 6]],
            [[17, 7], [18, 2], [19, 7], [20, 1]],
        ])
        res = dense._sparse_mask(x)
        exp_v = self.ValueTensor([[7, 9], [14, 1], [3, 3], [20, 1]])
        expected = self.SparseTensor(i, exp_v, torch.Size([5, 4, 2]))
        self.assertEqual(res, expected)
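
The tests above build sparse tensors through class-configured constructors (self.IndexTensor, self.ValueTensor, self.SparseTensor). On current PyTorch the same index/value/size pattern is spelled torch.sparse_coo_tensor (a sketch, assuming PyTorch 0.4 or later):

i = torch.LongTensor([[1, 3], [2, 1]])
v = torch.Tensor([7.0, 14.0])
x = torch.sparse_coo_tensor(i, v, torch.Size([5, 4]))
print(x.size())            # torch.Size([5, 4])
print(x.to_dense()[1][2])  # 7.0 at (row 1, col 2)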
Project: pytorch    Author: tylergenter    | Project source | File source
def test_view(self):
        tensor = torch.rand(15)
        template = torch.rand(3, 5)
        empty = torch.Tensor()
        target = template.size()
        self.assertEqual(tensor.view_as(template).size(), target)
        self.assertEqual(tensor.view(3, 5).size(), target)
        self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
        self.assertEqual(tensor.view(-1, 5).size(), target)
        self.assertEqual(tensor.view(3, -1).size(), target)
        tensor_view = tensor.view(5, 3)
        tensor_view.fill_(random.uniform(0, 1))
        self.assertEqual((tensor_view - tensor).abs().max(), 0)
        self.assertEqual(empty.view_as(empty), empty)
        self.assertEqual(empty.view(0), empty)
        self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
        self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
        self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def make_tensor_reader(typename):
    python_class = get_python_class(typename)

    def read_tensor(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Tensor.c#L1243
        ndim = reader.read_int()

        # read size:
        size = torch.LongStorage(reader.read_long_array(ndim))
        # read stride:
        stride = torch.LongStorage(reader.read_long_array(ndim))
        # storage offset:
        storage_offset = reader.read_long() - 1
        # read storage:
        storage = reader.read()

        if storage is None or ndim == 0 or len(size) == 0 or len(stride) == 0:
            # empty torch tensor
            return python_class()

        return python_class().set_(storage, storage_offset, torch.Size(size), tuple(stride))
    return read_tensor
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def __init__(self, *args):
        super(CMul, self).__init__()

        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.weight = torch.Tensor(self.size)
        self.gradWeight = torch.Tensor(self.size)
        self.output.resize_(self.size)
        self.reset()

        self._output = None
        self._weight = None
        self._expand = None
        self._repeat = None
        self._gradOutput = None
        self._gradInput = None
        self._input = None
        self._gradWeight = None
        self._sum = None
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def updateOutput(self, input):
        outputSize = list(input.size())
        outputSize[self.dim] += abs(self.pad)
        self.outputSize = torch.Size(outputSize)
        dim = self.dim

        self.output.resize_(self.outputSize)
        self.output.fill_(self.value)
        index = self.index
        pad = self.pad
        if pad > 0:
            index = input.size(dim) - index
        else:
            pad = -pad

        if index == 0:
            self.output.narrow(dim, pad, input.size(dim)).copy_(input)
        elif index == input.size(dim):
            self.output.narrow(dim, 0, input.size(dim)).copy_(input)
        else:
            self.output.narrow(dim, 0, index).copy_(input.narrow(dim, 0, index))
            self.output.narrow(dim, index + pad, input.size(dim) -
                               index).copy_(input.narrow(dim, index, input.size(dim) - index))

        return self.output
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
                for dim in range(len(self.outputSize)):
                    if dim != self.dimension:
                        # take the maximum size (shouldn't change anything for batch dim)
                        size[dim] = max(size[dim], currentOutput.size(dim))

        self.outputSize = torch.Size(size)
        self.output.resize_(self.outputSize).zero_()  # zero for padding

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            outputWindow = self.windowNarrow(self.output, currentOutput, offset)
            outputWindow.copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def updateOutput(self, input):
        dim = self._getPositiveDimension(input)

        for i in range(len(input)):
            currentOutput = input[i]
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[dim] += currentOutput.size(dim)

        self.size = torch.Size(size)
        self.output.resize_(self.size)

        # TODO: use cat?
        offset = 0
        for i in range(len(input)):
            currentOutput = input[i]
            self.output.narrow(dim, offset, currentOutput.size(dim)).copy_(currentOutput)
            offset += currentOutput.size(dim)

        return self.output
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def __init__(self, dim=1):
        super(MixtureTable, self).__init__()
        self.dim = dim
        self.size = torch.Size()
        self.size2 = torch.Size()
        self.batchSize = 0
        self.backwardSetup = False
        self.gradInput = []

        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expertView2 = None
        self._expert2 = None
        self.table = False
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def _test_gather(self, output_device):
        inputs = (
            Variable(torch.randn(2, 4).cuda(0), requires_grad=True),
            Variable(torch.randn(2, 4).cuda(1), requires_grad=True)
        )
        result = dp.gather(inputs, output_device)
        self.assertEqual(result.size(), torch.Size([4, 4]))
        self.assertEqual(result[:2], inputs[0])
        self.assertEqual(result[2:], inputs[1])
        if output_device != -1:
            self.assertEqual(result.get_device(), output_device)
        else:
            self.assertFalse(result.is_cuda)
        grad = torch.randn(4, 4)
        if output_device != -1:
            grad = grad.cuda(output_device)
        result.backward(grad)
        self.assertEqual(inputs[0].grad.data, grad[:2])
        self.assertEqual(inputs[1].grad.data, grad[2:])
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def create_input(call_args, requires_grad=True):
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
            return arg
        elif isinstance(arg, tuple) and not isinstance(arg[0], Variable):
            return Variable(torch.randn(*arg).double(), requires_grad=requires_grad)
        elif torch.is_tensor(arg):
            if isinstance(arg, torch.FloatTensor):
                return Variable(arg.double(), requires_grad=requires_grad)
            else:
                return Variable(arg, requires_grad=requires_grad)
        else:
            return arg
    return tuple(map_arg(arg) for arg in call_args)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_sequential_batch(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=False)
        batch_size = loader.batch_size
        for i, sample in enumerate(loader):
            idx = i * batch_size
            self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
            self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})

            t = sample['a_tensor']
            self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
            self.assertTrue((t[0] == idx).all())
            self.assertTrue((t[1] == idx + 1).all())

            n = sample['another_dict']['a_number']
            self.assertEqual(n.size(), torch.Size([batch_size]))
            self.assertEqual(n[0], idx)
            self.assertEqual(n[1], idx + 1)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def _test_gather(self, dim):
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("only one GPU detected")
        x = torch.randn(2, 5).cuda(0)
        y = torch.randn(2, 5).cuda(1)
        result = comm.gather((x, y), dim)

        expected_size = list(x.size())
        expected_size[dim] += y.size(dim)
        expected_size = torch.Size(expected_size)
        self.assertEqual(result.get_device(), 0)
        self.assertEqual(result.size(), expected_size)

        index = [slice(None, None), slice(None, None)]
        index[dim] = slice(0, x.size(dim))
        self.assertEqual(result[tuple(index)], x)
        index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
        self.assertEqual(result[tuple(index)], y)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def _test_sparse_mask_fixed(self):
        i = self.IndexTensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.ValueTensor([1, 2, 3, 4])
        x = self.SparseTensor(i, v, torch.Size([5, 4])).coalesce()
        dense = self.ValueTensor([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20],
        ])
        exp_v = self.ValueTensor([7, 14, 3, 20])
        res = dense._sparse_mask(x)
        expected = self.SparseTensor(i, exp_v, torch.Size([5, 4]))
        self.assertEqual(res, expected)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def _test_sparse_mask_hybrid_fixed(self):
        i = self.IndexTensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.ValueTensor([[1, 2], [2, 3], [3, 4], [4, 5]])
        # TODO: This is also testing that, if coalesce is a no-op,
        # the indices don't get permuted. I don't know if we actually
        # want to give this invariant.
        x = self.SparseTensor(i, v, torch.Size([5, 4, 2])).coalesce()
        dense = self.ValueTensor([
            [[1, 3], [2, 2], [3, 3], [4, 2]],
            [[5, 7], [6, 7], [7, 9], [8, 9]],
            [[9, 2], [10, 4], [11, 1], [12, 3]],
            [[13, 5], [14, 1], [15, 1], [16, 6]],
            [[17, 7], [18, 2], [19, 7], [20, 1]],
        ])
        res = dense._sparse_mask(x)
        exp_v = self.ValueTensor([[7, 9], [14, 1], [3, 3], [20, 1]])
        expected = self.SparseTensor(i, exp_v, torch.Size([5, 4, 2]))
        self.assertEqual(res, expected)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_view(self):
        tensor = torch.rand(15)
        template = torch.rand(3, 5)
        empty = torch.Tensor()
        target = template.size()
        self.assertEqual(tensor.view_as(template).size(), target)
        self.assertEqual(tensor.view(3, 5).size(), target)
        self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
        self.assertEqual(tensor.view(-1, 5).size(), target)
        self.assertEqual(tensor.view(3, -1).size(), target)
        tensor_view = tensor.view(5, 3)
        tensor_view.fill_(random.uniform(0, 1))
        self.assertEqual((tensor_view - tensor).abs().max(), 0)
        self.assertEqual(empty.view_as(empty), empty)
        self.assertEqual(empty.view(0), empty)
        self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
        self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
        self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
Project: pytorch    Author: ezyang    | Project source | File source
def make_tensor_reader(typename):
    python_class = get_python_class(typename)

    def read_tensor(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Tensor.c#L1243
        ndim = reader.read_int()

        # read size:
        size = torch.LongStorage(reader.read_long_array(ndim))
        # read stride:
        stride = torch.LongStorage(reader.read_long_array(ndim))
        # storage offset:
        storage_offset = reader.read_long() - 1
        # read storage:
        storage = reader.read()

        if storage is None or ndim == 0 or len(size) == 0 or len(stride) == 0:
            # empty torch tensor
            return python_class()

        return python_class().set_(storage, storage_offset, torch.Size(size), tuple(stride))
    return read_tensor
Project: pytorch    Author: ezyang    | Project source | File source
def forward(ctx, theta, size):
        assert type(size) == torch.Size
        N, C, H, W = size
        ctx.size = size
        if theta.is_cuda:
            ctx.is_cuda = True
            AffineGridGenerator._enforce_cudnn(theta)
            grid = theta.new(N, H, W, 2)
            theta = theta.contiguous()
            torch._C._cudnn_affine_grid_generator_forward(theta, grid, N, C, H, W)
        else:
            ctx.is_cuda = False
            base_grid = theta.new(N, H, W, 3)
            linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
            base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
            linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
            base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
            base_grid[:, :, :, 2] = 1
            ctx.base_grid = base_grid
            grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
            grid = grid.view(N, H, W, 2)
        return grid
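
The CPU branch spells out what the functional API exposes publicly; an equivalent call through F.affine_grid (a sketch; recent releases also accept an align_corners flag):

import torch.nn.functional as F

theta = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])         # identity transform, N=1
grid = F.affine_grid(theta, torch.Size((1, 1, 4, 4)))  # size is (N, C, H, W)
print(grid.size())  # torch.Size([1, 4, 4, 2])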
Project: pytorch    Author: ezyang    | Project source | File source
def backward(ctx, grad_grid):
        N, C, H, W = ctx.size
        assert grad_grid.size() == torch.Size([N, H, W, 2])
        assert ctx.is_cuda == grad_grid.is_cuda
        if grad_grid.is_cuda:
            AffineGridGenerator._enforce_cudnn(grad_grid)
            grad_theta = grad_grid.new(N, 2, 3)
            grad_grid = grad_grid.contiguous()
            torch._C._cudnn_affine_grid_generator_backward(grad_theta, grad_grid,
                                                           N, C, H, W)
        else:
            base_grid = ctx.base_grid
            grad_theta = torch.bmm(
                base_grid.view(N, H * W, 3).transpose(1, 2),
                grad_grid.view(N, H * W, 2))
            grad_theta = grad_theta.transpose(1, 2)

        return grad_theta, None
Project: pytorch    Author: ezyang    | Project source | File source
def __init__(self, *args):
        super(CMul, self).__init__()

        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.weight = torch.Tensor(self.size)
        self.gradWeight = torch.Tensor(self.size)
        self.output.resize_(self.size)
        self.reset()

        self._output = None
        self._weight = None
        self._expand = None
        self._repeat = None
        self._gradOutput = None
        self._gradInput = None
        self._input = None
        self._gradWeight = None
        self._sum = None
Project: pytorch    Author: ezyang    | Project source | File source
def resetSize(self, *args):
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

        self.numElements = 1
        inferdim = False
        for i in range(len(self.size)):
            szi = self.size[i]
            if szi >= 0:
                self.numElements = self.numElements * self.size[i]
            else:
                assert szi == -1
                assert not inferdim
                inferdim = True

        return self
Project: pytorch    Author: ezyang    | Project source | File source
def updateOutput(self, input):
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
        self.outputSize = torch.Size(size)
        self.output.resize_(self.outputSize)

        offset = 0
        for i, module in enumerate(self.modules):
            currentOutput = outs[i]
            self.output.narrow(self.dimension, offset, currentOutput.size(self.dimension)).copy_(currentOutput)
            offset = offset + currentOutput.size(self.dimension)

        return self.output
Project: pytorch    Author: ezyang    | Project source | File source
def updateOutput(self, input):
        dim = self._getPositiveDimension(input)

        for i in range(len(input)):
            currentOutput = input[i]
            if i == 0:
                size = list(currentOutput.size())
            else:
                size[dim] += currentOutput.size(dim)

        self.size = torch.Size(size)
        self.output.resize_(self.size)

        # TODO: use cat?
        offset = 0
        for i in range(len(input)):
            currentOutput = input[i]
            self.output.narrow(dim, offset, currentOutput.size(dim)).copy_(currentOutput)
            offset += currentOutput.size(dim)

        return self.output
Project: pytorch    Author: ezyang    | Project source | File source
def __init__(self, dim=1):
        super(MixtureTable, self).__init__()
        self.dim = dim
        self.size = torch.Size()
        self.size2 = torch.Size()
        self.batchSize = 0
        self.backwardSetup = False
        self.gradInput = []

        self._gaterView = None
        self._expert = None
        self._expertView = None
        self._sum = None
        self._expertView2 = None
        self._expert2 = None
        self.table = False
Project: pytorch    Author: ezyang    | Project source | File source
def _test_gather(self, output_device):
        inputs = (
            Variable(torch.randn(2, 4).cuda(0), requires_grad=True),
            Variable(torch.randn(2, 4).cuda(1), requires_grad=True)
        )
        result = dp.gather(inputs, output_device)
        self.assertEqual(result.size(), torch.Size([4, 4]))
        self.assertEqual(result[:2], inputs[0])
        self.assertEqual(result[2:], inputs[1])
        if output_device != -1:
            self.assertEqual(result.get_device(), output_device)
        else:
            self.assertFalse(result.is_cuda)
        grad = torch.randn(4, 4)
        if output_device != -1:
            grad = grad.cuda(output_device)
        result.backward(grad)
        self.assertEqual(inputs[0].grad.data, grad[:2])
        self.assertEqual(inputs[1].grad.data, grad[2:])
        _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
Project: pytorch    Author: ezyang    | Project source | File source
def make_non_contiguous(tensor):
    osize = list(tensor.size())

    # randomly inflate a few dimensions in osize
    for _ in range(2):
        dim = random.randint(0, len(osize) - 1)
        add = random.randint(4, 15)
        osize[dim] = osize[dim] + add

    # narrow doesn't make a non-contiguous tensor if we only narrow the 0-th dimension,
    # (which will always happen with a 1-dimensional tensor), so let's make a new
    # right-most dimension and cut it off

    input = tensor.new(torch.Size(osize + [random.randint(2, 3)]))
    input = input.select(len(input.size()) - 1, random.randint(0, 1))
    # now extract the input of correct size from 'input'
    for i in range(len(osize)):
        if input.size(i) != tensor.size(i):
            bounds = random.randint(1, input.size(i) - tensor.size(i))
            input = input.narrow(i, bounds, tensor.size(i))

    input.copy_(tensor)
    return input
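
A quick check of the helper's contract (a sketch; the `random` module must be imported, and the input needs at least one dimension):

t = torch.randn(3, 4)
nc = make_non_contiguous(t)
print(nc.is_contiguous())  # False: the values now live in a larger strided buffer
print(nc.equal(t))         # True: contents are preserved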
Project: pytorch    Author: ezyang    | Project source | File source
def test_sequential_batch(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=False)
        batch_size = loader.batch_size
        for i, sample in enumerate(loader):
            idx = i * batch_size
            self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
            self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})

            t = sample['a_tensor']
            self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
            self.assertTrue((t[0] == idx).all())
            self.assertTrue((t[1] == idx + 1).all())

            n = sample['another_dict']['a_number']
            self.assertEqual(n.size(), torch.Size([batch_size]))
            self.assertEqual(n[0], idx)
            self.assertEqual(n[1], idx + 1)
Project: pytorch    Author: ezyang    | Project source | File source
def _test_gather(self, dim):
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("only one GPU detected")
        x = torch.randn(2, 5).cuda(0)
        y = torch.randn(2, 5).cuda(1)
        result = comm.gather((x, y), dim)

        expected_size = list(x.size())
        expected_size[dim] += y.size(dim)
        expected_size = torch.Size(expected_size)
        self.assertEqual(result.get_device(), 0)
        self.assertEqual(result.size(), expected_size)

        index = [slice(None, None), slice(None, None)]
        index[dim] = slice(0, x.size(dim))
        self.assertEqual(result[tuple(index)], x)
        index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
        self.assertEqual(result[tuple(index)], y)
Project: pytorch    Author: ezyang    | Project source | File source
def _test_sparse_mask_fixed(self):
        i = self.IndexTensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.ValueTensor([1, 2, 3, 4])
        x = self.SparseTensor(i, v, torch.Size([5, 4])).coalesce()
        dense = self.ValueTensor([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20],
        ])
        exp_v = self.ValueTensor([7, 14, 3, 20])
        res = dense._sparse_mask(x)
        expected = self.SparseTensor(i, exp_v, torch.Size([5, 4]))
        self.assertEqual(res, expected)
Project: pytorch    Author: ezyang    | Project source | File source
def test_view(self):
        tensor = torch.rand(15)
        template = torch.rand(3, 5)
        empty = torch.Tensor()
        target = template.size()
        self.assertEqual(tensor.view_as(template).size(), target)
        self.assertEqual(tensor.view(3, 5).size(), target)
        self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
        self.assertEqual(tensor.view(-1, 5).size(), target)
        self.assertEqual(tensor.view(3, -1).size(), target)
        tensor_view = tensor.view(5, 3)
        tensor_view.fill_(random.uniform(0, 1))
        # suppress broadcastable warning
        with warnings.catch_warnings(record=True):
            self.assertEqual((tensor_view - tensor).abs().max(), 0)
        self.assertEqual(empty.view_as(empty), empty)
        self.assertEqual(empty.view(0), empty)
        self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
        self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
        self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
Project: pytorch    Author: ezyang    | Project source | File source
def test_tensor_set(self):
        t1 = torch.Tensor()
        t2 = torch.Tensor(3, 4, 9, 10).uniform_()
        t1.set_(t2)
        self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
        size = torch.Size([9, 3, 4, 10])
        t1.set_(t2.storage(), 0, size)
        self.assertEqual(t1.size(), size)
        t1.set_(t2.storage(), 0, tuple(size))
        self.assertEqual(t1.size(), size)
        self.assertEqual(t1.stride(), (120, 40, 10, 1))
        stride = (10, 360, 90, 1)
        t1.set_(t2.storage(), 0, size, stride)
        self.assertEqual(t1.stride(), stride)
        t1.set_(t2.storage(), 0, size=size, stride=stride)
        self.assertEqual(t1.size(), size)
        self.assertEqual(t1.stride(), stride)
Project: gpytorch    Author: jrg365    | Project source | File source
def bdsmm(sparse, dense):
    """
    Batch dense-sparse matrix multiply
    """
    if sparse.ndimension() > 2:
        batch_size, n_rows, n_cols = sparse.size()
        batch_assignment = sparse._indices()[0]
        indices = sparse._indices()[1:].clone()
        indices[0].add_(n_rows, batch_assignment)
        indices[1].add_(n_cols, batch_assignment)
        sparse_2d = sparse.__class__(indices, sparse._values(),
                                     torch.Size((batch_size * n_rows, batch_size * n_cols)))

        if dense.size(0) == 1:
            dense = dense.repeat(batch_size, 1, 1)
        dense_2d = dense.contiguous().view(batch_size * n_cols, -1)
        res = torch.dsmm(sparse_2d, dense_2d)
        res = res.view(batch_size, n_rows, -1)
        return res
    else:
        return torch.dsmm(sparse, dense)
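
A hypothetical invocation, using the same old-style sparse constructor the function expects (the index rows are (batch, row, col); torch.dsmm was that era's sparse-dense matmul, so this sketch assumes a PyTorch version that still provides it):

i = torch.LongTensor([[0, 0, 1], [0, 1, 2], [0, 2, 1]])         # (batch, row, col)
v = torch.Tensor([1.0, 2.0, 3.0])
sparse = torch.sparse.FloatTensor(i, v, torch.Size([2, 3, 3]))  # two 3x3 matrices
dense = torch.randn(2, 3, 4)
print(bdsmm(sparse, dense).size())  # torch.Size([2, 3, 4])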
Project: PyTorch-Encoding    Author: zhanghang1989    | Project source | File source
def view_each(x, size):
    """Multi-GPU version torch.view

    Returns a new tensor with the same data but different size.
    The returned tensor shares the same data and must have the same number
    of elements, but may have a different size. A tensor must be
    :attr:`contiguous` to be viewed.

    Args:
        x: list of multi-GPU tensors
        size (torch.Size or int...): Desired size

    """
    y = []
    for i in range(len(x)):
        y.append(x[i].view(size))
    return y
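
A usage sketch with CPU tensors standing in for the per-GPU list:

xs = [torch.randn(2, 6) for _ in range(4)]  # stand-ins for one tensor per GPU
ys = view_each(xs, torch.Size([3, 4]))      # 12 elements reshaped on each replica
print([tuple(y.size()) for y in ys])        # [(3, 4), (3, 4), (3, 4), (3, 4)]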