Python torch module: Storage() example source code

The following 15 code examples, extracted from open-source Python projects, show how to use torch.Storage().
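
Before the project examples, here is a minimal sketch of the torch.Storage API the snippets below rely on. It is written against the older PyTorch releases these projects target (where torch.Storage is the default float storage type); exact behavior may differ in newer versions:

import torch

# Allocate an uninitialized storage of 5 elements, then fill it in place.
storage = torch.Storage(5)
storage.fill_(0)

# Build a storage from a Python list and read it back.
storage = torch.Storage([1.0, 2.0, 3.0])
print(storage.tolist())   # [1.0, 2.0, 3.0]
print(storage.size())     # 3

# A tensor can be created on top of an existing storage; the two share memory.
tensor = torch.Tensor(storage)
tensor[0] = 7.0
print(storage.tolist())   # [7.0, 2.0, 3.0]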

Project: pytorch-dist    Author: apaszke
def test_tolist(self):
        list0D = []
        tensor0D = torch.Tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1, 2, 3]
        tensor1D = torch.Tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.Tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
Project: pytorch    Author: tylergenter
def test_tolist(self):
        list0D = []
        tensor0D = torch.Tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1, 2, 3]
        tensor1D = torch.Tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.Tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
Project: pytorch-coriander    Author: hughperkins
def test_tolist(self):
        list0D = []
        tensor0D = torch.Tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1, 2, 3]
        tensor1D = torch.Tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.Tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
Project: pytorch    Author: ezyang
def test_tolist(self):
        list0D = []
        tensor0D = torch.Tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1, 2, 3]
        tensor1D = torch.Tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.Tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
Project: pytorch    Author: pytorch
def test_tolist(self):
        list0D = []
        tensor0D = torch.Tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1, 2, 3]
        tensor1D = torch.Tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.Tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
Project: allennlp    Author: allenai
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
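
A quick usage sketch for device_mapping (the checkpoint path below is a placeholder, not part of the project):

# Load a GPU-trained checkpoint onto the CPU ...
state = torch.load("model.th", map_location=device_mapping(-1))

# ... or remap its storages onto GPU 0 instead.
state = torch.load("model.th", map_location=device_mapping(0))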
Project: temperature_scaling    Author: gpleiss
def create_multi_gpu_storage(size=1024):
    multi_storage = []
    device_cnt = torch.cuda.device_count()
    for device_no in range(device_cnt):
        with torch.cuda.device(device_no):
            multi_storage.append(torch.Storage(size).cuda())
    return multi_storage
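
A brief usage sketch, assuming at least one CUDA device is visible (the size argument is a number of elements, not bytes):

# Pre-allocate one CUDA storage per visible device.
storages = create_multi_gpu_storage(size=2048)
assert len(storages) == torch.cuda.device_count()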
Project: efficient_densenet_pytorch    Author: gpleiss
def create_multi_gpu_storage(size=1024):
    multi_storage = []
    device_cnt = torch.cuda.device_count()
    for device_no in range(device_cnt):
        with torch.cuda.device(device_no):
            multi_storage.append(torch.Storage(size).cuda())
    return multi_storage
Project: efficient_densenet_pytorch    Author: gpleiss
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
        input_storage_1 = torch.Storage(storage_size)
        input_storage_2 = torch.Storage(storage_size)
        self.final_num_features = num_input_features + (growth_rate * num_layers)
        self.shared_allocation_1 = _SharedAllocation(input_storage_1)
        self.shared_allocation_2 = _SharedAllocation(input_storage_2)

        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2, num_input_features + i * growth_rate,
                                growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (i + 1), layer)
Project: efficient_densenet_pytorch    Author: gpleiss
def test_forward_eval_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    bn = F.batch_norm(
        input=Variable(torch.cat([input_1, input_2], dim=1)),
        running_mean=running_mean,
        running_var=running_var,
        weight=Parameter(weight),
        bias=Parameter(bias),
        training=False,
        momentum=momentum,
        eps=eps
    ).data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean,
        running_var=running_var,
        training=False,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight, bias, input_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(bn_efficient.storage().data_ptr() == storage.data_ptr())
Project: efficient_densenet_pytorch    Author: gpleiss
def test_forward_train_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()
    running_mean_efficient = running_mean.clone()
    running_var_efficient = running_var.clone()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    bn = F.batch_norm(
        input=Variable(torch.cat([input_1, input_2], dim=1)),
        running_mean=running_mean,
        running_var=running_var,
        weight=Parameter(weight),
        bias=Parameter(bias),
        training=True,
        momentum=momentum,
        eps=eps
    ).data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean_efficient,
        running_var=running_var_efficient,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight, bias, input_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(bn_efficient.storage().data_ptr() == storage.data_ptr())
    assert(almost_equal(running_mean, running_mean_efficient))
    assert(almost_equal(running_var, running_var_efficient))
Project: pytorch    Author: pytorch
def new(self, *args, **kwargs):
        r"""Constructs a new tensor of the same data type as :attr:`self` tensor.

        Any valid argument combination to the tensor constructor is accepted by
        this method, including sizes, :class:`torch.Storage`, NumPy ndarray,
        Python Sequence, etc. See :ref:`torch.Tensor <tensor-doc>` for more
        details.

        .. note:: For CUDA tensors, this method will create a new tensor on
                  the same device as this tensor.
        """
        return self.__class__(*args, **kwargs)
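
A short illustration of the behavior this docstring describes, using the old-style (pre-0.4) tensor API:

x = torch.FloatTensor([[1, 2], [3, 4]])

# new() with sizes returns an uninitialized tensor of the same type as x.
y = x.new(2, 3)
assert y.type() == x.type()

# Any valid constructor argument works, e.g. a torch.Storage or a sequence.
z = x.new(torch.Storage([5.0, 6.0]))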
Project: efficient_densenet_pytorch    Author: gpleiss
def test_backward_train_mode_computes_forward_pass():
    momentum = 0.1
    eps = 1e-5

    weight = torch.randn(10).cuda()
    bias = torch.randn(10).cuda()
    running_mean = torch.randn(10).cuda()
    running_var = torch.randn(10).abs().cuda()
    weight_efficient = weight.clone()
    bias_efficient = bias.clone()
    running_mean_efficient = running_mean.clone()
    running_var_efficient = running_var.clone()

    input_1 = torch.randn(4, 5).cuda()
    input_2 = torch.randn(4, 5).cuda()
    storage = torch.Storage(40).cuda()

    input_var = Variable(torch.cat([input_1, input_2], dim=1), requires_grad=True)
    weight_var = Parameter(weight)
    bias_var = Parameter(bias)
    bn_var = F.batch_norm(
        input=input_var,
        running_mean=running_mean,
        running_var=running_var,
        weight=weight_var,
        bias=bias_var,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn = bn_var.data
    bn_var.backward(gradient=input_var.data.clone().fill_(1))
    input_grad = input_var.grad.data
    weight_grad = weight_var.grad.data
    bias_grad = bias_var.grad.data

    input_efficient = torch.cat([input_1, input_2], dim=1)
    input_efficient_orig = input_efficient.clone()
    func = _EfficientBatchNorm(
        storage=storage,
        running_mean=running_mean_efficient,
        running_var=running_var_efficient,
        training=True,
        momentum=momentum,
        eps=eps
    )
    bn_efficient = func.forward(weight_efficient, bias_efficient, input_efficient)
    grad_out_efficient = bn_efficient.clone().fill_(1)
    weight_grad_efficient, bias_grad_efficient, input_grad_efficient = func.backward(
            weight_efficient, bias_efficient, input_efficient_orig, grad_out_efficient)

    assert(almost_equal(bn, bn_efficient))
    assert(grad_out_efficient.storage().data_ptr() == input_grad_efficient.storage().data_ptr())
    assert(almost_equal(input_grad, input_grad_efficient))
    assert(almost_equal(weight_grad, weight_grad_efficient))
    assert(almost_equal(bias_grad, bias_grad_efficient))
Project: efficient_densenet_pytorch    Author: gpleiss
def test_forward_training_false_computes_forward_pass():
    bn_weight = torch.randn(8).cuda()
    bn_bias = torch.randn(8).cuda()
    bn_running_mean = torch.randn(8).cuda()
    bn_running_var = torch.randn(8).abs().cuda()
    conv_weight = torch.randn(4, 8, 1, 1).cuda()
    input_1 = torch.randn(4, 6, 4, 4).cuda()
    input_2 = torch.randn(4, 2, 4, 4).cuda()

    layer = nn.Sequential(OrderedDict([
        ('norm', nn.BatchNorm2d(8)),
        ('relu', nn.ReLU(inplace=True)),
        ('conv', nn.Conv2d(8, 4, bias=None, kernel_size=1, stride=1)),
    ])).cuda()
    layer.train()
    layer.norm.weight.data.copy_(bn_weight)
    layer.norm.bias.data.copy_(bn_bias)
    layer.norm.running_mean.copy_(bn_running_mean)
    layer.norm.running_var.copy_(bn_running_var)
    layer.conv.weight.data.copy_(conv_weight)

    input_1_var = Variable(input_1)
    input_2_var = Variable(input_2)
    out_var = layer(torch.cat([input_1_var, input_2_var], dim=1))

    storage_1 = torch.Storage(4 * 8 * 3 * 3).cuda()
    storage_2 = torch.Storage(4 * 8 * 3 * 3).cuda()
    layer_efficient = _EfficientDensenetBottleneck(
        _SharedAllocation(storage_1), _SharedAllocation(storage_2), 8, 4
    ).cuda()
    layer_efficient.train()
    layer_efficient.norm_weight.data.copy_(bn_weight)
    layer_efficient.norm_bias.data.copy_(bn_bias)
    layer_efficient.norm_running_mean.copy_(bn_running_mean)
    layer_efficient.norm_running_var.copy_(bn_running_var)
    layer_efficient.conv_weight.data.copy_(conv_weight)

    input_efficient_1_var = Variable(input_1)
    input_efficient_2_var = Variable(input_2)
    out_efficient_var = layer_efficient([input_efficient_1_var, input_efficient_2_var])

    assert(almost_equal(out_var.data, out_efficient_var.data))
    assert(almost_equal(layer.norm.running_mean, layer_efficient.norm_running_mean))
    assert(almost_equal(layer.norm.running_var, layer_efficient.norm_running_var))
Project: efficient_densenet_pytorch    Author: gpleiss
def test_backward_computes_backward_pass():
    bn_weight = torch.randn(8).cuda()
    bn_bias = torch.randn(8).cuda()
    bn_running_mean = torch.randn(8).cuda()
    bn_running_var = torch.randn(8).abs().cuda()
    conv_weight = torch.randn(4, 8, 1, 1).cuda()
    input_1 = torch.randn(4, 6, 4, 4).cuda()
    input_2 = torch.randn(4, 2, 4, 4).cuda()

    layer = nn.Sequential(OrderedDict([
        ('norm', nn.BatchNorm2d(8)),
        ('relu', nn.ReLU(inplace=True)),
        ('conv', nn.Conv2d(8, 4, bias=None, kernel_size=1, stride=1)),
    ])).cuda()
    layer.train()
    layer.norm.weight.data.copy_(bn_weight)
    layer.norm.bias.data.copy_(bn_bias)
    layer.norm.running_mean.copy_(bn_running_mean)
    layer.norm.running_var.copy_(bn_running_var)
    layer.conv.weight.data.copy_(conv_weight)

    input_1_var = Variable(input_1, requires_grad=True)
    input_2_var = Variable(input_2, requires_grad=True)
    out_var = layer(torch.cat([input_1_var, input_2_var], dim=1))
    out_var.sum().backward()

    storage_1 = torch.Storage(4 * 8 * 3 * 3).cuda()
    storage_2 = torch.Storage(4 * 8 * 3 * 3).cuda()
    layer_efficient = _EfficientDensenetBottleneck(
        _SharedAllocation(storage_1), _SharedAllocation(storage_2), 8, 4
    ).cuda()
    layer_efficient.train()
    layer_efficient.norm_weight.data.copy_(bn_weight)
    layer_efficient.norm_bias.data.copy_(bn_bias)
    layer_efficient.norm_running_mean.copy_(bn_running_mean)
    layer_efficient.norm_running_var.copy_(bn_running_var)
    layer_efficient.conv_weight.data.copy_(conv_weight)

    input_efficient_1_var = Variable(input_1, requires_grad=True)
    input_efficient_2_var = Variable(input_2, requires_grad=True)
    out_efficient_var = layer_efficient([input_efficient_1_var, input_efficient_2_var])
    out_efficient_var.sum().backward()

    # print(input_1_var.grad.data[:, 0], input_efficient_1_var.grad.data[:, 0])
    assert(almost_equal(out_var.data, out_efficient_var.data))
    assert(almost_equal(layer.norm.running_mean, layer_efficient.norm_running_mean))
    assert(almost_equal(layer.norm.running_var, layer_efficient.norm_running_var))
    assert(almost_equal(layer.conv.weight.grad.data, layer_efficient.conv_weight.grad.data))
    assert(almost_equal(layer.norm.weight.grad.data, layer_efficient.norm_weight.grad.data))
    assert(almost_equal(layer.norm.bias.grad.data, layer_efficient.norm_bias.grad.data))
    assert(almost_equal(input_1_var.grad.data, input_efficient_1_var.grad.data))
    assert(almost_equal(input_2_var.grad.data, input_efficient_2_var.grad.data))