Python torch module: torch.sparse example source code

We extracted the following 34 code examples from open-source Python projects to illustrate how to use the torch.sparse module.
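
For readers new to the module, here is a minimal, self-contained sketch of the legacy torch.sparse constructor API used throughout the snippets below (COO format: an index tensor, a value tensor, and a size). The variable names are illustrative only, and the code assumes the same pre-0.4 PyTorch era as the snippets.

import torch

# A 3x3 matrix with two non-zero entries in COO form:
# indices is a (ndim x nnz) LongTensor; values is a (nnz,) tensor.
i = torch.LongTensor([[0, 2],    # row indices
                      [1, 0]])   # column indices
v = torch.DoubleTensor([3.0, 4.0])

x = torch.sparse.DoubleTensor(i, v, torch.Size([3, 3]))
print(x.to_dense())   # 3.0 at (0, 1), 4.0 at (2, 0), zeros elsewhere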

Project: gpytorch    Author: jrg365
def bdsmm(sparse, dense):
    """
    Batch dense-sparse matrix multiply (computes sparse @ dense for each
    batch element, with a dense result).
    """
    if sparse.ndimension() > 2:
        # Flatten the batch of sparse matrices into a single block-diagonal
        # 2-d sparse matrix: entry (b, r, c) moves to (b * n_rows + r,
        # b * n_cols + c).
        batch_size, n_rows, n_cols = sparse.size()
        batch_assignment = sparse._indices()[0]
        indices = sparse._indices()[1:].clone()
        # Legacy add_(value, other) signature: indices[0] += n_rows * batch
        indices[0].add_(n_rows, batch_assignment)
        indices[1].add_(n_cols, batch_assignment)
        sparse_2d = sparse.__class__(indices, sparse._values(),
                                     torch.Size((batch_size * n_rows, batch_size * n_cols)))

        # Broadcast a single dense matrix across the batch if necessary, then
        # stack the batch along the row dimension to line up with sparse_2d.
        if dense.size(0) == 1:
            dense = dense.repeat(batch_size, 1, 1)
        dense_2d = dense.contiguous().view(batch_size * n_cols, -1)
        res = torch.dsmm(sparse_2d, dense_2d)
        res = res.view(batch_size, n_rows, -1)
        return res
    else:
        return torch.dsmm(sparse, dense)
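
A hedged usage sketch for bdsmm (assuming the same legacy API, torch.dsmm and the torch.sparse.* constructors; the batch sparse tensor carries the batch index as its first index row):

import torch

# Two 3x4 sparse matrices in one batch; index rows are batch, row, col.
i = torch.LongTensor([[0, 1],
                      [0, 2],
                      [1, 3]])
v = torch.DoubleTensor([1.0, 2.0])
batch_sparse = torch.sparse.DoubleTensor(i, v, torch.Size([2, 3, 4]))

dense = torch.randn(2, 4, 5)
out = bdsmm(batch_sparse, dense)   # shape (2, 3, 5)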
Project: pytorch    Author: tylergenter
def cpu(self):
        return self.type(getattr(torch.sparse, self.__class__.__name__))
Project: pytorch    Author: tylergenter
def __init__(self, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                 sparse=False):
        super(Embedding, self).__init__()
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self._indices = None
        self.sparse = sparse
Project: pytorch    Author: tylergenter
def setUp(self):
        # These parameters control the various ways we can run the test.
        # We will subclass and override this method to implement CUDA
        # tests
        self.is_cuda = False
        self.is_uncoalesced = False
        self.IndexTensor = torch.LongTensor
        self.ValueTensor = torch.DoubleTensor
        self.SparseTensor = torch.sparse.DoubleTensor
Project: pytorch    Author: tylergenter
def _gen_sparse(self, d, nnz, with_size):
        # TODO: Consider implementing this in the CUDA case by directly
        # performing the operations on the GPU.  You won't be able to
        # use torch.rand/torch.randn in this case because they are
        # CPU-only.  If you do this, you can remove the is_cuda branch
        # at the end.
        #
        # If you do this, be sure to update assert_uncoalesced too

        if isinstance(with_size, Number):
            with_size = [with_size] * d

        if self.is_uncoalesced:
            # We want to generate a tensor with a lot of uncoalesced
            # entries to stress test whether or not we handle this
            # (subtle) case correctly
            v_size = [nnz * 2] + list(with_size[d:])
            v = torch.randn(*v_size)
            r = torch.rand(d, nnz)
            # Repeat the indexes, so every position shows up twice
            i = torch.cat([r, r], dim=1) * \
                torch.Tensor(with_size[:d]).repeat(nnz * 2, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = torch.sparse.DoubleTensor(i, v, torch.Size(with_size))
            self.assert_uncoalesced(x)
        else:
            # Generate a sparse tensor with d sparse dimensions; the
            # rest of the dimensions, with_size[d:], are dense.
            v_size = [nnz] + list(with_size[d:])
            v = torch.randn(*v_size)
            i = torch.rand(d, nnz) * \
                torch.Tensor(with_size[:d]).repeat(nnz, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = torch.sparse.DoubleTensor(i, v, torch.Size(with_size))

        if self.is_cuda:
            return x.cuda(), i.cuda(), v.cuda()
        else:
            return x, i.clone(), v.clone()
Project: pytorch    Author: tylergenter
def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        self.IndexTensor = torch.cuda.LongTensor
        self.ValueTensor = torch.cuda.DoubleTensor
        self.SparseTensor = torch.cuda.sparse.DoubleTensor
Project: pytorch-coriander    Author: hughperkins
def cpu(self):
        return self.type(getattr(torch.sparse, self.__class__.__name__))
Project: pytorch-coriander    Author: hughperkins
def __init__(self, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                 sparse=False):
        super(Embedding, self).__init__()
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self._indices = None
        self.sparse = sparse
Project: pytorch-coriander    Author: hughperkins
def setUp(self):
        # These parameters control the various ways we can run the test.
        # We will subclass and override this method to implement CUDA
        # tests
        self.is_cuda = False
        self.is_uncoalesced = False
        self.IndexTensor = torch.LongTensor
        self.ValueTensor = torch.DoubleTensor
        self.SparseTensor = torch.sparse.DoubleTensor
Project: pytorch-coriander    Author: hughperkins
def _gen_sparse(self, d, nnz, with_size):
        # TODO: Consider implementing this in the CUDA case by directly
        # performing the operations on the GPU.  You won't be able to
        # use torch.rand/torch.randn in this case because they are
        # CPU-only.  If you do this, you can remove the is_cuda branch
        # at the end.
        #
        # If you do this, be sure to update assert_uncoalesced too

        if isinstance(with_size, Number):
            with_size = [with_size] * d

        if self.is_uncoalesced:
            # We want to generate a tensor with a lot of uncoalesced
            # entries to stress test whether or not we handle this
            # (subtle) case correctly
            v_size = [nnz * 2] + list(with_size[d:])
            v = torch.randn(*v_size)
            r = torch.rand(d, nnz)
            # Repeat the indexes, so every position shows up twice
            i = torch.cat([r, r], dim=1) * \
                torch.Tensor(with_size[:d]).repeat(nnz * 2, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = torch.sparse.DoubleTensor(i, v, torch.Size(with_size))
            self.assert_uncoalesced(x)
        else:
            # Generate a sparse tensor with d sparse dimensions; the
            # rest of the dimensions, with_size[d:], are dense.
            v_size = [nnz] + list(with_size[d:])
            v = torch.randn(*v_size)
            i = torch.rand(d, nnz) * \
                torch.Tensor(with_size[:d]).repeat(nnz, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = torch.sparse.DoubleTensor(i, v, torch.Size(with_size))

        if self.is_cuda:
            return x.cuda(), i.cuda(), v.cuda()
        else:
            return x, i.clone(), v.clone()
Project: pytorch-coriander    Author: hughperkins
def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        self.IndexTensor = torch.cuda.LongTensor
        self.ValueTensor = torch.cuda.DoubleTensor
        self.SparseTensor = torch.cuda.sparse.DoubleTensor
Project: pytorch    Author: ezyang
def cpu(self):
        return self.type(getattr(torch.sparse, self.__class__.__name__))
Project: pytorch    Author: ezyang
def symbolic(g, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                 sparse=False):
        if max_norm is not None:
            raise ValueError('Right now, re-norm is not supported.')

        output = g.appendNode(g.create("Gather", [weight, indices]))
        return output
Project: pytorch    Author: ezyang
def forward(cls, ctx, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                sparse=False):

        ctx.padding_idx = padding_idx
        ctx.scale_grad_by_freq = scale_grad_by_freq
        ctx._indices = None
        ctx.sparse = sparse

        assert indices.dim() <= 2
        assert not ctx.needs_input_grad[0], "Embedding doesn't " \
            "compute the gradient w.r.t. the indices"

        ctx._backend = type2backend[type(weight)]
        ctx._weight_size = weight.size()

        if not indices.is_contiguous():
            ctx._indices = indices.contiguous()
            indices = ctx._indices
        else:
            ctx.save_for_backward(indices)

        output = weight.new()
        if max_norm is not None:
            cls._renorm(ctx, indices, weight, max_norm, norm_type)

        if indices.dim() == 1:
            output = torch.index_select(weight, 0, indices)
        else:
            output = torch.index_select(weight, 0, indices.view(-1))
            output = output.view(indices.size(0), indices.size(1), weight.size(1))

        return output
Project: pytorch    Author: ezyang
def setUp(self):
        # These parameters control the various ways we can run the test.
        # We will subclass and override this method to implement CUDA
        # tests
        self.is_cuda = False
        self.is_uncoalesced = False
        self.IndexTensor = torch.LongTensor
        self.ValueTensor = torch.DoubleTensor
        self.SparseTensor = torch.sparse.DoubleTensor
Project: pytorch    Author: ezyang
def assert_uncoalesced(self, x):
        """
        Test if a CPU tensor is uncoalesced.  This is used to ensure
        correctness of the uncoalesced tensor generation algorithm.
        """
        assert not x.is_coalesced()
        # Strategy: construct a new sparse tensor with the raw value
        # field overwritten to a tensor of ones, coalesce it, and then
        # check if any value entries are > 1 (which indicates that the
        # original was uncoalesced.)
        i = x._indices().clone()
        v = x._values().clone().fill_(1)
        y = torch.sparse.DoubleTensor(i, v, x.size())
        z = self.safeCoalesce(y)
        assert (z._values() > 1).sum() > 0
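
For intuition about what assert_uncoalesced verifies, a small sketch (assuming a build whose sparse tensors expose coalesce(); the test itself goes through safeCoalesce): duplicate indices are summed when a tensor is coalesced, so overwriting the values with ones and coalescing exposes any duplicates as entries > 1.

import torch

# The index (0, 0) appears twice, so this tensor is uncoalesced.
i = torch.LongTensor([[0, 0, 1],
                      [0, 0, 2]])
v = torch.DoubleTensor([1.0, 1.0, 5.0])
x = torch.sparse.DoubleTensor(i, v, torch.Size([3, 3]))

y = x.coalesce()
print(y._values())   # the duplicates at (0, 0) collapse to a single 2.0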
Project: pytorch    Author: ezyang
def test_storage_not_null(self):
        x = torch.cuda.sparse.FloatTensor(2)
        self.assertNotEqual(x.get_device(), -1)
Project: pytorch    Author: ezyang
def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        self.IndexTensor = torch.cuda.LongTensor
        self.ValueTensor = torch.cuda.DoubleTensor
        self.SparseTensor = torch.cuda.sparse.DoubleTensor
Project: gpytorch    Author: jrg365
def sparse_eye(size):
    """
    Returns the identity matrix as a sparse matrix
    """
    indices = torch.arange(0, size).long().unsqueeze(0).expand(2, size)
    values = torch.Tensor([1]).expand(size)
    return torch.sparse.FloatTensor(indices, values, torch.Size([size, size]))
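
A quick check of sparse_eye (same legacy constructor API):

eye3 = sparse_eye(3)
print(eye3.to_dense())   # the 3x3 identity matrix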
Project: gpytorch    Author: jrg365
def sparse_repeat(sparse, *repeat_sizes):
    orig_ndim = sparse.ndimension()
    new_ndim = len(repeat_sizes)
    orig_nvalues = sparse._indices().size(1)

    # Expand the number of dimensions to match repeat_sizes
    indices = torch.cat([sparse._indices().new().resize_(new_ndim - orig_ndim, orig_nvalues).zero_(),
                         sparse._indices()])
    values = sparse._values()
    size = [1] * (new_ndim - orig_ndim) + list(sparse.size())

    # Expand each dimension
    new_indices = indices.new().resize_(indices.size(0), indices.size(1) * mul(*repeat_sizes)).zero_()
    new_values = values.repeat(mul(*repeat_sizes))
    new_size = [dim_size * repeat_size for dim_size, repeat_size in zip(size, repeat_sizes)]

    # Fill in new indices
    new_indices[:, :orig_nvalues].copy_(indices)
    unit_size = orig_nvalues
    for i in range(new_ndim)[::-1]:
        repeat_size = repeat_sizes[i]
        for j in range(1, repeat_size):
            new_indices[:, unit_size * j:unit_size * (j + 1)].copy_(new_indices[:, :unit_size])
            new_indices[i, unit_size * j:unit_size * (j + 1)] += j * size[i]
        unit_size *= repeat_size

    return sparse.__class__(new_indices, new_values, torch.Size(new_size))
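
A hedged usage sketch for sparse_repeat. Note the helper mul(*repeat_sizes) is assumed to multiply the repeat factors together (for exactly two factors, operator.mul behaves this way):

import torch

i = torch.LongTensor([[0], [1]])
v = torch.FloatTensor([7.0])
x = torch.sparse.FloatTensor(i, v, torch.Size([2, 2]))

tiled = sparse_repeat(x, 2, 3)   # a (4, 6) sparse tensor: x tiled 2x3
print(tiled.to_dense())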
Project: gpytorch    Author: jrg365
def to_sparse(dense):
    mask = dense.ne(0)
    indices = mask.nonzero()
    if indices.storage():
        values = dense[mask]
    else:
        indices = indices.resize_(1, dense.ndimension()).zero_()
        values = dense.new().resize_(1).zero_()

    # Construct sparse tensor
    klass = getattr(torch.sparse, dense.__class__.__name__)
    res = klass(indices.t(), values, dense.size())
    if dense.is_cuda:
        res = res.cuda()
    return res
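
A round-trip sketch for to_sparse (hedged: it relies on the pre-0.4 convention that a CPU tensor's class name is e.g. FloatTensor, so the getattr lookup on torch.sparse finds a matching constructor):

dense = torch.Tensor([[0, 3],
                      [4, 0]])
sp = to_sparse(dense)
print(sp._indices())   # (0, 1) and (1, 0)
print(sp.to_dense())   # recovers the original matrix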
Project: pytorch    Author: pytorch
def cpu(self):
        return self.type(getattr(torch.sparse, self.__class__.__name__))
Project: pytorch    Author: pytorch
def symbolic(g, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                 sparse=False):
        if max_norm is not None:
            raise ValueError('Right now, re-norm is not supported.')

        return g.op("Gather", weight, indices)
Project: pytorch    Author: pytorch
def forward(cls, ctx, indices, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq,
                sparse=False):

        ctx.padding_idx = padding_idx
        ctx.scale_grad_by_freq = scale_grad_by_freq
        ctx._indices = None
        ctx.sparse = sparse

        assert indices.dim() <= 2
        assert not ctx.needs_input_grad[0], "Embedding doesn't " \
            "compute the gradient w.r.t. the indices"

        ctx._backend = type2backend[type(weight)]
        ctx._weight_size = weight.size()

        if not indices.is_contiguous():
            ctx._indices = indices.contiguous()
            indices = ctx._indices
        else:
            ctx.save_for_backward(indices)

        output = weight.new()
        if max_norm is not None:
            cls._renorm(ctx, indices, weight, max_norm, norm_type)

        if indices.dim() == 1:
            output = torch.index_select(weight, 0, indices)
        else:
            output = torch.index_select(weight, 0, indices.view(-1))
            output = output.view(indices.size(0), indices.size(1), weight.size(1))

        return output
Project: pytorch    Author: pytorch
def setUp(self):
        # These parameters control the various ways we can run the test.
        # We will subclass and override this method to implement CUDA
        # tests
        self.is_cuda = False
        self.is_uncoalesced = False
        self.IndexTensor = torch.LongTensor
        self.ValueTensor = torch.DoubleTensor
        self.SparseTensor = torch.sparse.DoubleTensor
        super(TestSparse, self).setUp()
Project: pytorch    Author: pytorch
def assert_uncoalesced(self, x):
        """
        Test if a CPU tensor is uncoalesced.  This is used to ensure
        correctness of the uncoalesced tensor generation algorithm.
        """
        assert not x.is_coalesced()
        # Strategy: construct a new sparse tensor with the raw value
        # field overwritten to a tensor of ones, coalesce it, and then
        # check if any value entries are > 1 (which indicates that the
        # original was uncoalesced.)
        i = x._indices().clone()
        v = x._values().clone().fill_(1)
        y = torch.sparse.DoubleTensor(i, v, x.size())
        z = self.safeCoalesce(y)
        assert (z._values() > 1).sum() > 0
Project: pytorch    Author: pytorch
def test_storage_not_null(self):
        x = torch.cuda.sparse.FloatTensor(2)
        self.assertNotEqual(x.get_device(), -1)
Project: pytorch    Author: pytorch
def _test_new_device(self, size, device):
        with torch.cuda.device(device):
            x = torch.cuda.sparse.DoubleTensor(*size)
        self.assertEqual(x.get_device(), device)
        x1 = x.new()
        x2 = x.new(2, 3)
        self.assertEqual(x1.get_device(), device)
        self.assertEqual(x2.get_device(), device)
Project: pytorch    Author: pytorch
def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        self.IndexTensor = torch.cuda.LongTensor
        self.ValueTensor = torch.cuda.DoubleTensor
        self.SparseTensor = torch.cuda.sparse.DoubleTensor
Project: pytorch    Author: tylergenter
def backward(self, grad_output):
        if self._indices is not None:
            indices = self._indices
        else:
            indices, = self.saved_tensors

        grad_output = grad_output.contiguous()
        if not self.sparse:
            if indices.dim() == 2:
                indices = indices.view(-1)

            with torch.cuda.device_of(grad_output):
                if grad_output.is_cuda:
                    _sorted = torch.cuda.LongTensor()
                    _indices = torch.cuda.LongTensor()
                    _count = torch.cuda.LongTensor()
                else:
                    _count = torch.IntTensor()
                    _sorted = _indices = None

            grad_weight = grad_output.new(self._weight_size).zero_()
            self._backend.LookupTable_accGradParameters(
                self._backend.library_state,
                indices,
                grad_output,
                grad_weight,
                _count,
                _sorted,
                _indices,
                self.scale_grad_by_freq,
                self.padding_idx,
                1
            )
        else:
            tensor_type = type(grad_output).__name__
            if grad_output.is_cuda:
                SparseTensor = getattr(torch.cuda.sparse, tensor_type)
            else:
                SparseTensor = getattr(torch.sparse, tensor_type)
            grad_weight = SparseTensor(
                indices.view(1, -1),
                grad_output.view(-1, self._weight_size[1]),
                self._weight_size,
            )
        return None, grad_weight
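
The sparse branch above is what Embedding(..., sparse=True) relies on: the weight gradient comes back as a sparse tensor whose index rows are the looked-up indices. A hedged sketch of the observable effect through the era-appropriate Variable API (the classes in these snippets are the underlying _functions implementations):

import torch
import torch.nn as nn
from torch.autograd import Variable

emb = nn.Embedding(10, 3, sparse=True)
idx = Variable(torch.LongTensor([1, 1, 4]))
emb(idx).sum().backward()
print(emb.weight.grad)   # a sparse gradient touching rows 1, 1, and 4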
Project: pytorch-coriander    Author: hughperkins
def backward(self, grad_output):
        if self._indices is not None:
            indices = self._indices
        else:
            indices, = self.saved_tensors

        grad_output = grad_output.contiguous()
        if not self.sparse:
            if indices.dim() == 2:
                indices = indices.view(-1)

            with torch.cuda.device_of(grad_output):
                if grad_output.is_cuda:
                    _sorted = torch.cuda.LongTensor()
                    _indices = torch.cuda.LongTensor()
                    _count = torch.cuda.LongTensor()
                else:
                    _count = torch.IntTensor()
                    _sorted = _indices = None

            grad_weight = grad_output.new(self._weight_size).zero_()
            self._backend.LookupTable_accGradParameters(
                self._backend.library_state,
                indices,
                grad_output,
                grad_weight,
                _count,
                _sorted,
                _indices,
                self.scale_grad_by_freq,
                self.padding_idx,
                1
            )
        else:
            tensor_type = type(grad_output).__name__
            if grad_output.is_cuda:
                SparseTensor = getattr(torch.cuda.sparse, tensor_type)
            else:
                SparseTensor = getattr(torch.sparse, tensor_type)
            grad_weight = SparseTensor(
                indices.view(1, -1),
                grad_output.view(-1, self._weight_size[1]),
                self._weight_size,
            )
        return None, grad_weight
Project: pytorch    Author: ezyang
def backward(ctx, grad_output):
        if ctx._indices is not None:
            indices = ctx._indices
        else:
            indices, = ctx.saved_tensors

        grad_output = grad_output.contiguous()
        if not ctx.sparse:
            if indices.dim() == 2:
                indices = indices.view(-1)

            with torch.cuda.device_of(grad_output):
                if grad_output.is_cuda:
                    _sorted = torch.cuda.LongTensor()
                    _indices = torch.cuda.LongTensor()
                    _count = torch.cuda.LongTensor()
                else:
                    _count = torch.IntTensor()
                    _sorted = _indices = None

            grad_weight = grad_output.new(ctx._weight_size).zero_()
            # Doesn't support Variable grad_output
            ctx._backend.LookupTable_accGradParameters(
                ctx._backend.library_state,
                indices,
                grad_output,
                grad_weight,
                _count,
                _sorted,
                _indices,
                ctx.scale_grad_by_freq,
                ctx.padding_idx,
                1
            )
        else:
            tensor_type = type(grad_output).__name__
            if grad_output.is_cuda:
                SparseTensor = getattr(torch.cuda.sparse, tensor_type)
            else:
                SparseTensor = getattr(torch.sparse, tensor_type)
            grad_weight = SparseTensor(
                indices.view(1, -1),
                grad_output.view(-1, ctx._weight_size[1]),
                ctx._weight_size,
            )
        return None, grad_weight, None, None, None, None, None
Project: gpytorch    Author: jrg365
def sparse_getitem(sparse, idxs):
    if not isinstance(idxs, tuple):
        idxs = idxs,

    if not sparse.ndimension() <= 2:
        raise RuntimeError('Must be a 1d or 2d sparse tensor')

    if len(idxs) > sparse.ndimension():
        raise RuntimeError('Invalid index for %d-order tensor' % sparse.ndimension())

    indices = sparse._indices()
    values = sparse._values()
    size = list(sparse.size())

    for i, idx in list(enumerate(idxs))[::-1]:
        if isinstance(idx, int):
            del size[i]
            mask = indices[i].eq(idx)
            if sum(mask):
                new_indices = indices.new().resize_(indices.size(0) - 1, sum(mask)).zero_()
                for j in range(indices.size(0)):
                    if i > j:
                        new_indices[j].copy_(indices[j][mask])
                    elif i < j:
                        new_indices[j - 1].copy_(indices[j][mask])
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0) - 1, 1).zero_()
                values.resize_(1).zero_()

            if not len(size):
                return sum(values)

        elif isinstance(idx, slice):
            start, stop, step = idx.indices(size[i])
            size = list(size[:i]) + [stop - start] + list(size[i + 1:])
            if step != 1:
                raise RuntimeError('Slicing with step is not supported')
            mask = indices[i].lt(stop) * indices[i].ge(start)
            if sum(mask):
                new_indices = indices.new().resize_(indices.size(0), sum(mask)).zero_()
                for j in range(indices.size(0)):
                    new_indices[j].copy_(indices[j][mask])
                new_indices[i].sub_(start)
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0), 1).zero_()
                values.resize_(1).zero_()

        else:
            raise RuntimeError('Unknown index type')

    return sparse.__class__(indices, values, torch.Size(size))
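
A usage sketch for sparse_getitem (same legacy API): an integer index drops a dimension, while a slice (step 1 only) keeps it.

i = torch.LongTensor([[0, 1],
                      [1, 2]])
v = torch.FloatTensor([3.0, 5.0])
x = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))

row0 = sparse_getitem(x, 0)                            # 1-d sparse: [0, 3, 0]
block = sparse_getitem(x, (slice(0, 2), slice(1, 3)))  # a (2, 2) sparse block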
Project: pytorch    Author: pytorch
def backward(ctx, grad_output):
        if ctx._indices is not None:
            indices = ctx._indices
        else:
            indices, = ctx.saved_tensors

        grad_output = grad_output.contiguous()
        if not ctx.sparse:
            if indices.dim() == 2:
                indices = indices.view(-1)

            with torch.cuda.device_of(grad_output):
                if grad_output.is_cuda:
                    _sorted = torch.cuda.LongTensor()
                    _indices = torch.cuda.LongTensor()
                    _count = torch.cuda.LongTensor()
                else:
                    _count = torch.IntTensor()
                    _sorted = _indices = None

            grad_weight = grad_output.new(ctx._weight_size).zero_()
            # Doesn't support Variable grad_output
            ctx._backend.LookupTable_accGradParameters(
                ctx._backend.library_state,
                indices,
                grad_output,
                grad_weight,
                _count,
                _sorted,
                _indices,
                ctx.scale_grad_by_freq,
                ctx.padding_idx,
                1
            )
        else:
            tensor_type = type(grad_output).__name__
            if grad_output.is_cuda:
                SparseTensor = getattr(torch.cuda.sparse, tensor_type)
            else:
                SparseTensor = getattr(torch.sparse, tensor_type)
            padding_idx = ctx.padding_idx
            indices = indices.view(1, -1)
            grad_output = grad_output.view(-1, ctx._weight_size[1])
            if padding_idx is not None:
                nonpadding_indices_indices = (indices.view(-1) != padding_idx).nonzero().view(-1)
                indices = indices.index_select(1, nonpadding_indices_indices)
                grad_output = grad_output.index_select(0, nonpadding_indices_indices)
            grad_weight = SparseTensor(
                indices,
                grad_output,
                ctx._weight_size,
            )
        return None, grad_weight, None, None, None, None, None