Python torch module: typename() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.typename().
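
As a quick orientation, here is a minimal, self-contained sketch of what torch.typename() returns (output shown for a recent PyTorch; the exact strings may differ slightly in the 0.x versions most snippets below target):

import torch

x = torch.randn(3, 4)
print(torch.typename(x))           # 'torch.FloatTensor'
print(torch.typename(x.double()))  # 'torch.DoubleTensor'
# Non-tensor objects fall back to the qualified class name.
print(torch.typename([1, 2, 3]))   # 'list'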

Project: pytorch-dist | Author: apaszke
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Project: pytorch-dist | Author: apaszke
def forward(self, input, weight, bias=None):
        self._backend = type2backend[type(input)]
        # TODO: free buffers when not needed
        self.buffer1 = input.new()
        self.buffer2 = input.new()
        output = input.new()
        self.with_bias = bias is not None
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_updateOutput(
                self._backend.library_state, input, output, weight, bias,
                self.buffer1, self.buffer2, *self.additional_args[3:])
        else:
            self._backend.VolumetricConvolutionMM_updateOutput(
                self._backend.library_state, input, output, weight,
                bias, self.buffer1, *self.additional_args)
        if self.with_bias:
            self.save_for_backward(input, weight, bias)
        else:
            self.save_for_backward(input, weight)
        return output
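
The forward above selects a backend kernel by comparing the runtime type string against 'torch.cuda.FloatTensor'. A minimal, hypothetical sketch of that dispatch pattern, stripped of the legacy backend machinery:

import torch

def dispatch_by_type(t):
    # Hypothetical helper: branch on the type string the way the legacy
    # functions above do, instead of using isinstance checks.
    if torch.typename(t) == 'torch.cuda.FloatTensor':
        return 'cuda kernel path'
    return 'cpu fallback path'

print(dispatch_by_type(torch.randn(2)))  # 'cpu fallback path'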
Project: pytorch-dist | Author: apaszke
def _compute_grad_weight(self, grad_output):
        input, weight, bias = self._get_saved_tensors()
        # TODO: no zero needed in the future
        grad_weight = weight.new().resize_as_(weight).zero_()
        grad_bias = bias.new().resize_as_(bias).zero_()
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            args = self.additional_args[3:] + (1,)
            self._backend.VolumetricConvolution_accGradParameters(
                self._backend.library_state, input, grad_output, grad_weight,
                grad_bias, self.buffer1, self.buffer2,
                *args)
        else:
            self._backend.VolumetricConvolutionMM_accGradParameters(
                self._backend.library_state, input, grad_output, grad_weight,
                grad_bias, self.buffer1, 1)
        return grad_weight, grad_bias
Project: pytorch-dist | Author: apaszke
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
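
For reference, an approximate sketch of the tree this __repr__ draws for a container holding two submodules (the exact child lines depend on the modules' own reprs):

torch.legacy.nn.Parallel.Parallel {
  input
    |`-> (0): <module 0 repr>
    |`-> (1): <module 1 repr>
     +. -> output
}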
Project: pytorch-dist | Author: apaszke
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
Project: pytorch-dist | Author: apaszke
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules)-1:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Project: pytorch-dist | Author: apaszke
def test_Copy(self):
        input = torch.randn(3,4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch | Author: tylergenter
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Project: pytorch | Author: tylergenter
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch | Author: tylergenter
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
Project: pytorch | Author: tylergenter
def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._gradOutput is None:
            self._gradOutput = input.new()
            self._gradInput = input.new()

        self.gradInput.resize_as_(input).zero_()
        batchSize = input.size(0)
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        contiguousView(self._gradInput, self.gradInput, batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._gradOutput)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
        else:
            self._gradInput.addcmul_(1, self._expand, self._gradOutput)

        return self.gradInput
Project: pytorch | Author: tylergenter
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Project: pytorch | Author: tylergenter
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch | Author: tylergenter
def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch-coriander | Author: hughperkins
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Project: pytorch-coriander | Author: hughperkins
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch-coriander | Author: hughperkins
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
Project: pytorch-coriander | Author: hughperkins
def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._gradOutput is None:
            self._gradOutput = input.new()
            self._gradInput = input.new()

        self.gradInput.resize_as_(input).zero_()
        batchSize = input.size(0)
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        contiguousView(self._gradInput, self.gradInput, batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._gradOutput)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
        else:
            self._gradInput.addcmul_(1, self._expand, self._gradOutput)

        return self.gradInput
Project: pytorch-coriander | Author: hughperkins
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch-coriander | Author: hughperkins
def default_tensor_type(type):
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)

        return wrapper

    return decorator
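
A usage sketch for this test helper, assuming the decorator above is in scope:

@default_tensor_type(torch.DoubleTensor)
def make_ones():
    return torch.ones(2, 2)

print(torch.typename(make_ones()))       # 'torch.DoubleTensor'
print(torch.typename(torch.ones(2, 2)))  # previous default, e.g. 'torch.FloatTensor'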
Project: pytorch-coriander | Author: hughperkins
def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch | Author: ezyang
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Project: pytorch | Author: ezyang
def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)
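
A sketch of the type check in action, written against a recent torch.optim (the snippet above predates it and still mentions Variable):

import torch

params = torch.randn(2, 2, requires_grad=True)
try:
    torch.optim.SGD(params, lr=0.1)       # bare tensor, not an iterable
except TypeError as err:
    print(err)                            # message ends with torch.typename(params)

opt = torch.optim.SGD([params], lr=0.1)   # correct: wrap parameters in a list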
Project: pytorch | Author: ezyang
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
Project: pytorch | Author: ezyang
def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._gradOutput is None:
            self._gradOutput = input.new()
            self._gradInput = input.new()

        self.gradInput.resize_as_(input).zero_()
        batchSize = input.size(0)
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        contiguousView(self._gradInput, self.gradInput, batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._gradOutput)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
        else:
            self._gradInput.addcmul_(1, self._expand, self._gradOutput)

        return self.gradInput
Project: pytorch | Author: ezyang
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Project: pytorch | Author: ezyang
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch | Author: ezyang
def default_tensor_type(type):
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)

        return wrapper

    return decorator
Project: pytorch | Author: ezyang
def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch | Author: pytorch
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Project: pytorch | Author: pytorch
def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)
Project: pytorch | Author: pytorch
def add_module(self, name, module):
        """Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        if hasattr(self, name) and name not in self._modules:
            raise KeyError("attribute '{}' already exists".format(name))
        self._modules[name] = module
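
A sketch of the failure path, where torch.typename() supplies the offending type's name in the error message:

import torch
import torch.nn as nn

net = nn.Module()
net.add_module('fc', nn.Linear(4, 2))      # ok
try:
    net.add_module('bad', torch.randn(3))  # not a Module
except TypeError as err:
    print(err)                             # "torch.FloatTensor is not a Module subclass"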
Project: pytorch | Author: pytorch
def _iter_filter(condition, skip_unknown=False, condition_msg=None):
    def _iter(obj):
        if condition(obj):
            yield obj
        elif obj is None:
            return
        elif isinstance(obj, (list, tuple)):
            for o in obj:
                for var in _iter(o):
                    yield var
        elif not skip_unknown:
            raise ValueError("Auto nesting doesn't know how to process "
                             "an input object of type " + torch.typename(obj) +
                             (". Accepted types: " + condition_msg +
                              ", or lists/tuples of them"
                              if condition_msg else ""))

    return _iter
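
A usage sketch, assuming _iter_filter as defined above: build a flattening iterator over tensors and walk an arbitrarily nested input (None entries are silently skipped; unknown objects raise the ValueError above).

import torch

_iter_tensors = _iter_filter(torch.is_tensor, condition_msg="Tensors")
nested = [torch.ones(1), (torch.zeros(2), None), torch.ones(3)]
print([t.numel() for t in _iter_tensors(nested)])   # [1, 2, 3]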
Project: pytorch | Author: pytorch
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Project: pytorch | Author: pytorch
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
Project: pytorch | Author: pytorch
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Project: pytorch | Author: pytorch
def default_tensor_type(type):
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)

        return wrapper

    return decorator
Project: pytorch | Author: pytorch
def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Project: pytorch-dist | Author: apaszke
def location_tag(storage):
    for _, tagger, _ in _package_registry:
        location = tagger(storage)
        if location:
            return location
    raise RuntimeError("don't know how to determine data location of " +
            torch.typename(storage))
Project: pytorch-dist | Author: apaszke
def default_restore_location(storage, location):
    for _, _, fn in _package_registry:
        result = fn(storage, location)
        if result is not None:
            return result
    raise RuntimeError("don't know how to restore data location of " +
            torch.typename(storage) + " (tagged with " + location + ")")
Project: pytorch-dist | Author: apaszke
def add_module(self, name, module):
        if hasattr(self, name):
            raise KeyError("attribute already exists '{}'".format(name))
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        self._modules[name] = module
Project: pytorch-dist | Author: apaszke
def __setattr__(self, name, value):
        _modules = self.__dict__.get('_modules')
        if isinstance(value, Module):
            if _modules is None:
                raise AttributeError(
                    "cannot assign module before Container.__init__() call")
            _modules[name] = value
        elif _modules is not None and name in _modules:
            if value is not None:
                raise TypeError("cannot assign '{}' as child module '{}' "
                                "(torch.nn.Module or None expected)"
                                 .format(torch.typename(value), name))
            _modules[name] = value
        else:
            Module.__setattr__(self, name, value)
Project: pytorch-dist | Author: apaszke
def _new_idx(self, input):
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            return torch.cuda.ByteTensor()
        else:
            return torch.ByteTensor()
Project: pytorch-dist | Author: apaszke
def _compute_grad_input(self, grad_output):
        input, weight, bias = self._get_saved_tensors()
        # TODO: no zero needed in the future
        grad_input = input.new().resize_as_(input).zero_()
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_updateGradInput(
                self._backend.library_state, input, grad_output, grad_input,
                weight, self.buffer1, *self.additional_args[3:])
        else:
            self._backend.VolumetricConvolutionMM_updateGradInput(
                self._backend.library_state, input, grad_output, grad_input,
                weight, self.buffer1, self.buffer2, *self.additional_args)
        return grad_input
Project: pytorch-dist | Author: apaszke
def backward(self, grad_output):
        if self._indices is not None:
            indices = self._indices
        else:
            indices, = self.saved_tensors

        if indices.dim() == 2:
            indices = indices.view(-1)

        grad_output = grad_output.contiguous()

        if torch.typename(grad_output) == 'torch.cuda.FloatTensor':
            _sorted = torch.cuda.LongTensor()
            _indices = torch.cuda.LongTensor()
            _count = torch.cuda.LongTensor()
        else:
            _count = torch.IntTensor()
            _sorted = _indices = None

        # TODO: sparse updates...
        grad_weight = type(grad_output)(self._weight_size).zero_()
        self._backend.LookupTable_accGradParameters(
            self._backend.library_state,
            indices,
            grad_output,
            grad_weight,
            _count,
            _sorted,
            _indices,
            self.scale_grad_by_freq,
            self.padding_idx,
            1
        )
        return None, grad_weight
Project: pytorch-dist | Author: apaszke
def _iter_filter(condition):
    def _iter(obj):
        if condition(obj):
            yield obj
        elif obj is None:
            return
        elif isinstance(obj, (list, tuple)):
            for o in obj:
                for var in _iter(o):
                    yield var
        else:
            raise ValueError("NestedIOFunction doesn't know how to process "
                "an input object of type " + torch.typename(obj))
    return _iter
Project: pytorch-dist | Author: apaszke
def __str__(self):
        content = ' ' + '\n '.join(str(self[i]) for i in _range(len(self)))
        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
Project: pytorch-dist | Author: apaszke
def updateOutput(self, input):
        assert input.dim() == 2
        input_size = input.size()

        self._output = self._output or input.new()
        self.norm = self.norm or input.new()
        self.buffer = self.buffer or input.new()

        self._output.resize_as_(input)

        # specialization for the infinity norm
        if self.p == float('inf'):
            if not self._indices:
                self._indices = torch.cuda.FloatTensor() if torch.typename(self.output) == 'torch.cuda.FloatTensor' \
                    else torch.LongTensor()

            torch.abs(self.buffer, input)
            torch.max(self.norm, self._indices, self.buffer, 1)
            self.norm.add_(self.eps)
        else:
            self.normp = self.normp or input.new()
            if self.p % 2 != 0:
                torch.abs(self.buffer, input).pow_(self.p)
            else:
                torch.pow(self.buffer, input, self.p)

            torch.sum(self.normp, self.buffer, 1).add_(self.eps)
            torch.pow(self.norm, self.normp, 1./self.p)

        torch.div(self._output, input, self.norm.view(-1, 1).expand_as(input))

        self.output = self._output.view(input_size)
        return self.output
Project: pytorch-dist | Author: apaszke
def _lazyInit(self):
        self._output = self._output or self.output.new()
        self._indices = self._indices or \
           (torch.cuda.LongTensor() if torch.typename(self.output) == 'torch.cuda.FloatTensor' else torch.LongTensor())
Project: pytorch-dist | Author: apaszke
def updateOutput(self, input):
        # lazy initialize buffers
        self._input = self._input or input.new()
        self._weight = self._weight or self.weight.new()
        self._expand = self._expand or self.output.new()
        self._expand2 = self._expand2 or self.output.new()
        self._repeat = self._repeat or self.output.new()
        self._repeat2 = self._repeat2 or self.output.new()

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        # y_j = || w_j - x || = || x - w_j ||
        assert input.dim() == 2

        batchSize = input.size(0)
        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # make the expanded tensor contiguous (requires lots of memory)
        self._repeat.resize_as_(self._expand).copy_(self._expand)

        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            # TODO: after adding new allocators this can be changed
            # requires lots of memory, but minimizes cudaMallocs and loops
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_(-1, self._repeat2)
        else:
            self._repeat.add_(-1, self._expand2)

        torch.norm(self.output, self._repeat, 2, 1)
        self.output.resize_(batchSize, outputSize)

        return self.output