Python chainer module: utils() example source code

We extracted the following 11 code examples from open-source Python projects to illustrate how to use chainer.utils().
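Most of the snippets below go through three entry points of chainer.utils: force_array() (wrapping the scalar result of a reduction into a 0-dim ndarray, since Function.forward must return ndarrays), experimental() (emitting a FutureWarning for experimental APIs), and the type_check submodule used in check_type_forward() (illustrated further below). A minimal, self-contained sketch of the first two, written against the Chainer 2.x/3.x API these projects target; the API name passed to experimental() is made up for illustration:

import numpy
import chainer
from chainer import utils

# force_array: NumPy reductions return scalars, but Function.forward must
# return ndarrays, so the examples below wrap their loss values like this.
loss = utils.force_array(numpy.sum(numpy.arange(3, dtype=numpy.float32)))
assert isinstance(loss, numpy.ndarray) and loss.ndim == 0

# experimental: tags an API as experimental; Chainer emits a FutureWarning
# unless chainer.disable_experimental_feature_warning is set to True.
chainer.utils.experimental('my_project.functions.MyFunction')  # hypothetical name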

Project: chainer-deconv    Author: germanRos
def forward(self, inputs):
        xp = cuda.get_array_module(inputs[0])
        self.input_length = inputs[0]

        # The length of path is (2 * label_length + 1)
        self.path_length = 2 * inputs[1] + 1

        batch_size = len(inputs[2])
        yseq_shape = (len(inputs) - 3,) + inputs[3].shape
        self.yseq = _softmax(xp.vstack(inputs[3::]).reshape(yseq_shape), xp)
        log_yseq = self.log_matrix(self.yseq, xp)
        self.path = _label_to_path(inputs[2], self.blank_symbol, xp)
        self.prob_trans = self.calc_trans(self.path, log_yseq, xp)

        # mean negative log-likelihood over the batch, wrapped as an ndarray
        loss = utils.force_array(xp.sum(
            _logsumexp(self.prob_trans[0], xp, axis=1)))
        loss /= -batch_size
        return loss,
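The forward() above is the internal core of a CTC loss Function; user code normally reaches such a function through the public wrapper chainer.functions.connectionist_temporal_classification. A hedged usage sketch (batch size, sequence length, vocabulary size, labels, and the blank id are all assumptions for illustration):

import numpy as np
import chainer
import chainer.functions as F

batch, timesteps, vocab = 2, 7, 5   # vocab size includes the blank symbol
# one (batch, vocab) frame of unnormalized scores per time step
xs = [chainer.Variable(np.random.randn(batch, vocab).astype(np.float32))
      for _ in range(timesteps)]
labels = np.array([[1, 2], [3, 4]], dtype=np.int32)   # assumed label batch
loss = F.connectionist_temporal_classification(xs, labels, blank_symbol=0)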
Project: chainermn    Author: chainer
def __init__(self, comm, eps=2e-5, mean=None, var=None, decay=0.9):
        chainer.utils.experimental(
            'chainermn.functions.MultiNodeBatchNormalizationFunction')

        self.comm = comm
        self.running_mean = mean
        self.running_var = var

        # Note: cuDNN v5 requires that eps be greater than 1e-5. Otherwise, an
        # error will occur.
        # See CUDNN_BN_MIN_EPSILON value in cudnn.h to verify minimum allowable
        # value.
        self.eps = eps
        if chainer.should_use_cudnn('>=auto'):
            if eps < 1e-5:
                msg = 'cuDNN does not allow an eps value less than 1e-5.'
                raise RuntimeError(msg)
        self.mean_cache = None
        self.decay = decay

        # We need to delay importing MPI4py (and modules that import MPI4py)
        import chainermn.communicators._memory_utility as memory_utility_module
        from mpi4py import MPI as mpi4py_module
        self.memory_utility_module = memory_utility_module
        self.mpi4py_module = mpi4py_module
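chainer.utils.experimental() only emits a FutureWarning here; the rest of the constructor sets up an otherwise ordinary batch-normalization Function. If that warning is noisy (for example in tests), it can be silenced globally — a small sketch assuming the chainer.disable_experimental_feature_warning flag of Chainer 2.x+:

import chainer

# Suppress the FutureWarning raised by chainer.utils.experimental(...)
# for all experimental APIs (global flag, assumed present in Chainer 2.x+).
chainer.disable_experimental_feature_warning = True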
Project: chainer-speech-recognition    Author: musyoku
def forward(self, inputs):
        xp = cuda.get_array_module(inputs[0])
        self.input_length = inputs[0]
        length_unigram = inputs[1]
        label_unigram = inputs[2]
        label_bigram = inputs[3]
        length_bigram = length_unigram
        xs = inputs[4:]

        if chainer.is_debug():
            # batch size check.
            assert len(xs[0]) == len(label_unigram)
            assert len(xs[0]) == len(self.input_length)
            assert len(xs[0]) == len(length_unigram)

            # length check.
            assert len(xs) >= xp.max(self.input_length)
            assert len(label_unigram[0]) >= xp.max(length_unigram)

            # unit check
            assert xs[0].shape[1] > xp.max(label_unigram)
            assert xs[0].shape[1] > xp.max(label_bigram)
            assert xs[0].shape[1] > self.blank_symbol

        self.path_length = length_unigram * 3 + 1

        yseq_shape = (len(xs),) + xs[0].shape
        self.yseq = _softmax(xp.vstack(xs).reshape(yseq_shape), xp)
        log_yseq = _log_matrix(self.yseq, xp, self.zero_padding)
        self.path = _label_to_path(label_unigram, label_bigram, self.blank_symbol, xp)
        self.prob_trans = _compute_transition_probability(
            log_yseq, self.input_length, label_unigram, length_unigram,
            label_bigram, length_bigram, self.path, self.path_length,
            xp, self.zero_padding)

        loss = -_logsumexp(self.prob_trans[0], xp, axis=1)
        if self.reduce == 'mean':
            loss = utils.force_array(xp.mean(loss))
        return loss,
Project: chainer-deconv    Author: germanRos
def check_type_forward(self, in_types):
        """Checks types of input data before forward propagation.

        This function is called before :meth:`forward`. You need to
        validate the types of the input data here using
        :ref:`the type checking utilities <type-check-utils>`.

        Args:
            in_types (~chainer.utils.type_check.TypeInfoTuple): The type
                information of input data for :meth:`forward`.
        """
        pass
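A concrete check_type_forward() usually states its constraints with chainer.utils.type_check.expect(); a violated expectation raises type_check.InvalidType. A minimal sketch (MyLinearLike is a hypothetical Function subclass) matching the LinearFunction constraints exercised by the test below, i.e. float32 inputs with at least two dimensions:

import numpy
from chainer import function
from chainer.utils import type_check

class MyLinearLike(function.Function):   # hypothetical Function subclass
    def check_type_forward(self, in_types):
        # exactly one input, float32, and at least 2-D
        type_check.expect(in_types.size() == 1)
        x_type = in_types[0]
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= 2,
        )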
Project: chainer-deconv    Author: germanRos
def test_forward_invalid(self):
        f = F.Linear(5, 5)

        # OK
        v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
        result = f(v)
        assert isinstance(result, chainer.Variable)

        # Incorrect dtype
        # in py3, numpy dtypes are represented as class
        msg = """\
Invalid operation is performed in: LinearFunction \\(Forward\\)

Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""

        v = chainer.Variable(numpy.random.randn(1, 5))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f(v)

        # Incorrect dim
        msg = """\
Invalid operation is performed in: LinearFunction \\(Forward\\)

Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""

        v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f(v)
Project: convolutional_seq2seq    Author: soskek
def backward_cpu(self, x, gy):
        return utils.force_array(gy[0] * self.coefficient),
Project: chainermn    Author: chainer
def assert_not_allclose(self, x, y, atol=1e-5, rtol=1e-4, verbose=True):
        x = chainer.cuda.to_cpu(chainer.utils.force_array(x))
        y = chainer.cuda.to_cpu(chainer.utils.force_array(y))

        with self.assertRaises(AssertionError):
            numpy.testing.assert_allclose(
                x, y, atol=atol, rtol=rtol, verbose=verbose)
Project: chainermn    Author: chainer
def __init__(self, size, comm, decay=0.9, eps=2e-5, dtype=numpy.float32,
                 use_gamma=True, use_beta=True,
                 initial_gamma=None, initial_beta=None):
        chainer.utils.experimental(
            'chainermn.links.MultiNodeBatchNormalization')

        if chainer.__version__.startswith('1.'):
            raise RuntimeError(
                'MultiNodeBatchNormalization works only with '
                'chainer >= 2.0.0.')

        super(MultiNodeBatchNormalization, self).__init__()
        self.comm = comm
        self.avg_mean = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_mean')
        self.avg_var = numpy.zeros(size, dtype=dtype)
        self.register_persistent('avg_var')
        self.N = 0
        self.register_persistent('N')
        self.decay = decay
        self.eps = eps

        with self.init_scope():
            if use_gamma:
                if initial_gamma is None:
                    initial_gamma = 1
                initial_gamma = initializers._get_initializer(initial_gamma)
                initial_gamma.dtype = dtype
                self.gamma = variable.Parameter(initial_gamma, size)
            if use_beta:
                if initial_beta is None:
                    initial_beta = 0
                initial_beta = initializers._get_initializer(initial_beta)
                initial_beta.dtype = dtype
                self.beta = variable.Parameter(initial_beta, size)
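A hedged usage sketch of this link inside a model: MultiNodeBatchNormalization takes the ChainerMN communicator so that batch statistics are aggregated across all workers. The layer sizes, model name, and communicator choice here are assumptions:

import chainer
import chainer.functions as F
import chainer.links as L
import chainermn

class SmallCNN(chainer.Chain):   # hypothetical model
    def __init__(self, comm):
        super(SmallCNN, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(None, 16, ksize=3, pad=1)
            # batch statistics are synchronized across all MPI processes
            self.bn = chainermn.links.MultiNodeBatchNormalization(16, comm)

    def __call__(self, x):
        return F.relu(self.bn(self.conv(x)))

comm = chainermn.create_communicator('naive')   # assumed communicator type
model = SmallCNN(comm)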
Project: chainermn    Author: chainer
def __init__(self, comm, peer_rank, peer_tag):
        chainer.utils.experimental('chainermn.functions.Send')
        self.comm = comm
        self.peer_rank = peer_rank
        self.peer_tag = peer_tag
Project: chainermn    Author: chainer
def __init__(self, comm, peer_rank, peer_tag, device=-1):
        chainer.utils.experimental('chainermn.functions.Recv')
        self.comm = comm
        self.peer_rank = peer_rank
        self.peer_tag = peer_tag
        self.device = device
Project: chainermn    Author: chainer
def recv(
        communicator, rank, delegate_variable=None, tag=0, device=-1,
        force_tuple=False):
    """Receive elements from target process.

    This function returns data received from target process. If ``backward()``
    is invoked, it will try to send gradients to the target process.

    .. note::
        If you define non-connected computational graph on one process,
        you have to use ``delegate_variable`` to specify the output of
        previous computational graph component.
        Otherwise ``backward()`` does not work well.
        Please refer ``chainermn.functions.pseudo_connect`` for detail.

    Args:
        communicator (chainer.communicators.CommunicatorBase):
            ChainerMN communicator.
        rank (int): Target process specifier.
        delegate_variable (chainer.Variable):
            Pointer to the other non-connected component.
        tag (int): Optional message ID (MPI feature).
        device (int): Target device specifier.
        force_tuple (bool): If ``False`` (the default) a Variable will be
            returned when the number of outputs is one. Otherwise, this
            method returns a tuple even when the number of outputs is one.

    Returns:
        ~chainer.Variable:
            Data received from the target process. If ``backward()`` is
            invoked on this variable, it sends gradients back to the target
            process.

    """
    chainer.utils.experimental('chainermn.functions.recv')

    if rank == communicator.rank:
        raise ValueError(
            'rank must be different from communicator rank, '
            'otherwise deadlock occurs')

    if delegate_variable is None:
        res = Recv(
            communicator,
            peer_rank=rank,
            peer_tag=tag,
            device=device)()
    else:
        delegate_variable.name = 'delegate_variable'
        res = Recv(
            communicator,
            peer_rank=rank,
            peer_tag=tag,
            device=device)(delegate_variable)

    if force_tuple and not isinstance(res, tuple):
        return tuple([res])
    else:
        return res
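Finally, a hedged end-to-end sketch of point-to-point model parallelism built on recv() and its counterpart chainermn.functions.send(); the two-process split, layer sizes, and class names are assumptions for illustration:

import chainer
import chainer.functions as F
import chainer.links as L
import chainermn
import chainermn.functions

comm = chainermn.create_communicator('naive')   # assumed communicator type

class FirstHalf(chainer.Chain):    # hypothetical model half, runs on rank 0
    def __init__(self):
        super(FirstHalf, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(784, 100)

    def __call__(self, x):
        h = F.relu(self.l1(x))
        # send() returns a delegate variable whose backward() waits for
        # the gradients coming back from rank 1
        return chainermn.functions.send(h, comm, rank=1)

class SecondHalf(chainer.Chain):   # hypothetical model half, runs on rank 1
    def __init__(self):
        super(SecondHalf, self).__init__()
        with self.init_scope():
            self.l2 = L.Linear(100, 10)

    def __call__(self):
        # receive the activations produced by rank 0
        h = chainermn.functions.recv(comm, rank=0)
        return self.l2(h)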