Python chainer module: is_debug() usage examples

The following 21 code examples, extracted from open-source Python projects, show how chainer.is_debug() is used.
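
For reference, debug mode is toggled globally with chainer.set_debug(); a minimal sketch:

import chainer

# Turn on Chainer's debug mode; code guarded by chainer.is_debug()
# (as in every snippet below) then runs extra input validation.
chainer.set_debug(True)
assert chainer.is_debug()

# ... build and run the model: bad labels now raise ValueError early ...

chainer.set_debug(False)  # restore the default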

Project: chainer-deconv | Author: germanRos
def forward(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            if not ((0 <= t).all() and
                    (t < x.shape[1]).all()):
                msg = 'Each label `t` needs to satisfy `0 <= t < x.shape[1]`'
                raise ValueError(msg)

        xp = cuda.get_array_module(x)
        if xp is numpy:
            # This code is equivalent to `t.choose(x.T)`, but `numpy.choose`
            # does not work when `x.shape[1] > 32`.
            return x[six.moves.range(t.size), t],
        else:
            y = cuda.elementwise(
                'S t, raw T x',
                'T y',
                'int ind[] = {i, t}; y = x[ind];',
                'getitem_fwd'
            )(t, x)
            return y,
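
A minimal usage sketch, assuming this forward comes from chainer.functions.select_item (which the range check and per-row gather match):

import numpy
import chainer
import chainer.functions as F

chainer.set_debug(True)
x = chainer.Variable(numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
t = numpy.array([0, 2], dtype=numpy.int32)
print(F.select_item(x, t).data)                  # [0. 5.]

bad_t = numpy.array([0, 3], dtype=numpy.int32)   # 3 >= x.shape[1]
try:
    F.select_item(x, bad_t)
except ValueError as err:
    print(err)                                   # the debug-mode check above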
Project: chainer-deconv | Author: germanRos
def forward(self, inputs):
        x, W = inputs

        if chainer.is_debug():
            if not ((0 <= x).all() and
                    (x < len(W)).all()):
            msg = 'Each `x` value needs to satisfy `0 <= x < len(W)`'
                raise ValueError(msg)

        if self.ignore_label is not None:
            xp = cuda.get_array_module(*inputs)
            mask = (x == self.ignore_label)
            return xp.where(
                mask[..., None], 0, W.take(xp.where(mask, 0, x), axis=0)),

        return W.take(x, axis=0),
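
This appears to be the forward pass behind chainer.links.EmbedID's lookup; assuming so, a short sketch of how the debug check fires:

import numpy
import chainer
import chainer.links as L

chainer.set_debug(True)
embed = L.EmbedID(5, 3)                        # vocabulary of 5 ids
ids = numpy.array([0, 4], dtype=numpy.int32)
print(embed(ids).shape)                        # (2, 3): 0 <= x < len(W)

bad = numpy.array([7], dtype=numpy.int32)      # 7 >= len(W)
try:
    embed(bad)
except ValueError as err:
    print(err)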
Project: chainer-deconv | Author: germanRos
def forward_cpu(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, False)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)

        log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
        # deal with the case where the SoftmaxCrossEntropy is
        # unpickled from the old version
        if getattr(self, 'normalize', True):
            count = (t != self.ignore_label).sum()
        else:
            count = len(x)
        self._coeff = 1.0 / max(count, 1)

        y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
            * (-self._coeff)
        return y.reshape(()),
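
Assuming this is SoftmaxCrossEntropy.forward_cpu from an older Chainer release, the user-facing entry point is chainer.functions.softmax_cross_entropy; a minimal sketch:

import numpy
import chainer
import chainer.functions as F

chainer.set_debug(True)
x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
t = numpy.array([1, 0], dtype=numpy.int32)
loss = F.softmax_cross_entropy(x, t)   # mean loss, scalar Variable
print(loss.shape)                      # ()

t_bad = numpy.array([1, 5], dtype=numpy.int32)   # 5 >= x.shape[1]
try:
    F.softmax_cross_entropy(x, t_bad)
except ValueError as err:
    print(err)                         # raised by _check_input_values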
Project: chainer-deconv | Author: germanRos
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
Project: nmtrain | Author: philip30
def forward_cpu(self, inputs):
    x, t = inputs
    if chainer.is_debug():
      self._check_input_values(x, t)

    # `x` is assumed to already hold probabilities (softmax applied
    # upstream), so the log is taken directly rather than via softmax_log.
    log_y = numpy.log(x)
    if self.cache_score:
      self.y = x
    log_yd = numpy.rollaxis(log_y, 1)
    log_yd = log_yd.reshape(len(log_yd), -1)
    log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
    if getattr(self, 'normalize', True):
      count = (t != self.ignore_label).sum()
    else:
      count = len(x)
    self._coeff = 1.0 / max(count, 1)
    y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) * (-self._coeff)
    return y.reshape(()),
Project: nmtrain | Author: philip30
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
      self._check_input_values(x, t)

    # As in forward_cpu, `x` is assumed to already hold probabilities.
    log_y = cupy.log(x)
    if self.cache_score:
      self.y = x
    if getattr(self, 'normalize', True):
      coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
      coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
      'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
      't == -1 ? T(0) : log_y[_j * n_channel + t]',
      'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
Project: chainer-cf-nade | Author: dsanno
def forward_cpu(self, inputs):
        x, t, w = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, False)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)

        log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
        # deal with the case where the SoftmaxCrossEntropy is
        # unpickled from the old version
        if getattr(self, 'normalize', True):
            count = (t != self.ignore_label).sum()
        else:
            count = len(x)
        self._coeff = 1.0 / max(count, 1)

        y = (log_p * (t.ravel() != self.ignore_label) * w.ravel()).sum(keepdims=True) \
            * (-self._coeff)
        return y.reshape(()),
Project: chainer-cf-nade | Author: dsanno
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t, w = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, T w, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t] * w',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, w, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
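
The chainer-cf-nade variant multiplies each sample's log-probability by a weight w. A plain-numpy reference of the same quantity (a sketch with a hypothetical helper name; ignore_label handling omitted, normalized by the number of labels):

import numpy

def weighted_softmax_cross_entropy(x, t, w):
    # mean of -w_i * log softmax(x)_{i, t_i}
    log_y = x - x.max(axis=1, keepdims=True)
    log_y -= numpy.log(numpy.exp(log_y).sum(axis=1, keepdims=True))
    log_p = log_y[numpy.arange(t.size), t]
    return -(w * log_p).sum() / max(t.size, 1)

x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
t = numpy.array([0, 1, 2, 1], dtype=numpy.int32)
w = numpy.array([1.0, 0.5, 0.5, 1.0], dtype=numpy.float32)
print(weighted_softmax_cross_entropy(x, t, w))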
Project: chainer-segnet | Author: pfnet-research
def forward_cpu(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x, self.use_cudnn)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        if self.class_weight is not None:
            if self.class_weight.shape != x.shape:
                shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
                self.class_weight = numpy.broadcast_to(
                    self.class_weight.reshape(shape), x.shape)
            log_y *= self.class_weight
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)
        log_p = log_yd[numpy.maximum(t.ravel(), 0), numpy.arange(t.size)]

        # deal with the case where the SoftmaxCrossEntropy is
        # unpickled from the old version
        if self.normalize:
            count = (t != self.ignore_label).sum()
        else:
            count = len(x)
        self._coeff = 1.0 / max(count, 1)

        y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
            * (-self._coeff)
        return y.reshape(()),
Project: chainer-segnet | Author: pfnet-research
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if self.class_weight is not None:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= cupy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        if self.normalize:
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
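
The chainer-segnet snippets add per-class weighting; in Chainer's public API this corresponds to the class_weight argument of chainer.functions.softmax_cross_entropy (assuming a Chainer version that supports it):

import numpy
import chainer.functions as F

x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
t = numpy.array([0, 2], dtype=numpy.int32)
cw = numpy.array([1.0, 1.0, 4.0], dtype=numpy.float32)  # up-weight class 2

plain = F.softmax_cross_entropy(x, t)
weighted = F.softmax_cross_entropy(x, t, class_weight=cw)
print(plain.data, weighted.data)  # class 2 errors count more in the second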
Project: chainer-segnet | Author: pfnet-research
def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
        # `0` is required to avoid NaN
        self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
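
This setUp saves the incoming debug flag before forcing it on; the matching tearDown is not part of the extracted code, but presumably restores it along these lines (a sketch):

def tearDown(self):
    # Undo the global set_debug(True) from setUp so later tests see
    # the flag the suite started with.
    chainer.set_debug(self.original_debug)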
Project: chainer-speech-recognition | Author: musyoku
def forward(self, inputs):
        xp = cuda.get_array_module(inputs[0])
        self.input_length = inputs[0]
        length_unigram = inputs[1]
        label_unigram = inputs[2]
        label_bigram = inputs[3]
        length_bigram = length_unigram
        xs = inputs[4:]

        if chainer.is_debug():
            # batch size check.
            assert len(xs[0]) == len(label_unigram)
            assert len(xs[0]) == len(self.input_length)
            assert len(xs[0]) == len(length_unigram)

            # length check.
            assert len(xs) >= xp.max(self.input_length)
            assert len(label_unigram[0]) >= xp.max(length_unigram)

            # unit check
            assert xs[0].shape[1] > xp.max(label_unigram)
            assert xs[0].shape[1] > xp.max(label_bigram)
            assert xs[0].shape[1] > self.blank_symbol

        self.path_length = length_unigram * 3 + 1

        yseq_shape = (len(xs),) + xs[0].shape
        self.yseq = _softmax(xp.vstack(xs).reshape(yseq_shape), xp)
        log_yseq = _log_matrix(self.yseq, xp, self.zero_padding)
        self.path = _label_to_path(label_unigram, label_bigram, self.blank_symbol, xp)
        self.prob_trans = _compute_transition_probability(
            log_yseq, self.input_length, label_unigram, length_unigram,
            label_bigram, length_bigram, self.path, self.path_length,
            xp, self.zero_padding)

        loss = -_logsumexp(self.prob_trans[0], xp, axis=1)
        if self.reduce == 'mean':
            loss = utils.force_array(xp.mean(loss))
        return loss,
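
The batch, length, and unit assertions above run only under chainer.is_debug(), so this CTC forward pays no validation cost in normal runs. A hypothetical helper that turns the checks on for a single call and then restores the previous flag:

import chainer

def call_with_debug_checks(fn, *args):
    # Enable debug-mode validation around one forward call,
    # restoring the prior global flag afterwards.
    prev = chainer.is_debug()
    chainer.set_debug(True)
    try:
        return fn(*args)
    finally:
        chainer.set_debug(prev)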
Project: chainer-qrnn | Author: musyoku
def forward_cpu(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = np.exp(log_y)
        if self.class_weight is not None:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= _broadcast_to(self.class_weight.reshape(shape), x.shape)
        log_yd = np.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)
        log_p = log_yd[np.maximum(t.ravel(), 0), np.arange(t.size)]

        log_p *= (t.ravel() != self.ignore_label)
        if self.reduce == 'mean':
            # deal with the case where the SoftmaxCrossEntropy is
            # unpickled from the old version
            if self.normalize:
                count = (t != self.ignore_label).sum()
            else:
                count = len(x)
            self._coeff = 1.0 / max(count, 1)

            y = log_p.sum(keepdims=True) * (-self._coeff)
            return y.reshape(()),
        else:
            return -log_p.reshape(t.shape),
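
This version adds the reduce option: 'mean' returns the averaged scalar, anything else returns one loss per label. Assuming it mirrors the reduce argument of chainer.functions.softmax_cross_entropy:

import numpy
import chainer.functions as F

x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
t = numpy.array([0, 1, 2, 1], dtype=numpy.int32)

print(F.softmax_cross_entropy(x, t).shape)               # () scalar mean
print(F.softmax_cross_entropy(x, t, reduce='no').shape)  # (4,) per label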
Project: chainer-deconv | Author: germanRos
def setUp(self):
        self.link = links.EmbedID(2, 2)
        self.t = numpy.array([self.t_value], dtype=numpy.int32)
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
Project: chainer-deconv | Author: germanRos
def setUp(self):
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
        self.one = numpy.array([1], numpy.float32)
        self.f = chainer.Function()
Project: chainer-deconv | Author: germanRos
def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
        # `0` is required to avoid NaN
        self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
Project: chainer-deconv | Author: germanRos
def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (1, 2)).astype(numpy.float32)
        self.t = numpy.array([self.t_value], dtype=numpy.int32)
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
Project: nmtrain | Author: philip30
def setUp(self):
    self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
    # `0` is required to avoid NaN
    self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
Project: chainer-glu | Author: musyoku
def forward_cpu(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = np.exp(log_y)
        if self.class_weight is not None:
            shape = [1 if d != 1 else -1 for d in xrange(x.ndim)]
            log_y *= _broadcast_to(self.class_weight.reshape(shape), x.shape)
        log_yd = np.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)
        log_p = log_yd[np.maximum(t.ravel(), 0), np.arange(t.size)]

        log_p *= (t.ravel() != self.ignore_label)
        if self.reduce == 'mean':
            # deal with the case where the SoftmaxCrossEntropy is
            # unpickled from the old version
            if self.normalize:
                count = (t != self.ignore_label).sum()
            else:
                count = len(x)
            self._coeff = 1.0 / max(count, 1)

            y = log_p.sum(keepdims=True) * (-self._coeff)
            return y.reshape(()),
        else:
            return -log_p.reshape(t.shape),
Project: chainer-qrnn | Author: musyoku
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if self.class_weight is not None:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= cupy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        if self.normalize:
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        if self.reduce == 'mean':
            ret = cuda.reduce(
                'S t, raw T log_y, int32 n_channel, raw T coeff, '
                'S ignore_label',
                'T out',
                't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
                'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1],
              self._coeff, self.ignore_label)
        else:
            ret = cuda.elementwise(
                'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
                '''
                if (t == ignore) {
                  out = 0;
                } else {
                  out = -log_y[i * n_channel + t];
                }
                ''',
                'softmax_crossent_no_reduce_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
            ret = ret.reshape(t.shape)
        return ret,
Project: chainer-glu | Author: musyoku
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if self.class_weight is not None:
            shape = [1 if d != 1 else -1 for d in xrange(x.ndim)]
            log_y *= cupy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        if self.normalize:
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        if self.reduce == 'mean':
            ret = cuda.reduce(
                'S t, raw T log_y, int32 n_channel, raw T coeff, '
                'S ignore_label',
                'T out',
                't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
                'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1],
              self._coeff, self.ignore_label)
        else:
            ret = cuda.elementwise(
                'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
                '''
                if (t == ignore) {
                  out = 0;
                } else {
                  out = -log_y[i * n_channel + t];
                }
                ''',
                'softmax_crossent_no_reduce_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
            ret = ret.reshape(t.shape)
        return ret,