Python chainer.cuda 模块,to_cpu() 实例源码

我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 chainer.cuda.to_cpu()。

项目:chainer_pong    作者:icoxfog417    | 项目源码 | 文件源码
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
        """Compute the clipped-TD DQN loss for one replay batch.

        Args:
            states: batch of current states fed to the online Q network.
            actions: per-sample index of the action that was taken.
            rewards: raw rewards; only their sign is used (reward clipping).
            next_states: successor states fed to the frozen target network.
            episode_ends: per-sample terminal flags, read as episode_ends[i][0].

        Returns:
            Scalar chainer Variable: mean squared error of the clipped TD term.
        """
        qv = self.agent.q(states)
        q_t = self.target(next_states)  # Q(s', *)
        max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)

        # Start from the online Q values and overwrite only the taken action's
        # entry, so the TD error is zero for all other actions.
        target = cuda.to_cpu(qv.data.copy())
        for i in range(self.replay_size):
            if episode_ends[i][0] is True:
                _r = np.sign(rewards[i])
            else:
                _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]

            target[i, actions[i]] = _r

        td = Variable(self.target.arr_to_gpu(target)) - qv
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        # Huber-style clipping: keep td where |td| <= 1, else replace by sign(td).
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zeros)
        self._loss = loss.data  # cached for external logging
        self._qv = np.max(qv.data)  # cached max Q value for monitoring
        return loss
项目:ROCStory_skipthought_baseline    作者:soskek    | 项目源码 | 文件源码
def save(model, optimizer, save_name, args):
    """Serialize the model (via a CPU-side deep copy) and optimizer to npz files.

    The model is deep-copied first so the live, possibly GPU-resident,
    instance is left untouched by the device move.
    """
    cpu_model = copy.deepcopy(model).to_cpu()
    serializers.save_npz(save_name + "model", cpu_model)
    serializers.save_npz(save_name + "optimizer", optimizer)
    print('save', save_name)
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def act(self, state):
        """Select an action for *state* and update running Q statistics.

        Uses the deterministic (most probable) action when
        self.act_deterministically is set, otherwise samples from the policy.
        Returns the chosen action as a CPU array.
        """
        with chainer.using_config('train', False):
            s = self.batch_states([state], self.xp, self.phi)
            if self.act_deterministically:
                action = self.policy(s).most_probable
            else:
                action = self.policy(s).sample()
            # Q is not needed here, but log it just for information
            q = self.q_function(s, action)

        # Update stats: exponential moving average of the Q value.
        self.average_q *= self.average_q_decay
        self.average_q += (1 - self.average_q_decay) * float(q.data)

        self.logger.debug('t:%s a:%s q:%s',
                          self.t, action.data[0], q.data)
        return cuda.to_cpu(action.data[0])
项目:chainer-cyclegan    作者:Aixile    | 项目源码 | 文件源码
def getAndUpdateBufferY(self, data):
        """Insert *data* into the history buffer and return an image for D.

        While the buffer is still filling, the new sample is stored and
        returned unchanged. Once full, the buffer is shifted, the new sample
        appended, and with probability 0.5 a random buffered image is
        returned instead of the fresh one (CycleGAN replay-buffer trick).
        """
        if  self._iter < self._max_buffer_size:
            self._buffer_y[self._iter, :] = data[0]
            return data

        # NOTE(review): this shift copies rows [1, max-1) into [0, max-2),
        # which looks like an off-by-one (row max-2 is never overwritten by
        # the shift) — confirm intent before changing.
        self._buffer_y[0:self._max_buffer_size-2, :] = self._buffer_y[1:self._max_buffer_size-1, :]
        self._buffer_y[self._max_buffer_size-1, : ]=data[0]

        if np.random.rand() < 0.5:
            return data
        # 'id' shadows the builtin; kept as-is to avoid a code change here.
        id = np.random.randint(0, self._max_buffer_size)
        return self._buffer_y[id, :].reshape((1, 3, self._image_size, self._image_size))
        # NOTE(review): the string literal below is unreachable dead code
        # (a commented-out helper kept inside a triple-quoted string).
        """
    def save_images(self,img, w=2, h=3):
        img = cuda.to_cpu(img)
        img = img.reshape((w, h, 3, self._image_size, self._image_size))
        img = img.transpose(0,1,3,4,2)
        img = (img + 1) *127.5
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)
        img = img.reshape((w, h, self._image_size, self._image_size, 3)).transpose(0,2,1,3,4).reshape((w*self._image_size, h*self._image_size, 3))[:,:,::-1]
        Image.fromarray(img).save(self._eval_foler+"/iter_"+str(self._iter)+".jpg")
        """
项目:chainer-cyclegan    作者:Aixile    | 项目源码 | 文件源码
def copy_to_cpu(imgs):
    """Return *imgs* as a host-memory array.

    Unwraps a chainer Variable to its raw array and, when CuPy is available
    and the array lives on the GPU, copies it to CPU memory. CPU arrays are
    returned unchanged.
    """
    # isinstance instead of type(x) == T: also accepts Variable subclasses.
    if isinstance(imgs, chainer.variable.Variable):
        imgs = imgs.data
    try:
        # Public cupy.ndarray alias instead of the internal cupy.core.core path.
        if isinstance(imgs, cupy.ndarray):
            imgs = cuda.to_cpu(imgs)
    except (NameError, AttributeError):
        # cupy is unavailable on CPU-only installs; data is already on host.
        # (Narrowed from a bare `except:` that swallowed everything.)
        pass
    return imgs
项目:gconv_experiments    作者:tscohen    | 项目源码 | 文件源码
def validate(test_data, test_labels, model, batchsize, silent, gpu):
    """Evaluate *model* on a test set in mini-batches.

    Args:
        test_data, test_labels: full test arrays, sliced per batch.
        model: callable returning (loss, accuracy) with train=False.
        batchsize: mini-batch size.
        silent: when False, a ProgressBar is updated per batch.
        gpu: device id; >= 0 moves batches to the GPU.

    Returns:
        (sum_loss, sum_accuracy): totals weighted by batch size; divide by
        N_test for means.
    """
    N_test = test_data.shape[0]
    pbar = ProgressBar(0, N_test)
    sum_accuracy = 0
    sum_loss = 0

    for i in range(0, N_test, batchsize):
        x_batch = test_data[i:i + batchsize]
        y_batch = test_labels[i:i + batchsize]

        if gpu >= 0:
            x_batch = cuda.to_gpu(x_batch.astype(np.float32))
            y_batch = cuda.to_gpu(y_batch.astype(np.int32))

        x = Variable(x_batch)
        t = Variable(y_batch)
        loss, acc = model(x, t, train=False)

        # Weight by y_batch.size so a short final batch counts correctly.
        sum_loss += float(cuda.to_cpu(loss.data)) * y_batch.size
        sum_accuracy += float(cuda.to_cpu(acc.data)) * y_batch.size
        if not silent:
            pbar.update(i + y_batch.size)

    return sum_loss, sum_accuracy
项目:GrouPy    作者:tscohen    | 项目源码 | 文件源码
def test_index_group_func():
    """Check the GPU indexing kernel against NumPy fancy indexing.

    Builds a random 5-D input and random index arrays, computes the expected
    result with NumPy advanced indexing, runs index_group_func_kernel on the
    GPU, and asserts the summed absolute error is ~0.
    """
    import numpy as np
    import cupy as cp
    from chainer import cuda
    input = np.random.randn(2, 3, 4, 5, 6)
    I = np.random.randint(0, 4, (7, 8, 9, 10))
    J = np.random.randint(0, 5, (7, 8, 9, 10))
    K = np.random.randint(0, 6, (7, 8, 9, 10))

    # Reference result on CPU: index the last three axes, then swap axes 1/2.
    output = input[..., I, J, K].swapaxes(1, 2)

    cpoutput = cp.zeros(output.shape)
    cpinput = cuda.to_gpu(input)
    cpI = cuda.to_gpu(I)
    cpJ = cuda.to_gpu(J)
    cpK = cuda.to_gpu(K)

    index_group_func_kernel(cpinput, cpI, cpJ, cpK, cpoutput)

    cpoutput = cuda.to_cpu(cpoutput)

    error = np.abs(cpoutput - output).sum()
    print(error)
    assert np.isclose(error, 0.)
项目:GrouPy    作者:tscohen    | 项目源码 | 文件源码
def check_transform_grad(inds, w, transformer, dtype, toll):
    """Compare analytic and numerical gradients of a filter transformer.

    Runs backward() through the transform, recomputes the gradient with
    chainer's numerical_grad, and asserts their max relative error is
    below *toll*.
    """
    from chainer import gradient_check

    inds = cuda.to_gpu(inds)

    W = Variable(w.astype(dtype))
    R = transformer(inds)

    RW = R(W)

    # Random upstream gradient; retain_grad so W.grad is populated.
    RW.grad = cp.random.randn(*RW.data.shape).astype(dtype)
    RW.backward(retain_grad=True)

    func = RW.creator
    fn = lambda: func.forward((W.data,))
    gW, = gradient_check.numerical_grad(fn, (W.data,), (RW.grad,))

    gan = cuda.to_cpu(gW)       # numerical gradient
    gat = cuda.to_cpu(W.grad)   # analytic gradient

    relerr = np.max(np.abs(gan - gat) / np.maximum(np.abs(gan), np.abs(gat)))

    print (dtype, toll, relerr)
    assert relerr < toll
项目:GrouPy    作者:tscohen    | 项目源码 | 文件源码
def check_equivariance(im, layers, input_array, output_array, point_group):
    """Assert that *layers* are equivariant under the given point group.

    Applies the layer stack to an image and to a group-transformed copy,
    then checks that transforming the second output back with g^-1 matches
    the first output: layers(g * f) == g * layers(f).
    """
    # Transform the image
    f = input_array(im)
    g = point_group.rand()
    gf = g * f
    im1 = gf.v

    # Apply layers to both images
    im = Variable(cuda.to_gpu(im))
    im1 = Variable(cuda.to_gpu(im1))

    fmap = im
    fmap1 = im1
    for layer in layers:
        layer.to_gpu()
        fmap = layer(fmap)
        fmap1 = layer(fmap1)

    # Transform the computed feature maps back with the inverse group element.
    fmap1_garray = output_array(cuda.to_cpu(fmap1.data))
    r_fmap1_data = (g.inv() * fmap1_garray).v

    fmap_data = cuda.to_cpu(fmap.data)
    assert np.allclose(fmap_data, r_fmap1_data, rtol=1e-5, atol=1e-3)
项目:depccg    作者:masashi-y    | 项目源码 | 文件源码
def concat_examples(batch, device=None):
    """Concatenate parser training examples and move them to a device.

    Args:
        batch: list of example tuples (ws, ps, ss, ls[, cat_ts, dep_ts, weights]).
        device: None keeps arrays as-is, < 0 moves to CPU, otherwise to the
            given GPU device.

    Returns:
        Tuple of concatenated, device-placed arrays (ls stays a plain list).

    Raises:
        ValueError: if *batch* is empty.
    """
    if len(batch) == 0:
        raise ValueError('batch is empty')

    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    result = [to_device(_concat_arrays([s[0] for s in batch], -1)), # ws
              to_device(_concat_arrays([s[1] for s in batch], -1)), # ps
              to_device(_concat_arrays([s[2] for s in batch], -1)), # ss
              [s[3] for s in batch]]                                # ls

    # Supervised examples carry three extra fields.
    if len(batch[0]) == 7:
        result.append([to_device(s[4]) for s in batch])            # cat_ts
        result.append([to_device(s[5]) for s in batch])            # dep_ts
        result.append(to_device(_concat_arrays([s[6] for s in batch], None))) # weights

    return tuple(result)
项目:chainer-dfi    作者:dsanno    | 项目源码 | 文件源码
def mean_feature(net, paths, image_size, base_feature, top_num, batch_size, clip_rect=None):
    """Compute per-layer mean features over (the top_num nearest) images.

    Images at *paths* are preprocessed and run through *net* in batches.
    When there are more images than *top_num*, only the top_num whose last
    layer feature is closest (L2) to *base_feature* are averaged.

    Returns:
        List of device arrays, one mean feature per network layer.
    """
    xp = net.xp
    image_num = len(paths)
    features = []
    for i in six.moves.range(0, image_num, batch_size):
        x = [preprocess_image(Image.open(path).convert('RGB'), image_size, clip_rect) for path in paths[i:i + batch_size]]
        x = xp.asarray(np.concatenate(x, axis=0))
        y = feature(net, x)
        features.append([cuda.to_cpu(layer.data) for layer in y])
    if image_num > top_num:
        # Rank images by squared L2 distance of the last layer to base_feature.
        last_features = np.concatenate([f[-1] for f in features], axis=0)
        last_features = last_features.reshape((last_features.shape[0], -1))
        base_feature = cuda.to_cpu(base_feature).reshape((1, -1,))
        diff = np.sum((last_features - base_feature) ** 2, axis=1)

        nearest_indices = np.argsort(diff)[:top_num]
        nearests = [np.concatenate(xs, axis=0)[nearest_indices] for xs in zip(*features)]
    else:
        nearests = [np.concatenate(xs, axis=0) for xs in zip(*features)]

    return [xp.asarray(np.mean(f, axis=0, keepdims=True)) for f in nearests]
项目:self-driving-cars    作者:musyoku    | 项目源码 | 文件源码
def eps_greedy(self, state_batch, exploration_rate):
        """Epsilon-greedy action selection for a batch of states.

        With probability *exploration_rate* random actions are drawn
        (q is returned as None); otherwise actions maximize the Q network
        output. Returns (action_batch, q).

        NOTE: uses xrange, so this is Python 2 code.
        """
        if state_batch.ndim == 1:
            state_batch = state_batch.reshape(1, -1)
        elif state_batch.ndim == 3:
            # Flatten stacked history frames into one feature vector per state.
            state_batch = state_batch.reshape(-1, 34 * config.rl_history_length)
        prop = np.random.uniform()
        if prop < exploration_rate:
            action_batch = np.random.randint(0, len(config.actions), (state_batch.shape[0],))
            q = None
        else:
            state_batch = Variable(state_batch)
            if config.use_gpu:
                state_batch.to_gpu()
            q = self.compute_q_variable(state_batch, test=True)
            if config.use_gpu:
                q.to_cpu()
            q = q.data
            action_batch = np.argmax(q, axis=1)
        # Map raw argmax indices to the environment's action ids.
        for i in xrange(action_batch.shape[0]):
            action_batch[i] = self.get_action_for_index(action_batch[i])
        return action_batch, q
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_backward(self, x_data, t_data, y_grad):
        """Compare analytic and numerical gradients of negative sampling.

        Fixes the sampled negatives on the function class so the numerical
        re-evaluations see the same samples as the analytic backward pass.
        """
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        W = self.link.W

        y = self.link(x, t)
        y.grad = y_grad
        y.backward()

        # fix samples: pin the sampled ids so f() below is deterministic
        negative_sampling.NegativeSamplingFunction.samples = y.creator.samples

        def f():
            return self.link(x, t).data,
        gx, gW = gradient_check.numerical_grad(
            f, (x.data, W.data), (y.grad,), eps=1e-2)
        del negative_sampling.NegativeSamplingFunction.samples  # clean up

        gradient_check.assert_allclose(
            cuda.to_cpu(gx), cuda.to_cpu(x.grad), atol=1.e-4)
        gradient_check.assert_allclose(
            cuda.to_cpu(gW), cuda.to_cpu(W.grad), atol=1.e-4)
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_forward(self, x0_data, x1_data, t_data):
        """Check contrastive loss forward against a naive per-pair loop.

        Verifies the output is a float32 scalar, then recomputes the
        expected contrastive loss in pure NumPy/Python and compares.
        """
        x0_val = chainer.Variable(x0_data)
        x1_val = chainer.Variable(x1_data)
        t_val = chainer.Variable(t_data)
        loss = functions.contrastive(x0_val, x1_val, t_val, self.margin)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        loss_expect = 0
        for i in six.moves.range(self.x0.shape[0]):
            x0d, x1d, td = self.x0[i], self.x1[i], self.t[i]
            d = numpy.sum((x0d - x1d) ** 2)
            if td == 1:  # similar pair: penalize squared distance
                loss_expect += d
            elif td == 0:  # dissimilar pair: penalize margin violation
                loss_expect += max(self.margin - math.sqrt(d), 0) ** 2
        loss_expect /= 2.0 * self.t.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_forward(self, x_data, use_cudnn=True):
        """Check max_pooling_2d (ksize=3, stride=2, pad=1) against a naive loop.

        The expected windows below are hand-derived for the fixed 4x3 input
        used by this test case; cover_all adds one extra output row.
        """
        x = chainer.Variable(x_data)
        y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
                                     cover_all=self.cover_all,
                                     use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                if self.cover_all:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()],
                        [x[3:4, 0:2].max(), x[3:4, 1:3].max()]])
                else:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()]])
                gradient_check.assert_allclose(expect, y_data[k, c])
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_backward(self, x_data, roi_data, y_grad):
        """Compare analytic and numerical gradients of roi_pooling_2d.

        Re-runs the creator's forward directly in f() so numerical_grad
        perturbs the same computation that produced the analytic gradient.
        """
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        y = functions.roi_pooling_2d(x, rois, outh=self.outh, outw=self.outw,
                                     spatial_scale=self.spatial_scale)
        y.grad = y_grad
        y.backward()

        xs = (x.data, rois.data)

        def f():
            func = y.creator
            return func.forward(xs)

        # Only gx is checked; the gradient w.r.t. rois is discarded.
        gx, _ = gradient_check.numerical_grad(f, xs, (y.grad,))
        gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_forward(self, x_data, use_cudnn=True):
        """Check average_pooling_2d (ksize=3, stride=2, pad=1) against a naive sum.

        The expectation divides each window sum by 9 (the full kernel area),
        i.e. padded zeros are included in the average.
        """
        x = chainer.Variable(x_data)
        y = functions.average_pooling_2d(x, 3, stride=2,
                                         pad=1, use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                expect = numpy.array([
                    [x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
                    [x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]]) / 9
                gradient_check.assert_allclose(
                    expect, y_data[k, c], **self.check_forward_options)
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_forward(self, x_data):
        """Check local_response_normalization against a naive implementation.

        Recomputes y[n,c,h,w] = x / (2 + 1e-4 * sum of squares over a
        5-channel window) ** 0.75 element by element and compares.
        """
        x = chainer.Variable(x_data)
        y = functions.local_response_normalization(x)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        # Naive implementation
        y_expect = numpy.zeros_like(self.x)
        for n, c, h, w in numpy.ndindex(self.x.shape):
            s = 0
            # Channel window clamped to [0, 7) — input has 7 channels here.
            for i in six.moves.range(max(0, c - 2), min(7, c + 2)):
                s += self.x[n, i, h, w] ** 2
            denom = (2 + 1e-4 * s) ** .75
            y_expect[n, c, h, w] = self.x[n, c, h, w] / denom

        # NOTE(review): 'check_forward_optionss' (double s) — presumably the
        # attribute really is spelled this way on the test class; verify.
        gradient_check.assert_allclose(
            y_expect, y_data, **self.check_forward_optionss)
项目:chainer-deconv    作者:germanRos    | 项目源码 | 文件源码
def check_forward(self, x_data, t_data):
        """Check binary_accuracy against a manual count.

        Labels equal to -1 are ignored; a prediction is positive when the
        raw score is >= 0. Compares the function's scalar output with the
        hand-computed correct/count ratio.
        """
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.binary_accuracy(x, t)
        self.assertEqual(y.data.dtype, self.dtype)
        self.assertEqual((), y.data.shape)

        count = 0
        correct = 0
        x_flatten = self.x.ravel()
        t_flatten = self.t.ravel()
        for i in six.moves.range(t_flatten.size):
            if t_flatten[i] == -1:
                # ignored label: excluded from both numerator and denominator
                continue
            pred = int(x_flatten[i] >= 0)
            if pred == t_flatten[i]:
                correct += 1
            count += 1
        expected = float(correct) / count
        gradient_check.assert_allclose(
            expected, cuda.to_cpu(y.data), **self.check_forward_options)
项目:deep_metric_learning    作者:ronekko    | 项目源码 | 文件源码
def iterate_forward(model, epoch_iterator, normalize=False):
    """Run *model* over a whole epoch and collect embeddings and labels.

    Args:
        model: chainer model mapping a batch of inputs to embeddings.
        epoch_iterator: iterator yielding (x_batch, c_batch) pairs; copied
            so the caller's iterator position is untouched.
        normalize: when True, L2-normalize each embedding row.

    Returns:
        (y_data, c_data): CPU NumPy arrays of embeddings and class labels.
    """
    xp = model.xp
    y_batches = []
    c_batches = []
    for batch in tqdm(copy.copy(epoch_iterator)):
        x_batch_data, c_batch_data = batch
        x_batch = Variable(xp.asarray(x_batch_data))
        y_batch = model(x_batch)
        if normalize:
            y_batch_data = y_batch.data / xp.linalg.norm(
                y_batch.data, axis=1, keepdims=True)
        else:
            y_batch_data = y_batch.data
        y_batches.append(y_batch_data)
        y_batch = None  # drop the Variable graph to free memory early
        c_batches.append(c_batch_data)
    y_data = cuda.to_cpu(xp.concatenate(y_batches))
    c_data = np.concatenate(c_batches)
    return y_data, c_data


# memory friendly average accuracy for test data
项目:deep_metric_learning    作者:ronekko    | 项目源码 | 文件源码
def check_extract(self):
        """Check link.extract() output shapes/dtypes for GoogLeNet-style layers.

        Feeds RGB and grayscale uint8 images and verifies the extracted
        'pool5'/'loss3_fc' features, then a raw-size extraction of 'pool1'.
        """
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)

        result = self.link.extract([x1, x2], layers=['pool5', 'loss3_fc'])
        self.assertEqual(len(result), 2)
        y1 = cuda.to_cpu(result['pool5'].data)
        self.assertEqual(y1.shape, (2, 1024, 1, 1))
        self.assertEqual(y1.dtype, numpy.float32)
        y2 = cuda.to_cpu(result['loss3_fc'].data)
        self.assertEqual(y2.shape, (2, 1000))
        self.assertEqual(y2.dtype, numpy.float32)

        # size=None keeps the original 80x60 resolution (pool1 halves it twice).
        x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
        result = self.link.extract([x3], layers=['pool1'], size=None)
        self.assertEqual(len(result), 1)
        y3 = cuda.to_cpu(result['pool1'].data)
        self.assertEqual(y3.shape, (1, 64, 20, 15))
        self.assertEqual(y3.dtype, numpy.float32)
项目:deep_metric_learning    作者:ronekko    | 项目源码 | 文件源码
def check_forward(self, x_data, c_data, gamma, T, y_star, y_pam):
        """Check clustering_loss against the structured-clustering formula.

        Expected loss is F(y_pam) + gamma * (1 - NMI) - F(y_star), where F is
        the negative sum of squared distances of each point to its assigned
        medoid (y_pam: predicted assignment, y_star: oracle assignment).
        """
        num_examples = len(x_data)
        x = chainer.Variable(x_data)
        c = chainer.Variable(c_data)

        loss = clustering_loss(x, c, gamma, T)

        # Facility score f for the predicted (PAM) assignment.
        sq_distances_ij = []
        for i, j in zip(range(num_examples), y_pam):
            sqd_ij = np.sum((x_data[i] - x_data[j]) ** 2)
            sq_distances_ij.append(sqd_ij)
        f = -sum(sq_distances_ij)

        # Facility score f~ for the oracle assignment.
        sq_distances_ij = []
        for i, j in zip(range(num_examples), y_star):
            sqd_ij = np.sum((x_data[i] - x_data[j]) ** 2)
            sq_distances_ij.append(sqd_ij)
        f_tilde = -sum(sq_distances_ij)

        # Structured margin: 1 - normalized mutual information.
        delta = 1.0 - normalized_mutual_info_score(cuda.to_cpu(c_data), y_pam)
        loss_expected = f + gamma * delta - f_tilde

        testing.assert_allclose(loss.data, loss_expected)
项目:DeepPoseComparison    作者:ynaka81    | 项目源码 | 文件源码
def check_forward(self, x_data, t_data, v_data, use_visibility):
        """Check the visibility-masked mean_squared_error forward pass.

        When use_visibility is set, per-joint differences are masked by the
        visibility flags and normalized by the number of visible joints;
        otherwise all elements count.
        """
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        v = chainer.Variable(v_data)
        loss = mean_squared_error(x, t, v, use_visibility)
        loss_value = cuda.to_cpu(loss.data)
        eq_(loss_value.dtype, np.float32)
        eq_(loss_value.shape, ())
        # compute expected value.
        loss_expect = 0.
        for i in np.ndindex(self.x.shape):
            diff = self.x[i] - self.t[i]
            if use_visibility:
                # mask by the visibility flag of the joint (last axis dropped)
                diff *= self.v[i[:-1]]
            loss_expect += diff**2
        if use_visibility:
            N = self.v.sum()/2
        else:
            N = self.x.size/2
        loss_expect /= N
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
项目:vsmlib    作者:undertherain    | 项目源码 | 文件源码
def concat_examples(batch, device=None, padding=0):
    """Concatenate a batch of tuple examples and move arrays to a device.

    Args:
        batch: list of examples; the visible branch handles tuple examples,
            concatenating each field across the batch with per-field padding.
        device: None keeps arrays as-is, < 0 moves to CPU, otherwise to the
            given GPU device.
        padding: padding value, or a tuple of per-field padding values.

    Raises:
        ValueError: if *batch* is empty.

    NOTE(review): only the tuple branch is visible here; non-tuple batches
    fall through (the function may continue beyond this excerpt) — confirm.
    """
    if len(batch) == 0:
        raise ValueError('batch is empty')

    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    first_elem = batch[0]

    if isinstance(first_elem, tuple):
        result = []
        if not isinstance(padding, tuple):
            # Broadcast a scalar padding value to every field of the tuple.
            padding = [padding] * len(first_elem)

        for i in six.moves.range(len(first_elem)):
            result.append(to_device(_concat_arrays(
                [example[i] for example in batch], padding[i])))

        return tuple(result)
项目:TOHO_AI    作者:re53min    | 项目源码 | 文件源码
def test_decode(self, start, eos, limit):
        """Greedy-decode a token sequence from the decoder network.

        Starting from *start*, repeatedly feeds the argmax token back into
        the decoder until *eos* is produced or *limit* steps elapse.

        Returns:
            List of decoded token indices (eos excluded).
        """
        output = []
        y = chainer.Variable(np.array([[start]], dtype=np.int32))

        for i in range(limit):
            decode0 = self.output_embed(y)
            decode1 = self.decode1(decode0)
            decode2 = self.decode2(decode1)
            z = self.output(decode2)
            prob = F.softmax(z)

            # Greedy choice: most probable next token.
            index = np.argmax(cuda.to_cpu(prob.data))

            if index == eos:
                break
            output.append(index)
            y = chainer.Variable(np.array([index], dtype=np.int32))
        return output
项目:SketchSimplification    作者:La4La    | 项目源码 | 文件源码
def save_as_img(array, name, origin, transposed=False):
    """Convert a network output *array* to an 8-bit image and write it to *name*.

    *origin* is the corresponding source image; when the global args.concat
    is set the two are written side by side. *transposed* selects a
    (C, W, H) layout instead of (C, H, W) before moving channels last.
    """
    if transposed:
        origin = origin.transpose(2, 1, 0)
        array = array.transpose(2, 1, 0)
    else:
        origin = origin.transpose(1, 2, 0)
        array = array.transpose(1, 2, 0)

    # Scale [0, 1] output to 8-bit pixels and pull it off the GPU.
    array = array * 255
    array = array.clip(0, 255).astype(np.uint8)
    img = cuda.to_cpu(array)
    origin = origin.clip(0, 255).astype(np.uint8)

    if args.concat:
        img_concat = cv2.hconcat([origin, img])
        cv2.imwrite(name, img_concat)
    else:
        cv2.imwrite(name, img)
项目:SketchSimplification    作者:La4La    | 项目源码 | 文件源码
def save_as_img(array, name, origin, transposed=False):
    """Convert a network output *array* to an 8-bit image and write it to *name*.

    *origin* is the corresponding source image; when the global args.concat
    is set the two are written side by side. *transposed* selects a
    (C, W, H) layout instead of (C, H, W) before moving channels last.
    """
    axes = (2, 1, 0) if transposed else (1, 2, 0)
    origin = origin.transpose(*axes)
    array = array.transpose(*axes)

    # Scale [0, 1] output to 8-bit pixels and pull it off the GPU.
    scaled = (array * 255).clip(0, 255).astype(np.uint8)
    img = cuda.to_cpu(scaled)
    origin = origin.clip(0, 255).astype(np.uint8)

    if args.concat:
        cv2.imwrite(name, cv2.hconcat([origin, img]))
    else:
        cv2.imwrite(name, img)
项目:SketchSimplification    作者:La4La    | 项目源码 | 文件源码
def save_as_img(model1, model2, name, origin, transposed=False):
    """Write a side-by-side comparison image: origin | model1 | model2.

    Both model outputs are scaled from [0, 1] to 8-bit, moved to CPU, and
    horizontally concatenated after the source image. *transposed* selects
    a (C, W, H) layout instead of (C, H, W) before moving channels last.
    """
    if transposed:
        origin = origin.transpose(2, 1, 0)
        model1 = model1.transpose(2, 1, 0)
        model2 = model2.transpose(2, 1, 0)
    else:
        origin = origin.transpose(1, 2, 0)
        model1 = model1.transpose(1, 2, 0)
        model2 = model2.transpose(1, 2, 0)

    model1 = model1 * 255
    model1 = model1.clip(0, 255).astype(np.uint8)
    img1 = cuda.to_cpu(model1)

    model2 = model2 * 255
    model2 = model2.clip(0, 255).astype(np.uint8)
    img2 = cuda.to_cpu(model2)

    origin = origin.clip(0, 255).astype(np.uint8)

    img_concat = cv2.hconcat([origin, img1])
    img_concat = cv2.hconcat([img_concat, img2])
    cv2.imwrite(name, img_concat)
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def check_call(self, x, expects):
        """Check that calling the link returns the expected picked layers.

        When self.pick is not a tuple the link returns a single Variable,
        which is normalized here to a one-element tuple (default pick 'l2').
        Each output is compared elementwise against *expects*.
        """
        outs = self.link(x)

        if isinstance(self.pick, tuple):
            pick = self.pick
        else:
            if self.pick is None:
                pick = ('l2',)
            else:
                pick = (self.pick,)
            outs = (outs,)

        self.assertEqual(len(outs), len(pick))

        for out, layer_name in zip(outs, pick):
            self.assertIsInstance(out, chainer.Variable)
            self.assertIsInstance(out.array, self.link.xp.ndarray)

            out = to_cpu(out.array)
            np.testing.assert_equal(out, to_cpu(expects[layer_name].array))
项目:chainercv    作者:chainer    | 项目源码 | 文件源码
def check_non_maximum_suppression_options(
            self, bbox, threshold, score, limit):
        """Check that score/limit options match manual pre-sorting.

        Running NMS with score and limit must equal running it on
        score-sorted boxes with no options and truncating/remapping the
        result afterwards.
        """
        # Pass all options to the tested function
        scored_selec = non_maximum_suppression(bbox, threshold, score, limit)
        self.assertIsInstance(scored_selec, type(bbox))

        # Reorder inputs befor passing it to the function.
        # Reorder the outputs according to scores.
        order = score.argsort()[::-1]
        reordered_selec = non_maximum_suppression(
            bbox[order], threshold, score=None, limit=None)
        reordered_selec = reordered_selec[:limit]
        # Map indices in sorted space back to the original box indices.
        reordered_selec = order[reordered_selec]

        np.testing.assert_equal(
            cuda.to_cpu(scored_selec), cuda.to_cpu(reordered_selec))
项目:deel    作者:uei    | 项目源码 | 文件源码
def concat(x, y, train='on'):
    """Concatenate two arrays end-to-end as a flat float32 vector.

    Both inputs are copied to host memory when running on the GPU,
    flattened, joined, and returned on Deel's current device (xp).
    The *train* parameter is unused but kept for interface compatibility.
    """
    xp = Deel.xp
    if Deel.gpu >= 0:
        x = cuda.to_cpu(x)
        y = cuda.to_cpu(y)
    # reshape(-1) replaces the original hand-rolled dimension-product loops;
    # copy() first so the caller's arrays are never aliased.
    x = x.copy().reshape(-1)
    y = y.copy().reshape(-1)
    z = np.r_[x, y]

    return xp.asarray(z, dtype=xp.float32)
项目:der-network    作者:soskek    | 项目源码 | 文件源码
def evaluate(dataset, model, args, n_query_data=None):
    """Evaluate *model* on *dataset* using a worker pool.

    The dataset is chunked across args.n_pool model copies, solved in
    parallel, and per-chunk losses/correct counts are accumulated.

    Returns:
        (mean loss per query, correct count, choice count, sub-correct count).

    NOTE(review): n_ch uses `/` on list lengths — under Python 3 this yields
    a float slice bound and would fail, so this appears to be Python 2 code.
    """
    pool, modelL = make_pool(model, args.n_pool)
    correct_per, sub_correct_per, n_choice_per = 0., 0., 0.
    sum_loss_data = xp.zeros(())

    idsL = model.make_efficient_chunk(list(six.moves.range(len(dataset))), dataset)
    all_datasL = [[dataset[idx] for idx in ids] for ids in idsL]

    # Split dataset into some part
    n_ch = len(all_datasL[0])/6+1
    for j in six.moves.range(6):
        datasL = [each_datas[j*n_ch:(j+1)*n_ch] for each_datas in all_datasL]

        for result in pool.imap_unordered(
                wrapper_solve, zip(modelL, datasL, [False]*args.n_pool)):
            sum_loss_one, n_T, n_choice, n_s = result
            sum_loss_data += sum_loss_one
            correct_per += n_T
            sub_correct_per += n_s
            n_choice_per += n_choice
    if n_query_data is None:
        n_query_data = sum([len(v_["queries"]) for v_ in dataset])

    pool.close()
    return cuda.to_cpu(sum_loss_data) / n_query_data, correct_per, n_choice_per, sub_correct_per
项目:chainer-graph-cnn    作者:pfnet-research    | 项目源码 | 文件源码
def test_forward_consistency(self, nobias=False):
        """Check CPU and GPU forward passes of GraphConvolutionFunction agree.

        Builds identical argument sets on both devices, runs the function on
        each, and compares outputs within self.check_forward_options.
        """
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        func_cpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
        func_cpu.to_cpu()
        args_cpu = (x_cpu, W_cpu)
        if b_cpu is not None:
            args_cpu += (b_cpu, )
        y_cpu = func_cpu(*args_cpu)

        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        func_gpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
        func_gpu.to_gpu()
        args_gpu = (x_gpu, W_gpu)
        if b_gpu is not None:
            args_gpu += (b_gpu, )
        y_gpu = func_gpu(*args_gpu)

        # .get() pulls the GPU result back to host memory for comparison.
        testing.assert_allclose(
            y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def visualize(gen, epoch, savedir, batch_size=36, image_type='sigmoid'):
    """Sample images from generator *gen* and save a 6x6 grid for *epoch*.

    image_type selects how raw outputs map to [0, 255]: 'sigmoid' assumes
    outputs in [0, 1], anything else assumes [-1, 1].
    """
    z = chainer.Variable(gen.xp.asarray(gen.make_hidden(batch_size)), volatile=True)
    x_fake = gen(z, train=False)
    if image_type == 'sigmoid':
        img_gen = ((cuda.to_cpu(x_fake.data)) * 255).clip(0, 255).astype(np.uint8)
    else:
        img_gen = ((cuda.to_cpu(x_fake.data) + 1) * 127.5).clip(0, 255).astype(np.uint8)

    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for i in range(36):
        ax = fig.add_subplot(6, 6, i + 1, xticks=[], yticks=[])
        ax.imshow(img_gen[i].transpose(1, 2, 0))  # CHW -> HWC for imshow
    fig.savefig('{}/generate_{:03d}'.format(savedir, epoch))
    # plt.show()
    plt.close()
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def visualize(gen, epoch, savedir, batch_size=36, image_type='sigmoid'):
    """Sample images from generator *gen* and save a 6x6 grid for *epoch*.

    image_type selects how raw outputs map to [0, 255]: 'sigmoid' assumes
    outputs in [0, 1], anything else assumes [-1, 1].
    """
    z = chainer.Variable(gen.xp.asarray(gen.make_hidden(batch_size)), volatile=True)
    x_fake = gen(z, train=False)
    samples = cuda.to_cpu(x_fake.data)
    if image_type == 'sigmoid':
        img_gen = (samples * 255).clip(0, 255).astype(np.uint8)
    else:
        img_gen = ((samples + 1) * 127.5).clip(0, 255).astype(np.uint8)

    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for idx in range(36):
        axis = fig.add_subplot(6, 6, idx + 1, xticks=[], yticks=[])
        axis.imshow(img_gen[idx].transpose(1, 2, 0))  # CHW -> HWC
    fig.savefig('{}/generate_{:03d}'.format(savedir, epoch))
    plt.close()
项目:chainer-image-generation    作者:fukuta0614    | 项目源码 | 文件源码
def visualize(gen, epoch, savedir, batch_size=36, image_type='sigmoid'):
    """Sample images from generator *gen* and save a 6x6 grid for *epoch*.

    image_type selects how raw outputs map to [0, 255]: 'sigmoid' assumes
    outputs in [0, 1], anything else assumes [-1, 1].
    """
    z = chainer.Variable(gen.xp.asarray(gen.make_hidden(batch_size)), volatile=True)
    x_fake = gen(z, train=False)
    if image_type == 'sigmoid':
        img_gen = ((cuda.to_cpu(x_fake.data)) * 255).clip(0, 255).astype(np.uint8)
    else:
        img_gen = ((cuda.to_cpu(x_fake.data) + 1) * 127.5).clip(0, 255).astype(np.uint8)

    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for i in range(36):
        ax = fig.add_subplot(6, 6, i + 1, xticks=[], yticks=[])
        ax.imshow(img_gen[i].transpose(1, 2, 0))  # CHW -> HWC for imshow
    fig.savefig('{}/generate_{:03d}'.format(savedir, epoch))
    # plt.show()
    plt.close()
项目:unrolled-gan    作者:musyoku    | 项目源码 | 文件源码
def _update_d_and_f(self, state):
        """Update the optimizer's smoothed loss f and relative-change d.

        On the first step f is seeded with the current loss. Afterwards the
        new loss is clipped to a bounded relative change of the old f
        (Eve-style adaptive learning-rate bookkeeping), and d tracks an
        exponential moving average of that relative change.
        """
        d, f = state['d'], state['f']
        if self.t > 1:
            old_f = float(cuda.to_cpu(state['f']))
            if self.loss > old_f:
                # Loss increased: allow growth within the threshold band.
                delta = self.lower_threshold + 1.
                Delta = self.upper_threshold + 1.
            else:
                # Loss decreased: allow shrinkage within the inverse band.
                delta = 1. / (self.upper_threshold + 1.)
                Delta = 1. / (self.lower_threshold + 1.)
            # Clip the loss ratio into [delta, Delta]; 1e-12 avoids div-by-zero.
            c = min(max(delta, self.loss / (old_f + 1e-12)), Delta)
            new_f = c * old_f
            r = abs(new_f - old_f) / (min(new_f, old_f) + 1e-12)
            d += (1 - self.beta3) * (r - d)
            f[:] = new_f
        else:
            f[:] = self.loss
项目:chainer-gan-experiments    作者:Aixile    | 项目源码 | 文件源码
def copy_to_cpu(imgs):
    """Return *imgs* as a host-memory array.

    Unwraps a chainer Variable to its raw array and, when CuPy is available
    and the array lives on the GPU, copies it to CPU memory. CPU arrays are
    returned unchanged.
    """
    # isinstance instead of type(x) == T: also accepts Variable subclasses.
    if isinstance(imgs, chainer.variable.Variable):
        imgs = imgs.data
    try:
        # Public cupy.ndarray alias instead of the internal cupy.core.core path.
        if isinstance(imgs, cupy.ndarray):
            imgs = cuda.to_cpu(imgs)
    except (NameError, AttributeError):
        # cupy is unavailable on CPU-only installs; data is already on host.
        # (Narrowed from a bare `except:` that swallowed everything.)
        pass
    return imgs
项目:vaelm    作者:TatsuyaShirakawa    | 项目源码 | 文件源码
def save_hdf5(filename, obj):
    """Save *obj* to an HDF5 file, temporarily moving it to CPU if on GPU."""
    on_gpu = hasattr(obj, "xp") and obj.xp == cuda.cupy
    if on_gpu:
        obj.to_cpu()
    serializers.save_hdf5(filename, obj)
    if on_gpu:
        # Restore the object to the GPU so the caller's state is unchanged.
        obj.to_gpu()
项目:ROCStory_skipthought_baseline    作者:soskek    | 项目源码 | 文件源码
def evaluate(dataset, model, args):
    """Evaluate *model* over *dataset* in mini-batches.

    Returns:
        (mean loss per example as a CPU array, total correct count).
    """
    sum_correct = 0.
    sum_loss_data = xp.zeros(())
    for i in six.moves.range(0, len(dataset), args.batchsize):
        # Each element is wrapped in its own one-item slice for make_batch.
        x_batch_seq = make_batch([dataset[i + j:i + j + 1]
                                  for j in range(args.batchsize)], train=False)
        x_batch_seq, pos, neg = x_batch_seq[:4], x_batch_seq[4], x_batch_seq[5]
        loss, correct = model.solve(
            x_batch_seq, pos, neg, train=False, variablize=True)
        sum_loss_data += loss.data
        sum_correct += correct
    return cuda.to_cpu(sum_loss_data) / len(dataset), sum_correct
项目:chainer-object-detection    作者:dsanno    | 项目源码 | 文件源码
def to_cpu(self):
        """Move the wrapped model's parameters to CPU (delegates to the model)."""
        self.model.to_cpu()
项目:chainer-object-detection    作者:dsanno    | 项目源码 | 文件源码
def to_cpu(self):
        """Move the wrapped model's parameters to CPU (delegates to the model)."""
        self.model.to_cpu()
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def act(self, state):
        """Sample an action from the policy for *state*; update Q statistics.

        Returns the sampled action as a CPU array.
        """
        with chainer.using_config('train', False):
            s = self.batch_states([state], self.xp, self.phi)
            action = self.policy(s).sample()
            # Q is not needed here, but log it just for information
            q = self.q_function(s, action)

        # Update stats: exponential moving average of the Q value.
        self.average_q *= self.average_q_decay
        self.average_q += (1 - self.average_q_decay) * float(q.data)

        self.logger.debug('t:%s a:%s q:%s',
                          self.t, action.data[0], q.data)
        return cuda.to_cpu(action.data[0])
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def _act(self, state):
        """Sample an action and state value for *state* without backprop.

        Returns:
            (action, v): sampled action and value estimate, both as CPU
            arrays for the single-element batch.
        """
        xp = self.xp
        with chainer.using_config('train', False):
            b_state = batch_states([state], xp, self.phi)
            with chainer.no_backprop_mode():
                action_distrib, v = self.model(b_state)
                action = action_distrib.sample()
            return cuda.to_cpu(action.data)[0], cuda.to_cpu(v.data)[0]
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def _lossfun(self,
             distribs, vs_pred, log_probs,
             vs_pred_old, target_log_probs,
             advs, vs_teacher):
    """PPO loss: clipped policy surrogate + value loss + entropy bonus.

    Also updates exponential moving averages of each loss term for
    monitoring, then returns their weighted sum as the training loss.
    """
    # Importance ratio pi_new / pi_old, broadcast against advantages.
    ratio = F.expand_dims(F.exp(log_probs - target_log_probs), axis=-1)
    entropy = distribs.entropy

    # Pessimistic (clipped) policy surrogate.
    unclipped = ratio * advs
    clipped = F.clip(ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs
    loss_policy = -F.mean(F.minimum(unclipped, clipped))

    if self.clip_eps_vf is None:
        loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
    else:
        # Pessimistic value loss: worst of raw vs. prediction clipped
        # around the old value estimate.
        clipped_vs = _elementwise_clip(vs_pred,
                                       vs_pred_old - self.clip_eps_vf,
                                       vs_pred_old + self.clip_eps_vf)
        loss_value_func = F.mean(F.maximum(
            F.square(vs_pred - vs_teacher),
            F.square(clipped_vs - vs_teacher)))

    loss_entropy = -F.mean(entropy)

    # Exponential moving averages of each loss term for monitoring.
    step = 1 - self.average_loss_decay
    self.average_loss_policy += step * (
        cuda.to_cpu(loss_policy.data) - self.average_loss_policy)
    self.average_loss_value_func += step * (
        cuda.to_cpu(loss_value_func.data) - self.average_loss_value_func)
    self.average_loss_entropy += step * (
        cuda.to_cpu(loss_entropy.data) - self.average_loss_entropy)

    return (loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy)
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def _compute_loss(self, exp_batch, gamma, errors_out=None):
    """Compute the Q-learning loss for a batch of experiences.

    Args:
        exp_batch (dict): Batched experiences; see update()'s docstring.
            May contain a 'weights' key, in which case a weighted loss
            (prioritized replay) is computed.
        gamma (float): Discount factor.
        errors_out (list or None): If given, it is cleared and filled
            with the per-example absolute TD errors as a side effect
            (used to update replay priorities).

    Returns:
        Scalar loss variable.
    """
    y, t = self._compute_y_and_t(exp_batch, gamma)

    if errors_out is not None:
        # Report per-example absolute TD error on CPU.
        delta = F.sum(abs(y - t), axis=1)
        errors_out[:] = list(cuda.to_cpu(delta.data))

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            y, t, exp_batch['weights'],
            clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator)
    return compute_value_loss(y, t, clip_delta=self.clip_delta,
                              batch_accumulator=self.batch_accumulator)
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def act(self, state):
    """Return the greedy action for *state* (eval mode, no gradients).

    Also maintains an exponential moving average of the greedy Q-value
    for monitoring.
    """
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            batch = self.batch_states([state], self.xp, self.phi)
            action_value = self.model(batch)
            q = float(action_value.max.data)
            action = cuda.to_cpu(action_value.greedy_actions.data)[0]

    # Exponential moving average of the greedy Q-value.
    decay = self.average_q_decay
    self.average_q = self.average_q * decay + (1 - decay) * q

    self.logger.debug('t:%s q:%s action_value:%s', self.t, q, action_value)
    return action
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def check_forward(self, xs):
    """Forward pass must equal the explicit weighted sum of the inputs."""
    actual = chainerrl.functions.weighted_sum_arrays(
        xs, weights=self.weights)
    expected = sum(w * x for w, x in zip(self.weights, self.xs))
    gradient_check.assert_allclose(expected, cuda.to_cpu(actual.data))
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def check_forward(self, diag_data, non_diag_data):
    """Compare lower_triangular_matrix against a numpy-built reference."""
    y = lower_triangular_matrix(chainer.Variable(diag_data),
                                chainer.Variable(non_diag_data))

    expected = numpy.zeros((self.batch_size, self.n, self.n),
                           dtype=numpy.float32)

    # Strictly-lower-triangular entries come from non_diag ...
    lo_rows, lo_cols = numpy.tril_indices(self.n, -1)
    expected[:, lo_rows, lo_cols] = cuda.to_cpu(non_diag_data)

    # ... and the main diagonal from diag.
    d_rows, d_cols = numpy.diag_indices(self.n)
    expected[:, d_rows, d_cols] = cuda.to_cpu(diag_data)

    gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
项目:chainerrl    作者:chainer    | 项目源码 | 文件源码
def check_forward(self, xs):
    """sum_arrays must match Python's built-in elementwise sum."""
    out = chainerrl.functions.sum_arrays(xs)
    reference = sum(self.xs)
    gradient_check.assert_allclose(reference, cuda.to_cpu(out.data))