Python chainer.functions module: mean_squared_error() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use chainer.functions.mean_squared_error().
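
As a minimal usage sketch before the project snippets (not taken from any of them): the function accepts two arrays or Variables of the same shape and returns a scalar Variable holding the mean of the squared element-wise differences.

import numpy as np
import chainer.functions as F

x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
t = np.array([[1.5, 2.0], [2.0, 4.0]], dtype=np.float32)

loss = F.mean_squared_error(x, t)   # scalar Variable
print(loss.data)                    # ((-0.5)**2 + 0 + 1**2 + 0) / 4 = 0.3125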

Project: chainer_pong    Author: icoxfog417
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
        qv = self.agent.q(states)
        q_t = self.target(next_states)  # Q(s', *)
        max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)

        target = cuda.to_cpu(qv.data.copy())
        for i in range(self.replay_size):
            if episode_ends[i][0]:
                _r = np.sign(rewards[i])
            else:
                _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]

            target[i, actions[i]] = _r

        td = Variable(self.target.arr_to_gpu(target)) - qv
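        # Clip the TD error to [-1, 1]: where |td| > 1, replace it with its sign, td / |td|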
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

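        # Regressing the clipped TD error toward zeros: the MSE equals the mean squared (clipped) TD error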
        zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zeros)
        self._loss = loss.data
        self._qv = np.max(qv.data)
        return loss
Project: video_labelling_using_youtube8m    Author: LittleWat
def __call__(self, x, pcaed_x=None, test=False):
        h = self.enc_l(x)
        rec = self.dec_l(h)

        if test:
            return rec, h

        ae_loss = F.mean_squared_error(x, rec)
        pca_loss = F.mean_squared_error(pcaed_x, h) * self.c

        #         self.loss = ae_loss + pca_loss
        self.loss = pca_loss

        chainer.reporter.report({'loss': self.loss,
                                 'ae_loss': ae_loss,
                                 'pca_loss': pca_loss,
                                 }, self)

        return self.loss
Project: dockerfiles    Author: floydhub
def linear_train(train_data, train_target, n_epochs=200):
    for _ in range(n_epochs):
        # Get the result of the forward pass.
        output = linear_forward(train_data)

        # Calculate the loss between the training data and target data.
        loss = F.mean_squared_error(train_target, output)

        # Zero all gradients before updating them.
        linear_function.zerograds()

        # Calculate and update all gradients.
        loss.backward()

        # Use the optimizer to move all parameters of the network
        # to values which will reduce the loss.
        optimizer.update()
Project: soft-dtw    Author: mblondel
def __call__(self, x, t):
        y = self.predictor(x)

        if self.loss == "euclidean":
            return F.mean_squared_error(y, t)

        elif self.loss == "sdtw":
            loss = 0
            for i in range(y.shape[0]):
                y_i = F.reshape(y[i], (-1,1))
                t_i = F.reshape(t[i], (-1,1))
                loss += SoftDTWLoss(self.gamma)(y_i, t_i)
            return loss

        else:
            raise ValueError("Unknown loss")
Project: vfm    Author: cemoody
def __call__(self, loc, val, y, train=True):
        bs = val.data.shape[0]
        pred, kld0, kld1, kld2 = self.forward(loc, val, y, train=train)

        # Compute MSE loss
        mse = F.mean_squared_error(pred, y)
        rmse = F.sqrt(mse)  # Only used for reporting

        # Now compute the total KLD loss
        kldt = kld0 * self.lambda0 + kld1 * self.lambda1 + kld2 * self.lambda2

        # Total loss is MSE plus regularization losses
        loss = mse + kldt * (1.0 / self.total_nobs)

        # Log the errors
        logs = {'loss': loss, 'rmse': rmse, 'kld0': kld0, 'kld1': kld1,
                'kld2': kld2, 'kldt': kldt, 'bias': F.sum(self.bias_mu.b)}
        reporter.report(logs, self)
        return loss
Project: vfm    Author: cemoody
def __call__(self, loc, val, y, train=True):
        bs = val.data.shape[0]
        ret = self.forward(loc, val, y, train=train)
        pred, kld0, kld1, kldg, kldi, hypg, hypi = ret

        # Compute MSE loss
        mse = F.mean_squared_error(pred, y)
        rmse = F.sqrt(mse)  # Only used for reporting

        # Now compute the total KLD loss
        kldt = kld0 * self.lambda0 + kld1 * self.lambda1
        kldt += kldg + kldi + hypg + hypi

        # Total loss is MSE plus regularization losses
        loss = mse + kldt * (1.0 / self.total_nobs)

        # Log the errors
        logs = {'loss': loss, 'rmse': rmse, 'kld0': kld0, 'kld1': kld1,
                'kldg': kldg, 'kldi': kldi, 'hypg': hypg, 'hypi': hypi,
                'hypglv': F.sum(self.hyper_feat_lv_vec.b),
                'hypilv': F.sum(self.hyper_feat_delta_lv.b),
                'kldt': kldt, 'bias': F.sum(self.bias_mu.b)}
        reporter.report(logs, self)
        return loss
Project: gan-rl    Author: iaroslav-ai
def __call__(self, X, Yt, D, G):
        D.reset_state()

        r = 0.0
        mg = w_init
        for x, yt in zip(X, Yt):
            t = D(x, yt)
            r += F.mean_squared_error(t, t*0.0 + 1.0)*mg
            mg = 1.0

        D.reset_state()
        G.reset_state()

        mg = w_init
        for x, yt in zip(X, Yt):
            f = D(x, G(x))
            r += F.mean_squared_error(f, f * 0.0)*mg
            mg = 1.0

        return r
Project: SeRanet    Author: corochann
def __call__(self, x, t=None):
        self.clear()
        #x = Variable(x_data)  # x_data.astype(np.float32)

        h = F.leaky_relu(self.conv1(x), slope=0.1)
        h = F.leaky_relu(self.conv2(h), slope=0.1)
        h = F.leaky_relu(self.conv3(h), slope=0.1)
        h = F.leaky_relu(self.conv4(h), slope=0.1)
        h = F.leaky_relu(self.conv5(h), slope=0.1)
        h = F.leaky_relu(self.conv6(h), slope=0.1)
        h = F.clipped_relu(self.conv7(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Project: NeuralStyleTransfer    Author: Francis-Hsu
def loss_style(self, gen_img_rep):
        feat_cor_gen = self.feature_cor(gen_img_rep)

        feat_loss = 0
        for i in range(len(feat_cor_gen)):
            orig_shape = self.style_rep[i].shape
            feat_map_size = orig_shape[2] * orig_shape[3]  # M_l

            layer_wt = 4.0 * feat_map_size ** 2.0

            feat_loss += F.mean_squared_error(self.style_feat_cor[i], feat_cor_gen[i]) / layer_wt

        return feat_loss

    # total loss function
    # cf. equation (7) of the article
Project: Multitask-and-Transfer-Learning    Author: AI-ON
def __call__(self, x_image, t_image, x_action, t_action):
        self.y_image, self.y_action = self.predictor(x_image, x_action)

        predicted_action = self.action_meaning(
            F.argmax(self.y_action, axis=1).data[0])
        real_action = self.action_meaning(t_action)
        if predicted_action != real_action:
            print("Predicted action:", predicted_action,
                  "it was actually", real_action)
        image_loss = F.mean_squared_error(self.y_image, t_image)
        self.error_mask = normalize_2d(F.squared_error(self.y_image, t_image))
        action_loss = F.softmax_cross_entropy(
            self.y_action,
            F.expand_dims(np.array(t_action, dtype=np.int32), axis=0),
        )
        print('Image loss', image_loss.data, ', Action loss:', action_loss.data)
        return self.weight * image_loss + (1.0 - self.weight) * action_loss
Project: chainerrl    Author: chainer
def _lossfun(self,
                 distribs, vs_pred, log_probs,
                 vs_pred_old, target_log_probs,
                 advs, vs_teacher):
        prob_ratio = F.exp(log_probs - target_log_probs)
        ent = distribs.entropy

        prob_ratio = F.expand_dims(prob_ratio, axis=-1)
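        # Clipped surrogate objective (PPO): take the pessimistic minimum of the unclipped and clipped terms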
        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1-self.clip_eps, 1+self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
                ))

        loss_entropy = -F.mean(ent)

        # Update stats
        self.average_loss_policy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_policy.data) - self.average_loss_policy))
        self.average_loss_value_func += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_value_func.data) - self.average_loss_value_func))
        self.average_loss_entropy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_entropy.data) - self.average_loss_entropy))

        return (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
            )
Project: chainerrl    Author: chainer
def compute_value_loss(y, t, clip_delta=True, batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        clip_delta (bool): Use the Huber loss function if set True.
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
            the loss values in a batch. 'sum' will use the sum.
    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        loss_sum = F.sum(F.huber_loss(y, t, delta=1.0))
        if batch_accumulator == 'mean':
            loss = loss_sum / y.shape[0]
        elif batch_accumulator == 'sum':
            loss = loss_sum
    else:
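        # Halve the MSE so this branch matches the Huber branch's 0.5 * error**2 scale for small errors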
        loss_mean = F.mean_squared_error(y, t) / 2
        if batch_accumulator == 'mean':
            loss = loss_mean
        elif batch_accumulator == 'sum':
            loss = loss_mean * y.shape[0]
    return loss
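
As a quick sanity check of the two branches (a sketch, not part of the project; assumes compute_value_loss above and numpy inputs):

import numpy as np

y = np.array([0.0, 2.0], dtype=np.float32)
t = np.array([-0.5, 0.0], dtype=np.float32)

# Huber branch: 0.5 * 0.5**2 + (2.0 - 0.5) = 1.625, divided by the batch size of 2
print(compute_value_loss(y, t, clip_delta=True).data)   # 0.8125
# MSE branch: mean squared error 2.125, halved to match the Huber scale
print(compute_value_loss(y, t, clip_delta=False).data)  # 1.0625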
Project: chainer-cyclegan    Author: Aixile
def loss_func_rec_l2(x_out, t):
    return F.mean_squared_error(x_out, t)
Project: pfi-internship2016    Author: hvy
def __call__(self, x, t):
        """Perform a forward pass and compute the loss. This method ultimately
        defines the model.

        Args:
            x (chainer.Variable): Input vector.
            t (chainer.Variable): Target vector. Usually identical to `x` in
                the case of an Autoencoder.

        Returns:
            chainer.Variable: Loss.
        """
        # Test different activation functions and dropout.
        h = self.l1(x)
        y = self.l2(h)

        if self.train:
            # Scale the MSE by 5, i.e. 0.5 * 10, so that the loss can be compared
            # to the loss computed in Assignment 4. Factor 0.5, since the Chainer
            # implementation doesn't scale the error by 0.5, and factor 10, since
            # the previous assignment's loss function does not compute the mean
            # and the number of summed elements is 10.
            self.loss = 5 * F.mean_squared_error(y, t)

            return self.loss
        else:
            return y
Project: chainer-neural-style    Author: dsanno
def __fit_one(self, link, content_layers, style_grams):
        xp = self.xp
        link.zerograds()
        layers = self.model(link.x)
        if self.keep_color:
            trans_layers = self.model(util.gray(link.x))
        else:
            trans_layers = layers
        loss_info = []
        loss = Variable(xp.zeros((), dtype=np.float32))
        for name, content_layer in content_layers:
            layer = layers[name]
            content_loss = self.content_weight * F.mean_squared_error(layer, content_layer)
            loss_info.append(('content_' + name, float(content_loss.data)))
            loss += content_loss
        for name, style_gram in style_grams:
            gram = util.gram_matrix(trans_layers[name])
            style_loss = self.style_weight * F.mean_squared_error(gram, style_gram)
            loss_info.append(('style_' + name, float(style_loss.data)))
            loss += style_loss
        tv_loss = self.tv_weight * util.total_variation(link.x)
        loss_info.append(('tv', float(tv_loss.data)))
        loss += tv_loss
        loss.backward()
        self.optimizer.update()
        return loss_info
Project: chainer-neural-style    Author: dsanno
def __fit_one(self, link, content_layers, style_patches):
        xp = self.xp
        link.zerograds()
        layers = self.model(link.x)
        if self.keep_color:
            trans_layers = self.model(util.gray(link.x))
        else:
            trans_layers = layers
        loss_info = []
        loss = Variable(xp.zeros((), dtype=np.float32))
        for name, content_layer in content_layers:
            layer = layers[name]
            content_loss = self.content_weight * F.mean_squared_error(layer, content_layer)
            loss_info.append(('content_' + name, float(content_loss.data)))
            loss += content_loss
        for name, style_patch, style_patch_norm in style_patches:
            patch = util.patch(trans_layers[name])
            style_loss = self.style_weight * F.mean_squared_error(patch, util.nearest_neighbor_patch(patch, style_patch, style_patch_norm))
            loss_info.append(('style_' + name, float(style_loss.data)))
            loss += style_loss
        tv_loss = self.tv_weight * util.total_variation(link.x)
        loss_info.append(('tv', float(tv_loss.data)))
        loss += tv_loss
        loss.backward()
        self.optimizer.update()
        return loss_info
Project: chainer-stack-gan    Author: dsanno
def update(gen, dis, optimizer_gen, optimizer_dis, x_batch, margin):
    xp = gen.xp
    batch_size = len(x_batch)

    # from generated image
    z = xp.random.normal(0, 1, (batch_size, latent_size)).astype(np.float32)
    z = z / (xp.linalg.norm(z, axis=1, keepdims=True) + 1e-12)
    x_gen = gen(z)
    total_size = np.prod(x_gen.shape)
    y_gen, h_gen = dis(x_gen)
    h_gen = F.normalize(F.reshape(h_gen, (batch_size, -1)))
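    # Diversity ("pulling-away") term: mean pairwise cosine similarity of the normalized discriminator features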
    similarity = F.sum(F.matmul(h_gen, h_gen, transb=True)) / (batch_size * batch_size)
    loss_gen = F.mean_squared_error(x_gen, y_gen) + 0.1 * similarity
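    # Hinge for the discriminator: generated samples are penalized only while their reconstruction energy is below margin**2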
    loss_dis = F.sum(F.relu(margin * margin - F.batch_l2_norm_squared(x_gen - y_gen))) / total_size
    # from real image
    x = xp.asarray(x_batch)
    y, h = dis(x)
    loss_dis += F.mean_squared_error(x, y)

    gen.cleargrads()
    loss_gen.backward()
    optimizer_gen.update()

    dis.cleargrads()
    loss_dis.backward()
    optimizer_dis.update()

    return float(loss_gen.data), float(loss_dis.data)
Project: chainer-stack-gan    Author: dsanno
def update(gen1, gen2, dis, optimizer_gen, optimizer_dis, x_batch, margin):
    xp = gen1.xp
    batch_size = len(x_batch)

    # from generated image
    z = xp.random.normal(0, 1, (batch_size, latent_size)).astype(np.float32)
    z = z / (xp.linalg.norm(z, axis=1, keepdims=True) + 1e-12)
    x_stack1 = gen1(Variable(z, volatile=True), train=False)
    x_gen = gen2(x_stack1.data)
    total_size = np.prod(x_gen.shape)
    del z
    del x_stack1
    y_gen, h_gen = dis(x_gen)
    h_gen = F.normalize(F.reshape(h_gen, (batch_size, -1)))
    similarity = F.sum(F.matmul(h_gen, h_gen, transb=True)) / (batch_size * batch_size)
    del h_gen
    loss_gen = F.mean_squared_error(x_gen, y_gen) + 0.1 * similarity
    loss_dis = F.sum(F.relu(margin * margin - F.batch_l2_norm_squared(x_gen - y_gen))) / total_size
    del x_gen
    del y_gen
    del similarity
    # from real image
    x = xp.asarray(x_batch)
    y, h = dis(x)
    loss_dis += F.mean_squared_error(x, y)

    gen2.cleargrads()
    loss_gen.backward()
    optimizer_gen.update()
    loss_gen_data = float(loss_gen.data)
    del loss_gen

    dis.cleargrads()
    loss_dis.backward()
    optimizer_dis.update()

    return loss_gen_data, float(loss_dis.data)
Project: chainer-began    Author: hvy
def pixel_wise_loss(self, x, y):
        if self.loss_norm == 1:
            return F.mean_absolute_error(x, y)
        elif self.loss_norm == 2:
            return F.mean_squared_error(x, y)
        else:
            raise ValueError('Invalid norm {}'.format(self.loss_norm))
Project: chainer-dfi    Author: dsanno
def update(net, optimizer, link, target_layers, tv_weight=0.001):
    layers = feature(net, link.x)
    total_loss = 0
    losses = []
    for layer, target in zip(layers, target_layers):
        loss = F.mean_squared_error(layer, target)
        losses.append(float(loss.data))
        total_loss += loss
    tv_loss = tv_weight * total_variation(link.x)
    losses.append(float(tv_loss.data))
    total_loss += tv_loss
    link.cleargrads()
    total_loss.backward()
    optimizer.update()
    return losses
Project: self-driving-cars    Author: musyoku
def forward_one_step(self, state, action, reward, next_state, test=False):
        xp = cuda.cupy if config.use_gpu else np
        n_batch = state.shape[0]
        state = Variable(state)
        next_state = Variable(next_state)
        if config.use_gpu:
            state.to_gpu()
            next_state.to_gpu()
        q = self.compute_q_variable(state, test=test)

        max_target_q = self.compute_target_q_variable(next_state, test=test)
        max_target_q = xp.amax(max_target_q.data, axis=1)

        target = q.data.copy()

        for i in xrange(n_batch):
            if episode_ends[i]:  # NOTE: episode_ends is assumed to come from the enclosing scope
                target_value = np.sign(reward[i])
            else:
                target_value = np.sign(reward[i]) + config.rl_discount_factor * max_target_q[i]
            action_index = self.get_index_with_action(action[i])
            old_value = target[i, action_index]
            diff = target_value - old_value
            if diff > 1.0:
                target_value = 1.0 + old_value  
            elif diff < -1.0:
                target_value = -1.0 + old_value 
            target[i, action_index] = target_value

        target = Variable(target)

        loss = F.mean_squared_error(target, q)
        return loss, q
Project: self-driving-cars    Author: musyoku
def forward_one_step(self, state, action, reward, next_state, test=False):
        xp = cuda.cupy if config.use_gpu else np
        n_batch = state.shape[0]
        state = Variable(state.reshape((n_batch, config.rl_history_length * 34)))
        next_state = Variable(next_state.reshape((n_batch, config.rl_history_length * 34)))
        if config.use_gpu:
            state.to_gpu()
            next_state.to_gpu()
        q = self.compute_q_variable(state, test=test)
        q_ = self.compute_q_variable(next_state, test=test)
        max_action_indices = xp.argmax(q_.data, axis=1)
        if config.use_gpu:
            max_action_indices = cuda.to_cpu(max_action_indices)

        target_q = self.compute_target_q_variable(next_state, test=test)

        target = q.data.copy()

        for i in xrange(n_batch):
            max_action_index = max_action_indices[i]
            target_value = reward[i] + config.rl_discount_factor * target_q.data[i][max_action_index]
            action_index = self.get_index_for_action(action[i])
            old_value = target[i, action_index]
            diff = target_value - old_value
            if diff > 1.0:
                target_value = 1.0 + old_value  
            elif diff < -1.0:
                target_value = -1.0 + old_value 
            target[i, action_index] = target_value

        target = Variable(target)
        loss = F.mean_squared_error(target, q)
        return loss, q
Project: chainer-deconv    Author: germanRos
def check_forward(self, x0_data, x1_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        loss = functions.mean_squared_error(x0, x1)
        loss_value = cuda.to_cpu(loss.data)
        self.assertEqual(loss_value.dtype, numpy.float32)
        self.assertEqual(loss_value.shape, ())

        # Compute expected value
        loss_expect = 0.
        for i in numpy.ndindex(self.x0.shape):
            loss_expect += (self.x0[i] - self.x1[i]) ** 2
        loss_expect /= self.x0.size

        self.assertAlmostEqual(loss_expect, loss_value, places=5)
Project: stock_dqn_f    Author: wdy06
def forward(self, state, action, Reward, state_dash, episode_end):

        num_of_batch = state.shape[0]

        Q = self.model.Q_func(state)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.Q_func(state_dash)  # Q(s',*)
        tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data, dtype=np.float32)

        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = Reward[i] + self.gamma * max_Q_dash[i]
            else:
                tmp_ = Reward[i]
            #print action
            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        td = Variable(target) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
Project: stock_dqn_f    Author: wdy06
def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]

        Q = self.model.Q_func(state)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.Q_func(state_dash)  # Q(s',*)
        tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)

        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = Reward[i] + self.gamma * max_Q_dash[i]
            else:
                tmp_ = Reward[i]
            #print action
            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        td = Variable(cuda.to_gpu(target,self.gpu_id)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32),self.gpu_id))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
Project: masalachai    Author: DaikiShimada
def __init__(self, encoder, decoder, lossfun=mean_squared_error):
        super(AutoencoderModel, self).__init__(
            encoder, decoder=decoder, lossfun=lossfun)
        self.decoder = decoder
        self.z = None
Project: convolutional-pose-machines-chainer    Author: tomoyukun
def __call__(self, image, cmap, t):
        self.clear()
        h1 = self.stage1(image)
        h2 = self.branch(image)
        self.loss = F.mean_squared_error(h1, t)

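        # Intermediate supervision: each subsequent stage's output is scored against the same target heatmaps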
        for name, _ in self.forward:
            f = getattr(self, name)
            h1 = f(h1, h2, cmap)
            self.loss += F.mean_squared_error(h1, t)

        return h1, self.loss
Project: convolutional-pose-machines-chainer    Author: tomoyukun
def __call__(self, image, cmap, t):
        self.clear()
        h1 = self.stage1(image)
        h2 = self.branch(image)
        self.loss = F.mean_squared_error(h1, t)

        for name, _ in self.forward:
            f = getattr(self, name)
            h1 = f(h1, h2, cmap, train=self.train)
            self.loss += F.mean_squared_error(h1, t)

        if self.train:
            return h1, self.loss
        else:
            return h1
Project: stock_dqn    Author: wdy06
def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.model.Q_func(s,train=True)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.Q_func(s_dash,train=self.targetFlag)  # Q(s',*)
        tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)

        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])
            #print action
            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        td = Variable(cuda.to_gpu(target,self.gpu_id)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32),self.gpu_id))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
Project: SketchSimplification    Author: La4La
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, D_u_rough, batchsize, alpha=0.1, beta=0.1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
        loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss_adv_unpaired = F.softmax_cross_entropy(D_u_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        #loss_line = self.line_loss(G_p_rough, p_line)
        loss = loss_L + alpha * loss_adv + beta * loss_adv_unpaired #+ loss_line
        chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv, 'loss_adv_u': loss_adv_unpaired}, gen)
        return loss
Project: SketchSimplification    Author: La4La
def loss_gen(self, gen, G_out, gt, batchsize, alpha=1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_out, gt) * G_out.data.size
        loss = loss_L
        chainer.report({'loss': loss, "loss_L": loss_L}, gen)
        return loss
Project: SketchSimplification    Author: La4La
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, batchsize, alpha=0.1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
        loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        #loss_line = self.line_loss(G_p_rough, p_line)
        loss = loss_L + alpha * loss_adv #+ loss_line
        chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv}, gen)
        return loss
Project: cloud-ml-sdk    Author: XiaoMi
def main():
  # Define train function
  def linear_train(train_data, train_target, n_epochs=200):
    for _ in range(n_epochs):
      output = linear_function(train_data)
      loss = F.mean_squared_error(train_target, output)
      linear_function.zerograds()
      loss.backward()
      optimizer.update()

  # Construct train data
  x = 30 * np.random.rand(1000).astype(np.float32)
  y = 7 * x + 10
  y += 10 * np.random.randn(1000).astype(np.float32)
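  # Noisy samples of y = 7x + 10; the fit below should recover slope ~7 and intercept ~10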

  linear_function = L.Linear(1, 1)

  x_var = Variable(x.reshape(1000, -1))
  y_var = Variable(y.reshape(1000, -1))

  optimizer = optimizers.MomentumSGD(lr=0.001)
  optimizer.setup(linear_function)

  for i in range(150):
    linear_train(x_var, y_var, n_epochs=20)
    y_pred = linear_function(x_var).data

  slope = linear_function.W.data[0, 0]
  intercept = linear_function.b.data[0]

  print("Final Line: {0:.3}x + {1:.3}".format(slope, intercept))
Project: doubleDQN    Author: masataka46
def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.Q_func(s)  # Get Q-value
        # Generate Target Signals
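        # Double DQN: choose a* with the online network, evaluate Q'(s', a*) with the target network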
        tmp2 = self.Q_func(s_dash)
        tmp2 = list(map(np.argmax, tmp2.data.get()))  # argmaxQ(s',a)
        tmp = self.Q_func_target(s_dash)  # Q'(s',*)
        tmp = list(tmp.data.get())
        # select Q'(s', a*) where a* = argmax_a Q(s', a)
        res1 = []
        for i in range(num_of_batch):
            res1.append(tmp[i][tmp2[i]])

        #max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        max_Q_dash = np.asanyarray(res1, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)
        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_
        # TD-error clipping
        td = Variable(cuda.to_gpu(target)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
Project: learning2rank    Author: shiba24
def __call__(self, x, t):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        h = F.relu(self.l3(h2))
        self.loss = F.mean_squared_error(h, t)
        return self.loss
Project: deel    Author: uei
def getLossDistill(self,x,t):
        self.loss = F.mean_squared_error(x, t)

        return self.loss
Project: deel    Author: uei
def getLossDistill(self,x,t):
        _t = chainer.Variable(t.data, volatile='off')
        self.loss = F.mean_squared_error(x, _t)

        return self.loss
Project: deel    Author: uei
def backprop(self,t,x=None):
        if x is None:
            x=Tensor.context
        #loss = F.mean_squared_error(x.content,t.content)
        loss = F.softmax_cross_entropy(x.content,t.content)
        if  Deel.train:
            loss.backward()
        accuracy = F.accuracy(x.content,t.content)
        self.optimizer.update()
        return loss.data,accuracy.data
Project: chainer-examples    Author: nocotan
def __call__(self, x, t):
        h = F.sigmoid(self.l1(x))
        h = F.sigmoid(self.l2(h))
        y = F.mean_squared_error(h, t)
        return y
Project: vfm    Author: cemoody
def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
                 lambda0=1, lambda1=1, lambda2=1, init_bias_mu=0.0,
                 init_bias_lv=0.0, intx_term=True, total_nobs=1):
        self.n_dim = n_dim
        self.n_features = n_features
        self.lossfun = lossfun
        self.lambda0 = lambda0
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.intx_term = intx_term
        self.total_nobs = total_nobs

        # In contrast to the FM model, the slopes and latent vectors
        # will have means (mu) and log variances (lv) for each component.
        super(VFM, self).__init__(bias_mu=L.Bias(shape=(1,)),
                                  bias_lv=L.Bias(shape=(1,)),
                                  slop_mu=L.Bias(shape=(1, 1)),
                                  slop_lv=L.Bias(shape=(1, 1)),
                                  slop_delta_mu=L.EmbedID(n_features, 1,
                                                          ignore_label=-1),
                                  slop_delta_lv=L.EmbedID(n_features, 1,
                                                          ignore_label=-1),
                                  feat_mu_vec=L.Bias(shape=(1, 1, n_dim)),
                                  feat_lv_vec=L.Bias(shape=(1, 1, n_dim)),
                                  feat_delta_mu=L.EmbedID(n_features, n_dim,
                                                          ignore_label=-1),
                                  feat_delta_lv=L.EmbedID(n_features, n_dim,
                                                          ignore_label=-1))

        # Xavier initialize weights
        c = np.sqrt(n_features * n_dim) * 1e3
        d = np.sqrt(n_features) * 1e3
        self.feat_delta_mu.W.data[...] = np.random.randn(n_features, n_dim) / c
        self.feat_delta_lv.W.data[...] = np.random.randn(n_features, n_dim) / c
        self.slop_delta_mu.W.data[...] = np.random.randn(n_features, 1) / d
        self.slop_delta_lv.W.data[...] = np.random.randn(n_features, 1) / d
        self.bias_mu.b.data[...] *= 0.0
        self.bias_mu.b.data[...] += init_bias_mu
        self.bias_lv.b.data[...] *= 0.0
        self.bias_lv.b.data[...] += init_bias_lv
Project: vfm    Author: cemoody
def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
                 lambda0=5e-3, lambda1=5e-3, lambda2=5e-3, init_bias=0.0,
                 intx_term=True, total_nobs=1):
        self.n_dim = n_dim
        self.n_features = n_features
        self.lossfun = lossfun
        self.lambda0 = lambda0
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.intx_term = intx_term
        self.total_nobs = total_nobs

        # These are all the learned weights corresponding
        # to the overall bias, slope per feature, and latent
        # interaction vector per feature
        super(FM, self).__init__(bias=L.Bias(shape=(1,)),
                                 slope=L.EmbedID(n_features, 1),
                                 latent=L.EmbedID(n_features, n_dim))

        # Xavier initialize weights
        c = np.sqrt(n_features * n_dim)
        self.latent.W.data[...] = np.random.randn(n_features, n_dim) / c
        d = np.sqrt(n_features)
        self.slope.W.data[...] = np.random.randn(n_features, 1) / d
        self.bias.b.data[...] *= 0.0
        self.bias.b.data[...] += init_bias
Project: ram    Author: amasky
def __call__(self, x, t, train=True):
        x = chainer.Variable(self.xp.asarray(x), volatile=not train)
        t = chainer.Variable(self.xp.asarray(t), volatile=not train)
        bs = x.data.shape[0] # batch size
        self.clear(bs, train)

        # init mean location
        l = np.random.uniform(-1, 1, size=(bs,2)).astype(np.float32)
        l = chainer.Variable(self.xp.asarray(l), volatile=not train)

        # forward n_steps time
        sum_ln_pi = 0
        self.forward(x, train, action=False, init_l=l)
        for i in range(1, self.n_steps):
            action = True if (i == self.n_steps - 1) else False
            l, ln_pi, y, b = self.forward(x, train, action)
            if train: sum_ln_pi += ln_pi

        # loss with softmax cross entropy
        self.loss_action = F.softmax_cross_entropy(y, t)
        self.loss = self.loss_action
        self.accuracy = F.accuracy(y, t)

        if train:
            # reward
            conditions = self.xp.argmax(y.data, axis=1) == t.data
            r = self.xp.where(conditions, 1., 0.).astype(np.float32)

            # squared error between reward and baseline
            self.loss_base = F.mean_squared_error(r, b)
            self.loss += self.loss_base

            # loss with reinforce rule
            mean_ln_pi = sum_ln_pi / (self.n_steps - 1)
            self.loss_reinforce = F.sum(-mean_ln_pi * (r-b))/bs
            self.loss += self.loss_reinforce

        return self.loss
Project: workspace    Author: nojima
def __call__(self, x, y):
        return F.mean_squared_error(self.forward(x), y)
Project: workspace    Author: nojima
def __call__(self, x):
        return F.mean_squared_error(self.forward(x), x)
Project: workspace    Author: nojima
def __call__(self, x: Variable) -> Variable:
        output = self.forward(x)
        return F.mean_squared_error(output, x)
Project: gan-rl    Author: iaroslav-ai
def __call__(self, X, D, G):
        D.reset_state()
        G.reset_state()

        r = 0.0

        mg = w_init
        for x in X:
            f = D(x, G(x))
            r += F.mean_squared_error(f, f*0.0 + 1.0)*mg
            mg = 1.0

        return r
Project: chainer_sklearn    Author: corochann
def __init__(self,
                 predictor=None,
                 lossfun=mean_squared_error,
                 accfun=None,
                 device=-1,
                 **sk_params
                 ):
        super(SklearnWrapperRegressor, self).__init__(
            predictor=predictor,
            lossfun=lossfun,
            accfun=accfun,
            device=device,
            **sk_params
        )
Project: RL_reversi    Author: ryogrid
def __call__(self, x, t=None, train=False):
        h = F.leaky_relu(self.l1(x))
        h = F.leaky_relu(self.l2(h))
        h = F.leaky_relu(self.l3(h))
        h = self.l4(h)

        if train:
            return F.mean_squared_error(h,t)
        else:
            return h
Project: RL_reversi    Author: ryogrid
def __call__(self, x, t=None, train=False):
        h = F.leaky_relu(self.l1(x))
        h = F.leaky_relu(self.l2(h))
        h = F.leaky_relu(self.l3(h))
        h = self.l4(h)

        if train:
            return F.mean_squared_error(h,t)
        else:
            return h
Project: SeRanet    Author: corochann
def __call__(self, x, t=None):
        self.clear()
        h1 = F.leaky_relu(self.conv1(x), slope=0.1)
        h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
        h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

        h2 = self.seranet_v1_crbm(x)
        # Fusion
        h12 = F.concat((h1, h2), axis=1)

        lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
        lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
        ru = F.leaky_relu(self.convru6(h12), slope=0.1)
        ru = F.leaky_relu(self.convru7(ru), slope=0.1)
        ru = F.leaky_relu(self.convru8(ru), slope=0.1)
        ld = F.leaky_relu(self.convld6(h12), slope=0.1)
        ld = F.leaky_relu(self.convld7(ld), slope=0.1)
        ld = F.leaky_relu(self.convld8(ld), slope=0.1)
        rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
        rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

        # Splice
        h = CF.splice(lu, ru, ld, rd)

        h = F.leaky_relu(self.conv9(h), slope=0.1)
        h = F.leaky_relu(self.conv10(h), slope=0.1)
        h = F.leaky_relu(self.conv11(h), slope=0.1)
        h = F.clipped_relu(self.conv12(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h