Python chainer.functions module: square() usage examples

The following seven code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.square().
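
All of the snippets below rely on the conventional Chainer aliases. A minimal import preamble along these lines is assumed (the exact imports vary from project to project):

import numpy as np
import chainer
import chainer.functions as F
from chainer import cuda, reporter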

Project: chainer-EWC    Author: okdshin
def __call__(self, *args):
        x = args[:-1]  # input arrays
        t = args[-1]   # ground-truth labels
        # Clear the results of the previous call.
        self.y = None
        self.loss = None
        self.accuracy = None
        self.y = self.predictor(*x)
        self.loss = F.softmax_cross_entropy(self.y, t)

        # EWC penalty: anchor each parameter to the value stored after the
        # previous task, weighted by its Fisher information.
        if self.stored_variable_list is not None and \
                self.fisher_list is not None:  # i.e. Fisher info was stored
            for i in range(len(self.variable_list)):
                self.loss += self.lam / 2. * F.sum(
                        self.fisher_list[i] *
                        F.square(self.variable_list[i][1] -
                                 self.stored_variable_list[i]))
        reporter.report({'loss': self.loss}, self)
        if self.compute_accuracy:
            self.accuracy = F.accuracy(self.y, t)
            reporter.report({'accuracy': self.accuracy}, self)
        return self.loss
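
This is the Elastic Weight Consolidation (EWC) objective: the task loss is augmented with the quadratic penalty (lam / 2) * sum_i F_i * (theta_i - theta_i*)^2, which pulls each parameter theta_i back toward the value theta_i* stored after the previous task in proportion to its Fisher information F_i, so parameters important to the old task change slowly while the rest remain free to learn the new one.
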
Project: chainer-EWC    Author: okdshin
def compute_fisher(self, dataset):
        # One accumulator per parameter array for the diagonal of the
        # empirical Fisher information.
        fisher_accum_list = [
                np.zeros(var[1].shape) for var in self.variable_list]

        for _ in range(self.num_samples):
            # Draw a random input and sample a class label from the model's
            # own predictive distribution.
            x, _ = dataset[np.random.randint(len(dataset))]
            y = self.predictor(np.array([x]))
            prob_list = F.softmax(y)[0].data
            class_index = np.random.choice(len(prob_list), p=prob_list)
            loss = F.log_softmax(y)[0, class_index]
            self.cleargrads()
            loss.backward()
            # Accumulate the squared gradient of the sampled log-likelihood.
            for i in range(len(self.variable_list)):
                fisher_accum_list[i] += np.square(
                        self.variable_list[i][1].grad)

        self.fisher_list = [
                F_accum / self.num_samples for F_accum in fisher_accum_list]
        return self.fisher_list
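
This estimates the diagonal Fisher information, F_i ≈ E[(d log p(y|x; theta) / d theta_i)^2], by Monte Carlo: a label is sampled from the model's own softmax output, the gradient of its log-likelihood is computed by backpropagation, and the squared gradients are averaged over num_samples draws. Note that np.square here is the NumPy counterpart of F.square, applied to raw gradient arrays rather than to Chainer variables.
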
Project: chainerrl    Author: chainer
def _lossfun(self,
                 distribs, vs_pred, log_probs,
                 vs_pred_old, target_log_probs,
                 advs, vs_teacher):
        # Probability ratio r_t = pi_new(a|s) / pi_old(a|s).
        prob_ratio = F.exp(log_probs - target_log_probs)
        ent = distribs.entropy

        prob_ratio = F.expand_dims(prob_ratio, axis=-1)
        # Clipped surrogate objective (negated, since we minimize).
        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            # Clipped value loss: take the worse (larger) of the clipped and
            # unclipped squared errors.
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
                ))

        loss_entropy = -F.mean(ent)

        # Update stats
        self.average_loss_policy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_policy.data) - self.average_loss_policy))
        self.average_loss_value_func += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_value_func.data) - self.average_loss_value_func))
        self.average_loss_entropy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_entropy.data) - self.average_loss_entropy))

        return (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
            )
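
This is the PPO loss from chainerrl: the policy term is the clipped surrogate objective -E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)], and when clip_eps_vf is set, F.square is used to form a clipped value loss that takes the element-wise maximum of the clipped and unclipped squared errors, discouraging value updates that move predictions far outside the trust region around vs_pred_old.
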
Project: chainerrl    Author: chainer
def compute_weighted_value_loss(y, t, weights,
                                clip_delta=True, batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        weights (ndarray): Weights for y, t.
        clip_delta (bool): Use the Huber loss function if set True.
        batch_accumulator (str): 'mean' will devide loss by batchsize
    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        # Huber loss: quadratic near zero, linear beyond |y - t| = delta.
        losses = F.huber_loss(y, t, delta=1.0)
    else:
        losses = F.square(y - t) / 2
    losses = F.reshape(losses, (-1,))
    loss_sum = F.sum(losses * weights)
    if batch_accumulator == 'mean':
        loss = loss_sum / y.shape[0]
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss
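
A minimal usage sketch with illustrative arrays (in DQN-style agents the weights typically come from a prioritized replay buffer):

y = np.array([1.0, 2.5, 0.3], dtype=np.float32)   # predicted values
t = np.array([1.2, 2.0, 0.0], dtype=np.float32)   # target values
w = np.array([0.5, 1.0, 2.0], dtype=np.float32)   # per-sample weights
loss = compute_weighted_value_loss(y, t, w, clip_delta=False,
                                   batch_accumulator='mean')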
Project: chainer-fcis    Author: knorth55 (the identical helper also appears verbatim in the chainercv project)
def _smooth_l1_loss(x, t, in_weight, sigma):
    # Smooth L1 loss: quadratic where |diff| < 1 / sigma^2, linear elsewhere.
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    # 1 where the element falls in the quadratic region, 0 otherwise.
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)

    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))

    return F.sum(y)
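
This is the smooth L1 bounding-box regression loss from Fast R-CNN; F.square supplies the quadratic branch and the flag array selects between the two branches element-wise. A minimal usage sketch with illustrative arrays (with sigma = 1 the branch threshold sits at |diff| = 1):

x = np.array([[0.5, 2.0]], dtype=np.float32)  # predicted offsets
t = np.array([[0.0, 0.0]], dtype=np.float32)  # target offsets
in_weight = np.ones_like(x)                   # mask of entries that count
loss = _smooth_l1_loss(x, t, in_weight, sigma=1.)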
Project: dgm    Author: ashwindcruz (this helper appears verbatim in three files of the project; it is listed once here)
def house_transform(self, z):
        # Apply num_trans Householder reflections in sequence:
        # z <- z - 2 (v v^T) z / ||v||^2 = (I - 2 v v^T / ||v||^2) z.
        vec_t = self.qh_vec_0

        for i in range(self.num_trans):
            vec_t = F.identity(self.qlin_h_vec_t(vec_t))
            vec_t_product = F.matmul(vec_t, vec_t, transb=True)
            # Squared norm ||v||^2, tiled to z's shape for element-wise division.
            vec_t_norm_sqr = F.tile(F.sum(F.square(vec_t)),
                                    (z.shape[0], z.shape[1]))
            z = z - 2 * F.matmul(vec_t_product, z) / vec_t_norm_sqr
        return z
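
This implements a Householder flow over the latent variable z: each step reflects z across the hyperplane orthogonal to a learned vector v, enriching a variational posterior at negligible cost. F.square appears in forming the squared norm ||v||^2 = sum_j v_j^2 in the denominator of the reflection.
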
Project: VSBIQA    Author: JayMarx
def average_loss(self, h, a, t):
        # Mean absolute error between patch predictions and targets.
        self.loss = F.sum(abs(h - F.reshape(t, (-1, 1))))
        # L2-norm alternative using F.square:
        # self.loss = F.sqrt(F.sum(F.square(h - F.reshape(t, (-1, 1)))))
        self.loss /= self.n_patches
        # Regroup the flat patch axis into one chunk per image.
        if self.n_images > 1:
            h = F.split_axis(h, self.n_images, 0)
            a = F.split_axis(a, self.n_images, 0)
        else:
            h, a = [h], [a]

        self.y = h
        self.a = a
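
In this image-quality model the active loss is the mean absolute error over patches, sum |h - t| / n_patches; F.square appears only in the commented-out alternative, which replaces it with an L2 norm of the patch-wise residuals. F.split_axis then regroups the flat patch dimension into one chunk per image.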