Python chainer.functions module: clip() code examples

We extracted the following 9 code examples from open-source Python projects to show how to use chainer.functions.clip().
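Before the project examples, a minimal sketch of the function itself: F.clip(x, x_min, x_max) takes a variable or array plus two Python floats and limits every element to the interval [x_min, x_max]. The values below are made up for illustration.

import numpy as np
import chainer.functions as F

x = np.array([-2.0, 0.5, 3.0], dtype=np.float32)
y = F.clip(x, -1.0, 1.0)  # every element limited to [-1, 1]
print(y.data)             # [-1.   0.5  1. ]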

Project: chainer-gan-experiments · Author: Aixile
def weight_clipping(model, lower=-0.01, upper=0.01):
    # Clamp every parameter of the model to [lower, upper]
    # (WGAN-style weight clipping).
    for params in model.params():
        params_clipped = F.clip(params, lower, upper)
        params.data = params_clipped.data
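A hypothetical usage sketch (the toy L.Linear link stands in for a real critic, and chainer.functions is assumed to be imported as F, as in the snippet above):

import chainer.links as L

critic = L.Linear(4, 1)  # toy stand-in for a WGAN critic
weight_clipping(critic, lower=-0.01, upper=0.01)
# Every entry of critic.W and critic.b now lies in [-0.01, 0.01].
print(float(critic.W.data.max()) <= 0.01)  # True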
Project: chainerrl · Author: chainer
def _elementwise_clip(x, x_min, x_max):
    """Elementwise clipping.

    Note: chainer.functions.clip only supports constant (scalar)
    intervals, so per-element bounds are emulated here with
    minimum/maximum.
    """
    return F.minimum(F.maximum(x, x_min), x_max)
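Unlike F.clip, this helper accepts array-valued bounds, so each element can get its own interval. A minimal sketch with made-up values:

import numpy as np
import chainer.functions as F

x = np.array([0.0, 5.0, -5.0], dtype=np.float32)
lo = np.array([-1.0, 0.0, -2.0], dtype=np.float32)
hi = np.array([1.0, 2.0, 0.0], dtype=np.float32)
print(_elementwise_clip(x, lo, hi).data)  # [ 0.  2. -2.]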
Project: chainerrl · Author: chainer
def _lossfun(self,
                 distribs, vs_pred, log_probs,
                 vs_pred_old, target_log_probs,
                 advs, vs_teacher):
        prob_ratio = F.exp(log_probs - target_log_probs)
        ent = distribs.entropy

        prob_ratio = F.expand_dims(prob_ratio, axis=-1)
        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1-self.clip_eps, 1+self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
                ))

        loss_entropy = -F.mean(ent)

        # Update stats
        self.average_loss_policy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_policy.data) - self.average_loss_policy))
        self.average_loss_value_func += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_value_func.data) - self.average_loss_value_func))
        self.average_loss_entropy += (
            (1 - self.average_loss_decay) *
            (cuda.to_cpu(loss_entropy.data) - self.average_loss_entropy))

        return (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
            )
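For context, the clipped term above is the PPO surrogate objective: the probability ratio exp(log_probs - target_log_probs) is restricted to [1 - clip_eps, 1 + clip_eps], and the elementwise minimum of the clipped and unclipped surrogates is averaged. A NumPy sketch with made-up numbers (not taken from the repository):

import numpy as np

ratio = np.array([0.5, 1.0, 1.5], dtype=np.float32)  # hypothetical ratios
advs = np.array([1.0, -1.0, 2.0], dtype=np.float32)  # hypothetical advantages
eps = 0.2

clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps)
# Taking the minimum removes any incentive to push the ratio far from 1.
loss_policy = -np.mean(np.minimum(ratio * advs, clipped * advs))
print(loss_policy)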
Project: chainerrl · Author: chainer
def pi_and_v(self, state):
        if self.normalize_obs:
            # Normalize observations with a running filter, then clip
            # the result to [-5, 5].
            state = F.clip(self.obs_filter(state, update=False),
                           -5.0, 5.0)

        return self.pi(state), self.v(state)
Project: chainer-deconv · Author: germanRos
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.clip(x, self.x_min, self.x_max)
        self.assertEqual(y.data.dtype, numpy.float32)

        y_expect = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < self.x_min:
                y_expect[i] = self.x_min
            elif self.x[i] > self.x_max:
                y_expect[i] = self.x_max

        gradient_check.assert_allclose(y_expect, y.data)
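The explicit loop builds the expected output one element at a time; the same reference values can be produced with numpy.clip, as a quick sketch:

import numpy as np

x = np.random.randn(3, 4).astype(np.float32)
y_expect = np.clip(x, -0.5, 0.5)  # equivalent to the elementwise loop above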
Project: chainer-wasserstein-gan · Author: hvy
def clamp(self, lower=-0.01, upper=0.01):
    """Clamp all parameters, including the batch normalization
    parameters."""
    for params in self.params():
        params_clipped = F.clip(params, lower, upper)
        params.data = params_clipped.data
Project: adgm · Author: musyoku
def gaussian_nll_keepbatch(self, x, mean, ln_var, clip=True):
        if clip:
            # Keep the log-variance in [log 0.01, log 10] so that the
            # precision exp(-ln_var) stays numerically well-behaved.
            clip_min = math.log(0.01)
            clip_max = math.log(10)
            ln_var = F.clip(ln_var, clip_min, clip_max)
        x_prec = F.exp(-ln_var)
        x_diff = x - mean
        x_power = (x_diff * x_diff) * x_prec * 0.5
        return F.sum((math.log(2.0 * math.pi) + ln_var) * 0.5 + x_power, axis=1)
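Because ln_var is clipped to [log 0.01, log 10], the variance exp(ln_var) is confined to [0.01, 10] and the precision exp(-ln_var) to [0.1, 100]. A quick check with made-up values:

import math
import numpy as np
import chainer.functions as F

ln_var = np.array([math.log(1e-6), 0.0, math.log(1e3)], dtype=np.float32)
clipped = F.clip(ln_var, math.log(0.01), math.log(10))
print(np.exp(clipped.data))  # approximately [ 0.01  1.  10. ]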
Project: variational-autoencoder · Author: musyoku
def gaussian_nll_keepbatch(self, x, mean, ln_var, clip=True):
        if clip:
            clip_min = math.log(0.001)
            clip_max = math.log(10)
            ln_var = F.clip(ln_var, clip_min, clip_max)
        x_prec = F.exp(-ln_var)
        x_diff = x - mean
        x_power = (x_diff * x_diff) * x_prec * 0.5
        return F.sum((math.log(2.0 * math.pi) + ln_var) * 0.5 + x_power, axis=1)
Project: adversarial-autoencoder · Author: musyoku
def compute_distance_of_cluster_heads(self):
        # list all possible combinations of two cluster heads
        num_combination = self.nCr(self.ndim_y, 2)

        # a_labels
        # [0, 1, 0, 0]
        # [0, 0, 1, 0]
        # [0, 0, 1, 0]
        # [0, 0, 0, 1]
        # [0, 0, 0, 1]
        # [0, 0, 0, 1]
        a_labels = np.zeros((num_combination, self.ndim_y), dtype=np.float32)
        for i in range(1, self.ndim_y):
            for n in range(i):
                j = int(0.5 * i * (i - 1) + n)
                a_labels[j, i] = 1

        # b_labels
        # [1, 0, 0, 0]
        # [1, 0, 0, 0]
        # [0, 1, 0, 0]
        # [1, 0, 0, 0]
        # [0, 1, 0, 0]
        # [0, 0, 1, 0]
        b_labels = np.zeros((num_combination, self.ndim_y), dtype=np.float32)
        for i in range(1, self.ndim_y):
            for n in range(i):
                j = int(0.5 * i * (i - 1) + n)
                b_labels[j, n] = 1


        xp = self.xp
        if xp is not np:
            a_labels = cuda.to_gpu(a_labels)
            b_labels = cuda.to_gpu(b_labels)

        a_vector = self.cluster_head(a_labels)
        b_vector = self.cluster_head(b_labels)
        distance = functions.sqrt(functions.sum((a_vector - b_vector) ** 2, axis=1))

        # clip
        distance = functions.clip(distance, 0.0, float(self.cluster_head_distance_threshold))

        return distance
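The index j = i * (i - 1) / 2 + n enumerates every one of the nCr(ndim_y, 2) unordered pairs (n, i) with n < i exactly once, so each row of a_labels/b_labels picks out one pair of cluster heads. A small sanity check, assuming ndim_y = 4:

ndim_y = 4
triples = [(int(0.5 * i * (i - 1) + n), n, i)
           for i in range(1, ndim_y) for n in range(i)]
# j runs over 0..5 without gaps, covering all 6 = C(4, 2) pairs.
print(sorted(triples))
# [(0, 0, 1), (1, 0, 2), (2, 1, 2), (3, 0, 3), (4, 1, 3), (5, 2, 3)]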