Python chainer.functions module: softplus() code examples

The following 32 code examples, extracted from open-source Python projects, show how to use chainer.functions.softplus().
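For reference, softplus(x) = log(1 + exp(x)); Chainer's version also takes a beta parameter and computes log(1 + exp(beta * x)) / beta. It is a smooth, everywhere-positive approximation of ReLU, which is why the examples below use it both for log-sigmoid GAN losses and for keeping predicted variances positive. A minimal usage sketch (assuming chainer and numpy are installed):

import numpy as np
import chainer.functions as F

x = np.array([-2.0, 0.0, 2.0], dtype=np.float32)
y = F.softplus(x)  # elementwise log(1 + exp(x))
print(y.data)      # approx. [0.127 0.693 2.127]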

Project: ddnn    Author: kunglab    | project source | file source
def to_function(self):
        nonlinearity = self.nonlinearity.lower()
        if nonlinearity == "clipped_relu":
            return clipped_relu()
        if nonlinearity == "crelu":
            return crelu()
        if nonlinearity == "elu":
            return elu()
        if nonlinearity == "hard_sigmoid":
            return hard_sigmoid()
        if nonlinearity == "leaky_relu":
            return leaky_relu()
        if nonlinearity == "relu":
            return relu()
        if nonlinearity == "sigmoid":
            return sigmoid()
        if nonlinearity == "softmax":
            return softmax()
        if nonlinearity == "softplus":
            return softplus()
        if nonlinearity == "tanh":
            return tanh()
        if nonlinearity == "bst":
            return bst()
        raise NotImplementedError()
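This dispatcher maps a configured activation name to an activation wrapper object; the matching softplus wrapper appears further down this page (its __call__ applies F.softplus).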
Project: unrolled-gan    Author: musyoku    | project source | file source
def to_function(self):
        nonlinearity = self.nonlinearity.lower()
        if nonlinearity == "clipped_relu":
            return clipped_relu()
        if nonlinearity == "crelu":
            return crelu()
        if nonlinearity == "elu":
            return elu()
        if nonlinearity == "hard_sigmoid":
            return hard_sigmoid()
        if nonlinearity == "leaky_relu":
            return leaky_relu()
        if nonlinearity == "relu":
            return relu()
        if nonlinearity == "sigmoid":
            return sigmoid()
        if nonlinearity == "softmax":
            return softmax()
        if nonlinearity == "softplus":
            return softplus()
        if nonlinearity == "tanh":
            return tanh()
        raise NotImplementedError()
Project: LSGAN    Author: musyoku    | project source | file source
def to_function(self):
        nonlinearity = self.nonlinearity.lower()
        if nonlinearity == "clipped_relu":
            return clipped_relu()
        if nonlinearity == "crelu":
            return crelu()
        if nonlinearity == "elu":
            return elu()
        if nonlinearity == "hard_sigmoid":
            return hard_sigmoid()
        if nonlinearity == "leaky_relu":
            return leaky_relu()
        if nonlinearity == "relu":
            return relu()
        if nonlinearity == "sigmoid":
            return sigmoid()
        if nonlinearity == "softmax":
            return softmax()
        if nonlinearity == "softplus":
            return softplus()
        if nonlinearity == "tanh":
            return tanh()
        raise NotImplementedError()
Project: adgm    Author: musyoku    | project source | file source
def to_function(self):
        nonlinearity = self.nonlinearity.lower()
        if nonlinearity == "clipped_relu":
            return clipped_relu()
        if nonlinearity == "crelu":
            return crelu()
        if nonlinearity == "elu":
            return elu()
        if nonlinearity == "hard_sigmoid":
            return hard_sigmoid()
        if nonlinearity == "leaky_relu":
            return leaky_relu()
        if nonlinearity == "relu":
            return relu()
        if nonlinearity == "sigmoid":
            return sigmoid()
        if nonlinearity == "softmax":
            return softmax()
        if nonlinearity == "softplus":
            return softplus()
        if nonlinearity == "tanh":
            return tanh()
        raise NotImplementedError()
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def loss_func_dcgan_dis_real(y_real):
    return F.sum(F.softplus(-y_real)) / np.prod(y_real.data.shape)
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def loss_func_dcgan_dis_fake(y_fake):
    return F.sum(F.softplus(y_fake)) / np.prod(y_fake.data.shape)
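These two helpers are the standard non-saturating discriminator losses written with softplus: since softplus(-y) = -log(sigmoid(y)) and softplus(y) = -log(1 - sigmoid(y)), they are the sigmoid cross-entropy terms for real samples (target 1) and fake samples (target 0), averaged over all elements. A quick NumPy check of the identities, with hypothetical values:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

y = np.array([-1.5, 0.0, 3.0])
assert np.allclose(np.log1p(np.exp(-y)), -np.log(sigmoid(y)))
assert np.allclose(np.log1p(np.exp(y)), -np.log(1.0 - sigmoid(y)))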
Project: chainer-gan-experiments    Author: Aixile    | project source | file source
def loss_sigmoid_cross_entropy_with_logits(x, t):
    return F.average(x - x * t + F.softplus(-x))  # / x.data.shape[0]
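This is the logits form of binary cross-entropy: using log(sigmoid(x)) = -softplus(-x) and log(1 - sigmoid(x)) = -x - softplus(-x), the usual loss -(t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x))) simplifies to x - x*t + softplus(-x), so sigmoid(x) never has to be computed explicitly.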
Project: chainerrl    Author: chainer    | project source | file source
def compute_mean_and_var(self, x):
        h = x
        for layer in self.hidden_layers:
            h = self.nonlinearity(layer(h))
        mean = self.mean_layer(h)
        if self.bound_mean:
            mean = bound_by_tanh(mean, self.min_action, self.max_action)
        var = F.broadcast_to(F.softplus(self.var_layer(h)), mean.shape) + \
            self.min_var
        return mean, var
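Here softplus maps the unconstrained output of var_layer to a strictly positive variance, and adding min_var bounds it away from zero, which keeps the Gaussian policy's log-probabilities numerically stable.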
Project: chainerrl    Author: chainer    | project source | file source
def __call__(self, x):
        mean = self.hidden_layers(x)
        var = F.broadcast_to(
            F.softplus(self.var_param),
            mean.shape)
        return distribution.GaussianDistribution(mean, var)
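In this variant var_param is a learned parameter rather than the output of a layer, so the variance is state-independent; softplus keeps it positive, and broadcast_to matches it to the mean's shape.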
Project: chainerrl    Author: chainer    | project source | file source
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(self.var_layer(x))
        return mean, var
Project: chainerrl    Author: chainer    | project source | file source
def compute_mean_and_var(self, x):
        # mean = self.mean_layer(x)
        mean = F.tanh(self.mean_layer(x)) * 2.0
        var = F.softplus(F.broadcast_to(self.var_layer(x), mean.data.shape))
        return mean, var
Project: chainer-speech-recognition    Author: musyoku    | project source | file source
def __call__(self, x):
        return functions.softplus(x, self.beta)
Project: chainer-deconv    Author: germanRos    | project source | file source
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.softplus(x, beta=self.beta)
        x_value = cuda.to_cpu(x_data)
        y_exp = numpy.log(1 + numpy.exp(self.beta * x_value)) / self.beta
        self.assertEqual(y.data.dtype, self.dtype)
        gradient_check.assert_allclose(
            y_exp, y.data, **self.check_forward_options)
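The reference expression log(1 + exp(beta * x)) / beta used in this test overflows once beta * x is large. A numerically safer rewrite (a sketch for illustration, not necessarily how Chainer implements it internally):

import numpy as np

def softplus_stable(x, beta=1.0):
    # log(1 + exp(beta * x)) / beta without overflow:
    # softplus(x) = max(x, 0) + log1p(exp(-beta * |x|)) / beta
    return np.maximum(x, 0.0) + np.log1p(np.exp(-beta * np.abs(x))) / beta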
Project: ddnn    Author: kunglab    | project source | file source
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Project: ddnn    Author: kunglab    | project source | file source
def __call__(self, x):
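        # Note: in current Chainer the second positional argument of
        # F.softplus is beta, not use_cudnn, so passing True here happens
        # to act like the default beta=1.0. The identical wrappers below
        # share this caveat.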
        return F.softplus(x, self.use_cudnn)
Project: adversarial-autoencoder    Author: musyoku    | project source | file source
def __call__(self, x):
        return functions.softplus(x, self.beta)
Project: unrolled-gan    Author: musyoku    | project source | file source
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Project: unrolled-gan    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Project: LSGAN    Author: musyoku    | project source | file source
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Project: LSGAN    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Project: adgm    Author: musyoku    | project source | file source
def __init__(self, use_cudnn=True):
        self._function = "softplus"
        self.use_cudnn = use_cudnn
Project: adgm    Author: musyoku    | project source | file source
def __call__(self, x):
        return F.softplus(x, self.use_cudnn)
Project: adgm    Author: musyoku    | project source | file source
def bernoulli_nll_keepbatch(self, x, y):
        nll = F.softplus(y) - x * y
        return F.sum(nll, axis=1)
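With logits y, the per-element Bernoulli negative log-likelihood -(x * log(sigmoid(y)) + (1 - x) * log(1 - sigmoid(y))) expands to x * softplus(-y) + (1 - x) * softplus(y), and since softplus(-y) = softplus(y) - y this collapses to softplus(y) - x*y, exactly the expression summed over axis 1 above.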
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self):
        self.image_width = 28
        self.image_height = 28
        self.ndim_x = 28 * 28
        self.ndim_y = 10
        self.ndim_z = 50

        # True : y = f(BN(Wx + b))
        # False: y = f(W*BN(x) + b)
        self.batchnorm_before_activation = True

        # gaussianmarg | gaussian
        self.type_pz = "gaussianmarg"
        self.type_qz = "gaussianmarg"

        self.encoder_xy_z_hidden_units = [500]
        self.encoder_xy_z_activation_function = "softplus"
        self.encoder_xy_z_apply_dropout = False
        self.encoder_xy_z_apply_batchnorm = True
        self.encoder_xy_z_apply_batchnorm_to_input = True

        self.encoder_x_y_hidden_units = [500]
        self.encoder_x_y_activation_function = "softplus"
        self.encoder_x_y_apply_dropout = False
        self.encoder_x_y_apply_batchnorm = True
        self.encoder_x_y_apply_batchnorm_to_input = True

        self.decoder_hidden_units = [500]
        self.decoder_activation_function = "softplus"
        self.decoder_apply_dropout = False
        self.decoder_apply_batchnorm = True
        self.decoder_apply_batchnorm_to_input = True

        self.gpu_enabled = True
        self.learning_rate = 0.0003
        self.gradient_momentum = 0.9
        self.gradient_clipping = 5.0
Project: variational-autoencoder    Author: musyoku    | project source | file source
def bernoulli_nll_keepbatch(self, x, y):
        nll = F.softplus(y) - x * y
        return F.sum(nll, axis=1)
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self, **layers):
        super(SoftmaxEncoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = False
        self.batchnorm_before_activation = True
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self, **layers):
        super(GaussianEncoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = False
        self.batchnorm_before_activation = True
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self):
        self.image_width = 28
        self.image_height = 28
        self.ndim_x = 28 * 28
        self.ndim_z = 100
        self.batchnorm_before_activation = True

        # gaussianmarg | gaussian
        # We recommend using "gaussianmarg" when the decoder is Gaussian.
        self.type_pz = "gaussianmarg"
        self.type_qz = "gaussianmarg"

        # e.g.
        # ndim_x (input) -> 2000 -> 1000 -> 100 (output)
        # encoder_hidden_units = [2000, 1000]
        self.encoder_hidden_units = [600, 600]
        self.encoder_activation_function = "softplus"
        self.encoder_apply_dropout = True
        self.encoder_apply_batchnorm = True
        self.encoder_apply_batchnorm_to_input = True

        self.decoder_hidden_units = [600, 600]
        self.decoder_activation_function = "softplus"
        self.decoder_apply_dropout = True
        self.decoder_apply_batchnorm = True
        self.decoder_apply_batchnorm_to_input = True

        self.gpu_enabled = True
        self.learning_rate = 0.0003
        self.gradient_momentum = 0.9
        self.gradient_clipping = 1.0
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self, **layers):
        super(Encoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = True
        self.batchnorm_before_activation = True
Project: variational-autoencoder    Author: musyoku    | project source | file source
def __init__(self, **layers):
        super(BernoulliDecoder, self).__init__(**layers)
        self.activation_function = "softplus"
        self.apply_batchnorm_to_input = True
        self.apply_batchnorm = True
        self.apply_dropout = True
        self.batchnorm_before_activation = True
Project: chainer_img2img_example    Author: taizan    | project source | file source
def loss_cnn(self, cnn, x_out, dst, dis_out, lam1=100, lam2=1):
        loss_rec = lam1 * F.mean_absolute_error(x_out, dst)
        batchsize, _, w, h = dis_out.data.shape
        loss_adv = lam2 * F.sum(F.softplus(-dis_out)) / batchsize / w / h

        loss = loss_rec + loss_adv
        chainer.report({'loss': loss, 'loss_rec': loss_rec, 'loss_adv': loss_adv}, cnn)

        return loss
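loss_cnn combines an L1 reconstruction term (weight lam1) with a non-saturating adversarial term (weight lam2): softplus(-dis_out) is again -log(sigmoid(dis_out)), averaged here over the batch and the discriminator's spatial output grid.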
Project: chainer_img2img_example    Author: taizan    | project source | file source
def loss_dis(self, dis, dis_real, dis_fake):
        batchsize, _, w, h = dis_real.data.shape

        L1 = (2 + np.random.rand()) * F.sum(F.softplus(-dis_real)) / batchsize / w / h
        L2 = (2 + np.random.rand()) * F.sum(F.softplus(dis_fake)) / batchsize / w / h
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss