Python tensorflow module: log1p() code examples

The following 14 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.log1p().

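As background for all of the examples: tf.log1p(x) computes log(1 + x) elementwise and stays accurate when x is close to zero, where 1 + x would round to 1 in floating point and a plain tf.log(1 + x) would return 0. A minimal sketch of the difference (standalone TF 1.x-style code, matching the API used in the examples below):

import tensorflow as tf

x = tf.constant(1e-8, dtype=tf.float32)
naive = tf.log(1.0 + x)   # 1.0 + 1e-8 rounds to 1.0 in float32, so this is 0.0
stable = tf.log1p(x)      # evaluates log(1 + x) without forming 1 + x first

with tf.Session() as sess:
    print(sess.run([naive, stable]))  # [0.0, ~1e-08]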
Project: vae-npvc    Author: JeremyCCHsu    | project source | file source
def mu_law_encode_nonlinear(audio, quantization_channels=256):
    '''
    Compress the waveform amplitudes using the mu-law non-linearity.
    NOTE: This applies the mu-law companding as a continuous non-linear
          transform, as opposed to full quantization.
    '''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.multiply(tf.sign(audio), magnitude, name='mulaw')
        # Quantization to discrete levels is deliberately skipped; the
        # companded (still continuous) signal is returned instead.
        # return tf.to_int32((signal + 1) / 2 * mu + 0.5)
        return signal
Project: zhusuan    Author: thu-ml    | project source | file source
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples
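
The transform above is inverse-CDF sampling for the Laplace distribution: with u uniform on (-1, 1), loc - scale * sign(u) * log(1 - |u|) is Laplace(loc, scale), and tf.log1p(-tf.abs(u)) is the numerically stable way to evaluate log(1 - |u|). The np.nextafter call keeps u strictly above -1, where log1p(-1) would be -inf. A NumPy sketch of the same transform, useful as a cross-check (the names are illustrative):

import numpy as np

def laplace_samples(loc, scale, size, rng=np.random):
    # u ~ Uniform(-1, 1), open at -1 so that log1p(-|u|) stays finite
    u = rng.uniform(np.nextafter(-1.0, 0.0), 1.0, size)
    return loc - scale * np.sign(u) * np.log1p(-np.abs(u))

s = laplace_samples(0.0, 1.0, 100000)
print(s.mean(), s.var())  # near 0 and 2 * scale**2 = 2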
Project: Mendelssohn    Author: diggerdu    | project source | file source
def _get_loss(self, name):
        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.div(tf.log1p(self.target), tf.log1p(self.output))),
                              name=name)

        # Alternative loss formulations, kept for reference (unreachable):
        '''
        return tf.reduce_mean(tf.multiply(tf.log1p(self.target/tf.reduce_mean(self.target)),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)

        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        return tf.reduce_mean(tf.multiply(1.0,
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        '''
Project: nengo_dl    Author: nengo    | project source | file source
def build_step(self, signals):
        j = signals.gather(self.J_data)
        j -= self.one

        # note: we convert all the j to be positive before this calculation
        # (even though we'll only use the values that are already positive),
        # otherwise we can end up with nans in the gradient
        rates = self.amplitude / (
            self.tau_ref + self.tau_rc * tf.log1p(tf.reciprocal(
                tf.maximum(j, self.epsilon))))

        signals.scatter(self.output_data, tf.where(j > self.zero, rates,
                                                   self.zeros))
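
The rate expression above is the closed-form steady-state firing rate of a leaky integrate-and-fire (LIF) neuron, amplitude / (tau_ref + tau_rc * log(1 + 1/j)), where j is the input current minus the firing threshold (hence the earlier j -= self.one). A plain NumPy sketch of the same curve; the parameter values are illustrative defaults, not taken from this model instance:

import numpy as np

def lif_rate(j, tau_ref=0.002, tau_rc=0.02, amplitude=1.0):
    # Closed-form LIF rate; zero at or below threshold (j <= 0).
    j = np.asarray(j, dtype=float)
    rates = np.zeros_like(j)
    pos = j > 0
    rates[pos] = amplitude / (tau_ref + tau_rc * np.log1p(1.0 / j[pos]))
    return rates

print(lif_rate([0.0, 0.5, 1.0, 2.0]))  # [0., ~41.7, ~63.0, ~98.9] spikes/s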
Project: nengo_dl    Author: nengo    | project source | file source
def build_step(self, signals):
        J = signals.gather(self.J_data)
        voltage = signals.gather(self.voltage_data)
        refractory = signals.gather(self.refractory_data)

        refractory -= signals.dt
        delta_t = tf.clip_by_value(signals.dt - refractory, self.zero,
                                   signals.dt)

        voltage -= (J - voltage) * tf.expm1(-delta_t / self.tau_rc)

        spiked = voltage > self.one
        spikes = tf.cast(spiked, signals.dtype) * self.amplitude
        signals.scatter(self.output_data, spikes)

        t_spike = (self.tau_ref + signals.dt +
                   self.tau_rc * tf.log1p((self.one - voltage) /
                                          (J - self.one)))
        refractory = tf.where(spiked, t_spike, refractory)

        signals.mark_gather(self.J_data)
        signals.scatter(self.refractory_data, refractory)

        voltage = tf.where(spiked, self.zeros,
                           tf.maximum(voltage, self.min_voltage))
        signals.scatter(self.voltage_data, voltage)
Project: nengo_dl    Author: nengo    | project source | file source
def build_step(self, signals):
        j = signals.gather(self.J_data)

        j -= self.one

        z = tf.nn.softplus(j / self.sigma) * self.sigma
        z += self.epsilon

        rates = self.amplitude / (
            self.tau_ref + self.tau_rc * tf.log1p(tf.reciprocal(z)))

        signals.scatter(self.output_data, rates)
Project: tf-variational-dropout    Author: BayesWatch    | project source | file source
def dkl_qp(log_alpha):
    # Approximation to the negative KL divergence between the posterior and
    # the log-uniform prior from Molchanov et al. (2017), "Variational
    # Dropout Sparsifies Deep Neural Networks" (eqn 14).
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -k1
    mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) \
        - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
    return -tf.reduce_sum(mdkl)
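
A hedged sketch of how dkl_qp typically enters a training objective: log_alpha = log(sigma^2 / W^2) is derived from a weight tensor and its learned log-variance, clipped for stability, and the (positive) KL term returned by dkl_qp is added to the data loss. All names here (W, log_sigma2, data_loss, kl_weight) are illustrative, not part of this repository's API:

# Illustrative usage; W, log_sigma2, data_loss and kl_weight are assumed
# to be defined elsewhere.
log_alpha = tf.clip_by_value(log_sigma2 - tf.log(tf.square(W) + 1e-8),
                             -8.0, 8.0)
loss = data_loss + kl_weight * dkl_qp(log_alpha)  # kl_weight often annealed 0 -> 1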

Project: Mendelssohn    Author: diggerdu    | project source | file source
def _get_loss(self, name):
        return tf.reduce_mean(tf.multiply(tf.log1p(self.output),
                tf.abs(tf.subtract(self.target, self.output))), name=name)
Project: tensorforce    Author: reinforceio    | project source | file source
def tf_log_probability(self, distr_params, action):
        alpha, beta, _, log_norm = distr_params
        action = (action - self.min_value) / (self.max_value - self.min_value)
        action = tf.minimum(x=action, y=(1.0 - util.epsilon))
        return (beta - 1.0) * tf.log(x=tf.maximum(x=action, y=util.epsilon)) + \
            (alpha - 1.0) * tf.log1p(x=-action) - log_norm
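
The expression is the Beta log-density of the rescaled action x in (0, 1); note that in this parameterization beta - 1.0 multiplies log(x). tf.log1p(x=-action) evaluates log(1 - action) in a numerically stable form, and the epsilon clamps keep both logarithms finite at the boundaries. A NumPy mirror for reference, with the log-normalizer log B(alpha, beta) taken from scipy (a hypothetical standalone helper, not tensorforce's API):

import numpy as np
from scipy.special import betaln

def beta_log_prob(x, alpha, beta, eps=1e-6):
    # Mirrors the TF code above; betaln(alpha, beta) == log B(alpha, beta)
    # is symmetric in its arguments, so the swapped roles of alpha and
    # beta do not affect the normalizer.
    x = np.clip(x, eps, 1.0 - eps)
    return ((beta - 1.0) * np.log(x) + (alpha - 1.0) * np.log1p(-x)
            - betaln(alpha, beta))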
Project: tensorflow-wavenet    Author: ibab    | project source | file source
def mu_law_encode(audio, quantization_channels):
    '''Quantizes waveform amplitudes.'''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.sign(audio) * magnitude
        # Quantize signal to the specified number of levels.
        return tf.to_int32((signal + 1) / 2 * mu + 0.5)
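
tensorflow-wavenet pairs this encoder with a mu_law_decode that inverts both steps: map the integer levels back onto [-1, 1], then apply the inverse companding x = sign(y) * ((1 + mu)**|y| - 1) / mu. A sketch reconstructed from that math (see the project itself for the exact implementation):

def mu_law_decode(output, quantization_channels=256):
    '''Recovers waveform amplitudes from quantized values.'''
    with tf.name_scope('decode'):
        mu = tf.to_float(quantization_channels - 1)
        # Map discrete levels back into [-1, 1].
        signal = 2.0 * (tf.to_float(output) / mu) - 1.0
        # Invert the mu-law companding transformation.
        magnitude = (1.0 / mu) * ((1.0 + mu) ** tf.abs(signal) - 1.0)
        return tf.sign(signal) * magnitude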
Project: variational-dropout    Author: cjratcliff    | project source | file source
def eval_reg(log_sigma2, W):
    # Approximates the negative of the KL-divergence according to eqn 14.
    # This is a key part of the loss function (see eqn 3).
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -k1
    log_alpha = clip(log_sigma2 - tf.log(W**2))
    mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
    return -tf.reduce_sum(mdkl)
Project: EnglishSpeechUpsampler    Author: jhetherly    | project source | file source
def geo_mean(sname, true, model):
    with tf.name_scope(sname):
        waveform_loss = tf.exp(tf.reduce_mean(tf.log1p(
                                tf.abs(tf.subtract(true, model)))))
    tf.summary.scalar(sname, waveform_loss)
    return waveform_loss
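
Since exp(mean(log(1 + e_i))) equals the geometric mean of the terms (1 + e_i), this loss is the geometric mean of one-plus-absolute-errors; routing through log1p keeps every term finite even when an error is exactly zero, where a bare log would give -inf. A quick NumPy check of the identity:

import numpy as np

err = np.array([0.0, 0.1, 0.5, 2.0])  # absolute errors
via_log1p = np.exp(np.mean(np.log1p(err)))
direct = np.prod(1.0 + err) ** (1.0 / err.size)
print(via_log1p, direct)  # identical up to rounding (~1.492)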
Project: Mendelssohn    Author: diggerdu    | project source | file source
def build(self):
        self.output = self._generator(self.input, name='gene')
        self.content_loss = tf.reduce_mean(tf.multiply(tf.log1p(self.output),
                tf.abs(tf.subtract(self.target, self.output))))
        assert ten_sh(self.output) == ten_sh(self.target)
        self.concat_output = tf.concat((self.input, self.output), 1)
        self.concat_target = tf.concat((self.input, self.target), 1)
        self.fake_em = self._critic(self.concat_output, name='critic')
        self.true_em = self._critic(self.concat_target, name='critic', reuse=True)
        self.c_loss = tf.reduce_mean(self.fake_em - self.true_em, name='c_loss')
        self.g_loss = tf.reduce_mean(-self.fake_em, name='g_loss')


        ####summary####
        content_loss_sum = tf.summary.scalar('content_loss', self.content_loss)
        c_loss_sum = tf.summary.scalar('c_loss', self.c_loss)
        g_loss_sum = tf.summary.scalar('g_loss', self.g_loss)
        gene_img_sum = tf.summary.image('gene_img', self.concat_output, max_outputs=1)
        tar_img_sum = tf.summary.image('tar_img', self.concat_target, max_outputs=1)
        self.summary = tf.summary.merge_all()
        ##############

        theta_g = tf.get_collection(
                         tf.GraphKeys.TRAINABLE_VARIABLES, scope='gene')
        theta_c = tf.get_collection(
                          tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
        counter_g = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
        counter_c = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
        self.c_opt = ly.optimize_loss(loss=self.c_loss, learning_rate=self.c_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_c,\
                global_step=counter_c)
        self.g_opt = ly.optimize_loss(loss=self.g_loss, learning_rate=self.g_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_g,\
                global_step=counter_g)
        self.content_opt = ly.optimize_loss(loss=self.content_loss, learning_rate=self.g_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_g,\
                global_step=counter_g)
        clipped_c_var = [tf.assign(var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper)) \
                for var in theta_c]
        with tf.control_dependencies([self.c_opt]):
            self.c_opt = tf.tuple(clipped_c_var)
Project: Mendelssohn    Author: diggerdu    | project source | file source
def build(self):
        self.output = self._generator(self.input, name='gene')
        self.content_loss = tf.reduce_mean(tf.multiply(tf.log1p(self.output),
                tf.abs(tf.subtract(self.target, self.output))))
        assert ten_sh(self.output) == ten_sh(self.target)
        self.eva_op = tf.concat(
            (tf.exp(self.input * 12.0) - 1, tf.exp(self.output * 8.0) - 1),
            1, name='eva_op')
        self.concat_output = tf.exp(tf.concat((self.input, self.output), 1))
        self.concat_target = tf.exp(tf.concat((self.input, self.target), 1))
        self.fake_em = self._critic(self.concat_output, name='critic')
        self.true_em = self._critic(self.concat_target, name='critic', reuse=True)
        self.c_loss = tf.reduce_mean(self.fake_em - self.true_em, name='c_loss')
        self.g_loss = tf.reduce_mean(-self.fake_em, name='g_loss')


        ####summary####
        content_loss_sum = tf.summary.scalar('content_loss', self.content_loss)
        c_loss_sum = tf.summary.scalar('c_loss', self.c_loss)
        g_loss_sum = tf.summary.scalar('g_loss', self.g_loss)
        gene_img_sum = tf.summary.image('gene_img', self.concat_output, max_outputs=1)
        tar_img_sum = tf.summary.image('tar_img', self.concat_target, max_outputs=1)
        self.summary = tf.summary.merge_all()
        ##############

        theta_g = tf.get_collection(
                         tf.GraphKeys.TRAINABLE_VARIABLES, scope='gene')
        theta_c = tf.get_collection(
                          tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
        counter_g = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
        counter_c = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
        self.c_opt = ly.optimize_loss(loss=self.c_loss, learning_rate=self.c_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_c,\
                global_step=counter_c)
        self.g_opt = ly.optimize_loss(self.g_loss, learning_rate=self.g_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_g,\
                global_step=counter_g)
        self.content_opt = ly.optimize_loss(self.content_loss, learning_rate=self.g_lr,\
                optimizer=tf.train.RMSPropOptimizer,\
                variables=theta_g,\
                global_step=counter_g)
        clipped_c_var = [tf.assign(var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper)) \
                for var in theta_c]
        with tf.control_dependencies([self.c_opt]):
            self.c_opt = tf.tuple(clipped_c_var)