Python keras.backend module: random_normal() example source code

The following 49 code examples, extracted from open-source Python projects, illustrate how keras.backend.random_normal() is used.
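
Most of the examples below use random_normal() either for the VAE reparameterization trick (z = z_mean + exp(z_log_var / 2) * epsilon) or to inject Gaussian noise into activations. As a reference point, here is a minimal sketch of the bare call, assuming the TensorFlow backend and Keras 2.x, where the keyword is stddev; many of the older projects below still pass std=, the Keras 1.x name.

from keras import backend as K

# A (batch, dim)-shaped tensor of N(0, 1) samples; mean, stddev and seed are optional.
eps = K.random_normal(shape=(32, 16), mean=0.0, stddev=1.0, seed=42)
print(K.eval(eps).shape)  # (32, 16)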

Project: keras-molecules    Author: maxhodak
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
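
For reference, the kl_loss term above is (up to taking a mean rather than a sum over the latent dimensions) the closed-form KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and the standard normal prior:

\mathrm{KL}\left(\mathcal{N}(\mu, \sigma^{2}) \,\|\, \mathcal{N}(0, I)\right) = -\tfrac{1}{2}\sum_{j}\left(1 + \log\sigma_{j}^{2} - \mu_{j}^{2} - \sigma_{j}^{2}\right), \qquad \mu_j = \texttt{z\_mean}_j,\; \sigma_j^2 = \exp(\texttt{z\_log\_var}_j)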
Project: adversarial-variational-bayes    Author: gdikov
def sample_standard_normal_noise(inputs, **kwargs):
    from keras.backend import shape, random_normal
    n_samples = kwargs.get('n_samples', shape(inputs)[0])
    n_basis_noise_vectors = kwargs.get('n_basis', -1)
    data_dim = kwargs.get('data_dim', 1)
    noise_dim = kwargs.get('noise_dim', data_dim)
    seed = kwargs.get('seed', 7)

    if n_basis_noise_vectors > 0:
        samples_isotropic = random_normal(shape=(n_samples, n_basis_noise_vectors, noise_dim),
                                          mean=0, stddev=1, seed=seed)
    else:
        samples_isotropic = random_normal(shape=(n_samples, noise_dim),
                                          mean=0, stddev=1, seed=seed)
    op_mode = kwargs.get('mode', 'none')
    if op_mode == 'concatenate':
        concat = Concatenate(axis=1, name='enc_noise_concatenation')([inputs, samples_isotropic])
        return concat
    elif op_mode == 'add':
        resized_noise = Dense(data_dim, activation=None, name='enc_resized_noise_sampler')(samples_isotropic)
        added_noise_data = Add(name='enc_adding_noise_data')([inputs, resized_noise])
        return added_noise_data
    return samples_isotropic
Project: adversarial-variational-bayes    Author: gdikov
def sample_adaptive_normal_noise(inputs, **kwargs):
    from keras.backend import shape, random_normal, sqrt

    seed = kwargs.get('seed', 7)
    latent_dim = kwargs.get('latent_dim', 2)

    if isinstance(inputs, list):
        mu, sigma2 = inputs
        n_samples = kwargs.get('n_samples', shape(mu)[0])
        samples_isotropic = random_normal(shape=(n_samples, latent_dim),
                                          mean=0, stddev=1, seed=seed)
        samples = mu + sqrt(sigma2) * samples_isotropic
        return samples
    else:
        samples_isotropic = random_normal(shape=(shape(inputs)[0], latent_dim),
                                          mean=0, stddev=1, seed=seed)
        return samples_isotropic
Project: VASC    Author: wang-research
def sampling(args):
    epsilon_std = 1.0

    if len(args) == 2:
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=K.shape(z_mean), 
                              mean=0.,
                              stddev=epsilon_std)
    #
        return z_mean + K.exp( z_log_var / 2 ) * epsilon
    else:
        z_mean = args[0]
        epsilon = K.random_normal(shape=K.shape(z_mean), 
                              mean=0.,
                              stddev=epsilon_std)
        return z_mean + K.exp( 1.0 / 2 ) * epsilon
Project: VAE_NOTES    Author: FanhuaandLuomu
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: VAE_NOTES    Author: FanhuaandLuomu
def sampling(args):
    z_mean, z_log_var = args
    # sample epsilon from N(0, 1)
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
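
The note above comes from the standard Keras VAE example. The following self-contained sketch (layer sizes and latent_dim are illustrative, not taken from any of the listed projects) shows both spellings of the Lambda call:

from keras import backend as K
from keras.layers import Input, Dense, Lambda

latent_dim = 2
epsilon_std = 1.0

x = Input(shape=(784,))
h = Dense(256, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# TensorFlow backend: the output shape can be inferred.
z = Lambda(sampling)([z_mean, z_log_var])
# Theano backend (or to be explicit): pass output_shape yourself.
# z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])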
Project: KATE    Author: hugochan
def sampling(self, args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.dim[1]), mean=0.,\
                                  stddev=self.epsilon_std)

        return z_mean + K.exp(z_log_var / 2) * epsilon
Project: keras    Author: GeekLiB
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
Project: pCVR    Author: xjtushilei
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: pCVR    Author: xjtushilei
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
Project: SONGSHTR    Author: songeater
def build_model(epsilon_std, batchsize, seqlen, blocksize):
    dropout_val = 0.1
    lstm_dims = 128
    latent_dims = lstm_dims

    lstm_input = Input(batch_shape=((batchsize, seqlen, blocksize)))

    lstm_model = TimeDistributed(Dense(lstm_dims))(lstm_input)
    lstm_model = LSTM(lstm_dims, stateful=True, return_sequences=False)(lstm_model)
    lstm_model = Dropout(dropout_val)(lstm_model)
    #lstm_model = LSTM(lstm_dims, stateful=True, return_sequences=True)(lstm_model)
    #lstm_model = Dropout(dropout_val)(lstm_model)
    #lstm_model = LSTM(lstm_dims, stateful=True, return_sequences=False)(lstm_model)
    #lstm_model = Dropout(dropout_val)(lstm_model)

    z_mean = Dense(latent_dims, activation = 'relu')(lstm_model)
    z_log_var = Dense(latent_dims, activation = 'sigmoid')(lstm_model)

    def sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(batchsize, latent_dims), mean=0.,std=epsilon_std)
        return z_mean + K.exp(z_log_var / 2) * epsilon

    vae = Lambda(sampling)([z_mean, z_log_var])
    vae = Dense(lstm_dims, activation = 'relu')(vae)
    vae = Dense(blocksize, activation = 'sigmoid')(vae)

    model = Model(input=lstm_input, output=vae)
    optimizer = RMSprop(lr=0.002, decay = 0.0005, clipvalue=5)

    def vae_loss(x, x_decoded_mean):
        mean_loss = objectives.mean_squared_logarithmic_error(x, x_decoded_mean)
        kl_loss = - 0.25 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return mean_loss + kl_loss

    model.compile(loss=vae_loss, optimizer=optimizer)

    return model
Project: kfs    Author: the-moliver
def call(self, inputs, training=None):
        def noised():
            stddev = K.stop_gradient(K.sqrt(K.clip(self.factor * K.abs(inputs),
                                                   self.epsilon, None)))
            return inputs + K.random_normal(shape=K.shape(inputs),
                                            mean=0.0,
                                            stddev=stddev)
        return K.in_train_phase(noised, inputs, training=training)
Project: pydl    Author: rafaeltg
def _sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=K.shape(z_log_var), mean=0., stddev=1.)
        return z_mean + K.exp(z_log_var / 2) * epsilon
Project: Generative-models    Author: aalitaiga
def sampling(args):
    mean, std = args
    eps = K.random_normal(
        shape=(batch_size,latent_dim),
        mean=0.0,
        std=epsilon
    )
    return mean + std * eps
Project: keras-customized    Author: ambrite
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: keras-customized    Author: ambrite
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
Project: kaos    Author: RuiShu
def sampling(z_par):
    z_mu, z_var = z_par
    epsilon = K.random_normal(shape=z_mu.shape)
    return z_mu + K.sqrt(z_var) * epsilon
Project: kaos    Author: RuiShu
def sampling(z_par):
    batch_size = z_par.shape[0]
    dim = z_par.shape[1]//2
    z_mu, z_lv = z_par[:, :dim], z_par[:, dim:]
    epsilon = K.random_normal(shape=(batch_size, dim))
    return z_mu + K.exp(z_lv/2) * epsilon
Project: keras-autoencoder    Author: Rentier
def sampling(args):
        z_mean, z_log_std = args
        epsilon = K.random_normal(shape=(batch_size, latent_dim),
                                  mean=0., std=epsilon_std)
        return z_mean + K.exp(z_log_std) * epsilon

    # note that "output_shape" isn't necessary with the TensorFlow backend
    # so you could write `Lambda(sampling)([z_mean, z_log_std])`
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size/int(os.environ['GPU_NUM']), latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: KerasCog    Author: ABAtanasov
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            self.states = [K.random_normal(shape=(self.output_dim,), mean=0.5, std=0.5)]
        input_dim = input_shape[2]
        self.input_dim = input_dim
        self.W = self.init((input_dim, self.output_dim), name='{}_W'.format(self.name))
        self.U = self.inner_init((self.output_dim, self.output_dim), name='{}_U'.format(self.name))
        self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.U_regularizer:
            self.U_regularizer.set_param(self.U)
            self.regularizers.append(self.U_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        self.trainable_weights = [self.W, self.U]
        if self.dale_ratio:
            self.non_trainable_weights = [self.Dale]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: KerasCog    Author: ABAtanasov
def step(self, x, states):
        prev_output = states[0]
        tau = self.tau
        dt = self.dt
        noise = self.noise
        alpha = dt/tau

        if self.consume_less == 'cpu':
            h = x
        else:
            if(self.dale_ratio):
                h = K.dot(x, K.abs(self.W)) # + self.b
            else:
                h = K.dot(x, self.W)

        # For our case, h = W * x is the input component fed in
        #noise = self.noise * np.random.randn(self.output_dim)
        #noise = K.variable(noise)

        if(self.dale_ratio):
            output = prev_output*(1-alpha) + \
                 alpha*(h + K.dot(self.activation(prev_output) , K.abs(self.U) * self.Dale)) \
                 + K.random_normal(shape=K.shape(self.b), mean=0.0, std=noise)

        else:
            output = prev_output * (1 - alpha) + \
                     alpha * (h + K.dot(self.activation(prev_output), self.U )) \
                     + K.random_normal(shape=K.shape(self.b), mean=0.0, std=noise)

        return (output, [output])
Project: KerasCog    Author: ABAtanasov
def call(self, x, mask=None):
        noise_x = x + K.random_normal(shape=K.shape(x),
                                      mean=0.,
                                      std=self.sigma)
        return noise_x
Project: academic    Author: xinchrome
def call(self, inputs, training=None):
        return inputs + K.random_normal(shape=K.shape(inputs),
                                            mean=0.,
                                            stddev=self.stddev)
Project: deep-motion-analysis    Author: Brimborough
def sampling(args):
    z_mean, z_log_std = args
    epsilon = K.random_normal(shape=(batch_size, 60, 128),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_std) * epsilon
Project: neural-segmentation    Author: melsner
def sampling(args):
                word_mean, word_log_var = args
                epsilon = K.random_normal(shape=K.shape(word_mean), mean=0., stddev=1.0)
                return word_mean + K.exp(word_log_var/2) * epsilon
Project: keras    Author: NVIDIA
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: keras    Author: NVIDIA
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
Project: VariationalAutoEncoder    Author: despoisj
def call(self, x, mask=None):
        z_mean, z_log_var = x
        dynamicBatchSize = K.shape(z_mean)[0]
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., std=1.)
        return z_mean + K.exp(z_log_var / 2) * epsilon
Project: 2017_iv_deep_radar    Author: tawheeler
def sampling(sampling_args):
    z_mean, z_logvar = sampling_args
    epsilon = K.random_normal(shape=(args.batch_size, args.dim_z),
                              mean=0.0, std=args.epsilon_std)
    return z_mean + K.exp(z_logvar) * epsilon
Project: 2017_iv_deep_radar    Author: tawheeler
def sampling(sampling_args):
    z_mean, z_logvar = sampling_args
    epsilon = K.random_normal(shape=(args.batch_size, args.dim_z),
                              mean=0.0, std=args.epsilon_std)
    return z_mean + K.exp(z_logvar) * epsilon
Project: DeepIV    Author: jhartford
def random_normal(shape, mean=0.0, std=1.0):
    return K.random_normal(shape, mean, std)
Project: DeepIV    Author: jhartford
def random_gmm(pi, mu, sig):
    '''
    Sample from a Gaussian mixture model. Returns one sample for each row in
    the pi, mu and sig matrices... this is potentially wasteful (because you have to repeat
    the matrices n times if you want to get n samples), but makes it easy to implement
    code where the parameters vary as they are conditioned on different datapoints.
    '''
    normals = random_normal(K.shape(mu), mu, sig)
    k = random_multinomial(pi)
    return K.sum(normals * k, axis=1, keepdims=True)
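
A hedged, self-contained sketch of how random_gmm above can be exercised; DeepIV's own random_multinomial helper is not reproduced in this listing, so the stand-in below (a categorical draw turned into a one-hot mask) is only an assumption for illustration:

import tensorflow as tf
from keras import backend as K

def random_multinomial_stub(pi):
    # Draw one component index per row and turn it into a one-hot mask.
    # (tf.multinomial in TF 1.x; tf.random.categorical in TF 2.x.)
    idx = tf.multinomial(K.log(pi), num_samples=1)[:, 0]
    return K.one_hot(idx, K.int_shape(pi)[1])

def random_gmm_sketch(pi, mu, sig):
    normals = K.random_normal(K.shape(mu), mu, sig)   # one draw per component
    k = random_multinomial_stub(pi)                   # select one component per row
    return K.sum(normals * k, axis=1, keepdims=True)  # (batch, 1) GMM sample

pi = K.constant([[0.3, 0.7], [0.5, 0.5]])   # mixture weights per row
mu = K.constant([[0.0, 5.0], [-1.0, 1.0]])  # component means
sig = K.constant([[1.0, 0.5], [2.0, 0.3]])  # component standard deviations
print(K.eval(random_gmm_sketch(pi, mu, sig)))  # one sample per row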
Project: actinf    Author: x75
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: actinf    Author: x75
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: dem    Author: hengyuan-hu
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: vae_example    Author: DingKe
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, m), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
Project: DLPlaying    Author: Honlan
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: keras_bn_library    Author: bnsnapper
def nrlu(x):
   std = K.mean(K.sigmoid(x))
   eta = K.random_normal(shape=x.shape, std=std)
   y = K.maximum(x + eta, 0)
   return y
Project: keras_bn_library    Author: bnsnapper
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                                     std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
Project: nonlinearIB    Author: artemyk
def get_noise(self, x):

        return K.exp(0.5*self.logvar) * K.random_normal(shape=K.shape(x), mean=0., stddev=1)
Project: nonlinearIB    Author: artemyk
def get_noise(self, sigmas):
        return sigmas * K.random_normal(shape=K.shape(sigmas), mean=0., stddev=1)
Project: value_gradient    Author: rarilurelo
def build(self):
        model = self.net.model
        mu_model = self.net.mu_model
        log_std_model = self.net.log_std_model
        q_model = self.net.q_model
        target_model = self.net.target_model
        target_mu_model = self.net.target_mu_model
        target_log_std_model = self.net.target_log_std_model
        target_q_model = self.net.target_q_model

        self.states = tf.placeholder(tf.float32, shape=(None, self.in_dim), name='states')
        self.actions = tf.placeholder(tf.float32, shape=[None, self.action_dim], name='actions')
        self.rewards = tf.placeholder(tf.float32, shape=[None], name='rewards')
        self.next_states = tf.placeholder(tf.float32, shape=[None, self.in_dim], name='next_states')
        self.ys = tf.placeholder(tf.float32, shape=[None])

        # There are other ways to choose the action here: take the next action,
        # use only mu, or search for the action that maximizes Q.
        target_mu = target_mu_model(self.states)
        target_log_std = target_log_std_model(self.states)
        target_action = target_mu + K.random_normal(K.shape(target_mu), dtype=tf.float32) * K.exp(target_log_std)
        self.target_q = K.sum(target_q_model(Concatenate()([target_model(self.states), target_action])), axis=-1)

        self.q = K.sum(q_model(Concatenate()([model(self.states), self.actions])), axis=-1)
        self.q_loss = K.mean(K.square(self.ys-self.q))

        self.mu = mu_model(self.states)
        self.log_std = log_std_model(self.states)
        self.eta = (self.actions - self.mu) / K.exp(self.log_std)
        inferred_action = self.mu + K.stop_gradient(self.eta) * K.exp(self.log_std)
        self.pi_loss = - K.mean(q_model(Concatenate()([model(self.states), inferred_action])))

        self.q_updater = self.q_optimizer.minimize(self.q_loss, var_list=self.net.var_q)
        self.pi_updater = self.pi_opimizer.minimize(self.pi_loss, var_list=self.net.var_pi)

        self.soft_updater = [K.update(t_p, t_p*(1-self.tau)+p*self.tau) for p, t_p in zip(self.net.var_all, self.net.var_target_all)]
        self.sync = [K.update(t_p, p) for p, t_p in zip(self.net.var_all, self.net.var_target_all)]

        self.sess.run(tf.global_variables_initializer())
        self.built = True
Project: kapre    Author: keunwoochoi
def call(self, x):
        if self.random_gain:
            noise_x = x + K.random_normal(shape=K.shape(x),
                                          mean=0.,
                                          stddev=np.random.uniform(0.0, self.power))
        else:
            noise_x = x + K.random_normal(shape=K.shape(x),
                                          mean=0.,
                                          stddev=self.power)

        return K.in_train_phase(noise_x, x)
Project: Siamese    Author: ascourge21
def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_sigma) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_sigma])`
Project: Siamese    Author: ascourge21
def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim[0], latent_dim[1], latent_dim[2], latent_dim[3]),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_sigma) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_sigma])`
Project: keras    Author: GeekLiB
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
Project: nli_generation    Author: jstarc
def autoe_train(hidden_size, noise_dim, glove, hypo_len, version):

    prem_input = Input(shape=(None,), dtype='int32', name='prem_input')
    hypo_input = Input(shape=(hypo_len + 1,), dtype='int32', name='hypo_input')
    train_input = Input(shape=(None,), dtype='int32', name='train_input')
    class_input = Input(shape=(3,), name='class_input')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, hypo_len + 1)(hypo_input)
    premise_encoder = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid', name='premise_encoder')(prem_embeddings)

    hypo_encoder = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid', name='hypo_encoder')(hypo_embeddings)
    class_encoder = Dense(hidden_size, activation='tanh')(class_input)

    encoder = LstmAttentionLayer(output_dim=hidden_size, return_sequences=False,
                  feed_state = True, name='encoder') ([hypo_encoder, premise_encoder, class_encoder])
    if version == 6:
        reduction = Dense(noise_dim, name='reduction', activation='tanh')(encoder)
    elif version == 7:
        z_mean = Dense(noise_dim, name='z_mean')(encoder)
        z_log_sigma = Dense(noise_dim, name='z_log_sigma')(encoder)

        def sampling(args):
            z_mean, z_log_sigma = args
            epsilon = K.random_normal(shape=(64, noise_dim,),
                              mean=0., std=0.01)
            return z_mean + K.exp(z_log_sigma) * epsilon
        reduction = Lambda(sampling, output_shape=lambda sh: (sh[0][0], noise_dim,), name = 'reduction')([z_mean, z_log_sigma])
        def vae_loss(args):
            z_mean, z_log_sigma = args
            return - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)    
        vae = Lambda(vae_loss, output_shape=lambda sh: (sh[0][0], 1,), name = 'vae_output')([z_mean, z_log_sigma])

    merged = merge([class_input, reduction], mode='concat')
    creative = Dense(hidden_size, name = 'expansion', activation ='tanh')(merged)
    premise_decoder = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid', name='premise')(prem_embeddings)

    hypo_decoder = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid', name='hypo')(hypo_embeddings)
    attention = LstmAttentionLayer(output_dim=hidden_size, return_sequences=True,
                     feed_state = True, name='attention') ([hypo_decoder, premise_decoder, creative])

    hs = HierarchicalSoftmax(len(glove), trainable = True, name='hs')([attention, train_input])

    inputs = [prem_input, hypo_input, train_input, class_input]

    model_name = 'version' + str(version)
    model = Model(input=inputs, output=(hs if version == 6 else [hs, vae]), name = model_name)
    if version == 6:
        model.compile(loss=hs_categorical_crossentropy, optimizer='adam')
    elif version == 7:
        def minimize(y_true, y_pred):
            return y_pred
        def metric(y_true, y_pred):
            return K.mean(y_pred)
        model.compile(loss=[hs_categorical_crossentropy, minimize], metrics={'hs':word_loss, 'vae_output': metric}, optimizer='adam')
    return model