Python tensorflow.contrib.layers module: fully_connected() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.fully_connected().
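
Before the project excerpts, a minimal usage sketch (assuming TensorFlow 1.x, where tf.contrib.layers is available; the layer sizes here are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
# Hidden layer: the default activation_fn of fully_connected is tf.nn.relu.
hidden = layers.fully_connected(x, num_outputs=256)
# Linear output layer: pass activation_fn=None to get raw logits.
logits = layers.fully_connected(hidden, num_outputs=10, activation_fn=None)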

Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        weight_decay = 0.0001
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat     

# specify discriminative model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def inference(input_img):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)  
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat  

# specify inference model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat

# specify discriminative model
Project: decorrelated-adversarial-autoencoder    Author: patrickgadd
def semi_supervised_encoder_convolutional(input_tensor, z_dim, y_dim, batch_size, network_scale=1.0, img_res=28, img_channels=1):
    f_multiplier = network_scale

    net = tf.reshape(input_tensor, [-1, img_res, img_res, img_channels])

    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(128*f_multiplier), 3, stride=2)

    net = tf.reshape(net, [batch_size, -1])
    net = layers.fully_connected(net, 1000)

    y = layers.fully_connected(net, y_dim, activation_fn=None, normalizer_fn=None)

    z = layers.fully_connected(net, z_dim, activation_fn=None)

    return y, z
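
A hedged usage sketch for the encoder above; the input placeholder, batch size, and latent dimensions are illustrative assumptions, not taken from the project:

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [64, 28 * 28], name='x')
y_logits, z = semi_supervised_encoder_convolutional(x, z_dim=10, y_dim=10, batch_size=64)
class_probs = tf.nn.softmax(y_logits)   # y is returned without an activation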
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
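
A hedged instantiation of the model above; the 84x84x4 stacked-frame shape is the usual Atari preprocessing and is an assumption here, as is the action count:

import tensorflow as tf
import tensorflow.contrib.layers as layers

img_in = tf.placeholder(tf.float32, [None, 84, 84, 4], name='observations')
q_values = model(img_in, num_actions=6, scope='q_func')   # shape [None, 6]
greedy_action = tf.argmax(q_values, axis=1)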
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: DRLModule    Author: halleanwoo
def _action_norm_dist(inpt, num_actions, w_init, activation_fn_v, activation_fn_a):
    mu = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_v)
    sigma = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_a)
    return mu, sigma
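
A hypothetical Gaussian-policy sketch built on _action_norm_dist; the feature tensor, action count, initializer, and softplus-on-sigma choice are illustrative assumptions (tf.distributions.Normal assumes a TensorFlow 1.x release where it is available):

import tensorflow as tf
from tensorflow.contrib import layers

features = tf.placeholder(tf.float32, [None, 64], name='features')
w_init = tf.truncated_normal_initializer(0.0, 0.3)
mu, sigma = _action_norm_dist(features, num_actions=2, w_init=w_init,
                              activation_fn_v=None,            # linear mean
                              activation_fn_a=tf.nn.softplus)  # keep sigma positive
dist = tf.distributions.Normal(loc=mu, scale=sigma)
sampled_action = dist.sample()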



# # cnn network frame
# def cnn_frame_continu(hiddens, kerners, strides, inpt, num_actions, scope=None, activation_fn=tf.nn.relu, activation_fn_mu=tf.nn.relu, activation_fn_sigma=tf.nn.relu, reuse=None):
#     with tf.variable_scope(scope, reuse=reuse):
#         out = inpt
#         for kerner, stride in kerners, strides:
#             out = tf.nn.conv2d(input=out, filter=kerner, stride=stride)
#         out = layers.flatten(out)
#         with tf.name_scope("out"):
#             mu = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=None)
#             sigma = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=tf.nn.softplus)
#         return mu, sigma
Project: rl-attack-detection    Author: yenchenlin
def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
            if concat_softmax:
                out = tf.nn.softmax(out)

        return out
Project: rl-attack-detection    Author: yenchenlin
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: baselines    Author: openai
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out
Project: rl_algorithms    Author: DanielTakeshi
def _build_net(self, input_BO, scope):
        """ The Actor network.

        Uses ReLUs for all hidden layers, but a tanh to the output to bound the
        action. This follows their 'low-dimensional networks' using 400 and 300
        units for the hidden layers. Set `reuse=False`. I don't use batch
        normalization or their precise weight initialization.
        """
        with tf.variable_scope(scope, reuse=False):
            hidden1 = layers.fully_connected(input_BO,
                    num_outputs=400,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            hidden2 = layers.fully_connected(hidden1, 
                    num_outputs=300,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            actions_BA = layers.fully_connected(hidden2,
                    num_outputs=self.ac_dim,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.tanh) # Note the tanh!
            # This should broadcast, but haven't tested with ac_dim > 1.
            actions_BA = tf.multiply(actions_BA, self.ac_high)
            return actions_BA
Project: rl_algorithms    Author: DanielTakeshi
def _make_network(self, data_in, out_dim):
        """ Build the network with the same architecture following OpenAI's paper.

        Returns the final *layer* of the network, which corresponds to our
        chosen action.  There is no non-linearity for the last layer because
        different envs have different action ranges.
        """
        with tf.variable_scope("ESAgent", reuse=False):
            out = data_in
            out = layers.fully_connected(out, num_outputs=64,
                    weights_initializer = layers.xavier_initializer(uniform=True),
                    #weights_initializer = utils.normc_initializer(0.5),
                    activation_fn = tf.nn.tanh)
            out = layers.fully_connected(out, num_outputs=64,
                    weights_initializer = layers.xavier_initializer(uniform=True),
                    #weights_initializer = utils.normc_initializer(0.5),
                    activation_fn = tf.nn.tanh)
            out = layers.fully_connected(out, num_outputs=out_dim,
                    weights_initializer = layers.xavier_initializer(uniform=True),
                    #weights_initializer = utils.normc_initializer(0.5),
                    activation_fn = None)
            return out
Project: zhusuan    Author: thu-ml
def var_dropout(observed, x, n, net_size, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        h = x
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            eps_mean = tf.ones([n, n_in])
            eps = zs.Normal(
                'layer' + str(i) + '/eps', eps_mean, std=1.,
                n_samples=n_particles, group_ndims=1)
            h = layers.fully_connected(
                h * eps, n_out, normalizer_fn=layers.batch_norm,
                normalizer_params=normalizer_params)
            if i < len(net_size) - 2:
                h = tf.nn.relu(h)
        y = zs.OnehotCategorical('y', h)
    return model, h
Project: zhusuan    Author: thu-ml
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Project: kaggle_redefining_cancer_treatment    Author: jorgemf
def doc2vec_prediction_model(input_vectors, input_gene, input_variation, output_label, batch_size,
                             is_training, embedding_size, output_classes):
    # inputs/outputs
    input_vectors = tf.reshape(input_vectors, [batch_size, embedding_size])
    input_gene = tf.reshape(input_gene, [batch_size, embedding_size])
    input_variation = tf.reshape(input_variation, [batch_size, embedding_size])
    targets = None
    if output_label is not None:
        output_label = tf.reshape(output_label, [batch_size, 1])
        targets = tf.one_hot(output_label, axis=-1, depth=output_classes, on_value=1.0,
                             off_value=0.0)
        targets = tf.squeeze(targets, axis=1)

    net = tf.concat([input_vectors, input_gene, input_variation], axis=1)
    net = layers.fully_connected(net, embedding_size * 2, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size // 4, activation_fn=tf.nn.relu)
    logits = layers.fully_connected(net, output_classes, activation_fn=None)

    return logits, targets
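
A hedged sketch of how the returned (logits, targets) pair might feed a training loss; every shape and tensor name below is illustrative, not taken from the project:

import tensorflow as tf
from tensorflow.contrib import layers

batch_size, embedding_size, output_classes = 32, 200, 9
input_vectors = tf.placeholder(tf.float32, [batch_size, embedding_size])
input_gene = tf.placeholder(tf.float32, [batch_size, embedding_size])
input_variation = tf.placeholder(tf.float32, [batch_size, embedding_size])
labels = tf.placeholder(tf.int32, [batch_size])

logits, targets = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
                                            labels, batch_size, is_training=True,
                                            embedding_size=embedding_size,
                                            output_classes=output_classes)
loss = tf.losses.softmax_cross_entropy(onehot_labels=targets, logits=logits)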
Project: kaggle_redefining_cancer_treatment    Author: jorgemf
def _attention(self, inputs, output_size, gene, variation, activation_fn=tf.tanh):
        inputs_shape = inputs.get_shape()
        if len(inputs_shape) != 3 and len(inputs_shape) != 4:
            raise ValueError('Shape of input must have 3 or 4 dimensions')
        input_projection = layers.fully_connected(inputs, output_size,
                                                  activation_fn=activation_fn)
        doc_context = tf.concat([gene, variation], axis=1)
        doc_context_vector = layers.fully_connected(doc_context, output_size,
                                                    activation_fn=activation_fn)
        doc_context_vector = tf.expand_dims(doc_context_vector, 1)
        if len(inputs_shape) == 4:
            doc_context_vector = tf.expand_dims(doc_context_vector, 1)

        vector_attn = input_projection * doc_context_vector
        vector_attn = tf.reduce_sum(vector_attn, axis=-1, keep_dims=True)
        attention_weights = tf.nn.softmax(vector_attn, dim=1)
        weighted_projection = input_projection * attention_weights
        outputs = tf.reduce_sum(weighted_projection, axis=-2)

        return outputs
Project: gait-recognition    Author: marian-margeta
def get_arg_scope(is_training):
        weight_decay_l2 = 0.1
        batch_norm_decay = 0.999
        batch_norm_epsilon = 0.0001

        with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                            weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                            biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                            weights_initializer = layers.variance_scaling_initializer(),
                            ):
            batch_norm_params = {
                'decay': batch_norm_decay,
                'epsilon': batch_norm_epsilon
            }
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training = is_training):
                with slim.arg_scope([slim.batch_norm],
                                    **batch_norm_params):
                    with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                        activation_fn = tf.nn.elu,
                                        normalizer_fn = slim.batch_norm,
                                        normalizer_params = batch_norm_params) as scope:
                        return scope
Project: chi    Author: rmst
def test_dqn(env='Chain-v0'):
    import gym_mix
    env = gym.make(env)

    def pp(x):
        x = layers.fully_connected(x, 32)
        x = layers.fully_connected(x, 32)
        return x

    def head(x):
        x = layers.fully_connected(x, 32)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None,
                                                             weights_initializer=tf.random_normal_initializer(0, 1e-4))
        return x

    agent = BootstrappedDQNAg(env, pp, head, replay_start=64)

    for ep in range(100000):
        R, _ = agent.play_episode()

        if ep % 100 == 0:
            print(f'Return after episode {ep} is {R}')
Project: chi    Author: rmst
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: chi    Author: rmst
def dqn_test(env='OneRoundDeterministicReward-v0'):
    env = gym.make(env)
    env = ObservationShapeWrapper(env)

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1-.01),
                         optimizer=tf.train.AdamOptimizer(.01))
    def q_network(x):
        x = layers.fully_connected(x, 32)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None,
                                                             weights_initializer=tf.random_normal_initializer(0, 1e-4))
        return x

    agent = DqnAgent(env, q_network, double_dqn=False, replay_start=100, annealing_time=100)

    rs = []
    for ep in range(10000):
        r, _ = agent.play_episode()

        rs.append(r)

        if ep % 100 == 0:
            print(f'Return after episode {ep} is {sum(rs)/len(rs)}')
            rs = []
Project: RL_FlappyBird    Author: iGuaZi
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: RL_FlappyBird    Author: iGuaZi
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: RL_FlappyBird    Author: iGuaZi
def Actor(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        with tf.variable_scope("action_prob"):
            out = tf.nn.softmax(out)
        return out
Project: RL_FlappyBird    Author: iGuaZi
def _build_net(self):
        with tf.variable_scope("conv"):
            out = layers.convolution2d(self.s, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("actor"):
            a_prob = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            a_prob = layers.fully_connected(a_prob, num_outputs=N_A, activation_fn=tf.nn.softmax)

        with tf.variable_scope("critic"):
            v = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            v = layers.fully_connected(v, num_outputs=1, activation_fn=tf.nn.softmax)  # softmax over a single unit is constant; a linear value head (activation_fn=None) is more conventional

        return a_prob, v
Project: tf_practice    Author: juho-lee
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
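
A quick illustrative check (not from the project): with zero weights and the bias initialized to [1, 0, 0, 0, 1, 0], to_loc starts out predicting the identity affine transform, the usual initialization for a spatial-transformer localization layer:

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

feat = tf.zeros([4, 100])                  # dummy feature batch
theta = to_loc(feat)                       # shape [4, 6]
theta_mat = tf.reshape(theta, [-1, 2, 3])  # initially [[1, 0, 0], [0, 1, 0]] per example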
Project: tf_practice    Author: juho-lee
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
Project: NAF-tensorflow    Author: carpedm20
def fc(layer, output_size, is_training, 
       weight_init, weight_reg=None, activation_fn=None, 
       use_batch_norm=False, scope='fc'):
  if use_batch_norm:
    batch_norm_args = {
      'normalizer_fn': batch_norm,
      'normalizer_params': {
        'is_training': is_training,
      }
    }
  else:
    batch_norm_args = {}

  with tf.variable_scope(scope):
    return fully_connected(
      layer,
      num_outputs=output_size,
      activation_fn=activation_fn,
      weights_initializer=weight_init,
      weights_regularizer=weight_reg,
      biases_initializer=tf.constant_initializer(0.0),
      scope=scope,
      **batch_norm_args
    )
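
A hypothetical call into the fc() wrapper above; the placeholder shapes, initializer, and scope name are illustrative assumptions:

import tensorflow as tf
from tensorflow.contrib.layers import fully_connected, batch_norm, xavier_initializer

states = tf.placeholder(tf.float32, [None, 17], name='states')
is_training = tf.placeholder(tf.bool, [], name='is_training')
h1 = fc(states, output_size=200, is_training=is_training,
        weight_init=xavier_initializer(), activation_fn=tf.nn.relu,
        use_batch_norm=True, scope='hid1')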
Project: reinforceflow    Author: dbobrenko
def __init__(self, input_space, output_space, trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        net, end_points = make_dqn_body(self.input_ph, trainable)
        end_points['fc1'] = layers.fully_connected(net, num_outputs=256, activation_fn=tf.nn.relu,
                                                   scope='fc1', trainable=trainable)
        gaussian = tf.random_normal_initializer
        v = layers.fully_connected(end_points['fc1'], num_outputs=1,
                                   activation_fn=None,
                                   weights_initializer=gaussian(0.0, 0.1),
                                   biases_initializer=gaussian(0.05, 0.1),
                                   scope='out_value',
                                   trainable=trainable)
        end_points['out_value'] = tf.squeeze(v)
        header_endpoints = make_a3c_header(net, input_space, output_space, trainable)
        end_points.update(header_endpoints)
        self.end_points = end_points
        self.output_policy = self.output
Project: reinforceflow    Author: dbobrenko
def __init__(self, input_space, output_space, layer_sizes=(512, 512, 512), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        end_points = {}
        net = layers.flatten(self._input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        gaussian = tf.random_normal_initializer
        v = layers.fully_connected(net, num_outputs=1,
                                   activation_fn=None,
                                   weights_initializer=gaussian(0.0, 0.1),
                                   biases_initializer=gaussian(0.05, 0.1),
                                   scope='out_value',
                                   trainable=trainable)
        end_points['out_value'] = tf.squeeze(v)
        header_endpoints = make_a3c_header(net, input_space, output_space, trainable)
        end_points.update(header_endpoints)
        self.end_points = end_points
        self.output_policy = self.output
Project: reinforceflow    Author: dbobrenko
def __init__(self, input_space, output_space, layer_sizes=(512, 512, 512),
                 output_activation=None, trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        end_points = {}
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        net = self._input_ph
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        end_points['out'] = layers.fully_connected(net, num_outputs=output_space.shape[0],
                                                   activation_fn=output_activation,
                                                   trainable=trainable, scope='out')
        self.end_points = end_points
Project: reinforceflow    Author: dbobrenko
def __init__(self, input_space, output_space, layer_sizes=(512, 512), dueling_type='mean',
                 advantage_layers=(256,), value_layers=(256,), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')

        end_points = {}
        net = layers.flatten(self.input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        net, dueling_endpoints = make_dueling_header(input_layer=net,
                                                     output_size=output_space.shape[0],
                                                     dueling_type=dueling_type,
                                                     advantage_layers=advantage_layers,
                                                     value_layers=value_layers,
                                                     trainable=trainable)
        end_points.update(dueling_endpoints)
        self._output = net
        self.end_points = end_points
Project: reinforceflow    Author: dbobrenko
def __init__(self, input_space, output_space, trainable=True, use_nature=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        if use_nature:
            net, end_points = make_dqn_body_nature(self.input_ph, trainable)
        else:
            net, end_points = make_dqn_body(self.input_ph, trainable)
        net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu,
                                     scope='fc1', trainable=trainable)
        end_points['fc1'] = net
        end_points['out'] = layers.fully_connected(net,
                                                   num_outputs=output_space.shape[0],
                                                   activation_fn=None, scope='out',
                                                   trainable=trainable)
        self.end_points = end_points
Project: lsdc    Author: febert
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    super(HardDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)

    self.layers = [decisions_to_data.HardDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)]
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))

      nn_activations_tensor = array_ops.concat(
          1, nn_activations, name="flattened_nn_activations")

      return nn_activations_tensor
Project: lsdc    Author: febert
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    super(HardDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)

    self.layers = [decisions_to_data.HardDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)]
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))

      nn_activations_tensor = array_ops.concat(
          1, nn_activations, name="flattened_nn_activations")

      return nn_activations_tensor
Project: ste-GAN-ography2    Author: bin2415
def discriminator_stego_nn(self, img, batch_size, name):
        eve_input = self.image_processing_layer(img)
        eve_conv1 = convolution2d(eve_input, 64, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv1')

        eve_conv2 = convolution2d(eve_conv1, 64 * 2, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv2')

        eve_conv3 = convolution2d(eve_conv2, 64 * 4,kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv3')

        eve_conv4 = convolution2d(eve_conv3, 64* 8, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv4')

        eve_conv4 = tf.reshape(eve_conv4, [batch_size, -1])

        #eve_fc = fully_connected(eve_conv4, 1, activation_fn = tf.nn.sigmoid, normalizer_fn = BatchNorm,
        #weights_initializer=tf.random_normal_initializer(stddev=1.0))
        eve_fc = fully_connected(eve_conv4, 1, normalizer_fn = BatchNorm, 
        weights_initializer=tf.random_normal_initializer(stddev=1.0), scope = 'eve' + name + '/final_fc')
        return eve_fc
Project: tensorflow-infogan    Author: JonathanRaiman
def discriminator_forward(img,
                          network_description,
                          is_training,
                          reuse=None,
                          name="discriminator",
                          use_batch_norm=True,
                          debug=False):
    with tf.variable_scope(name, reuse=reuse):
        out = run_network(img,
                          network_description,
                          is_training=is_training,
                          use_batch_norm=use_batch_norm,
                          debug=debug)
        out = layers.flatten(out)
        prob = layers.fully_connected(
            out,
            num_outputs=1,
            activation_fn=tf.nn.sigmoid,
            scope="prob_projection"
        )

    return {"prob":prob, "hidden":out}
Project: Mendelssohn    Author: diggerdu
def _critic(self, input_, name='critic', reuse=False):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()
            co_0 = tf.nn.relu(conv2d(input_, o_dim=64, \
                k_size=[3,1], st=[2,1], name='co_0'))
            co_1 = tf.nn.relu(conv2d(co_0, o_dim=128, \
                k_size=[3,2], st=[2,1], name='co_1'))
            co_2 = tf.nn.relu(conv2d(co_1, o_dim=256, \
                k_size=[3,2], st=[2,2], name='co_2'))
            co_3 = tf.nn.relu(conv2d(co_2, o_dim=512, \
                k_size=[3,2], st=[3,3], name='co_3'))
            co_4 = tf.nn.relu(conv2d(co_3, o_dim=1024, \
                k_size=[2,2], st=[3,3], name='co_4'))
            co_5 = tf.nn.relu(conv2d(co_4, o_dim=self.o_size[-2]*self.o_size[-1],\
                k_size=[3,1], st=[4,2], name='co_5'))
            em_distance = ly.fully_connected(tf.reshape(co_5, [-1, np.prod(ten_sh(co_5)[1:])]), \
                    1, activation_fn=None, scope='em_dis')
        return em_distance
Project: Mendelssohn    Author: diggerdu
def _critic(self, input_, name='critic', reuse=False):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()
            co_0 = tf.nn.relu(conv2d(input_, o_dim=64, \
                k_size=[3,1], st=[2,1], name='co_0'))
            co_1 = tf.nn.relu(conv2d(co_0, o_dim=128, \
                k_size=[3,2], st=[2,1], name='co_1'))
            co_2 = tf.nn.relu(conv2d(co_1, o_dim=256, \
                k_size=[3,2], st=[2,2], name='co_2'))
            co_3 = tf.nn.relu(conv2d(co_2, o_dim=512, \
                k_size=[3,2], st=[3,3], name='co_3'))
            co_4 = tf.nn.relu(conv2d(co_3, o_dim=1024, \
                k_size=[2,2], st=[3,3], name='co_4'))
            co_5 = tf.nn.relu(conv2d(co_4, o_dim=self.o_size[-2]*self.o_size[-1],\
                k_size=[3,1], st=[4,2], name='co_5'))
            em_distance = ly.fully_connected(tf.reshape(co_5, [-1, np.prod(ten_sh(co_5)[1:])]), \
                    1, activation_fn=None, scope='em_dis')
        return em_distance
Project: document-qa    Author: allenai
def apply(self, is_train, context_embed, answer, context_mask=None):
        init_fn = get_keras_initialization(self.init)
        with tf.variable_scope("bounds_encoding"):
            m1, m2 = self.predictor.apply(is_train, context_embed, context_mask)

        with tf.variable_scope("start_pred"):
            logits1 = fully_connected(m1, 1, activation_fn=None,
                                      weights_initializer=init_fn)
            logits1 = tf.squeeze(logits1, squeeze_dims=[2])

        with tf.variable_scope("end_pred"):
            logits2 = fully_connected(m2, 1, activation_fn=None, weights_initializer=init_fn)
            logits2 = tf.squeeze(logits2, squeeze_dims=[2])

        with tf.variable_scope("predict_span"):
            return self.span_predictor.predict(answer, logits1, logits2, context_mask)
Project: document-qa    Author: allenai
def apply(self, is_train, x, memories, answer: List[Tensor], x_mask=None, memory_mask=None):
        with tf.variable_scope("map_context"):
            memories = self.context_mapper.apply(is_train, memories, memory_mask)
        with tf.variable_scope("encode_context"):
            encoded = self.context_encoder.apply(is_train, memories, memory_mask)
        with tf.variable_scope("merge"):
            x = self.merge.apply(is_train, x, encoded, x_mask)
        with tf.variable_scope("predict"):
            m1, m2 = self.bounds_predictor.apply(is_train, x, x_mask)

        init = get_keras_initialization(self.init)
        with tf.variable_scope("logits1"):
            l1 = fully_connected(m1, 1, activation_fn=None, weights_initializer=init)
            l1 = tf.squeeze(l1, squeeze_dims=[2])
        with tf.variable_scope("logits2"):
            l2 = fully_connected(m2, 1, activation_fn=None, weights_initializer=init)
            l2 = tf.squeeze(l2, squeeze_dims=[2])

        with tf.variable_scope("predict_span"):
            return self.span_predictor.predict(answer, l1, l2, x_mask)
Project: GAN_Theories    Author: YadiraF
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return d
Project: GAN_Theories    Author: YadiraF
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            mu = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
            sigma = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return mu, sigma
Project: NoisyNet-DQN    Author: andrewliao11
def model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: googlenet    Author: da-steve101
def aux_logit_layer( inputs, num_classes, is_training ):
    with tf.variable_scope("pool2d"):
        pooled = layers.avg_pool2d(inputs, [ 5, 5 ], stride = 3 )
    with tf.variable_scope("conv11"):
        conv11 = layers.conv2d( pooled, 128, [1, 1] )
    with tf.variable_scope("flatten"):
        flat = tf.reshape( conv11, [-1, 2048] )
    with tf.variable_scope("fc"):
        fc = layers.fully_connected( flat, 1024, activation_fn=None )
    with tf.variable_scope("drop"):
        drop = layers.dropout( fc, 0.3, is_training = is_training )
    with tf.variable_scope( "linear" ):
        linear = layers.fully_connected( drop, num_classes, activation_fn=None )
    with tf.variable_scope("soft"):
        soft = tf.nn.softmax( linear )
    return soft
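
A hypothetical call; the 14x14x512 feature map is the size at which GoogLeNet attaches its first auxiliary classifier, assumed here because it makes the hard-coded 2048 flatten (4*4*128) line up:

import tensorflow as tf
from tensorflow.contrib import layers

feats = tf.placeholder(tf.float32, [None, 14, 14, 512])
aux_probs = aux_logit_layer(feats, num_classes=1000, is_training=True)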
Project: deep_rl_vizdoom    Author: mihahauke
def create_architecture(self, **specs):
        self.vars.sequence_length = tf.placeholder(tf.int64, [1], name="sequence_length")

        fc_input = self.get_input_layers()

        fc1 = fully_connected(fc_input, num_outputs=self.fc_units_num,
                              scope=self._name_scope + "/fc1")

        fc1_reshaped = tf.reshape(fc1, [1, -1, self.fc_units_num])
        self.recurrent_cells = self.ru_class(self._recurrent_units_num)
        state_c = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.c], name="initial_lstm_state_c")
        state_h = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.h], name="initial_lstm_state_h")
        self.vars.initial_network_state = LSTMStateTuple(state_c, state_h)
        rnn_outputs, self.ops.network_state = tf.nn.dynamic_rnn(self.recurrent_cells,
                                                                fc1_reshaped,
                                                                initial_state=self.vars.initial_network_state,
                                                                sequence_length=self.vars.sequence_length,
                                                                time_major=False,
                                                                scope=self._name_scope)
        reshaped_rnn_outputs = tf.reshape(rnn_outputs, [-1, self._recurrent_units_num])

        self.reset_state()
        self.ops.pi, self.ops.v = self.policy_value_layer(reshaped_rnn_outputs)