Python tensorflow.contrib.layers module: flatten() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.flatten().
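
Before the project excerpts, here is a minimal usage sketch. It is not taken from any of the projects below; it assumes TensorFlow 1.x (where tensorflow.contrib is still available), and the placeholder name and shape are illustrative only. layers.flatten() keeps the batch dimension and collapses all remaining dimensions, turning a [batch, height, width, channels] tensor into [batch, height*width*channels], which can then be fed into fully_connected().

import tensorflow as tf
import tensorflow.contrib.layers as layers

# Illustrative placeholder: a batch of 28x28 RGB images (the shape is an assumption, not from the examples).
images = tf.placeholder(tf.float32, shape=[None, 28, 28, 3], name='images')

# flatten() merges every dimension except the batch one: [None, 28*28*3] = [None, 2352].
flat = layers.flatten(images)

# A flattened tensor is typically followed by a fully connected layer, as in the examples below.
logits = layers.fully_connected(flat, num_outputs=10, activation_fn=None)

print(flat.get_shape().as_list())  # [None, 2352]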

Project: Semi_Supervised_GAN    Author: ChunyuanLI    | project source | file source
def inference(input_img):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)  
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | project source | file source
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | project source | file source
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | project source | file source
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | project source | file source
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: DRLModule    Author: halleanwoo    | project source | file source
def _action_norm_dist(inpt, num_actions, w_init, activation_fn_v, activation_fn_a):
    mu = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_v)
    sigma = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_a)
    return mu, sigma



# # cnn network frame
# def cnn_frame_continu(hiddens, kerners, strides, inpt, num_actions, scope=None, activation_fn=tf.nn.relu, activation_fn_mu=tf.nn.relu, activation_fn_sigma=tf.nn.relu, reuse=None):
#     with tf.variable_scope(scope, reuse=reuse):
#         out = inpt
#         for kerner, stride in zip(kerners, strides):
#             out = tf.nn.conv2d(input=out, filter=kerner, strides=stride, padding='SAME')
#         out = layers.flatten(out)
#         with tf.name_scope("out"):
#             mu = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=None)
#             sigma = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=tf.nn.softplus)
#         return mu, sigma
Project: rl-attack-detection    Author: yenchenlin    | project source | file source
def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
            if concat_softmax:
                out = tf.nn.softmax(out)

        return out
Project: rl-attack-detection    Author: yenchenlin    | project source | file source
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: baselines    Author: openai    | project source | file source
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out
Project: chi    Author: rmst    | project source | file source
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: chi    Author: rmst    | project source | file source
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: RL_FlappyBird    Author: iGuaZi    | project source | file source
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: RL_FlappyBird    Author: iGuaZi    | project source | file source
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Project: RL_FlappyBird    Author: iGuaZi    | project source | file source
def Actor(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        with tf.variable_scope("action_prob"):
            out = tf.nn.softmax(out)
        return out
Project: RL_FlappyBird    Author: iGuaZi    | project source | file source
def Critic(img_in, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=1, activation_fn=None)

        return out




# models defined in the original code
Project: tf_practice    Author: juho-lee    | project source | file source
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
Project: tf_practice    Author: juho-lee    | project source | file source
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
Project: reinforceflow    Author: dbobrenko    | project source | file source
def __init__(self, input_space, output_space, layer_sizes=(512, 512, 512), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        end_points = {}
        net = layers.flatten(self._input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        gaussian = tf.random_normal_initializer
        v = layers.fully_connected(net, num_outputs=1,
                                   activation_fn=None,
                                   weights_initializer=gaussian(0.0, 0.1),
                                   biases_initializer=gaussian(0.05, 0.1),
                                   scope='out_value',
                                   trainable=trainable)
        end_points['out_value'] = tf.squeeze(v)
        header_endpoints = make_a3c_header(net, input_space, output_space, trainable)
        end_points.update(header_endpoints)
        self.end_points = end_points
        self.output_policy = self.output
Project: reinforceflow    Author: dbobrenko    | project source | file source
def __init__(self, input_space, output_space, layer_sizes=(512, 512), dueling_type='mean',
                 advantage_layers=(256,), value_layers=(256,), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')

        end_points = {}
        net = layers.flatten(self.input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        net, dueling_endpoints = make_dueling_header(input_layer=net,
                                                     output_size=output_space.shape[0],
                                                     dueling_type=dueling_type,
                                                     advantage_layers=advantage_layers,
                                                     value_layers=value_layers,
                                                     trainable=trainable)
        end_points.update(dueling_endpoints)
        self._output = net
        self.end_points = end_points
Project: reinforceflow    Author: dbobrenko    | project source | file source
def make_dqn_body(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=16,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=32,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    out = layers.flatten(net)
    end_points['conv2_flatten'] = out
    return out, end_points
Project: tensorflow-infogan    Author: JonathanRaiman    | project source | file source
def discriminator_forward(img,
                          network_description,
                          is_training,
                          reuse=None,
                          name="discriminator",
                          use_batch_norm=True,
                          debug=False):
    with tf.variable_scope(name, reuse=reuse):
        out = run_network(img,
                          network_description,
                          is_training=is_training,
                          use_batch_norm=use_batch_norm,
                          debug=debug)
        out = layers.flatten(out)
        prob = layers.fully_connected(
            out,
            num_outputs=1,
            activation_fn=tf.nn.sigmoid,
            scope="prob_projection"
        )

    return {"prob":prob, "hidden":out}
Project: NoisyNet-DQN    Author: andrewliao11    | project source | file source
def model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')

        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')

        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')

        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Project: distributional_perspective_on_RL    Author: Kiwoo    | project source | file source
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
        return out
Project: DeepWorks    Author: daigo0927    | project source | file source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            # tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            logits = tcl.fully_connected(x, num_outputs = self.num_output)

            return logits
Project: DeepWorks    Author: daigo0927    | project source | file source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
Project: deep-q-learning    Author: alvinwan    | project source | file source
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512,         activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: deep-q-learning    Author: alvinwan    | project source | file source
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        gauss_initializer = initializers.xavier_initializer(uniform=False)  # stddev = 1/n
        with tf.variable_scope("convnet"):
            out = layers.convolution2d(
                out, num_outputs=num_filters, kernel_size=8, stride=4,
                activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
                trainable=False)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: deep-q-learning    Author: alvinwan    | project source | file source
def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        out = layers.flatten(out)
        # stddev = 1/n, where n = number of inputs
        gauss_initializer = initializers.xavier_initializer(uniform=False)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(
                out,
                num_outputs=num_actions,
                activation_fn=tf.nn.relu,
                biases_initializer=None,
                weights_initializer=gauss_initializer,
                weights_regularizer=None)
        return out
Project: decorrelated-adversarial-autoencoder    Author: patrickgadd    | project source | file source
def semi_supervised_decoder_convolutional(input_tensor, batch_size, n_dimensions, network_scale=1.0, img_res=28, img_channels=1):
    f_multiplier = network_scale

    net = layers.fully_connected(input_tensor, 2*2*int(128*f_multiplier))
    net = tf.reshape(net, [-1, 2, 2, int(128*f_multiplier)])

    assert(img_res in [28, 32])

    if img_res==28:
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1, padding='VALID')
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)
    else:
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)

    net = layers.conv2d_transpose(net, img_channels, 5, stride=1, activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)

    return net
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | project source | file source
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
        return out
Project: Densenet-Tensorflow    Author: taki0112    | project source | file source
def Dense_net(self, input_x):
        x = conv_layer(input_x, filter=2 * self.filters, kernel=[7,7], stride=2, layer_name='conv0')
        x = Max_Pooling(x, pool_size=[3,3], stride=2)



        for i in range(self.nb_blocks) :
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x, nb_layers=4, layer_name='dense_'+str(i))
            x = self.transition_layer(x, scope='trans_'+str(i))


        """
        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')

        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')

        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')
        """

        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')

        # 100 Layer
        x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        x = Relu(x)
        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)


        # x = tf.reshape(x, [-1, 10])
        return x
Project: Densenet-Tensorflow    Author: taki0112    | project source | file source
def Dense_net(self, input_x):
        x = conv_layer(input_x, filter=2 * self.filters, kernel=[7,7], stride=2, layer_name='conv0')
        # x = Max_Pooling(x, pool_size=[3,3], stride=2)


        """
        for i in range(self.nb_blocks) :
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x, nb_layers=4, layer_name='dense_'+str(i))
            x = self.transition_layer(x, scope='trans_'+str(i))
        """




        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')

        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')

        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')

        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')



        # 100 Layer
        x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        x = Relu(x)
        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)


        # x = tf.reshape(x, [-1, 10])
        return x
Project: rl-attack-detection    Author: yenchenlin    | project source | file source
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
        return out
Project: baselines    Author: openai    | project source | file source
def dueling_model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                state_hidden = layer_norm_fn(state_hidden, relu=True)
            else:
                state_hidden = tf.nn.relu(state_hidden)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                actions_hidden = layer_norm_fn(actions_hidden, relu=True)
            else:
                actions_hidden = tf.nn.relu(actions_hidden)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
Project: baselines    Author: openai    | project source | file source
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Project: cancer    Author: yancz1989    | project source | file source
def model(H, x, training):
  net = dropout(x, 0.5, is_training = training)
  # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu)
  # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu)
  # net = max_pool2d(net, [2, 2], padding = 'VALID')
  # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu)
  # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu)
  # net = max_pool2d(net, [2, 2], padding = 'VALID')
  # ksize = net.get_shape().as_list()
  # net = max_pool2d(net, [ksize[1], ksize[2]])
  net = fully_connected(flatten(net), 256, activation_fn = tf.nn.relu)
  net = dropout(net, 0.5, is_training = training)
  logits = fully_connected(net, 1, activation_fn = tf.nn.sigmoid)
  preds = tf.cast(tf.greater(logits, 0.5), tf.int64)
  return logits, preds
Project: rl_algorithms    Author: DanielTakeshi    | project source | file source
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512,         activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Project: chi    Author: rmst    | project source | file source
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)

        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)

        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)

        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q


# Tests
Project: chi    Author: rmst    | project source | file source
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)

        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)

        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)

        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q


# Tests
Project: RL_FlappyBird    Author: iGuaZi    | project source | file source
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
        return out
Project: ssd_tensorflow    Author: railsnoob    | project source | file source
def flatten(self, x_tensor):
        batch_size = x_tensor.shape[0]
        mult = 1
        for a in range(1,len(x_tensor.shape)):
            mult = mult * int(x_tensor.shape[a])
        return tf.reshape(x_tensor,[-1,mult])
Project: ssd_tensorflow    Author: railsnoob    | project source | file source
def convolve_and_collect(self,fmap,name,y_box_coords,y_class,phase):
        # Apply 2 convolutions and get predictions for coordinates and classes and store them 
        # Get both of these guys and convolve
        b = self.conv_layer_optional_pooling(fmap,4*self.num_default_boxes,(3,3),(1,1),name+"box_coords",phase,padding_type="SAME")
        print("   =====> ",name+"box_coords",self.flatten(b))
        y_box_coords.insert(0,flatten(b))

        c = self.conv_layer_optional_pooling(fmap,self.num_classes*self.num_default_boxes,(3,3),(1,1),name+"class",phase,padding_type="SAME")
        print("   =====> ",name+"class",self.flatten(c))
        y_class.insert(0,flatten(c))
Project: tf_practice    Author: juho-lee    | project source | file source
def to_trans(input):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    W_init = tf.constant_initializer(np.zeros((num_inputs, 2)))
    b_init = tf.constant_initializer(np.array([0.,0.]))
    return layers.fully_connected(input, 2,
            weights_initializer=W_init,
            biases_initializer=b_init)
Project: tf_practice    Author: juho-lee    | project source | file source
def to_trans(input):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    W_init = tf.constant_initializer(np.zeros((num_inputs, 2)))
    b_init = tf.constant_initializer(np.array([0.,0.]))
    return layers.fully_connected(input, 2,
            weights_initializer=W_init,
            biases_initializer=b_init)
Project: reinforceflow    Author: dbobrenko    | project source | file source
def make_dqn_body_nature(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=32,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[3, 3],
                        stride=[1, 1],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv3",
                        trainable=trainable)
    end_points['conv3'] = net
    out = layers.flatten(net)
    end_points['conv3_flatten'] = out
    return out, end_points
Project: a3c    Author: siemanko    | project source | file source
def forward(image, num_actions):
    # Conv1
    out = layers.convolution2d(image, num_outputs=16, kernel_size=8, stride=4, activation_fn=tf.nn.relu, scope='conv1')
    out = layers.convolution2d(out, num_outputs=32, kernel_size=4, stride=2, activation_fn=tf.nn.relu, scope='conv2')
    out = layers.flatten(out, scope='flatten')
    out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu, scope='fc1')

    action_logprobs = tf.nn.log_softmax(layers.fully_connected(out, num_outputs=num_actions, activation_fn=None, scope='fc_actor'))
    value           = layers.fully_connected(out, num_outputs=1, activation_fn=None, scope='fc_critic')
    value           = tf.reshape(value, [-1])
    return action_logprobs, value
Project: information-dropout    Author: ucla-vision    | project source | file source
def batch_average(x):
    '''Sum over all dimensions and averages over the first'''
    return tf.reduce_mean(tf.reduce_sum(flatten(x),1))
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)
   #fc1 = tcl.dropout(fc1, 0.5)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print('input:', x)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('fc1:', fc1)
   print('fc2:', fc2)
   print('END ENCODER\n')

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print('input:', x)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('fc1:', fc1)
   print('fc2:', fc2)
   print('END ENCODER\n')

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2