Python tensorflow.contrib.layers module: conv2d() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.contrib.layers.conv2d().
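
A minimal sketch of the call itself before the project examples (the input shape and the parameter values here are illustrative assumptions, not taken from any of the projects below):

import tensorflow as tf
from tensorflow.contrib import layers

# NHWC input: a batch of 28x28 grayscale images (illustrative shape)
images = tf.placeholder(tf.float32, [None, 28, 28, 1])

# defaults: stride=1, padding='SAME', activation_fn=tf.nn.relu
net = layers.conv2d(images, num_outputs=32, kernel_size=[3, 3])

# stride=2 halves the spatial resolution; activation_fn=None makes the layer linear
net = layers.conv2d(net, num_outputs=64, kernel_size=[3, 3], stride=2,
                    activation_fn=None)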

Project: TensorFlow-World    Author: astorfi    | project source | file source
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net

# read MNIST dataset
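
A minimal sketch of how the autoencoder above is typically wired into a graph (the placeholder shape, mean-squared-error loss and optimizer are assumptions for illustration, not part of the excerpt):

# assumes `lays` is tensorflow.contrib.layers, as in the snippet above
ae_inputs = tf.placeholder(tf.float32, (None, 32, 32, 1))  # e.g. MNIST digits resized to 32x32
ae_outputs = autoencoder(ae_inputs)

# reconstruction loss and training op
loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)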
Project: predictron    Author: zhongwen    | project source | file source
def predictron_arg_scope(weight_decay=0.0001,
                         batch_norm_decay=0.997,
                         batch_norm_epsilon=1e-5,
                         batch_norm_scale=True):
  batch_norm_params = {
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=None,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Project: DeepWorks    Author: daigo0927    | project source | file source
def _shortcut(inputs, x): # x = f(inputs)
    # shortcut path
    _, inputs_h, inputs_w, inputs_ch = inputs.shape.as_list()
    _, x_h, x_w, x_ch = x.shape.as_list()
    stride_h = int(round(inputs_h / x_h))
    stride_w = int(round(inputs_w / x_w))
    equal_ch = inputs_ch == x_ch

    if stride_h>1 or stride_w>1 or not equal_ch:
        shortcut = tcl.conv2d(inputs,
                              num_outputs = x_ch,
                              kernel_size = (1, 1),
                              stride = (stride_h, stride_w),
                              padding = 'VALID')
    else:
        shortcut = inputs

    merged = tf.add(shortcut, x)
    return merged
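
A hedged sketch of how _shortcut might be combined with a convolutional path to form a residual block (this helper is illustrative and not taken from the project; it assumes `tcl` is tensorflow.contrib.layers):

def basic_block(filters, stride=(1, 1)):
    def f(inputs):
        # convolutional path: two 3x3 convolutions, the second one linear
        x = tcl.conv2d(inputs, num_outputs=filters, kernel_size=(3, 3),
                       stride=stride, padding='SAME')
        x = tcl.conv2d(x, num_outputs=filters, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME', activation_fn=None)
        # add the (possibly projected) shortcut path
        return _shortcut(inputs, x)
    return f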
Project: DeepWorks    Author: daigo0927    | project source | file source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x1, down1 = down_block(self.block_fn, 64)(inputs)
            x2, down2 = down_block(self.block_fn, 128)(down1)
            x3, down3 = down_block(self.block_fn, 256)(down2)

            down3 = self.block_fn(512)(down3)

            up3 = up_block(self.block_fn, 256)(x3, down3)
            up2 = up_block(self.block_fn, 128)(x2, up3)
            up1 = up_block(self.block_fn, 64)(x1, up2)

            outputs = tcl.conv2d(up1,
                                 num_outputs = self.output_ch,
                                 kernel_size = (1, 1),
                                 stride = (1, 1),
                                 padding = 'SAME')

            return outputs
Project: decorrelated-adversarial-autoencoder    Author: patrickgadd    | project source | file source
def semi_supervised_encoder_convolutional(input_tensor, z_dim, y_dim, batch_size, network_scale=1.0, img_res=28, img_channels=1):
    f_multiplier = network_scale

    net = tf.reshape(input_tensor, [-1, img_res, img_res, img_channels])

    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(128*f_multiplier), 3, stride=2)

    net = tf.reshape(net, [batch_size, -1])
    net = layers.fully_connected(net, 1000)

    y = layers.fully_connected(net, y_dim, activation_fn=None, normalizer_fn=None)

    z = layers.fully_connected(net, z_dim, activation_fn=None)

    return y, z
Project: zhusuan    Author: thu-ml    | project source | file source
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Project: canshi    Author: hungsing92    | project source | file source
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
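
The returned scope is consumed as a context manager, so every conv2d and fully_connected call inside it inherits the defaults defined above; a minimal usage sketch (the layer sizes and the `inputs` tensor are illustrative assumptions):

with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    # `inputs` is assumed to be an NHWC image batch
    net = layers.conv2d(inputs, 64, [3, 3], scope='conv1_1')
    net = layers.conv2d(net, 64, [3, 3], scope='conv1_2')
    net = layers.max_pool2d(net, [2, 2], scope='pool1')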
Project: chi    Author: rmst    | project source | file source
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: chi    Author: rmst    | project source | file source
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: reinforceflow    Author: dbobrenko    | project source | file source
def make_dqn_body(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=16,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=32,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    out = layers.flatten(net)
    end_points['conv2_flatten'] = out
    return out, end_points
Project: information-dropout    Author: ucla-vision    | project source | file source
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            scope=scope )
Project: information-dropout    Author: ucla-vision    | project source | file source
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer
        network = self.conv(inputs, num_outputs=int(num_outputs), stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Returns the noisy output of the dropout
        return network * e
Project: information-dropout    Author: ucla-vision    | project source | file source
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            normalizer_params = {'is_training' : self.is_training, 'updates_collections': None, 'decay': 0.9},
            scope=scope )
Project: information-dropout    Author: ucla-vision    | project source | file source
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer
        network = self.conv(inputs, num_outputs=int(num_outputs), stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Saves the log-output of the network (useful to compute the total correlation)
        tf.add_to_collection('log_network', tf.log(network * e))
        # Returns the noisy output of the dropout
        return network * e
Project: Mendelssohn    Author: diggerdu    | project source | file source
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
Project: Mendelssohn    Author: diggerdu    | project source | file source
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
    '''
Project: neuralmonkey    Author: ufal    | project source | file source
def image_processing_layers(self) -> List[tf.Tensor]:
        """Do all convolutions and return the last conditional map.

        Applies convolutions on the input tensor with optional max pooling.
        All the intermediate layers are stored in the `image_processing_layers`
        attribute.  There is not dropout between the convolutional layers, by
        default the activation function is ReLU.
        """
        last_layer = self.image_input
        image_processing_layers = []  # type: List[tf.Tensor]

        with tf.variable_scope("convolutions"):
            for i, (filter_size,
                    n_filters,
                    pool_size) in enumerate(self.convolutions):
                with tf.variable_scope("cnn_layer_{}".format(i)):
                    last_layer = conv2d(last_layer, n_filters, filter_size)
                    image_processing_layers.append(last_layer)

                    if pool_size:
                        last_layer = max_pool2d(last_layer, pool_size)
                        image_processing_layers.append(last_layer)

        return image_processing_layers
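
Each entry of `self.convolutions` is unpacked as a (filter_size, n_filters, pool_size) triple; a hedged illustration of what such a specification might look like (the values are assumptions, not taken from the project configuration):

# two 3x3 conv layers followed by 2x2 max pooling, then a 3x3 conv layer
# without pooling (a falsy pool_size skips the max_pool2d call)
convolutions = [(3, 32, 2),
                (3, 64, 2),
                (3, 128, None)]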
Project: neuralmonkey    Author: ufal    | project source | file source
def image_processing_layers(self) -> List[tf.Tensor]:
        """Do all convolutions and return the last conditional map.

        Applies convolutions on the input tensor with optional max pooling.
        All the intermediate layers are stored in the `image_processing_layers`
        attribute.  There is not dropout between the convolutional layers, by
        default the activation function is ReLU.
        """
        last_layer = self.image_input
        image_processing_layers = []  # type: List[tf.Tensor]

        with tf.variable_scope("convolutions"):
            for i, (filter_size,
                    n_filters,
                    pool_size) in enumerate(self.convolutions):
                with tf.variable_scope("cnn_layer_{}".format(i)):
                    last_layer = conv2d(last_layer, n_filters, filter_size)
                    image_processing_layers.append(last_layer)

                    if pool_size:
                        last_layer = max_pool2d(last_layer, pool_size)
                        image_processing_layers.append(last_layer)

        return image_processing_layers
Project: neuralmonkey    Author: ufal    | project source | file source
def image_processing_layers(self) -> List[tf.Tensor]:
        """Do all convolutions and return the last conditional map.

        Applies convolutions on the input tensor with optional max pooling.
        All the intermediate layers are stored in the `image_processing_layers`
        attribute.  There is not dropout between the convolutional layers, by
        default the activation function is ReLU.
        """
        last_layer = self.image_input
        image_processing_layers = []  # type: List[tf.Tensor]

        with tf.variable_scope("convolutions"):
            for i, (filter_size,
                    n_filters,
                    pool_size) in enumerate(self.convolutions):
                with tf.variable_scope("cnn_layer_{}".format(i)):
                    last_layer = conv2d(last_layer, n_filters, filter_size)
                    image_processing_layers.append(last_layer)

                    if pool_size:
                        last_layer = max_pool2d(last_layer, pool_size)
                        image_processing_layers.append(last_layer)

        return image_processing_layers
Project: GAN_Theories    Author: YadiraF    | project source | file source
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return d
Project: GAN_Theories    Author: YadiraF    | project source | file source
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            mu = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
            sigma = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return mu, sigma
Project: googlenet    Author: da-steve101    | project source | file source
def get_inception_layer( inputs, conv11_size, conv33_11_size, conv33_size,
                         conv55_11_size, conv55_size, pool11_size ):
    with tf.variable_scope("conv_1x1"):
        conv11 = layers.conv2d( inputs, conv11_size, [ 1, 1 ] )
    with tf.variable_scope("conv_3x3"):
        conv33_11 = layers.conv2d( inputs, conv33_11_size, [ 1, 1 ] )
        conv33 = layers.conv2d( conv33_11, conv33_size, [ 3, 3 ] )
    with tf.variable_scope("conv_5x5"):
        conv55_11 = layers.conv2d( inputs, conv55_11_size, [ 1, 1 ] )
        conv55 = layers.conv2d( conv55_11, conv55_size, [ 5, 5 ] )
    with tf.variable_scope("pool_proj"):
        pool_proj = layers.max_pool2d( inputs, [ 3, 3 ], stride = 1 )
        pool11 = layers.conv2d( pool_proj, pool11_size, [ 1, 1 ] )
    if tf.__version__ == '0.11.0rc0':
        return tf.concat(3, [conv11, conv33, conv55, pool11])
    return tf.concat([conv11, conv33, conv55, pool11], 3)
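
A hedged usage sketch with filter counts in the spirit of the original GoogLeNet inception (3a) module (the exact sizes are illustrative, not taken from this project):

with tf.variable_scope("inception_3a"):
    net = get_inception_layer(net,
                              conv11_size=64,     # 1x1 branch
                              conv33_11_size=96,  # 1x1 reduction before the 3x3 branch
                              conv33_size=128,
                              conv55_11_size=16,  # 1x1 reduction before the 5x5 branch
                              conv55_size=32,
                              pool11_size=32)     # 1x1 after the 3x3 max pool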
Project: googlenet    Author: da-steve101    | project source | file source
def aux_logit_layer( inputs, num_classes, is_training ):
    with tf.variable_scope("pool2d"):
        pooled = layers.avg_pool2d(inputs, [ 5, 5 ], stride = 3 )
    with tf.variable_scope("conv11"):
        conv11 = layers.conv2d( pooled, 128, [1, 1] )
    with tf.variable_scope("flatten"):
        flat = tf.reshape( conv11, [-1, 2048] )
    with tf.variable_scope("fc"):
        fc = layers.fully_connected( flat, 1024, activation_fn=None )
    with tf.variable_scope("drop"):
        drop = layers.dropout( fc, 0.3, is_training = is_training )
    with tf.variable_scope( "linear" ):
        linear = layers.fully_connected( drop, num_classes, activation_fn=None )
    with tf.variable_scope("soft"):
        soft = tf.nn.softmax( linear )
    return soft
Project: GAN-general    Author: weilinie    | project source | file source
def generatorResNet(z, hidden_num, output_dim, kern_size, out_channels):
    with tf.variable_scope("G") as vs:
        fc = tcl.fully_connected(z, hidden_num*output_dim, activation_fn=None)
        fc = tf.reshape(fc, [-1, output_dim, hidden_num]) # data_format: 'NWC'

        res1 = resBlock(fc, hidden_num, kern_size)
        res2 = resBlock(res1, hidden_num, kern_size)
        res3 = resBlock(res2, hidden_num, kern_size)
        res4 = resBlock(res3, hidden_num, kern_size)
        res5 = resBlock(res4, hidden_num, kern_size)

        logits = tcl.conv2d(res5, out_channels, kernel_size=1)
        fake_data_softmax = tf.reshape(
            tf.nn.softmax(tf.reshape(logits, [-1, out_channels])),
            tf.shape(logits)
        )

    g_vars = tf.contrib.framework.get_variables(vs)
    return fake_data_softmax, g_vars
Project: GAN-general    Author: weilinie    | project source | file source
def discriminatorResNet(x, hidden_num, output_dim, kern_size, in_channels, reuse):
    with tf.variable_scope("D") as vs:
        if reuse:
            vs.reuse_variables()
        conv = tcl.conv2d(x, hidden_num, kernel_size=1)

        res1 = resBlock(conv, hidden_num, kern_size)
        res2 = resBlock(res1, hidden_num, kern_size)
        res3 = resBlock(res2, hidden_num, kern_size)
        res4 = resBlock(res3, hidden_num, kern_size)
        res5 = resBlock(res4, hidden_num, kern_size)

        res5 = tf.reshape(res5, [-1, output_dim*hidden_num])  # data_format: 'NWC'
        disc_out = tcl.fully_connected(res5, 1, activation_fn=None)

    d_vars = tf.contrib.framework.get_variables(vs)
    return disc_out, d_vars
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_a(net, scope='BlockA'):
    # 35 x 35 x 384 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 96, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 96, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_3x3'):
            br3 = layers.conv2d(net, 64, [1, 1], scope='Conv1_1x1')
            br3 = layers.conv2d(br3, 96, [3, 3], scope='Conv2_3x3')
        with tf.variable_scope('Br4_3x3Dbl'):
            br4 = layers.conv2d(net, 64, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 96, [3, 3], scope='Conv2_3x3')
            br4 = layers.conv2d(br4, 96, [3, 3], scope='Conv3_3x3')
        net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
        # 35 x 35 x 384
    return net
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_b(net, scope='BlockB'):
    # 17 x 17 x 1024 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 128, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_7x7'):
            br3 = layers.conv2d(net, 192, [1, 1], scope='Conv1_1x1')
            br3 = layers.conv2d(br3, 224, [1, 7], scope='Conv2_1x7')
            br3 = layers.conv2d(br3, 256, [7, 1], scope='Conv3_7x1')
        with tf.variable_scope('Br4_7x7Dbl'):
            br4 = layers.conv2d(net, 192, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 192, [1, 7], scope='Conv2_1x7')
            br4 = layers.conv2d(br4, 224, [7, 1], scope='Conv3_7x1')
            br4 = layers.conv2d(br4, 224, [1, 7], scope='Conv4_1x7')
            br4 = layers.conv2d(br4, 256, [7, 1], scope='Conv5_7x1')
        net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
        # 17 x 17 x 1024
    return net
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_b_reduce(net, endpoints, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 192, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 192, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_7x7x3'):
                br3 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, 256, [1, 7], padding='SAME', scope='Conv2_1x7')
                br3 = layers.conv2d(br3, 320, [7, 1], padding='SAME', scope='Conv3_7x1')
                br3 = layers.conv2d(br3, 320, [3, 3], stride=2, scope='Conv4_3x3/2')
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_c(net, scope='BlockC'):
    # 8 x 8 x 1536 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 256, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 256, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_3x3'):
            br3 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
            br3a = layers.conv2d(br3, 256, [1, 3], scope='Conv2_1x3')
            br3b = layers.conv2d(br3, 256, [3, 1], scope='Conv3_3x1')
        with tf.variable_scope('Br4_7x7Dbl'):
            br4 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 448, [1, 7], scope='Conv2_1x7')
            br4 = layers.conv2d(br4, 512, [7, 1], scope='Conv3_7x1')
            br4a = layers.conv2d(br4, 256, [1, 7], scope='Conv4a_1x7')
            br4b = layers.conv2d(br4, 256, [7, 1], scope='Conv4b_7x1')
        net = tf.concat(3, [br1, br2, br3a, br3b, br4a, br4b], name='Concat1')
        # 8 x 8 x 1536
    return net
Project: Machine-Learning    Author: hadikazemi    | project source | file source
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net

# read MNIST dataset
Project: DeepWorks    Author: daigo0927    | project source | file source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            # tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            logits = tcl.fully_connected(x, num_outputs = self.num_output)

            return logits
Project: DeepWorks    Author: daigo0927    | project source | file source
def _bn_relu_conv(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.batch_norm(inputs)
        x = tf.nn.relu(x)
        x = tcl.conv2d(x,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        return x
    return f
Project: DeepWorks    Author: daigo0927    | project source | file source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
Project: DeepWorks    Author: daigo0927    | project source | file source
def _conv_relu(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.conv2d(inputs,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        x = tf.nn.relu(x)
        return x
    return f
Project: DeepWorks    Author: daigo0927    | project source | file source
def bn_relu_conv(inputs, num_outputs, kernel_size, stride = (1, 1), padding = 'SAME'):
    x = tcl.batch_norm(inputs)
    x = tf.nn.relu(x)
    x = tcl.conv2d(x,
                   num_outputs = num_outputs,
                   kernel_size = kernel_size,
                   stride = stride,
                   padding = padding)
    return x
Project: DeepWorks    Author: daigo0927    | project source | file source
def PhaseShift_withConv(x, r, filters, kernel_size = (3, 3), stride = (1, 1)):
    # output shape(batch, r*x_h, r*x_w, filters)

    x = tcl.conv2d(x,
                   num_outputs = filters*r**2,
                   kernel_size = kernel_size,
                   stride = stride,
                   padding = 'SAME')
    x = PhaseShift(x, r)
    return x
Project: DocumentSegmentation    Author: SeguinBe    | project source | file source
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels
Project: chi    Author: rmst    | project source | file source
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)

        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)

        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)

        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q


# Tests
Project: chi    Author: rmst    | project source | file source
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)

        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)

        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)

        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q


# Tests
Project: tensorflow_face    Author: ZhihengCV    | project source | file source
def alexnet_v2_arg_scope(weight_decay=0.0005):
    with arg_scope(
            [layers.conv2d, layers_lib.fully_connected],
            activation_fn=nn_ops.relu,
            biases_initializer=init_ops.constant_initializer(0.1),
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d], padding='SAME'):
            with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
Project: reinforceflow    Author: dbobrenko    | project source | file source
def make_dqn_body_nature(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=32,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[3, 3],
                        stride=[1, 1],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv3",
                        trainable=trainable)
    end_points['conv3'] = net
    out = layers.flatten(net)
    end_points['conv3_flatten'] = out
    return out, end_points
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)
   #fc1 = tcl.dropout(fc1, 0.5)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print 'input:',x
   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'fc1:',fc1
   print 'fc2:',fc2
   print 'END ENCODER\n'

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encoder(x,y):

   y_dim = int(y.get_shape().as_list()[-1])

   # reshape so it's batchx1x1xy_size
   y = tf.reshape(y, shape=[BATCH_SIZE, 1, 1, y_dim])
   input_ = conv_cond_concat(x, y)

   conv1 = tcl.conv2d(input_, 64, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv1')
   conv1 = lrelu(conv1)
   conv1 = conv_cond_concat(conv1, y)

   conv2 = tcl.conv2d(conv1, 128, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv2')
   conv2 = lrelu(conv2)
   conv2 = conv_cond_concat(conv2, y)

   conv3 = tcl.conv2d(conv2, 256, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv3')
   conv3 = lrelu(conv3)
   conv3 = conv_cond_concat(conv3, y)

   conv4 = tcl.conv2d(conv3, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv4')
   conv4 = lrelu(conv4)
   conv4 = conv_cond_concat(conv4, y)

   conv5 = tcl.conv2d(conv4, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv5')
   conv5 = lrelu(conv5)
   conv5 = conv_cond_concat(conv5, y)

   conv6 = tcl.conv2d(conv5, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv6')
   conv6 = lrelu(conv6)

   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'conv5:',conv5
   print 'conv6:',conv6
   out = [conv1, conv2, conv3, conv4, conv5, conv6]
   return out,y
Project: ICGANs    Author: cameronfabbri    | project source | file source
def netD(input_images, y, BATCH_SIZE, reuse=False):

   print 'DISCRIMINATOR reuse = '+str(reuse)
   sc = tf.get_variable_scope()
   with tf.variable_scope(sc, reuse=reuse):

      y_dim = int(y.get_shape().as_list()[-1])

      # reshape so it's batchx1x1xy_size
      y = tf.reshape(y, shape=[BATCH_SIZE, 1, 1, y_dim])
      input_ = conv_cond_concat(input_images, y)

      conv1 = tcl.conv2d(input_, 64, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv1')
      conv1 = lrelu(conv1)

      conv2 = tcl.conv2d(conv1, 128, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv2')
      conv2 = lrelu(conv2)

      conv3 = tcl.conv2d(conv2, 256, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv3')
      conv3 = lrelu(conv3)

      conv4 = tcl.conv2d(conv3, 512, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv4')
      conv4 = lrelu(conv4)

      conv5 = tcl.conv2d(conv4, 1, 4, 1, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv5')

      print 'input images:',input_images
      print 'conv1:',conv1
      print 'conv2:',conv2
      print 'conv3:',conv3
      print 'conv4:',conv4
      print 'conv5:',conv5
      print 'END D\n'

      tf.add_to_collection('vars', conv1)
      tf.add_to_collection('vars', conv2)
      tf.add_to_collection('vars', conv3)
      tf.add_to_collection('vars', conv4)
      tf.add_to_collection('vars', conv5)

      return conv5
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print 'input:',x
   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'fc1:',fc1
   print 'fc2:',fc2
   print 'END ENCODER\n'

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encY(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 512, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 512, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)

   fc2 = tcl.fully_connected(fc1, 10, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print 'input:',x
   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'fc1:',fc1
   print 'fc2:',fc2
   print 'END ENCODER\n'

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: Mendelssohn    Author: diggerdu    | project source | file source
def reconv2d(input_, o_size, k_size, name='reconv2d'):
    print name, 'input', ten_sh(input_)
    print name, 'output', o_size
    input_ = tf.image.resize_nearest_neighbor(input_, o_size[:3])
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_size[-1], kernel_size=k_size, stride=1,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
Project: GAN_Theories    Author: YadiraF    | project source | file source
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # --- conv
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            h = tcl.fully_connected(tcl.flatten(d), self.n_hidden, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))

            # -- deconv
            d = tcl.fully_connected(h, 4 * 4 * 512, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
            d = tf.reshape(d, (-1, 4, 4, 512))  # size
            d = tcl.conv2d_transpose(d, 256, 3, stride=2, # size*2
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d_transpose(d, 128, 3, stride=2, # size*4
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d_transpose(d, 64, 3, stride=2, # size*8
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.conv2d_transpose(d, 3, 3, stride=2, # size*16
                                    activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            return d
Project: deep_rl_vizdoom    Author: mihahauke    | project source | file source
def __init__(self,
                 initial_entropy_beta=0.05,
                 final_entropy_beta=0.0,
                 decay_steps=1e5,
                 thread="global",
                 **settings):

        super(_BaseACNet, self).__init__(**settings)
        self.network_state = None
        self._name_scope = "net_" + str(thread)

        if initial_entropy_beta == final_entropy_beta:
            self._entropy_beta = initial_entropy_beta
        else:
            self._entropy_beta = tf.train.polynomial_decay(
                name="entropy_beta",
                learning_rate=initial_entropy_beta,
                end_learning_rate=final_entropy_beta,
                decay_steps=decay_steps,
                global_step=tf.train.get_global_step())

        with arg_scope([conv2d], data_format="NCHW"), \
             arg_scope([fully_connected, conv2d],
                       activation_fn=self.activation_fn,
                       biases_initializer=tf.constant_initializer(self.init_bias)):
            self.create_architecture()

        self._prepare_loss_op()
        self.params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._name_scope)