Python tensorflow.contrib.layers module: convolution2d_transpose() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.convolution2d_transpose().
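
For orientation, here is a minimal, hedged sketch of the call itself (assuming TensorFlow 1.x, where tf.contrib.layers is still available; the shapes below are hypothetical): with the default padding='SAME', a stride-2 transposed convolution doubles the spatial size of a feature map.

import tensorflow as tf
import tensorflow.contrib.layers as layers

# hypothetical 16x16 feature map with 64 channels, unknown batch size
inputs = tf.placeholder(tf.float32, [None, 16, 16, 64])
# padding='SAME' (the default) with stride=2 doubles the spatial size: 16 -> 32
outputs = layers.convolution2d_transpose(inputs, num_outputs=32, kernel_size=5, stride=2)
print(outputs)  # Tensor of shape (?, 32, 32, 32)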

Project: GAN    Author: ilblackdragon    | project source | file source
def conv_generator(x, output_dim, n_filters, scope='Generator'):
  with tf.variable_scope(scope):
    s4, s2 = int(output_dim / 4), int(output_dim / 2)
    # project the noise vector to an s4 x s4 feature map with 2*n_filters channels
    z_ = layers.linear(x, s4 * s4 * n_filters * 2)
    h0 = tf.reshape(z_, [-1, s4, s4, n_filters * 2])
    # two stride-2 transposed convolutions upsample s4 -> s2 -> output_dim
    h1 = layers.convolution2d_transpose(h0, n_filters, [5, 5], stride=2)
    h1 = tf.nn.elu(h1)
    h2 = layers.convolution2d_transpose(h1, 1, [5, 5], stride=2)
    return tf.reshape(tf.nn.tanh(h2), [-1, output_dim * output_dim])
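
A hedged usage sketch for the generator above (assuming the same imports as the snippet, i.e. tf = tensorflow and layers = tf.contrib.layers; the batch size and noise dimension are hypothetical): with output_dim=28 the noise is projected to a 7x7 map and the two stride-2 transposed convolutions bring it to 28x28, flattened to 784 values per example.

import tensorflow as tf
import tensorflow.contrib.layers as layers

z = tf.random_normal([32, 100])                         # hypothetical batch of 32 noise vectors
fake = conv_generator(z, output_dim=28, n_filters=64)   # 7x7 -> 14x14 -> 28x28
print(fake)                                             # Tensor of shape (32, 784)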
Project: gait-recognition    Author: marian-margeta    | project源码 | file source
def get_network(self, input_tensor, is_training):
        # Load pre-trained inception-resnet model
        with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay = 0.999, weight_decay = 0.0001)):
            net, end_points = inception_resnet_v2(input_tensor, is_training = is_training)

        # Adding some modification to original InceptionResnetV2 - changing scoring of AUXILIARY TOWER
        weight_decay = 0.0005
        with tf.variable_scope('NewInceptionResnetV2'):
            with tf.variable_scope('AuxiliaryScoring'):
                with slim.arg_scope([layers.convolution2d, layers.convolution2d_transpose],
                                    weights_regularizer = slim.l2_regularizer(weight_decay),
                                    biases_regularizer = slim.l2_regularizer(weight_decay),
                                    activation_fn = None):
                    tf.summary.histogram('Last_layer/activations', net, [KEY_SUMMARIES])

                    # Scoring
                    net = slim.dropout(net, 0.7, is_training = is_training, scope = 'Dropout')
                    net = layers.convolution2d(net, num_outputs = self.FEATURES, kernel_size = 1, stride = 1,
                                               scope = 'Scoring_layer')
                    feature = net
                    tf.summary.histogram('Scoring_layer/activations', net, [KEY_SUMMARIES])

                    # Upsampling
                    net = layers.convolution2d_transpose(net, num_outputs = 16, kernel_size = 17, stride = 17,
                                                         padding = 'VALID', scope = 'Upsampling_layer')

                    tf.summary.histogram('Upsampling_layer/activations', net, [KEY_SUMMARIES])

            # Smoothing layer - separable gaussian filters
            net = super()._get_gauss_smoothing_net(net, size = self.SMOOTH_SIZE, std = 1.0, kernel_sum = 0.2)

            return net, feature
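
A hedged sanity check of the upsampling arithmetic in the Upsampling_layer above (assuming TensorFlow 1.x; the input shape is hypothetical): with padding='VALID' a transposed convolution produces (in - 1) * stride + kernel outputs per spatial dimension, so kernel_size=17 with stride=17 turns an h x w score map into a 17h x 17w map.

import tensorflow as tf
import tensorflow.contrib.layers as layers

score = tf.placeholder(tf.float32, [1, 17, 17, 16])   # hypothetical score map
up = layers.convolution2d_transpose(score, num_outputs=16, kernel_size=17,
                                    stride=17, padding='VALID')
print(up)  # (17 - 1) * 17 + 17 = 289, i.e. Tensor of shape (1, 289, 289, 16)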
Project: ICGANs    Author: cameronfabbri    | project source | file source
def netG(z, y, BATCH_SIZE):

   # concat attribute y onto z
   z = tf.concat([z,y], axis=1)
   z = tcl.fully_connected(z, 4*4*512, activation_fn=tf.identity, scope='g_z')
   #z = tcl.batch_norm(z)
   z = tf.reshape(z, [BATCH_SIZE, 4, 4, 512])
   #z = tf.nn.relu(z)

   # four stride-2 transposed convolutions upsample 4x4 -> 8 -> 16 -> 32 -> 64
   conv1 = tcl.convolution2d_transpose(z, 256, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv1')

   conv2 = tcl.convolution2d_transpose(conv1, 128, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv2')

   conv3 = tcl.convolution2d_transpose(conv2, 64, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv3')

   conv4 = tcl.convolution2d_transpose(conv3, 3, 5, 2, activation_fn=tf.nn.tanh, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv4')

   print('z:', z)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print()
   print('END G')
   print()
   tf.add_to_collection('vars', z)
   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   return conv4
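
A hedged usage sketch for netG above (assuming tf and tcl = tf.contrib.layers as in the project code; the latent and attribute dimensions below are hypothetical stand-ins): the generator reshapes the projected input to 4x4x512 and upsamples it to a 64x64x3 image.

import tensorflow as tf
import tensorflow.contrib.layers as tcl

BATCH_SIZE = 16
z = tf.random_normal([BATCH_SIZE, 100])   # hypothetical latent vectors
y = tf.zeros([BATCH_SIZE, 40])            # hypothetical attribute vectors
img = netG(z, y, BATCH_SIZE)              # 4x4 -> 8 -> 16 -> 32 -> 64
print(img)                                # Tensor of shape (BATCH_SIZE, 64, 64, 3)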
Project: ICGANs    Author: cameronfabbri    | project source | file source
def netG(z, y, BATCH_SIZE):

   # concat attribute y onto z
   z = tf.concat([z,y], axis=1)
   print('z:', z)

   z = tcl.fully_connected(z, 4*4*512, activation_fn=tf.identity, scope='g_z')
   z = tf.reshape(z, [BATCH_SIZE, 4, 4, 512])
   z = tcl.batch_norm(z)
   z = tf.nn.relu(z)

   # three stride-2 transposed convolutions upsample 4x4 -> 8 -> 16 -> 32
   conv1 = tcl.convolution2d_transpose(z, 256, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv1')
   conv2 = tcl.convolution2d_transpose(conv1, 128, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv2')
   conv3 = tcl.convolution2d_transpose(conv2, 1, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv3')
   # crop the 32x32 output down to 28x28 (MNIST resolution)
   conv3 = conv3[:,:28,:28,:]

   print('z:', z)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print()
   print('END G')
   print()
   tf.add_to_collection('vars', z)
   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   return conv3
Project: Mendelssohn    Author: diggerdu    | project source | file source
def deconv2d(input_, o_size, k_size, name='deconv2d'):
    print(name, 'input', ten_sh(input_))
    print(name, 'output', o_size)
    # output spatial dims must be integer multiples of the input spatial dims
    assert np.sum(np.mod(o_size[1:3], ten_sh(input_)[1:3]) - [0,0]) == 0
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.convolution2d_transpose(input_, num_outputs=o_size[-1], \
                kernel_size=k_size, stride=np.divide(o_size[1:3], ten_sh(input_)[1:3]), \
                padding='SAME', weights_initializer=init)
        return output
Project: Mendelssohn    Author: diggerdu    | project source | file source
def deconv2d(input_, o_size, k_size, name='deconv2d'):
    print(name, 'input', ten_sh(input_))
    print(name, 'output', o_size)
    # output spatial dims must be integer multiples of the input spatial dims
    assert np.sum(np.mod(o_size[1:3], ten_sh(input_)[1:3]) - [0,0]) == 0
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.convolution2d_transpose(input_, num_outputs=o_size[-1], \
                kernel_size=k_size, stride=np.divide(o_size[1:3], ten_sh(input_)[1:3]), \
                padding='SAME', weights_initializer=init, \
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
        return output
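
Both deconv2d helpers derive the stride from the ratio of the requested output size to the input size, which only works because padding='SAME' makes the output spatial size equal to the input size times the stride. A hedged, self-contained sketch of the same idea (ten_sh is presumably the project's shape helper; here the shapes are read directly, and the shapes and kernel size are hypothetical):

import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as ly

inp = tf.placeholder(tf.float32, [1, 8, 8, 64])   # hypothetical 8x8 input
o_size = [1, 32, 32, 32]                          # hypothetical target output shape
# stride = output spatial size / input spatial size, here [4, 4]
stride = [int(s) for s in np.divide(o_size[1:3], inp.get_shape().as_list()[1:3])]
out = ly.convolution2d_transpose(inp, num_outputs=o_size[-1], kernel_size=5,
                                 stride=stride, padding='SAME',
                                 weights_initializer=ly.xavier_initializer_conv2d())
print(out)  # Tensor of shape (1, 32, 32, 32)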
Project: ICGANs    Author: cameronfabbri    | project source | file source
def decoder(layers, y, BATCH_SIZE):

   layers = layers[0]
   # get all the layers from the encoder for skip connections.
   enc_conv1 = layers[0]
   enc_conv2 = layers[1]
   enc_conv3 = layers[2]
   enc_conv4 = layers[3]
   enc_conv5 = layers[4]
   enc_conv6 = layers[5]
   '''
   print 'enc_conv1:',enc_conv1
   print 'enc_conv2:',enc_conv2
   print 'enc_conv3:',enc_conv3
   print 'enc_conv4:',enc_conv4
   print 'enc_conv5:',enc_conv5
   print 'enc_conv6:',enc_conv6
   '''

   # z is the latent encoding (conv6)
   z = tcl.flatten(layers[-1])
   z = tf.concat([z,y], axis=1)
   print('z:', z)

   # reshape z to put through transpose convolutions
   s = z.get_shape().as_list()[-1]
   z = tf.reshape(z, [BATCH_SIZE, 1, 1, s])
   print('z:', z)

   # six stride-2 transposed convolutions upsample the 1x1 bottleneck: 1 -> 2 -> 4 -> 8 -> 16 -> 32 -> 64
   dec_conv1 = tcl.convolution2d_transpose(z, 512, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv1')
   #dec_conv1 = tf.concat([dec_conv1, enc_conv5], axis=3)
   print('dec_conv1:', dec_conv1)

   dec_conv2 = tcl.convolution2d_transpose(dec_conv1, 512, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv2')
   #dec_conv2 = tf.concat([dec_conv2, enc_conv4], axis=3)
   print('dec_conv2:', dec_conv2)

   dec_conv3 = tcl.convolution2d_transpose(dec_conv2, 256, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv3')
   #dec_conv3 = tf.concat([dec_conv3, enc_conv3], axis=3)
   print('dec_conv3:', dec_conv3)

   dec_conv4 = tcl.convolution2d_transpose(dec_conv3, 128, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv4')
   #dec_conv3 = tf.concat([dec_conv4, enc_conv2], axis=3)
   print('dec_conv4:', dec_conv4)

   dec_conv5 = tcl.convolution2d_transpose(dec_conv4, 64, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv5')
   #dec_conv3 = tf.concat([dec_conv5, enc_conv1], axis=3)
   print('dec_conv5:', dec_conv5)

   dec_conv6 = tcl.convolution2d_transpose(dec_conv5, 3, 4, 2, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv6')
   print('dec_conv6:', dec_conv6)

   print()
   print('END G')
   print()
   return dec_conv6
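
A hedged usage sketch for the decoder above (assuming tf and tcl = tf.contrib.layers as in the project code; the encoder outputs and the 40-dimensional attribute vector below are hypothetical stand-ins): since the skip connections are commented out, only the last encoder layer is actually consumed, and six stride-2 transposed convolutions take the 1x1 bottleneck back to a 64x64x3 image.

import tensorflow as tf
import tensorflow.contrib.layers as tcl

BATCH_SIZE = 4
# hypothetical encoder outputs; only enc[-1] (the 1x1 bottleneck) is used above
enc = [tf.zeros([BATCH_SIZE, s, s, c]) for s, c in
       [(32, 64), (16, 128), (8, 256), (4, 512), (2, 512), (1, 512)]]
y = tf.zeros([BATCH_SIZE, 40])       # hypothetical attribute vector
img = decoder([enc], y, BATCH_SIZE)  # 1x1 -> 2 -> 4 -> 8 -> 16 -> 32 -> 64
print(img)                           # Tensor of shape (BATCH_SIZE, 64, 64, 3)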