Python tensorflow.contrib.layers 模块,conv2d_transpose() 实例源码

我们从Python开源项目中,提取了以下41个代码示例,用于说明如何使用tensorflow.contrib.layers.conv2d_transpose()。

项目:TensorFlow-World    作者:astorfi    | 项目源码 | 文件源码
def autoencoder(inputs):
    """Convolutional autoencoder: 32x32x1 input -> 2x2x8 code -> 32x32x1 output.

    The final layer uses tanh, so reconstructions lie in [-1, 1].
    """
    # encoder: 32x32x1 -> 16x16x32 -> 8x8x16 -> 2x2x8
    code = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    code = lays.conv2d(code, 16, [5, 5], stride=2, padding='SAME')
    code = lays.conv2d(code, 8, [5, 5], stride=4, padding='SAME')
    # decoder mirrors the encoder: 2x2x8 -> 8x8x16 -> 16x16x32 -> 32x32x1
    recon = lays.conv2d_transpose(code, 16, [5, 5], stride=4, padding='SAME')
    recon = lays.conv2d_transpose(recon, 32, [5, 5], stride=2, padding='SAME')
    recon = lays.conv2d_transpose(recon, 1, [5, 5], stride=2, padding='SAME',
                                  activation_fn=tf.nn.tanh)
    return recon

# read MNIST dataset
项目:Semi_Supervised_GAN    作者:ChunyuanLI    | 项目源码 | 文件源码
def generator(input_latent):
    """DCGAN-style generator: latent vector -> 32x32x3 image in [-1, 1] (tanh)."""
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        weight_decay = 0.0001  # NOTE(review): unused here — kept for parity, confirm
        h = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        h = tf.nn.relu(layers.batch_norm(h))
        h = tf.reshape(h, (batch_size, 4, 4, 512))
        # stride-2 deconvolutions upsample 4x4 -> 8x8 -> 16x16
        for depth in (256, 128):
            h = layers.conv2d_transpose(h, depth, kernel_size=(5, 5), stride=(2, 2),
                                        padding='SAME', activation_fn=None)
            h = tf.nn.relu(layers.batch_norm(h))
        # final deconv to 32x32x3, batch-normalized then squashed with tanh
        h = layers.conv2d_transpose(h, 3, kernel_size=(5, 5), stride=(2, 2),
                                    padding='SAME', activation_fn=None)
        gen_dat = tf.nn.tanh(layers.batch_norm(h))

    return gen_dat

# specify discriminative model
项目:Semi_Supervised_GAN    作者:ChunyuanLI    | 项目源码 | 文件源码
def generator(input_latent):
    """Generator network: latent vector -> 32x32x3 image via tanh."""
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        net = layers.fully_connected(input_latent, num_outputs=4*4*512,
                                     activation_fn=None)
        net = tf.nn.relu(layers.batch_norm(net))
        net = tf.reshape(net, (batch_size, 4, 4, 512))
        # three stride-2 deconv stages: 4x4x512 -> 8x8x256 -> 16x16x128 -> 32x32x3
        for n_out in (256, 128, 3):
            net = layers.conv2d_transpose(net, n_out, kernel_size=(5, 5),
                                          stride=(2, 2), padding='SAME',
                                          activation_fn=None)
            net = layers.batch_norm(net)
            if n_out != 3:
                net = tf.nn.relu(net)
        gen_dat = tf.nn.tanh(net)

    return gen_dat
项目:unrolled-GAN    作者:Zardinality    | 项目源码 | 文件源码
def generator(z):
    """Generator: z -> 32x32x1 image in [-1, 1].

    The fully-connected layer and the final output bias are built by hand
    (slim.model_variable + manual add) because, per the original note,
    bias_add's higher-order derivative was not derivable in this TF
    version, ruling out the contrib.layers FC/bias; the conv case below
    avoids biases for the same reason.
    """
    fc_w = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512), initializer=ly.xavier_initializer())
    fc_b = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    net = tf.nn.relu(ly.batch_norm(fully_connected(z, fc_w, fc_b)))
    net = tf.reshape(net, (-1, 4, 4, 512))
    # upsample 4x4 -> 8x8 -> 16x16 -> 32x32
    for depth in (256, 128, 64):
        net = ly.conv2d_transpose(net, depth, 3, stride=2,
                                  activation_fn=tf.nn.relu,
                                  normalizer_fn=ly.batch_norm, padding='SAME')
    # last deconv is bias-free; the bias is added manually below
    net = ly.conv2d_transpose(net, 1, 3, stride=1,
                              activation_fn=None, padding='SAME',
                              biases_initializer=None)
    out_bias = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    return tf.nn.tanh(net + out_bias)
项目:unrolled-GAN    作者:Zardinality    | 项目源码 | 文件源码
def generator(z, label):
    """Conditional generator: latent z + one-hot label -> 32x32x1 image in [-1, 1].

    The label is injected three times: concatenated to z, concatenated to the
    first FC output, and tiled over the 4x4 spatial grid as extra channels.
    """
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    # BUG FIX: this layer previously read `z` instead of `train`, which
    # silently discarded the 1024-unit FC layer and the label
    # re-concatenation directly above.
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    # tile the 10-way label over the 4x4 grid and append as channels
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10])
    train = tf.concat(3, [train, yb])
    # three stride-2 deconvs: 4x4 -> 8x8 -> 16x16 -> 32x32
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
项目:unrolled-GAN    作者:Zardinality    | 项目源码 | 文件源码
def generator(z):
    """Generator: z -> 32x32x1 image in [-1, 1] (tanh output)."""
    w_init = tf.random_normal_initializer(0, 0.02)
    fc_w = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512), initializer=ly.xavier_initializer())
    fc_b = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    net = tf.nn.relu(fully_connected(z, fc_w, fc_b))
    net = tf.reshape(net, (-1, 4, 4, 512))
    # upsample 4x4 -> 8x8 -> 16x16 -> 32x32
    for depth in (256, 128, 64):
        net = ly.conv2d_transpose(net, depth, 3, stride=2,
                                  activation_fn=tf.nn.relu,
                                  normalizer_fn=ly.batch_norm, padding='SAME',
                                  weights_initializer=w_init)
    # bias-free final deconv; the output bias is a separate model variable
    net = ly.conv2d_transpose(net, 1, 3, stride=1,
                              activation_fn=None, padding='SAME',
                              weights_initializer=w_init, biases_initializer=None)
    out_bias = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    return tf.nn.tanh(net + out_bias)
项目:unrolled-GAN    作者:Zardinality    | 项目源码 | 文件源码
def generator(z, label):
    """Conditional generator: latent z + one-hot label -> 32x32x1 image in [-1, 1].

    The label is injected three times: concatenated to z, concatenated to the
    first FC output, and tiled over the 4x4 spatial grid as extra channels.
    """
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    # BUG FIX: this layer previously read `z` instead of `train`, which
    # silently discarded the 1024-unit FC layer and the label
    # re-concatenation directly above.
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    # tile the 10-way label over the 4x4 grid and append as channels
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10])
    train = tf.concat(3, [train, yb])
    # three stride-2 deconvs: 4x4 -> 8x8 -> 16x16 -> 32x32
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def autoencoder(inputs):
    """Convolutional autoencoder for 32x32x1 images.

    Encodes to a 2x2x8 bottleneck, then decodes back to 32x32x1; the tanh on
    the last layer keeps outputs in [-1, 1].
    """
    # encoder: 32x32x1 -> 16x16x32 -> 8x8x16 -> 2x2x8
    bottleneck = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    bottleneck = lays.conv2d(bottleneck, 16, [5, 5], stride=2, padding='SAME')
    bottleneck = lays.conv2d(bottleneck, 8, [5, 5], stride=4, padding='SAME')
    # decoder: 2x2x8 -> 8x8x16 -> 16x16x32 -> 32x32x1
    out = lays.conv2d_transpose(bottleneck, 16, [5, 5], stride=4, padding='SAME')
    out = lays.conv2d_transpose(out, 32, [5, 5], stride=2, padding='SAME')
    out = lays.conv2d_transpose(out, 1, [5, 5], stride=2, padding='SAME',
                                activation_fn=tf.nn.tanh)
    return out

# read MNIST dataset
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
    # `NCHW` data format is only supported on `GPU` devices.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        depth = 32
        images = random_ops.random_uniform([5, 3, 10, 12], seed=1)
        output = layers_lib.conv2d_transpose(
            images, depth, [3, 3], stride=1, padding='SAME',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        sess.run(variables_lib.global_variables_initializer())
        # stride 1 + SAME padding preserves the spatial dimensions
        self.assertListEqual(list(output.eval().shape), [5, depth, 10, 12])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStrideOneValidPaddingNCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        depth = 32
        images = random_ops.random_uniform([5, 3, 10, 12], seed=1)
        output = layers_lib.conv2d_transpose(
            images, depth, [3, 3], stride=1, padding='VALID',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        sess.run(variables_lib.global_variables_initializer())
        # VALID deconv grows each spatial dim by kernel_size - 1
        self.assertListEqual(list(output.eval().shape), [5, depth, 12, 14])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStrideTwoValidPaddingNCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        depth = 32
        expected = [5, depth, 19, 23]
        images = random_ops.random_uniform([5, 3, 9, 11], seed=1)
        output = layers_lib.conv2d_transpose(
            images, depth, [3, 3], stride=[2, 2], padding='VALID',
            data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # static shape inference must agree with the runtime shape
        self.assertListEqual(list(output.get_shape().as_list()), expected)
        sess.run(variables_lib.global_variables_initializer())
        self.assertListEqual(list(output.eval().shape), expected)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        expected = [1, 1, 2, 2]
        images = random_ops.random_uniform([1, 1, 1, 1], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 2], stride=[2, 2], padding='SAME',
            data_format='NCHW')
        # a single pixel deconvolved with a 2x2 kernel at stride 2 doubles
        self.assertListEqual(list(output.get_shape().as_list()), expected)
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        self.assertListEqual(list(output.eval().shape), expected)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        images = random_ops.random_uniform([1, 1, 2, 2], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 2], stride=[2, 2], padding='SAME',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # 2x2 input at stride 2 -> 4x4 output
        self.assertListEqual(list(output.eval().shape), [1, 1, 4, 4])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        images = random_ops.random_uniform([1, 1, 2, 2], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 2], stride=[2, 2], padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # VALID: (2 - 1) * 2 + 2 = 4 along both spatial dims
        self.assertListEqual(list(output.eval().shape), [1, 1, 4, 4])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x1NCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        images = random_ops.random_uniform([1, 1, 3, 2], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 4], stride=[2, 1], padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # VALID: H = (3-1)*2 + 2 = 6, W = (2-1)*1 + 4 = 5
        self.assertListEqual(list(output.eval().shape), [1, 1, 6, 5])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x4NCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        images = random_ops.random_uniform([1, 1, 3, 2], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 4], stride=[2, 4], padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # VALID: H = (3-1)*2 + 2 = 6, W = (2-1)*4 + 4 = 8
        self.assertListEqual(list(output.eval().shape), [1, 1, 6, 8])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x5NCHW(self):
    # NCHW requires a GPU device.
    if not test.is_gpu_available(cuda_only=True):
        return
    with self.test_session(use_gpu=True) as sess:
        images = random_ops.random_uniform([1, 1, 3, 2], seed=1)
        output = layers_lib.conv2d_transpose(
            images, 1, [2, 4], stride=[2, 5], padding='VALID',
            data_format='NCHW')
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        # VALID: H = (3-1)*2 + 2 = 6, W = (2-1)*5 + 4 = 10 (stride > kernel)
        self.assertListEqual(list(output.eval().shape), [1, 1, 6, 10])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testDynamicOutputSizeWithStrideTwoValidPadding(self):
    depth = 32
    # spatial dims are unknown at graph-build time
    images = array_ops.placeholder(np.float32, [None, None, None, 3])
    output = layers_lib.conv2d_transpose(
        images, depth, [3, 3], stride=[2, 2], padding='VALID')
    # only the channel count can be inferred statically
    self.assertListEqual(output.get_shape().as_list(),
                         [None, None, None, depth])

    with self.test_session() as sess:
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
        feed = np.zeros([5, 9, 11, 3], np.float32)
        # at runtime the deconv resolves to (9,11) -> (19,23)
        self.assertListEqual(list(output.eval({images: feed}).shape),
                             [5, 19, 23, depth])
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testWithScopeWithoutActivation(self):
    depth = 32
    images = random_ops.random_uniform([5, 9, 11, 3], seed=1)
    output = layers_lib.conv2d_transpose(
        images,
        depth, [3, 3],
        stride=2,
        padding='VALID',
        activation_fn=None,
        scope='conv7')
    # without an activation the layer's final op is the bias add,
    # named under the explicit scope
    self.assertEqual(output.op.name, 'conv7/BiasAdd')

    with self.test_session() as sess:
        sess.run(variables_lib.global_variables_initializer())
        self.assertListEqual(list(output.eval().shape), [5, 19, 23, depth])
项目:DeepWorks    作者:daigo0927    | 项目源码 | 文件源码
def up_block(block_fn, filters):
    """U-Net-style up block factory.

    Returns f(inputs, down): upsamples `down` by 2x to match `inputs`'
    channel count, concatenates along channels, and applies block_fn twice.
    """
    def f(inputs, down):
        # 2x2 stride-2 deconv doubles the spatial size of `down`
        upsampled = tcl.conv2d_transpose(down,
                                         num_outputs=inputs.shape.as_list()[3],
                                         kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding='SAME')
        merged = tf.concat([inputs, upsampled], axis=3)
        merged = block_fn(filters)(merged)
        # output has the same spatial size as `inputs`
        return block_fn(filters)(merged)
    return f
项目:decorrelated-adversarial-autoencoder    作者:patrickgadd    | 项目源码 | 文件源码
def semi_supervised_decoder_convolutional(input_tensor, batch_size, n_dimensions, network_scale=1.0, img_res=28, img_channels=1):
    """Decode a latent tensor into a flattened img_res x img_res x img_channels image.

    network_scale multiplies every layer's channel count; only 28 and 32
    pixel resolutions are supported. Output pixels pass through a sigmoid
    before flattening.
    """
    m = network_scale

    net = layers.fully_connected(input_tensor, 2*2*int(128*m))
    net = tf.reshape(net, [-1, 2, 2, int(128*m)])

    assert(img_res in [28, 32])

    # (num_outputs, kernel_size, stride, padding) per deconv layer
    if img_res==28:
        specs = [(int(64*m), 3, 2, 'SAME'),
                 (int(64*m), 3, 1, 'SAME'),
                 (int(32*m), 4, 1, 'VALID'),  # grows 4x4 -> 7x7 for the 28px path
                 (int(32*m), 4, 1, 'SAME'),
                 (int(16*m), 3, 2, 'SAME'),
                 (int(16*m), 3, 1, 'SAME'),
                 (int(8*m), 3, 2, 'SAME'),
                 (int(8*m), 3, 1, 'SAME')]
    else:
        specs = [(int(64*m), 3, 2, 'SAME'),
                 (int(64*m), 3, 1, 'SAME'),
                 (int(32*m), 3, 2, 'SAME'),
                 (int(32*m), 3, 1, 'SAME'),
                 (int(16*m), 3, 2, 'SAME'),
                 (int(16*m), 3, 1, 'SAME'),
                 (int(8*m), 3, 2, 'SAME'),
                 (int(8*m), 3, 1, 'SAME')]
    for depth, k, s, pad in specs:
        net = layers.conv2d_transpose(net, depth, k, stride=s, padding=pad)

    net = layers.conv2d_transpose(net, img_channels, 5, stride=1, activation_fn=tf.nn.sigmoid)
    return layers.flatten(net)
项目:zhusuan    作者:thu-ml    | 项目源码 | 文件源码
def vae_conv(observed, n, n_x, n_z, n_particles, is_training):
    """Build the VAE generative model: z ~ N(0, I), x ~ Bernoulli(decoder(z)).

    The decoder is a stack of transposed convolutions mapping a 1x1xn_z
    tensor up to the image plane; returns the zhusuan BayesianNet model.
    """
    with zs.BayesianNet(observed=observed) as model:
        bn_params = {'is_training': is_training,
                     'updates_collections': None}
        # standard-normal prior over the latent code
        z = zs.Normal('z', tf.zeros([n, n_z]), std=1., n_samples=n_particles,
                      group_ndims=1)
        h = tf.reshape(z, [-1, 1, 1, n_z])
        # VALID deconvs expand 1x1 spatially; the stride-2 layers double it
        h = layers.conv2d_transpose(h, 128, kernel_size=3, padding='VALID',
                                    normalizer_fn=layers.batch_norm,
                                    normalizer_params=bn_params)
        h = layers.conv2d_transpose(h, 64, kernel_size=5, padding='VALID',
                                    normalizer_fn=layers.batch_norm,
                                    normalizer_params=bn_params)
        h = layers.conv2d_transpose(h, 32, kernel_size=5, stride=2,
                                    normalizer_fn=layers.batch_norm,
                                    normalizer_params=bn_params)
        h = layers.conv2d_transpose(h, 1, kernel_size=5, stride=2,
                                    activation_fn=None)
        # per-pixel Bernoulli logits, flattened per particle
        x_logits = tf.reshape(h, [n_particles, n, -1])
        x = zs.Bernoulli('x', x_logits, group_ndims=1)
    return model
项目:GAN_Theories    作者:YadiraF    | 项目源码 | 文件源码
def __call__(self, z):
    """DCGAN generator: z -> (size*16, size*16, channel) image via sigmoid."""
    w_init = tf.random_normal_initializer(0, 0.02)
    with tf.variable_scope(self.name) as scope:
        g = tcl.fully_connected(z, self.size * self.size * 512,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=tcl.batch_norm)
        g = tf.reshape(g, (-1, self.size, self.size, 512))  # size
        # three ReLU/batch-norm deconvs: size*2, size*4, size*8
        for depth in (256, 128, 64):
            g = tcl.conv2d_transpose(g, depth, 3, stride=2,
                                     activation_fn=tf.nn.relu,
                                     normalizer_fn=tcl.batch_norm,
                                     padding='SAME',
                                     weights_initializer=w_init)
        # final deconv to size*16 with sigmoid output
        return tcl.conv2d_transpose(g, self.channel, 3, stride=2,
                                    activation_fn=tf.nn.sigmoid,
                                    padding='SAME',
                                    weights_initializer=w_init)
项目:GAN_Theories    作者:YadiraF    | 项目源码 | 文件源码
def __call__(self, x, reuse=False):
    """Autoencoder-shaped network: 64x64x3 image -> n_hidden code -> 64x64x3 output.

    Set reuse=True to share variables with a previous invocation.
    """
    w_init = tf.random_normal_initializer(0, 0.02)
    with tf.variable_scope(self.name) as scope:
        if reuse:
            scope.reuse_variables()
        # --- conv encoder: halve spatial size four times
        # 64x64x3 -> 32x32x64 -> 16x16x128 -> 8x8x256 -> 4x4x512
        size = 64
        d = x
        for mult in (1, 2, 4, 8):
            d = tcl.conv2d(d, num_outputs=size * mult, kernel_size=3,
                           stride=2, activation_fn=lrelu,
                           normalizer_fn=tcl.batch_norm, padding='SAME',
                           weights_initializer=w_init)

        # bottleneck code
        h = tcl.fully_connected(tcl.flatten(d), self.n_hidden,
                                activation_fn=lrelu,
                                weights_initializer=w_init)

        # --- deconv decoder: 4x4x512 back up to 64x64x3
        d = tcl.fully_connected(h, 4 * 4 * 512, activation_fn=tf.nn.relu,
                                normalizer_fn=tcl.batch_norm)
        d = tf.reshape(d, (-1, 4, 4, 512))  # size
        for depth in (256, 128, 64):
            d = tcl.conv2d_transpose(d, depth, 3, stride=2,
                                     activation_fn=tf.nn.relu,
                                     normalizer_fn=tcl.batch_norm,
                                     padding='SAME',
                                     weights_initializer=w_init)
        return tcl.conv2d_transpose(d, 3, 3, stride=2,
                                    activation_fn=tf.nn.sigmoid,
                                    padding='SAME',
                                    weights_initializer=w_init)
项目:GAN-general    作者:weilinie    | 项目源码 | 文件源码
def resBlock(inputs, input_num, output_num, kernel_size, resample=None):
    """Residual block with optional resampling.

    resample: None, 'down', or 'up'. The main path is a 1x1 bottleneck,
    a kernel_size conv (strided / transposed when resampling), and a 1x1
    expansion; the shortcut is identity when shapes match, else a 1x1 conv
    (or subpixelConv2D when upsampling). Returns shortcut + 0.3 * main.
    """
    # FIX: use floor division so channel counts stay ints under Python 3
    # (`/` yields floats there, which are invalid as num_outputs).
    if resample == 'down':
        conv_shortcut = functools.partial(tcl.conv2d, stride=2)
        conv_1 = functools.partial(tcl.conv2d, num_outputs=input_num // 2)
        conv_1b = functools.partial(tcl.conv2d, num_outputs=output_num // 2, stride=2)
        conv_2 = functools.partial(tcl.conv2d, num_outputs=output_num)
    elif resample == 'up':
        conv_shortcut = subpixelConv2D
        conv_1 = functools.partial(tcl.conv2d, num_outputs=input_num // 2)
        conv_1b = functools.partial(tcl.conv2d_transpose, num_outputs=output_num // 2, stride=2)
        conv_2 = functools.partial(tcl.conv2d, num_outputs=output_num)
    elif resample is None:
        conv_shortcut = tcl.conv2d
        conv_1 = functools.partial(tcl.conv2d, num_outputs=input_num // 2)
        conv_1b = functools.partial(tcl.conv2d, num_outputs=output_num // 2)
        conv_2 = functools.partial(tcl.conv2d, num_outputs=output_num)
    else:
        raise Exception('invalid resample value')

    if output_num == input_num and resample is None:
        shortcut = inputs  # Identity skip-connection
    else:
        shortcut = conv_shortcut(inputs=inputs, num_outputs=output_num, kernel_size=1)  # Should kernel_size be larger?

    output = inputs
    output = conv_1(inputs=output, kernel_size=1)
    output = conv_1b(inputs=output, kernel_size=kernel_size)
    output = conv_2(inputs=output, kernel_size=1, biases_initializer=None)  # Should skip bias here?
    # output = Batchnorm(name+'.BN', [0,2,3], output) # Should skip BN op here?

    return shortcut + (0.3*output)


# use depth-to-space for upsampling image
项目:EDSR    作者:iwtw    | 项目源码 | 文件源码
def conv2d_transpose(inputs, outputs_dim, kernel_size, stride, padding="SAME", he_init=False, activation_fn=None, regularization_scale=0.0):
    """Transposed conv layer with a uniform He (he_init=True) or Xavier-style init.

    The weight bound is derived from the average of estimated fan-in and
    fan-out so that the weights have variance 2/avg_fan (He) or 1/avg_fan.
    L2 regularization is applied with `regularization_scale`.
    """
    C = inputs.get_shape()[-1].value
    # FIX: force true division — under Python 2 integer division the fan-in
    # estimate was silently truncated for stride > 1 (e.g. 9/4 -> 2),
    # skewing the initialization variance.
    fan_in = C * kernel_size ** 2 / float(stride ** 2)
    fan_out = C * kernel_size ** 2
    avg_fan = (fan_in + fan_out) / 2.0
    var = (2.0 if he_init else 1.0) / avg_fan
    # uniform on [-b, b] has var = (b - a)**2 / 12 with a == -b (zero mean),
    # so b = sqrt(12 * var) / 2
    upper_bound = np.sqrt(12.0 * var) * 0.5
    weights_initializer = tf.random_uniform_initializer(-upper_bound, upper_bound, seed=None, dtype=tf.float32)
    weights_regularizer = layers.l2_regularizer(scale=regularization_scale)
    return layers.conv2d_transpose(inputs, outputs_dim, kernel_size=kernel_size, stride=stride, padding=padding, weights_initializer=weights_initializer, activation_fn=activation_fn, weights_regularizer=weights_regularizer)
项目:EDSR    作者:iwtw    | 项目源码 | 文件源码
def upsample(inputs, scale, dim, upsample_method="subpixel", activation_fn=None, regularization_scale=0.0):
    """Upsample `inputs` by `scale` (2, 3 or 4) to `dim` output channels.

    upsample_method: "subpixel" (conv + depth_to_space) or "conv_transpose".
    Scale 4 is implemented as two successive x2 steps. The activation is
    applied after the final spatial rearrangement.
    """
    # FIX: previously an unsupported scale/method fell through and raised
    # NameError on the unbound `outputs`; fail fast with a clear error.
    if scale not in (2, 3, 4):
        raise ValueError("unsupported scale: %r (expected 2, 3 or 4)" % (scale,))
    act = activation_fn
    if act == None:
        act = tf.identity
    he = (activation_fn == tf.nn.relu)
    if upsample_method == "subpixel":
        if scale in (2, 3):
            outputs = conv2d(inputs, dim * scale**2, 3, 1, he_init=he, activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, scale)
        else:  # scale == 4: two x2 subpixel steps
            outputs = conv2d(inputs, dim * 2**2, 3, 1, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 2)
            outputs = conv2d(outputs, dim * 2**2, 3, 1, he_init=he, activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 2)
        outputs = act(outputs)
    elif upsample_method == "conv_transpose":
        if scale in (2, 3):
            outputs = utils.conv2d_transpose(inputs, dim, 3, scale, he_init=he, activation_fn=activation_fn, regularization_scale=regularization_scale)
        else:  # scale == 4: two stride-2 transposed convs
            outputs = utils.conv2d_transpose(inputs, dim, 3, 2, regularization_scale=regularization_scale)
            outputs = utils.conv2d_transpose(outputs, dim, 3, 2, he_init=he, activation_fn=activation_fn, regularization_scale=regularization_scale)
        outputs = act(outputs)
    else:
        raise ValueError("unsupported upsample_method: %r" % (upsample_method,))

    return outputs
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def generator(inputs, reuse=False):
    """GAN generator: latent vector -> 32x32x3 image in [-1, 1] (tanh)."""
    with tf.variable_scope('generator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        net = lays.fully_connected(inputs, 4*4*256, scope='fc1')
        net = tf.reshape(net, (batch_size, 4, 4, 256))
        # stride-2 deconvs: 4x4 -> 8x8 -> 16x16 -> 32x32
        for idx, depth in enumerate((128, 64, 64), start=1):
            net = lays.conv2d_transpose(net, depth, 3, stride=2,
                                        scope='conv%d' % idx, padding='SAME',
                                        activation_fn=leaky_relu)
        return lays.conv2d(net, 3, 3, scope='conv4', padding='SAME',
                           activation_fn=tf.nn.tanh)


# Define the Discriminator, a simple CNN with 3 convolution and 2 fully connected layers
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def discriminator(inputs, reuse=False):
    """Score images as real/fake with a 3-conv + 2-FC CNN.

    Args:
        inputs: image batch of shape (batch_size, 32, 32, 3).
        reuse: when True, reuse the variables created by a previous call.

    Returns:
        Unnormalized logits of shape (batch_size, 1).
    """
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        # BUG FIX: the original used lays.conv2d_transpose for all three
        # layers. A discriminator extracts features / downsamples, so plain
        # conv2d is the intended op (the surrounding comment even says
        # "3 convolution" layers). With stride=1 and SAME padding the
        # spatial shapes are unchanged, so the rest of the network —
        # including the 4*4*256 flatten below — is unaffected.
        net = lays.conv2d(inputs, 64, 3, stride=1, scope='conv1', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max1')
        net = lays.conv2d(net, 128, 3, stride=1, scope='conv2', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max2')
        net = lays.conv2d(net, 256, 3, stride=1, scope='conv3', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max3')
        # 32x32 input pooled by 2 three times -> 4x4x256; flatten for the FCs.
        net = tf.reshape(net, (batch_size, 4 * 4 * 256))
        net = lays.fully_connected(net, 128, scope='fc1', activation_fn=leaky_relu)
        net = lays.dropout(net, 0.5)
        net = lays.fully_connected(net, 1, scope='fc2', activation_fn=None)
        return net
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testTrainableFlagIsPassedOn(self):
    """conv2d_transpose must forward `trainable` to the variables it makes."""
    for flag in (True, False):
      # Fresh graph per setting so variable collections stay isolated.
      with ops.Graph().as_default():
        inputs = random_ops.random_uniform([5, 10, 12, 3], seed=1)
        layers_lib.conv2d_transpose(inputs, 32, [3, 3], stride=1, trainable=flag)
        trainables = variables_lib.trainable_variables()
        # Every model variable is in TRAINABLE_VARIABLES iff the flag was set.
        for var in variables.get_model_variables():
          self.assertEqual(flag, var in trainables)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStrideOneValidPadding(self):
    """VALID deconv with stride 1 grows each spatial dim by kernel - 1."""
    filters = 32
    want = [5, 12, 14, filters]  # 3x3 kernel adds 2 to 10x12.

    imgs = random_ops.random_uniform([5, 10, 12, 3], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, filters, [3, 3], stride=1, padding='VALID')
    self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStrideTwoValidPadding(self):
    """VALID deconv, stride 2, 3x3 kernel: dim -> dim * 2 + 1."""
    filters = 32
    want = [5, 19, 23, filters]  # 9 -> 19, 11 -> 23.

    imgs = random_ops.random_uniform([5, 9, 11, 3], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, filters, [3, 3], stride=[2, 2], padding='VALID')
    self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
    # Static shape inference should already agree with the expectation.
    self.assertListEqual(out.get_shape().as_list(), want)

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith1x1StrideTwoSamePadding(self):
    """SAME deconv of a single pixel with stride 2 yields a 2x2 output."""
    want = [1, 2, 2, 1]

    pixel = random_ops.random_uniform([1, 1, 1, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        pixel, 1, [2, 2], stride=[2, 2], padding='SAME')
    # Static shape must match before the graph is even run.
    self.assertListEqual(out.get_shape().as_list(), want)

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith1x1StrideTwoValidPadding(self):
    """VALID deconv of a single pixel, 2x2 kernel, stride 2 -> 2x2 output."""
    want = [1, 2, 2, 1]

    pixel = random_ops.random_uniform([1, 1, 1, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        pixel, 1, [2, 2], stride=[2, 2], padding='VALID')
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWith2x2StrideTwoSamePadding(self):
    """SAME deconv with stride 2 doubles each spatial dimension: 2x2 -> 4x4."""
    want = [1, 4, 4, 1]

    imgs = random_ops.random_uniform([1, 2, 2, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, 1, [2, 2], stride=[2, 2], padding='SAME')
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x1(self):
    """VALID deconv with asymmetric stride [2, 1] and 2x4 kernel."""
    # Height: 3*2 + max(2-2, 0) = 6; width: 2*1 + max(4-1, 0) = 5.
    want = [1, 6, 5, 1]

    imgs = random_ops.random_uniform([1, 3, 2, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, 1, [2, 4], stride=[2, 1], padding='VALID')
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x4(self):
    """VALID deconv with asymmetric stride [2, 4] and 2x4 kernel."""
    # Height: 3*2 = 6; width: 2*4 = 8 (stride equals kernel, no overlap).
    want = [1, 6, 8, 1]

    imgs = random_ops.random_uniform([1, 3, 2, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, 1, [2, 4], stride=[2, 4], padding='VALID')
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeWithStride2x5(self):
    """VALID deconv where stride exceeds kernel size along width."""
    # Height: 3*2 = 6; width: 2*5 = 10 (stride 5 > kernel 4 leaves gaps).
    want = [1, 6, 10, 1]

    imgs = random_ops.random_uniform([1, 3, 2, 1], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, 1, [2, 4], stride=[2, 5], padding='VALID')
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(out.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testOutputSizeRandomSizesAndStridesValidPadding(self):
    """A VALID conv2d undoes a VALID conv2d_transpose of matching params.

    For random spatial sizes, kernels and strides, conv2d applied to the
    output of conv2d_transpose (same kernel/stride/padding) must restore
    the original input shape.
    """
    np.random.seed(0)
    max_image_size = 10

    for _ in range(10):
      in_shape = [
          1, np.random.randint(1, max_image_size),
          np.random.randint(1, max_image_size), 1
      ]
      # Kernel never larger than the input, so the round trip is well-defined.
      kernel = [
          np.random.randint(1, in_shape[1] + 1),
          np.random.randint(1, in_shape[2] + 1)
      ]
      stride = [np.random.randint(1, 3), np.random.randint(1, 3)]

      ops.reset_default_graph()
      graph = ops.Graph()
      with graph.as_default():
        imgs = random_ops.random_uniform(in_shape, seed=1)
        upsampled = layers_lib.conv2d_transpose(
            imgs, 1, kernel, stride=stride, padding='VALID')
        restored = layers_lib.conv2d(
            upsampled, 1, kernel, stride=stride, padding='VALID')

        with self.test_session(graph=graph) as sess:
          sess.run(variables_lib.global_variables_initializer())
          self.assertListEqual(list(restored.eval().shape), in_shape)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testWithScope(self):
    """The `scope` argument names the created ops (scope + activation)."""
    want = [5, 19, 23, 32]

    imgs = random_ops.random_uniform([5, 9, 11, 3], seed=1)
    out = layers_lib.conv2d_transpose(
        imgs, 32, [3, 3], stride=2, padding='VALID', scope='conv7')
    # The default ReLU activation is created inside the requested scope.
    self.assertEqual(out.op.name, 'conv7/Relu')

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      self.assertListEqual(list(out.eval().shape), want)
项目:DeepLearning_VirtualReality_BigData_Project    作者:rashmitripathi    | 项目源码 | 文件源码
def testDeconvWithoutBiasesProducesConv2dTranspose(self):
    """layers.conv2d_transpose with no activation equals the raw nn op.

    With activation_fn=None the layer should reduce to
    nn_ops.conv2d_transpose applied with the layer's own weights.
    """
    stride = 2
    padding = 'VALID'
    out_shape = [5, 19, 23, 32]

    with self.test_session() as sess:
      imgs = random_ops.random_uniform([5, 9, 11, 3], seed=1)
      layer_out = layers_lib.conv2d_transpose(
          imgs,
          32, [3, 3],
          stride=stride,
          padding=padding,
          activation_fn=None,
          scope='conv7')

      # Re-run the primitive op with the very weights the layer created.
      kernel = variables.get_variables_by_name('conv7/weights')[0]
      raw_out = nn_ops.conv2d_transpose(
          imgs,
          kernel,
          out_shape, [1, stride, stride, 1],
          padding=padding)

      sess.run(variables_lib.global_variables_initializer())

      layer_val, raw_val = sess.run([layer_out, raw_out])

      self.assertTrue(
          np.isclose(layer_val, raw_val, 1e-5, 1e-5).all())