Python tensorflow.contrib.layers module: batch_norm() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.contrib.layers.batch_norm().
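
Most of the snippets below follow the same pattern: a linear or convolutional layer built with activation_fn=None, then batch_norm(), then a nonlinearity, with an is_training flag switching between batch statistics and moving averages. For orientation, here is a minimal, self-contained sketch of that pattern (TF 1.x graph mode; the placeholder and variable names are illustrative, not taken from any project below):

import tensorflow as tf
from tensorflow.contrib import layers

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_training = tf.placeholder(tf.bool, [])

# Layer -> batch_norm -> nonlinearity, the ordering used throughout this page.
net = layers.conv2d(images, 64, kernel_size=3, activation_fn=None)
net = layers.batch_norm(net, is_training=is_training, center=True, scale=True)
net = tf.nn.relu(net)

# batch_norm registers its moving-average updates in the UPDATE_OPS collection;
# run them together with the train op, or pass updates_collections=None (as
# several snippets below do) to apply the updates in place.
loss = tf.reduce_mean(tf.square(net))  # dummy loss, for illustration only
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)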

Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        weight_decay = 0.0001
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat     

# specify discriminative model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def inference(input_img):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)  
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Project: various_residual_networks    Author: yuhui-lin
def BN_ReLU(self, net):
        """Batch Normalization and ReLU."""
        # 'gamma' is not used as the next layer is ReLU
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu, )
        self._activation_summary(net)
        return net

def conv2d(self, net, num_ker, ker_size, stride):
        """1D convolution, implemented as a (ker_size x 1) 2D convolution."""
        net = convolution2d(
            net,
            num_outputs=num_ker,
            kernel_size=[ker_size, 1],
            stride=[stride, 1],
            padding='SAME',
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=variance_scaling_initializer(),
            weights_regularizer=l2_regularizer(self.weight_decay),
            biases_initializer=tf.zeros_initializer)
        return net
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat  

# specify inference model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat

# specify discriminative model
Project: website-fingerprinting    Author: AxelGoetz
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with _checked_scope(self, scope or "gru_cell"):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        value = sigmoid(_linear(
          [inputs, state], 2 * self._num_units, True, 1.0))
        r, u = array_ops.split(
            value=value,
            num_or_size_splits=2,
            axis=1)
      with vs.variable_scope("candidate"):
        res = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))

        if self._batch_norm:
          c = batch_norm(res,
                         center=True, scale=True,
                         is_training=self._is_training,
                         scope='bn1')
        else:
          c = res

      new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: zhusuan    Author: thu-ml
def var_dropout(observed, x, n, net_size, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        h = x
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            eps_mean = tf.ones([n, n_in])
            eps = zs.Normal(
                'layer' + str(i) + '/eps', eps_mean, std=1.,
                n_samples=n_particles, group_ndims=1)
            h = layers.fully_connected(
                h * eps, n_out, normalizer_fn=layers.batch_norm,
                normalizer_params=normalizer_params)
            if i < len(net_size) - 2:
                h = tf.nn.relu(h)
        y = zs.OnehotCategorical('y', h)
    return model, h
Project: zhusuan    Author: thu-ml
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Project: gait-recognition    Author: marian-margeta
def get_arg_scope(is_training):
        weight_decay_l2 = 0.1
        batch_norm_decay = 0.999
        batch_norm_epsilon = 0.0001

        with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                            weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                            biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                            weights_initializer = layers.variance_scaling_initializer(),
                            ):
            batch_norm_params = {
                'decay': batch_norm_decay,
                'epsilon': batch_norm_epsilon
            }
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training = is_training):
                with slim.arg_scope([slim.batch_norm],
                                    **batch_norm_params):
                    with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                        activation_fn = tf.nn.elu,
                                        normalizer_fn = slim.batch_norm,
                                        normalizer_params = batch_norm_params) as scope:
                        return scope
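
The scope returned by get_arg_scope() can then be re-entered when the network is built, so every conv/FC layer picks up the ELU activation and batch_norm settings. A usage sketch (the layer and the images tensor are illustrative, not from the project):

with slim.arg_scope(get_arg_scope(is_training=True)):
    net = slim.conv2d(images, 64, [3, 3], scope='conv1')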
Project: deep_unsupervised_posets    Author: asanakoy
def conv_relu(self, input_tensor, kernel_size, kernels_num, stride, batch_norm=True,
                  group=1, name=None):
        with tf.variable_scope(name) as scope:
            assert int(input_tensor.get_shape()[3]) % group == 0
            num_input_channels = int(input_tensor.get_shape()[3]) / group
            w, b = self.get_conv_weights(kernel_size, num_input_channels, kernels_num)
            conv = Convnet.conv(input_tensor, w, b, stride, padding="SAME", group=group)
            if batch_norm:
                conv = tf.cond(self.is_phase_train,
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=True,
                                                           trainable=True,
                                                           reuse=None,
                                                           scope=scope),
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=False,
                                                           trainable=True,
                                                           reuse=True,
                                                           scope=scope))
            conv = tf.nn.relu(conv, name=name)
        return conv
Project: NAF-tensorflow    Author: carpedm20
def fc(layer, output_size, is_training, 
       weight_init, weight_reg=None, activation_fn=None, 
       use_batch_norm=False, scope='fc'):
  if use_batch_norm:
    batch_norm_args = {
      'normalizer_fn': batch_norm,
      'normalizer_params': {
        'is_training': is_training,
      }
    }
  else:
    batch_norm_args = {}

  with tf.variable_scope(scope):
    return fully_connected(
      layer,
      num_outputs=output_size,
      activation_fn=activation_fn,
      weights_initializer=weight_init,
      weights_regularizer=weight_reg,
      biases_initializer=tf.constant_initializer(0.0),
      scope=scope,
      **batch_norm_args
    )
Project: ste-GAN-ography2    Author: bin2415
def discriminator_stego_nn(self, img, batch_size, name):
        eve_input = self.image_processing_layer(img)
        eve_conv1 = convolution2d(eve_input, 64, kernel_size=[5, 5], stride=[2, 2],
                                  activation_fn=tf.nn.relu, normalizer_fn=BatchNorm,
                                  scope='eve/' + name + '/conv1')

        eve_conv2 = convolution2d(eve_conv1, 64 * 2, kernel_size=[5, 5], stride=[2, 2],
                                  activation_fn=tf.nn.relu, normalizer_fn=BatchNorm,
                                  scope='eve/' + name + '/conv2')

        eve_conv3 = convolution2d(eve_conv2, 64 * 4, kernel_size=[5, 5], stride=[2, 2],
                                  activation_fn=tf.nn.relu, normalizer_fn=BatchNorm,
                                  scope='eve/' + name + '/conv3')

        eve_conv4 = convolution2d(eve_conv3, 64 * 8, kernel_size=[5, 5], stride=[2, 2],
                                  activation_fn=tf.nn.relu, normalizer_fn=BatchNorm,
                                  scope='eve/' + name + '/conv4')

        eve_conv4 = tf.reshape(eve_conv4, [batch_size, -1])

        # eve_fc = fully_connected(eve_conv4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=BatchNorm,
        #                          weights_initializer=tf.random_normal_initializer(stddev=1.0))
        eve_fc = fully_connected(eve_conv4, 1, normalizer_fn=BatchNorm,
                                 weights_initializer=tf.random_normal_initializer(stddev=1.0),
                                 scope='eve/' + name + '/final_fc')
        return eve_fc
Project: information-dropout    Author: ucla-vision
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            scope=scope )
Project: information-dropout    Author: ucla-vision
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            normalizer_params = {'is_training' : self.is_training, 'updates_collections': None, 'decay': 0.9},
            scope=scope )
Project: Mendelssohn    Author: diggerdu
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
Project: Mendelssohn    Author: diggerdu
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
    '''
Project: GAN_Theories    Author: YadiraF
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return d
Project: GAN_Theories    Author: YadiraF
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            mu = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
            sigma = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return mu, sigma
Project: generating_people    Author: classner
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
Project: generating_people    Author: classner
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
Project: generating_people    Author: classner
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
Project: generating_people    Author: classner
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
Project: generating_people    Author: classner
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
Project: unrolled-GAN    Author: Zardinality
def generator(z):
    # TensorFlow cannot yet take higher-order derivatives through bias_add,
    # so a hand-rolled fully connected layer is used here instead of the FC
    # layer in tensorflow.contrib.layers; the final conv below drops its bias
    # for the same reason.
    weights = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512), initializer=ly.xavier_initializer())
    bias = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    train = tf.nn.relu(ly.batch_norm(fully_connected(z, weights, bias)))
    train = tf.reshape(train, (-1, 4, 4, 512))
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=None, padding='SAME', biases_initializer=None)
    bias = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    train += bias
    train = tf.nn.tanh(train)
    return train
Project: unrolled-GAN    Author: Zardinality
def generator(z, label):
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10]) 
    train = tf.concat(3, [train, yb])
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
Project: unrolled-GAN    Author: Zardinality
def generator(z, label):
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10]) 
    train = tf.concat(3, [train, yb])
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
Project: tensorflow-litterbox    Author: rwightman
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
Project: DCGAN-WGAN-TF    Author: lovecambi
def generator(self, z, Cc=128, f_h=5, f_w=5):
        with tf.variable_scope("g_deconv0",reuse=None):
            deconv0 = deconv2d(z, [self.batch_size, 4, 4, 8*Cc], 4, 4, 1, 1, bias=not self.Bn, padding='VALID')
            deconv0 = tf.nn.relu(tcl.batch_norm(deconv0)) if self.Bn else tf.nn.relu(deconv0)
        with tf.variable_scope("g_deconv1",reuse=None):
            deconv1 = deconv2d(deconv0, [self.batch_size, 8, 8, 4*Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv1 = tf.nn.relu(tcl.batch_norm(deconv1)) if self.Bn else tf.nn.relu(deconv1)
        with tf.variable_scope("g_deconv2",reuse=None):
            deconv2 = deconv2d(deconv1, [self.batch_size, 16, 16, 2*Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv2 = tf.nn.relu(tcl.batch_norm(deconv2)) if self.Bn else tf.nn.relu(deconv2)
        with tf.variable_scope("g_deconv3",reuse=None):
            deconv3 = deconv2d(deconv2, [self.batch_size, 32, 32, Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv3 = tf.nn.relu(tcl.batch_norm(deconv3)) if self.Bn else tf.nn.relu(deconv3)
        with tf.variable_scope("g_deconv4",reuse=None):
            deconv4 = deconv2d(deconv3, [self.batch_size, 64, 64, self.C], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
        return tf.tanh(deconv4)
Project: DCGAN-WGAN-TF    Author: lovecambi
def discriminator(self, x, Cc=128, f_h=5, f_w=5):
        with tf.variable_scope("d_conv1",reuse=self.DO_SHARE):
            conv1 = conv2d(x, self.C, Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/2 x W/2
            conv1 = lrelu(conv1)
        with tf.variable_scope("d_conv2",reuse=self.DO_SHARE):
            conv2 = conv2d(conv1, Cc, 2*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/4 x W/4
            conv2 = lrelu(tcl.batch_norm(conv2)) if self.Bn else lrelu(conv2)
        with tf.variable_scope("d_conv3",reuse=self.DO_SHARE):
            conv3 = conv2d(conv2, 2*Cc, 4*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/8 x W/8
            conv3 = lrelu(tcl.batch_norm(conv3)) if self.Bn else lrelu(conv3)
        with tf.variable_scope("d_conv4",reuse=self.DO_SHARE):
            conv4 = conv2d(conv3, 4*Cc, 8*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/16 x W/16
            conv4 = lrelu(tcl.batch_norm(conv4)) if self.Bn else lrelu(conv4)
        with tf.variable_scope("d_conv5",reuse=self.DO_SHARE):
            conv5 = conv2d(conv4, 8*Cc, 1, 4, 4, 1, 1, bias=not self.Bn, padding='VALID') # 1 x 1
        return tf.reshape(conv5, [-1, 1])
Project: DeepWorks    Author: daigo0927
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            # tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            logits = tcl.fully_connected(x, num_outputs = self.num_output)

            return logits
Project: DeepWorks    Author: daigo0927
def _bn_relu_conv(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.batch_norm(inputs)
        x = tf.nn.relu(x)
        x = tcl.conv2d(x,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        return x
    return f
Project: DeepWorks    Author: daigo0927
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
Project: DeepWorks    Author: daigo0927
def _conv_bn_relu(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.conv2d(inputs,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        x = tcl.batch_norm(x)
        x = tf.nn.relu(x)
        return x
    return f
Project: DeepWorks    Author: daigo0927
def __init__(self,
                 output_ch,  # number of output channels; spatial size is the same as the input
                 block_fn = 'origin',
                 name = 'unet'):
        self.output_ch = output_ch
        self.name = name

        assert block_fn in ['batch_norm', 'origin'], 'choose \'batch_norm\' or \'origin\''
        if block_fn == 'batch_norm':
            self.block_fn = _conv_bn_relu
        elif block_fn == 'origin':
            self.block_fn = _conv_relu
Project: deligan    Author: val-iisc
def discriminator(image, Reuse=False):
    with tf.variable_scope('disc', reuse=Reuse):
        image = tf.reshape(image, [-1, 28, 28, 1])
        h0 = lrelu(conv(image, 5, 5, 1, df_dim, stridex=2, stridey=2, name='d_h0_conv'))
        h1 = lrelu(batch_norm(conv(h0, 5, 5, df_dim, df_dim*2, stridex=2, stridey=2, name='d_h1_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn1'))
        h2 = lrelu(batch_norm(conv(h1, 3, 3, df_dim*2, df_dim*4, stridex=2, stridey=2, name='d_h2_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn2'))
        h3 = tf.nn.max_pool(h2, ksize=[1,4,4,1], strides=[1,1,1,1],padding='VALID')
        h6 = tf.reshape(h2,[-1, 4*4*df_dim*4])
        h7 = Minibatch_Discriminator(h3, num_kernels=df_dim*4, name = 'd_MD')
        h8 = dense(tf.reshape(h7, [batchsize, -1]), df_dim*4*2, 1, scope='d_h8_lin')
        return tf.nn.sigmoid(h8), h8
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def inference(input_img):
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.fully_connected(input_img, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Project: Semi_Supervised_GAN    Author: ChunyuanLI
def discriminator(input_img):
    # input_img = Input(batch_shape=(None, 3, 32, 32), dtype=im_dtype)
    with tf.variable_scope('Net_Dis') as scope:
        xx = layers.fully_connected(input_img, num_outputs=1000, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx0 = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx0)
        xx = tf.nn.relu(xx)
        logits = layers.fully_connected(xx, label_size, activation_fn=None)

    return  logits, xx0

# pdb.set_trace()
Project: Automatic-Image-Colorization    Author: Armour
def batch_normal_new(input_data, scope, training_flag):
        """
        Batch normalization; this is the new version using the built-in batch_norm function.
        :param input_data: the input data
        :param scope: variable scope
        :param training_flag: boolean tensor indicating whether we are training
        :return: normalized data
        """
        return tf.cond(training_flag,
                       lambda: batch_norm(input_data, decay=0.9999, is_training=True, center=True, scale=True,
                                          updates_collections=None, scope=scope),
                       lambda: batch_norm(input_data, decay=0.9999, is_training=False, center=True, scale=True,
                                          updates_collections=None, scope=scope, reuse=True),
                       name='batch_normalization')
Project: Densenet-Tensorflow    Author: taki0112
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True))
Project: Densenet-Tensorflow    Author: taki0112
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True))
Project: website-fingerprinting    Author: AxelGoetz
def __init__(self, num_units, input_size=None, activation=tanh, is_training=True, batch_norm=True):
    self._is_training = is_training
    self._batch_norm = batch_norm

    super().__init__(num_units, input_size, activation)
Project: zhusuan    Author: thu-ml
def vae_conv(observed, n, n_x, n_z, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        z_mean = tf.zeros([n, n_z])
        z = zs.Normal('z', z_mean, std=1., n_samples=n_particles,
                      group_ndims=1)
        lx_z = tf.reshape(z, [-1, 1, 1, n_z])
        lx_z = layers.conv2d_transpose(
            lx_z, 128, kernel_size=3, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 64, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 1, kernel_size=5, stride=2,
            activation_fn=None)
        x_logits = tf.reshape(lx_z, [n_particles, n, -1])
        x = zs.Bernoulli('x', x_logits, group_ndims=1)
    return model
Project: gait-recognition    Author: marian-margeta
def residual_block(net, ch = 256, ch_inner = 128, scope = None, reuse = None, stride = 1):
        """
        Bottleneck v2
        """

        with slim.arg_scope([layers.convolution2d],
                            activation_fn = None,
                            normalizer_fn = None):
            with tf.variable_scope(scope, 'ResidualBlock', reuse = reuse):
                in_net = net

                if stride > 1:
                    net = layers.convolution2d(net, ch, kernel_size = 1, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 1)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 3, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch, 1, activation_fn = None)

                net = tf.nn.relu(in_net + net)

        return net
Project: DocumentSegmentation    Author: SeguinBe
def resnet_v1_50_fn(input_tensor: tf.Tensor, is_training=False, blocks=4, weight_decay=0.0001, renorm=True) -> tf.Tensor:
    with slim.arg_scope(nets.resnet_v1.resnet_arg_scope(weight_decay=weight_decay, batch_norm_decay=0.999)), \
         slim.arg_scope([layers.batch_norm], renorm_decay=0.95, renorm=renorm):
        input_tensor = mean_substraction(input_tensor)
        assert 0 < blocks <= 4
        blocks_list = [
              nets.resnet_v1.resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
              nets.resnet_v1.resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
              nets.resnet_v1.resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
              nets.resnet_v1.resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
        ]
        net, endpoints = nets.resnet_v1.resnet_v1(input_tensor,
                                                  blocks=blocks_list[:blocks],
                                                  num_classes=None,
                                                  is_training=is_training,
                                                  global_pool=False,
                                                  output_stride=None,
                                                  include_root_block=True,
                                                  reuse=None,
                                                  scope='resnet_v1_50')

        desired_endpoints = ['resnet_augmented/resnet_v1_50/conv1',
                             'resnet_v1_50/block1/unit_2/bottleneck_v1',
                             'resnet_v1_50/block2/unit_3/bottleneck_v1',
                             'resnet_v1_50/block3/unit_5/bottleneck_v1',
                             'resnet_v1_50/block4/unit_2/bottleneck_v1'
                             ]

        intermediate_layers = list()
        for d in desired_endpoints:
            intermediate_layers.append(endpoints[d])

        return net, intermediate_layers
Project: web_page_classification    Author: yuhui-lin
def BN_ReLU(self, net):
        # Batch Normalization and ReLU
        # 'gamma' is not used as the next layer is ReLU
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu, )
        # ReLU is already applied via activation_fn above.
        self._activation_summary(net)
        return net
Project: deep_unsupervised_posets    Author: asanakoy
def fc_relu(self, input_tensor, num_outputs, relu=False, batch_norm=False, weight_std=0.005,
                bias_init_value=0.1, name=None):
        if batch_norm and not relu:
            raise ValueError('Cannot use batch normalization without following RELU')
        with tf.variable_scope(name) as scope:
            num_inputs = int(np.prod(input_tensor.get_shape()[1:]))
            w, b = self.get_fc_weights(num_inputs, num_outputs,
                                       weight_std=weight_std,
                                       bias_init_value=bias_init_value)

            fc_relu = None
            input_tensor_reshaped = tf.reshape(input_tensor, [-1, num_inputs])
            fc = tf.add(tf.matmul(input_tensor_reshaped, w), b, name='fc' if relu or batch_norm else name)
            if batch_norm:
                fc = tf.cond(self.is_phase_train,
                             lambda: tflayers.batch_norm(fc,
                                                         decay=self.batch_norm_decay,
                                                         is_training=True,
                                                         trainable=True,
                                                         reuse=None,
                                                         scope=scope),
                             lambda: tflayers.batch_norm(fc,
                                                         decay=self.batch_norm_decay,
                                                         is_training=False,
                                                         trainable=True,
                                                         reuse=True,
                                                         scope=scope))
            if relu:
                fc_relu = tf.nn.relu(fc, name=name)
        return fc, fc_relu
Project: Img2Img-Translation-Tensorflow    Author: lovecambi
def batch_norm(x, train_mode=True, epsilon=1e-5, momentum=0.9, name="bn"):
    with tf.variable_scope(name):
        return tcl.batch_norm(x, 
                              decay=momentum, 
                              updates_collections=None, 
                              epsilon=epsilon, 
                              scale=True, 
                              is_training=train_mode, 
                              trainable=True, 
                              scope=name)
Project: predictron    Author: zhongwen
def iter_func(self, state):
    sc = predictron_arg_scope()

    with tf.variable_scope('value'):
      value_net = slim.fully_connected(slim.flatten(state), 32, scope='fc0')
      value_net = layers.batch_norm(value_net, activation_fn=tf.nn.relu, scope='fc0/preact')
      value_net = slim.fully_connected(value_net, self.maze_size, activation_fn=None, scope='fc1')

    with slim.arg_scope(sc):
      net = slim.conv2d(state, 32, [3, 3], scope='conv1')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv1/preact')
      net_flatten = slim.flatten(net, scope='conv1/flatten')

      with tf.variable_scope('reward'):
        reward_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        reward_net = layers.batch_norm(reward_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        reward_net = slim.fully_connected(reward_net, self.maze_size, activation_fn=None, scope='fc1')

      with tf.variable_scope('gamma'):
        gamma_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        gamma_net = layers.batch_norm(gamma_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        gamma_net = slim.fully_connected(gamma_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      with tf.variable_scope('lambda'):
        lambda_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        lambda_net = layers.batch_norm(lambda_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        lambda_net = slim.fully_connected(lambda_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      net = slim.conv2d(net, 32, [3, 3], scope='conv2')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv2/preact')

      net = slim.conv2d(net, 32, [3, 3], scope='conv3')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv3/preact')
    return net, reward_net, gamma_net, lambda_net, value_net
Project: learning-tensorflow    Author: Salon-sai
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name
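
The excerpt above ends at the constructor. In the common DCGAN-style wrapper, this class also defines a __call__ that forwards the stored hyperparameters to tf.contrib.layers.batch_norm; a sketch of that assumed companion method (not shown in the source):

def __call__(self, x, train=True):
        # Assumed method, not part of the excerpt: delegate to the contrib
        # layer, reusing the constructor's epsilon/momentum and the scope name
        # so the same variables are shared across calls.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)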