Python tensorflow.contrib.slim module: conv2d() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.contrib.slim.conv2d().
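
A quick orientation before the examples: slim.conv2d(inputs, num_outputs, kernel_size, ...) builds a 2-D convolution whose defaults include a ReLU activation and SAME padding. A minimal sketch (TensorFlow 1.x, where tf.contrib.slim is available; the shapes and names below are illustrative only):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# An NHWC input batch; the batch dimension is left unspecified.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# 64 output channels, 3x3 kernel; ReLU and SAME padding are slim's defaults.
net = slim.conv2d(images, 64, [3, 3], scope='conv1')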

Project: TensorFlow-ADGM    Author: dancsalo
def create_architecture(self, mode, tag=None):

        training = mode == 'TRAIN'
        testing = mode == 'TEST'

        assert tag is not None

        # handle most of the regularizers here
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
        biases_regularizer = weights_regularizer

        # list as many types of layers as possible, even if they are not used now
        with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                       weights_regularizer=weights_regularizer,
                       biases_regularizer=biases_regularizer,
                       biases_initializer=tf.constant_initializer(0.0)):
            self.build_network()

        elbo = self.add_losses()
        self._summary_op = tf.summary.merge_all()
        return elbo
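
For context, arg_scope (used above) installs default keyword arguments for every listed op inside the with block, and individual calls may still override them. A minimal illustration, hypothetical code rather than part of the project:

with arg_scope([slim.conv2d],
               weights_regularizer=tf.contrib.layers.l2_regularizer(0.0005)):
    net = slim.conv2d(images, 32, [3, 3], scope='a')  # picks up the regularizer
    net = slim.conv2d(net, 32, [3, 3], scope='b')     # per-call overrides still win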
Project: social-scene-understanding    Author: cvlab-epfl
def multiscale_features(graph, names, dims, size, scope='features'):
  """
  extract features from multiple endpoints, do dimensionality
  reduction and resize to the given size
  """
  with tf.variable_scope(scope):
    endpoints = []
    for i, name in enumerate(names):
      endpoint = graph.get_tensor_by_name(name)
      if dims is not None:
        endpoint = slim.conv2d(endpoint, dims[i], 1,
                               activation_fn=None,
                               normalizer_fn=None)
      endpoint = tf.image.resize_images(endpoint, [size[0], size[1]])
      endpoints.append(endpoint)
  return tf.concat(endpoints, 3)
Project: HandDetection    Author: YunqiuXu
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  # Batch-norm statistics and affine parameters are intentionally frozen here
  # (is_training=False, trainable=False), regardless of the function's
  # is_training argument; this is common when fine-tuning detection backbones.
  batch_norm_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': False,
    'updates_collections': tf.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
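
Note that the returned arg_sc is an ordinary mapping of op keys to default kwargs; callers typically re-enter it later, e.g. with slim.arg_scope(resnet_arg_scope(is_training=True)): around the backbone-building code.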
Project: convolutional-pose-machines-tensorflow    Author: timctho
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 self.center_map],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
Project: convolutional-pose-machines-tensorflow    Author: timctho
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 # self.center_map,
                                                 ],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
Project: easy-tensorflow    Author: khanhptnk
def arg_scope(self):
    """Configure the neural network's layers."""
    batch_norm_params = {
      "is_training" : self.is_training,
      "decay" : 0.9997,
      "epsilon" : 0.001,
      "variables_collections" : {
        "beta" : None,
        "gamma" : None,
        "moving_mean" : ["moving_vars"],
        "moving_variance" : ["moving_vars"]
      }
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                          stddev=self._hparams.init_stddev),
                        weights_regularizer=slim.l2_regularizer(
                          self._hparams.regularize_constant),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as sc:
      return sc
Project: EDSR-Tensorflow    Author: jmiller656
def upsample(x,scale=2,features=64,activation=tf.nn.relu):
    assert scale in [2,3,4]
    x = slim.conv2d(x,features,[3,3],activation_fn=activation)
    if scale == 2:
        ps_features = 3*(scale**2)
        x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
        #x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
        x = PS(x,2,color=True)
    elif scale == 3:
        ps_features = 3*(scale**2)
        x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
        #x = slim.conv2d_transpose(x,ps_features,9,stride=1,activation_fn=activation)
        x = PS(x,3,color=True)
    elif scale == 4:
        ps_features = 3*(2**2)
        for i in range(2):
            x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
            #x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
            x = PS(x,2,color=True)
    return x
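
PS above is the repo's periodic-shuffling (sub-pixel) upsampling helper, which is not shown in this excerpt. As a rough stand-in, tf.depth_to_space performs the same (H, W, C*r^2) -> (H*r, W*r, C) rearrangement, up to channel ordering; a minimal sketch:

def PS(x, r, color=True):
    # Hypothetical substitute for the repo's pixel-shuffle op; the actual
    # implementation may order the color channels differently.
    return tf.depth_to_space(x, r)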
Project: taskcv-2017-public    Author: VisionLearningGroup
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
      with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                          padding='SAME') as arg_sc:
          return arg_sc
Project: canshi    Author: hungsing92
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                     activation_fn=None,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
Project: canshi    Author: hungsing92
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Project: TensorFlow-ADGM    Author: dancsalo
def gaussian_stochastic(self, input_tensor, num_maps, scope):
        """
        :param inputs_list: list of Tensors to be added and input into the block
        :return: random variable single draw, mean, standard deviation, and intermediate representation
        """
        with tf.variable_scope(scope):
            input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) if len(input_tensor.get_shape()) != 4 \
                else input_tensor
            intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1], weights_initializer=self._initializer,
                                       scope='conv1')
            mean = slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                               activation_fn=None, scope='mean')
            sigma2 = tf.nn.softplus(
                slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                            activation_fn=None, scope='sigma2'))
            rv_single_draw = mean + tf.sqrt(sigma2) * tf.random_normal(tf.shape(mean))

        self.split_labeled_unlabeled(mean, '{}_mu'.format(scope))
        self.split_labeled_unlabeled(sigma2, '{}_sigma2'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
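
The final line inside the variable scope is the standard reparameterization trick: a draw from N(mean, sigma2) is written as mean + sqrt(sigma2) * eps with eps ~ N(0, I), which keeps the sample differentiable with respect to mean and sigma2.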
Project: TensorFlow-ADGM    Author: dancsalo
def multinomial_stochastic(self, input_tensor, num_maps, scope):
        """
        :param inputs_list: list of Tensors to be added and input into the block
        :return: random variable single draw, mean, and intermediate representation
        """
        with tf.variable_scope(scope):
            input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) if len(input_tensor.get_shape()) != 4 \
                else input_tensor
            intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1], weights_initializer=self._initializer,
                                  scope='conv1')
            pi = slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                                 activation_fn=None, scope='mean')
            rv_single_draw = tf.nn.softmax(pi)
        self.split_labeled_unlabeled(pi, '{}_pi'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
Project: TensorFlow-ADGM    Author: dancsalo
def create_architecture(self, mode, tag=None):

        training = mode == 'TRAIN'
        testing = mode == 'TEST'

        assert tag is not None

        # handle most of the regularizers here
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
        biases_regularizer = weights_regularizer

        # list as many types of layers as possible, even if they are not used now
        with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                       weights_regularizer=weights_regularizer,
                       biases_regularizer=biases_regularizer,
                       biases_initializer=tf.constant_initializer(0.0)):
            self.build_network()

        elbo = self.add_losses()
        self._summary_op = tf.summary.merge_all()
        return elbo
Project: TensorFlow-ADGM    Author: dancsalo
def gaussian_stochastic(self, input_tensor, num_maps, scope):
        """
        :param inputs_list: list of Tensors to be added and input into the block
        :return: random variable single draw, mean, standard deviation, and intermediate representation
        """
        with tf.variable_scope(scope):
            input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) if len(input_tensor.get_shape()) != 4 \
                else input_tensor
            intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1], weights_initializer=self._initializer,
                                       scope='conv1')
            mean = slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                               activation_fn=None, scope='mean')
            sigma2 = tf.nn.softplus(
                slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                            activation_fn=None, scope='sigma2'))
            rv_single_draw = mean + tf.sqrt(sigma2) * tf.random_normal(tf.shape(mean))

        self.split_labeled_unlabeled(mean, '{}_mu'.format(scope))
        self.split_labeled_unlabeled(sigma2, '{}_sigma2'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
Project: TensorFlow-ADGM    Author: dancsalo
def multinomial_stochastic(self, input_tensor, num_maps, scope):
        """
        :param inputs_list: list of Tensors to be added and input into the block
        :return: random variable single draw, mean, and intermediate representation
        """
        with tf.variable_scope(scope):
            input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) if len(input_tensor.get_shape()) != 4 \
                else input_tensor
            intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1], weights_initializer=self._initializer,
                                  scope='conv1')
            pi = slim.conv2d(intermediate, num_maps, [1, 1], weights_initializer=self._initializer,
                                 activation_fn=None, scope='mean')
            rv_single_draw = tf.nn.softmax(pi)
        self.split_labeled_unlabeled(pi, '{}_pi'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
Project: DenseHumanBodyCorrespondences    Author: halimacc
def classify(self, model_range, seg_range, feature_lr, classifier_lr):
        feat_opt = tf.train.AdamOptimizer(feature_lr)
        clas_opt = tf.train.AdamOptimizer(classifier_lr)
        for model in model_range:
            for seg in seg_range:
                with tf.variable_scope('classifier-{}-{}'.format(model, seg)):
                    self.preds[(model, seg)] = slim.conv2d(self.feature, 500, [1, 1])
                    self.clas_vars[(model, seg)] = slim.get_model_variables()[-2:]

                with tf.variable_scope('losses-{}-{}'.format(model, seg)):
                    self.losses[(model, seg)] = self.loss(self.labels, self.preds[(model, seg)])
                    grad = tf.gradients(self.losses[(model, seg)], self.feat_vars + self.clas_vars[(model, seg)])
                    train_op_feat = feat_opt.apply_gradients(zip(grad[:-2], self.feat_vars))
                    train_op_clas = clas_opt.apply_gradients(zip(grad[-2:], self.clas_vars[(model, seg)]))
                    self.train_ops[(model, seg)] = tf.group(train_op_feat, train_op_clas)
        return self.losses, self.train_ops
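
Here tf.gradients differentiates each loss with respect to the shared feature variables plus that classifier's two variables in a single pass; grad[:-2] feeds the feature optimizer and grad[-2:] the classifier optimizer, so the two parameter groups can train with different learning rates.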
Project: tensorflow_face    Author: ZhihengCV
def squeezenet(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.5,
               spatial_squeeze=True,
               scope='squeeze'):
    """
    SqueezeNet v1.1
    """
    with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d,
                             slim.avg_pool2d, fire_module],
                            outputs_collections=end_points_collection):
            nets = squeezenet_inference(inputs, is_training, keep_prob)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
            return nets, end_points
Project: tensorflow_face    Author: ZhihengCV
def densenet_block(inputs, layer_num, growth, bc_mode, scope, is_training, keep_prob):
    with tf.variable_scope(scope, 'block1', [inputs]):
        currents = inputs
        for idx in range(layer_num):
            if not bc_mode:
                new_feature = slim.conv2d(currents, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            else:
                new_feature = slim.conv2d(currents, growth*4,
                                          [1, 1], scope='bottom_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_b_{:d}'.format(idx))
                new_feature = slim.conv2d(new_feature, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            currents = tf.concat([currents, new_feature], axis=3)
        return currents
Project: nec_tensorflow    Author: toth-adam
def _create_conv_layers(self):
        """
        Create convolutional layers in the Tensorflow graph according to the hyperparameters, using Tensorflow slim
        library.

        Returns
        -------
        conv_layers: list
            The list of convolutional operations.

        """
        lengths_set = {len(o) for o in (self._num_outputs, self._kernel_size, self._stride)}
        if len(lengths_set) != 1:
            msg = "The lengths of the conv. layers params vector should be same. Lengths: {}, Vectors: {}".format(
                [len(o) for o in (self._num_outputs, self._kernel_size, self._stride)],
                (self._num_outputs, self._kernel_size, self._stride))
            raise ValueError(msg)
        conv_layers = []
        inputs = [self.state]
        for i, (num_out, kernel, stride) in enumerate(zip(self._num_outputs, self._kernel_size, self._stride)):
            layer = slim.conv2d(activation_fn=tf.nn.elu, inputs=inputs[i], num_outputs=num_out,
                                kernel_size=kernel, stride=stride, padding='SAME')
            conv_layers.append(layer)
            inputs.append(layer)
        return conv_layers
Project: supic    Author: Hirico
def build_model(self):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
            with tf.variable_scope('model', reuse=self.reuse_variables):

                self.left_pyramid  = self.scale_pyramid(self.left,  4)
                if self.mode == 'train':
                    self.right_pyramid = self.scale_pyramid(self.right, 4)

                if self.params.do_stereo:
                    self.model_input = tf.concat([self.left, self.right], 3)
                else:
                    self.model_input = self.left

                #build model
                if self.params.encoder == 'vgg':
                    self.build_vgg()
                elif self.params.encoder == 'resnet50':
                    self.build_resnet50()
                else:
                    return None
Project: antgo    Author: jianzfb
def _bn_relu_conv_block(input,
                        filters,
                        kernel=(3, 3),
                        stride=(1, 1),
                        weight_decay=5e-4):
  ''' Adds a Conv-BatchNorm-ReLU block for DPN
  Args:
      input: input tensor
      filters: number of output filters
      kernel: convolution kernel size
      stride: stride of convolution
      weight_decay: l2 regularization coefficient
  Returns: a TensorFlow tensor
  '''
  channel_axis = -1

  x = slim.conv2d(input, filters, kernel, padding='SAME', stride=stride,
                  weights_regularizer=slim.l2_regularizer(weight_decay),
                  weights_initializer=tf.contrib.layers.xavier_initializer(),
                  biases_initializer=None)

  x = slim.batch_norm(x)
  x = tf.nn.relu(x)
  return x
Project: antgo    Author: jianzfb
def _root_block(input,
                initial_conv_filters,
                weight_decay=5e-4,
                ksize=(7,7),
                is_pool=True):
  ''' Adds an initial conv block, with batch norm and relu for the DPN
  Args:
      input: input tensor
      initial_conv_filters: number of filters for initial conv block
      weight_decay: weight decay factor
      ksize: kernel size of the initial convolution
      is_pool: whether to append a 3x3, stride-2 max pool
  Returns: a TensorFlow tensor
  '''
  x = slim.conv2d(input,
                  initial_conv_filters,
                  ksize,
                  padding='SAME',
                  stride=(1, 1),
                  weights_regularizer=slim.l2_regularizer(weight_decay),
                  weights_initializer=tf.contrib.layers.xavier_initializer(),
                  biases_initializer=None)
  x = slim.batch_norm(x)
  x = tf.nn.relu(x)
  if is_pool:
    x = slim.max_pool2d(x, (3, 3), stride=(2, 2), padding='SAME')
  return x
Project: antgo    Author: jianzfb
def gcn_block(inputs,
              num_class,
              kernel_size,
              scope=None):
  with tf.variable_scope(scope, 'gcn_block', [inputs]):
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=None,
                        normalizer_fn=None,
                        normalizer_params=None,
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                        biases_initializer=tf.zeros_initializer(),
                        biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
      left_conv1 = slim.conv2d(inputs, num_class, [kernel_size, 1])
      left_conv2 = slim.conv2d(left_conv1, num_class, [1, kernel_size])

      right_conv1 = slim.conv2d(inputs, num_class, [1, kernel_size])
      right_conv2 = slim.conv2d(right_conv1, num_class, [kernel_size, 1])

      result_sum = tf.add(left_conv2, right_conv2, name='gcn_module')
      return result_sum
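
The two branches factorize a kernel_size x kernel_size convolution into stacked one-dimensional convolutions (k x 1 followed by 1 x k, and the reverse), the Global Convolutional Network trick for obtaining a large receptive field at far lower cost than a full k x k kernel.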
Project: antgo    Author: jianzfb
def gcn_br(inputs, scope):
  with tf.variable_scope(scope, 'gcn_br', [inputs]):
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=tf.nn.relu,
                        normalizer_fn=None,
                        normalizer_params=None,
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                        biases_initializer=tf.zeros_initializer(),
                        biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
      num_class = inputs.get_shape()[3]
      conv = slim.conv2d(inputs, num_class, [3, 3])
      conv = slim.conv2d(conv, num_class, [3, 3], activation_fn=None)
      result_sum = tf.add(inputs, conv, name='fcn_br')
      return result_sum
Project: faceNet_RealTime    Author: jack55436001
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
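
The trailing 1x1 convolution projects the concatenated branches back to the input depth so the addition is well-formed, and net += scale * up applies the residual scaling (small factors, roughly 0.1 to 0.3) that the Inception-ResNet authors recommend for training stability.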

# Inception-ResNet-B
Project: faceNet_RealTime    Author: jack55436001
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Project: faceNet_RealTime    Author: jack55436001
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
Project: faceNet_RealTime    Author: jack55436001
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net

# Inception-ResNet-B
Project: faceNet_RealTime    Author: jack55436001
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net


# Inception-ResNet-C
Project: faceNet_RealTime    Author: jack55436001
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Project: faceNet_RealTime    Author: jack55436001
def reduction_b(net):
    with tf.variable_scope('Branch_0'):
        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                   padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
                                    scope='Conv2d_0b_3x3')
        tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_3'):
        tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
    net = tf.concat([tower_conv_1, tower_conv1_1,
                     tower_conv2_2, tower_pool], 3)
    return net
Project: faceNet_RealTime    Author: jack55436001
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
Project: faceNet_RealTime    Author: jack55436001
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images

                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')

                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
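
slim.repeat, used between the strided convolutions above, applies the given layer function the requested number of times, feeding each call's output into the next and numbering the variable scopes automatically; conv2d_block is a project-defined helper not shown in this excerpt.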
Project: faceNet_RealTime    Author: jack55436001
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
Project: ProgressiveNeuralNetwork    Author: GoingMyWay
def __create_network(self, scope, img_shape=(80, 80)):
        with tf.variable_scope(self.task_name):
            with tf.variable_scope(scope):
                with tf.variable_scope('input_data'):
                    self.inputs = tf.placeholder(shape=[None, *img_shape, cfg.HIST_LEN], dtype=tf.float32)
                with tf.variable_scope('networks'):
                    with tf.variable_scope('conv_1'):
                        self.conv_1 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.inputs, num_outputs=32,
                                                  kernel_size=[8, 8], stride=4, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('conv_2'):
                        self.conv_2 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_1, num_outputs=64,
                                                  kernel_size=[4, 4], stride=2, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('conv_3'):
                        self.conv_3 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_2, num_outputs=64,
                                                  kernel_size=[3, 3], stride=1, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('f_c'):
                        self.fc = slim.fully_connected(slim.flatten(self.conv_3), 512,
                                                       activation_fn=tf.nn.elu, trainable=self.is_train)
Project: tf_face    Author: ZhijianChan
def block17(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 ResNet block."""
    with tf.variable_scope(scope, 'Block17', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Inception-ResNet-C
# (2 branches)
Project: tf_face    Author: ZhijianChan
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Reduction-A
# (3 branches)
Project: tf_face    Author: ZhijianChan
def block17(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 ResNet block."""
    with tf.variable_scope(scope, 'Block17', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Inception-ResNet-C
# (2 branches)
Project: tf_face    Author: ZhijianChan
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
Project: tf_face    Author: ZhijianChan
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True}  # [test1: add 'gamma']
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):  # [test4: add weight_decay to biases]):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
Project: tf_face    Author: ZhijianChan
def block35(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resx block."""
    with tf.variable_scope(scope, 'Block35', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        # tensor dimension: NxWxHxC, concat at dim-c
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        # output_num of up should be equal to input_num of layer
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Inception-ResNet-B
# (2 branches)
Project: tf_face    Author: ZhijianChan
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# 35x35x256 -> 17x17x896
# (3 branches)
Project: tf_face    Author: ZhijianChan
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2]
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(inputs, is_training=phase_train, keep_prob=keep_prob,
                                   bottleneck_size=bottleneck_size, reuse=reuse)
Project: Master-R-CNN    Author: Mark110
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                     activation_fn=None,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      # Note: UPDATE_OPS_EXTRA is a project-specific collection key; it is
      # not defined by stock tf.GraphKeys.
      'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
Project: Master-R-CNN    Author: Mark110
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Project: tf-faster-rcnn    Author: endernewton
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  # Batch-norm statistics and affine parameters are intentionally frozen here
  # (is_training=False, trainable=False), regardless of the function's
  # is_training argument; this is common when fine-tuning detection backbones.
  batch_norm_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': False,
    'updates_collections': tf.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
Project: tf-faster-rcnn    Author: endernewton
def _image_to_head(self, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                        trainable=False, scope='conv1')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                        trainable=False, scope='conv2')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                        trainable=is_training, scope='conv3')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv4')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv5')

    self._act_summaries.append(net)
    self._layers['head'] = net

    return net
Project: DQN    Author: pekaalto
def create_network(self, input, trainable):
        if trainable:
            wr = slim.l2_regularizer(self.regularization)
        else:
            wr = None

        # The input is a stack of black-and-white frames; move the stack
        # into the channel position (last in TensorFlow's NHWC layout).
        input_t = tf.transpose(input, [0, 2, 3, 1])

        net = slim.conv2d(input_t, 8, (7, 7), data_format="NHWC",
            activation_fn=tf.nn.relu, stride=3, weights_regularizer=wr, trainable=trainable)
        net = slim.max_pool2d(net, 2, 2)
        net = slim.conv2d(net, 16, (3, 3), data_format="NHWC",
            activation_fn=tf.nn.relu, weights_regularizer=wr, trainable=trainable)
        net = slim.max_pool2d(net, 2, 2)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 256, activation_fn=tf.nn.relu,
            weights_regularizer=wr, trainable=trainable)
        q_state_action_values = slim.fully_connected(net, self.dim_actions,
            activation_fn=None, weights_regularizer=wr, trainable=trainable)

        return q_state_action_values
Project: SSD_tensorflow_VOC    Author: LevinJ
def __arg_scope(self, weight_decay=0.0005, data_format='NHWC'):
        """Defines the VGG arg scope.

        Args:
          weight_decay: The l2 regularization coefficient.

        Returns:
          An arg_scope.
        """
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_initializer=tf.zeros_initializer()):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                                padding='SAME',
                                data_format=data_format):
                with slim.arg_scope([custom_layers.pad2d,
                                     custom_layers.l2_normalization,
                                     custom_layers.channel_to_last],
                                    data_format=data_format) as sc:
                    return sc
Project: Neural-EM    Author: sjoerdvansteenkiste
def __call__(self, inputs, state, scope=None):
        output, res_state = self._cell(inputs, state)

        projected = None
        with tf.variable_scope(scope or self._name):
            if self._spec['name'] == 'fc':
                projected = slim.fully_connected(output, self._spec['size'], activation_fn=None)
            elif self._spec['name'] == 't_conv':
                projected = slim.layers.conv2d_transpose(output, self._spec['size'], self._spec['kernel'], self._spec['stride'], activation_fn=None)
            elif self._spec['name'] == 'r_conv':
                resized = tf.image.resize_images(output, (self._spec['stride'][0] * output.get_shape()[1].value,
                                                          self._spec['stride'][1] * output.get_shape()[2].value), method=1)
                projected = slim.layers.conv2d(resized, self._spec['size'], self._spec['kernel'], activation_fn=None)
            else:
                raise ValueError('Unknown layer name "{}"'.format(self._spec['name']))

        return projected, res_state
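
The 'r_conv' branch resizes with nearest-neighbor interpolation (method=1) and then convolves, a resize-then-convolve upsampling scheme often preferred over conv2d_transpose because it tends to avoid checkerboard artifacts.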