Python tensorflow.contrib.slim module: avg_pool2d() code examples

The following 18 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.slim.avg_pool2d().

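Before the project examples, a minimal usage sketch (tensor names and shapes here are illustrative, not taken from any project below). slim.avg_pool2d accepts list or scalar kernel/stride arguments and defaults to stride=2 with 'VALID' padding:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A hypothetical batch of RGB images.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])

# 2x2 average pooling with the default stride of 2 halves the spatial
# dimensions: [None, 224, 224, 3] -> [None, 112, 112, 3].
pooled = slim.avg_pool2d(images, [2, 2])

# Scalar kernel/stride and positional padding also work: a 3x3 window with
# stride 1, as in the SSIM examples below.
local_mean = slim.avg_pool2d(images, 3, 1, 'VALID')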
Project: tensorflow_face    Author: ZhihengCV
def squeezenet(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.5,
               spatial_squeeze=True,
               scope='squeeze'):
    """
    squeezenetv1.1
    """
    with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, max_pool2d, avg_pool2d and fire_module.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d,
                             slim.avg_pool2d, fire_module],
                            outputs_collections=end_points_collection):
            nets = squeezenet_inference(inputs, is_training, keep_prob)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
            return nets, end_points
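The arg_scope/outputs_collections pattern above records every layer's output. A hedged sketch of inspecting the returned end points (the input placeholder and class count are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 112, 96, 3])
logits, end_points = squeezenet(images, num_classes=1000)
for name, tensor in sorted(end_points.items()):
    print(name, tensor.get_shape())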
Project: tensorflow_face    Author: ZhihengCV
def densenet_inference(inputs, is_training, keep_prob, growth_rate, reduction):

    first_output_fea = growth_rate * 2

    nets = slim.conv2d(inputs, first_output_fea,
                       [5, 5], scope='conv0')
    nets = slim.max_pool2d(nets, [3, 3], padding='SAME', scope='pool0')  # 56*48*64

    nets = densenet_block(nets, 6, growth_rate, True,
                          'block1', is_training, keep_prob)
    nets = transition_block(nets, reduction, 'trans1', is_training, keep_prob)  # 28*24*256

    nets = densenet_block(nets, 12, growth_rate, True,
                          'block2', is_training, keep_prob)
    nets = transition_block(nets, reduction, 'trans2', is_training, keep_prob)  # 14*12*640

    nets = densenet_block(nets, 24, growth_rate, True,
                          'block3', is_training, keep_prob)
    nets = transition_block(nets, reduction, 'trans3', is_training, keep_prob)  # 7*6*1408

    nets = densenet_block(nets, 16, growth_rate, True,
                          'block4', is_training, keep_prob)  # 7*6*1920
    nets = slim.avg_pool2d(nets, [7, 6], scope='pool4')  # 1*1*1920
    return nets
Project: supic    Author: Hirico
def SSIM(self, x, y):
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2

        mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
        mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')

        sigma_x  = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
        sigma_y  = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
        sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y

        SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)

        SSIM = SSIM_n / SSIM_d

        return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
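This is the standard SSIM index with C1 = 0.01^2 and C2 = 0.03^2 (the usual stabilizing constants for images scaled to [0, 1]), using 3x3 stride-1 average pooling as the local window for the means and (co)variances. A hedged usage sketch; the model object and image tensors are hypothetical:

# left, right: [batch, H, W, C] image tensors in [0, 1].
ssim_map = model.SSIM(left, right)    # per-pixel dissimilarity in [0, 1]
ssim_loss = tf.reduce_mean(ssim_map)  # scalar photometric loss term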
Project: Awesome-GANs    Author: kozistr
def encoder(self, x, embedding, reuse=None):
        with tf.variable_scope("encoder", reuse=reuse):
            with slim.arg_scope([slim.conv2d],
                                stride=1, activation_fn=tf.nn.elu, padding="SAME",
                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                                weights_regularizer=slim.l2_regularizer(5e-4),
                                biases_initializer=tf.zeros_initializer()):
                x = slim.conv2d(x, embedding, 3)

                for i in range(self.conv_repeat_num):
                    channel_num = embedding * (i + 1)
                    x = slim.repeat(x, 2, slim.conv2d, channel_num, 3)
                    if i < self.conv_repeat_num - 1:
                        # Is strided convolution a better sub-sampling method
                        # than max pooling or average pooling?
                        # x = slim.conv2d(x, channel_num, kernel_size=3, stride=2)  # sub-sampling
                        x = slim.avg_pool2d(x, kernel_size=2, stride=2)
                        # x = slim.max_pool2d(x, 3, 2)

                x = tf.reshape(x, [-1, np.prod([8, 8, channel_num])])
        return x
Project: Deep_Learning_In_Action    Author: SunnyMarkLiu
def build_inception_v1(self, prediction_fn=tf.nn.relu, scope='InceptionV1'):
        """
        build basic inception v1 model
        """
        # input features [batch_size, height, width, channels]
        self.x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='input_layer')
        self.y = tf.placeholder(tf.float32, [None, self.num_classes], name='output_layer')

        # learning_rate placeholder
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        # dropout keep probability (VGG default: 0.5)
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        with tf.variable_scope(name_or_scope=scope, reuse=False) as scope:
            net, end_point_nets = self.inception_v1_base(self.x, scope=scope)
            with tf.variable_scope('Logits'):
                net = slim.avg_pool2d(net, kernel_size=[7, 7], stride=1, scope='AvgPool_0a_7x7')
                net = slim.dropout(net, self.keep_prob, scope='Dropout_0b')
                # squeeze spatial dimensions: [batch, 1, 1, 1024] -> [batch, 1024]
                net = net[:, 0, 0, :]
                self.logits = slim.fully_connected(net, num_outputs=self.num_classes)
                self.read_out_logits = prediction_fn(self.logits, name='Predictions')
Project: monodepth360    Author: srijanparmeshwar
def SSIM(self, x, y):
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2

        mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
        mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')

        sigma_x  = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
        sigma_y  = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
        sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y

        SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)

        SSIM = SSIM_n / SSIM_d

        return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
Project: monodepth    Author: mrharicot
def SSIM(self, x, y):
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2

        mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
        mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')

        sigma_x  = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
        sigma_y  = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
        sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y

        SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)

        SSIM = SSIM_n / SSIM_d

        return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
Project: vessel-classification    Author: GlobalFishingWatch
def misconception_with_bypass(input,
                              window_size,
                              stride,
                              depth,
                              is_training,
                              scope=None):
    with tf.name_scope(scope):
        with slim.arg_scope(
            [slim.conv2d],
                padding='SAME',
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params={'is_training': is_training}):
            residual = misconception_layer(input, window_size, stride, depth,
                                           is_training, scope)

            if stride > 1:
                input = slim.avg_pool2d(
                    input, [1, stride], stride=[1, stride], padding='SAME')

            input = zero_pad_features(input, depth)

            return input + residual
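Because the feature maps here are [batch_size, 1, width, depth] time series, the bypass pools only along the width axis with a [1, stride] kernel so its output aligns with the strided main path. A shape check under those assumptions:

import tensorflow as tf
import tensorflow.contrib.slim as slim

bypass = tf.zeros([16, 1, 64, 32])  # hypothetical [batch, 1, width, depth]
stride = 2
bypass = slim.avg_pool2d(bypass, [1, stride], stride=[1, stride],
                         padding='SAME')  # -> [16, 1, 32, 32]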
Project: tensorflow_face    Author: ZhihengCV
def squeezenet_inference(inputs, is_training, keep_prob):
    nets = slim.conv2d(inputs, 64,
                       [3, 3], scope='conv1')
    nets = slim.max_pool2d(nets, [3, 3], padding='SAME', scope='pool1')  # 56*48*64

    nets = fire_module(nets, 16, 64, scope='fire2')

    nets = fire_module(nets, 16, 64, scope='fire3')

    nets = slim.max_pool2d(nets, [3, 3], padding='SAME', scope='pool3')  # 28*24*128

    nets = fire_module(nets, 32, 128, scope='fire4')

    nets = fire_module(nets, 32, 128, scope='fire5')

    nets = slim.max_pool2d(nets, [3, 3], padding='SAME', scope='pool5')  # 14*12*256

    nets = fire_module(nets, 48, 192, scope='fire6')

    nets = fire_module(nets, 48, 192, scope='fire7')

    nets = slim.max_pool2d(nets, [3, 3], padding='SAME', scope='pool6')  # 7*6*384

    nets = fire_module(nets, 64, 256, scope='fire8')

    nets = fire_module(nets, 64, 256, scope='fire9')  # 7*6*512

    nets = slim.dropout(nets, keep_prob, is_training=is_training, scope='dropout9')

    nets = slim.avg_pool2d(nets, [7, 6], scope='pool9')  # 1*1*512

    return nets
Project: tensorflow_face    Author: ZhihengCV
def transition_block(inputs, reduction, scope, is_training, keep_prob):
    """Call H_l composite function with 1x1 kernel and after average
    pooling
    """
    with tf.variable_scope(scope, 'trans1', [inputs]):
        # call composite function with 1x1 kernel
        out_features = int(int(inputs.get_shape()[-1]) * reduction)
        nets = slim.conv2d(inputs, out_features,
                           [1, 1], scope='conv')
        nets = slim.dropout(nets, keep_prob=keep_prob,
                            is_training=is_training,
                            scope='dropout')
        # run average pooling
        nets = slim.avg_pool2d(nets, [2, 2], scope='pool')
        return nets
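The [2, 2] pooling above relies on slim.avg_pool2d's default stride of 2, so each transition block halves the spatial resolution while the 1x1 convolution shrinks the channel count by the reduction factor. A quick shape check (the input tensor is a placeholder, not project data):

import tensorflow as tf
import tensorflow.contrib.slim as slim

feats = tf.zeros([8, 28, 24, 256])
pooled = slim.avg_pool2d(feats, [2, 2], scope='pool')  # default stride=2
print(pooled.get_shape())  # (8, 14, 12, 256)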
Project: tensorflow_face    Author: ZhihengCV
def densenet_a(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.2,
               growth_rate=32,
               reduction=0.6,
               spatial_squeeze=True,
               scope='densenet_121'):
    """
    Densenet 121-Layers version.
    """
    with tf.name_scope(scope, 'densenet_121', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, max_pool2d and avg_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d,
                             slim.avg_pool2d],
                            outputs_collections=end_points_collection):

            nets = densenet_inference(inputs, is_training, keep_prob, growth_rate, reduction)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
            return nets, end_points
Project: faceNet_RealTime    Author: jack55436001
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, 
                        scope='Bottleneck', reuse=False)
    return net, None
Project: Deep_Learning_In_Action    Author: SunnyMarkLiu
def global_average_pooling(self, x):
        """
        global average pooling
        :param x: [batch, height, width, channels]
        """
        shapes = x.get_shape().as_list()
        kernel_height = shapes[1]
        kernel_width = shapes[2]
        return slim.avg_pool2d(x, kernel_size=[kernel_height, kernel_width], stride=1, padding='VALID',
                               scope='global_average_pooling')
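Setting the kernel to the full spatial extent turns avg_pool2d into global average pooling, collapsing [batch, H, W, C] to [batch, 1, 1, C]. A minimal equivalence sketch (shapes illustrative):

import tensorflow as tf
import tensorflow.contrib.slim as slim

feats = tf.random_normal([4, 7, 7, 512])
gap = slim.avg_pool2d(feats, kernel_size=[7, 7], stride=1, padding='VALID')
# The same reduction expressed directly over the spatial axes:
gap_ref = tf.reduce_mean(feats, axis=[1, 2], keepdims=True)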
Project: facenet    Author: davidsandberg
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, 
                        scope='Bottleneck', reuse=False)
    return net, None
Project: convolutional-pose-machines-tensorflow    Author: timctho
def build_model(self, input_image, center_map, batch_size):
        self.batch_size = batch_size
        self.input_image = input_image
        self.center_map = center_map
        with tf.variable_scope('pooled_center_map'):
            self.center_map = slim.avg_pool2d(self.center_map,
                                              [9, 9], stride=8,
                                              padding='SAME',
                                              scope='center_map')
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            with tf.variable_scope('sub_stages'):
                net = slim.conv2d(input_image, 64, [3, 3], scope='sub_conv1')
                net = slim.conv2d(net, 64, [3, 3], scope='sub_conv2')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool1')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv3')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv4')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool2')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv5')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv6')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv7')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv8')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool3')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv9')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv10')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv11')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv12')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv13')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv14')
                self.sub_stage_img_feature = slim.conv2d(net, 128, [3, 3],
                                                         scope='sub_stage_img_feature')

            with tf.variable_scope('stage_1'):
                conv1 = slim.conv2d(self.sub_stage_img_feature, 512, [1, 1],
                                    scope='conv1')
                self.stage_heatmap.append(slim.conv2d(conv1, self.joints, [1, 1],
                                                      scope='stage_heatmap'))

            for stage in range(2, self.stages+1):
                self._middle_conv(stage)
Project: convolutional-pose-machines-tensorflow    Author: timctho
def build_model(self, input_image, center_map, batch_size):
        self.batch_size = batch_size
        self.input_image = input_image
        self.center_map = center_map
        with tf.variable_scope('pooled_center_map'):
            # the center map is a Gaussian template that gathers the response
            self.center_map = slim.avg_pool2d(self.center_map,
                                              [9, 9], stride=8,
                                              padding='SAME',
                                              scope='center_map')

        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            with tf.variable_scope('sub_stages'):
                net = slim.conv2d(input_image, 64, [3, 3], scope='sub_conv1')
                net = slim.conv2d(net, 64, [3, 3], scope='sub_conv2')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool1')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv3')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv4')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool2')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv5')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv6')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv7')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv8')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool3')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv9')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv10')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv11')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv12')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv13')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv14')

                self.sub_stage_img_feature = slim.conv2d(net, 128, [3, 3],
                                                         scope='sub_stage_img_feature')

            with tf.variable_scope('stage_1'):
                conv1 = slim.conv2d(self.sub_stage_img_feature, 512, [1, 1],
                                    scope='conv1')
                self.stage_heatmap.append(slim.conv2d(conv1, self.joints, [1, 1],
                                                      scope='stage_heatmap'))

            for stage in range(2, self.stages + 1):
                self._middle_conv(stage)
Project: sact    Author: mfigurnov
def residual(inputs,
             depth,
             stride,
             activate_before_residual,
             residual_mask=None,
             scope=None):
  with tf.variable_scope(scope, 'residual', [inputs]):
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    preact = slim.batch_norm(inputs, scope='preact')
    if activate_before_residual:
      shortcut = preact
    else:
      shortcut = inputs

    if residual_mask is not None:
      # Max-pooling trick only works correctly when stride is 1.
      # We assume that stride=2 happens in the first layer where
      # residual_mask is None.
      assert stride == 1
      diluted_residual_mask = slim.max_pool2d(
          residual_mask, [3, 3], stride=1, padding='SAME')
    else:
      diluted_residual_mask = None

    flops = 0
    conv_output, current_flops = flopsometer.conv2d(
        preact,
        depth,
        3,
        stride=stride,
        padding='SAME',
        output_mask=diluted_residual_mask,
        scope='conv1')
    flops += current_flops

    conv_output, current_flops = flopsometer.conv2d(
        conv_output,
        depth,
        3,
        stride=1,
        padding='SAME',
        activation_fn=None,
        normalizer_fn=None,
        output_mask=residual_mask,
        scope='conv2')
    flops += current_flops

    if depth_in != depth:
      shortcut = slim.avg_pool2d(shortcut, stride, stride, padding='VALID')
      value = (depth - depth_in) // 2
      shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [value, value]])

    if residual_mask is not None:
      conv_output *= residual_mask

    outputs = shortcut + conv_output

    return outputs, flops
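When the residual path changes depth, the shortcut above is downsampled with a parameter-free average pool and zero-padded along the channel axis (the "option A" ResNet shortcut). A hedged sketch of that branch in isolation, with illustrative shapes:

import tensorflow as tf
import tensorflow.contrib.slim as slim

shortcut = tf.zeros([2, 32, 32, 16])
stride, depth_in, depth = 2, 16, 32
shortcut = slim.avg_pool2d(shortcut, stride, stride,
                           padding='VALID')  # -> [2, 16, 16, 16]
value = (depth - depth_in) // 2
shortcut = tf.pad(shortcut,
                  [[0, 0], [0, 0], [0, 0], [value, value]])  # -> [2, 16, 16, 32]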
Project: vessel-classification    Author: GlobalFishingWatch
def misconception_model(input,
                        window_size,
                        depths,
                        strides,
                        objective_functions,
                        is_training,
                        sub_count=128,
                        sub_layers=2,
                        keep_prob=0.5):
    """ A misconception tower.

  Args:
    input: a tensor of size [batch_size, 1, width, depth].
    window_size: the width of the conv and pooling filters to apply.
    depth: the depth of the output tensor.
    levels: the height of the tower in misconception layers.
    objective_functions: a list of objective functions to add to the top of
                         the network.
    is_training: whether the network is training.

  Returns:
    a tensor of size [batch_size, num_classes].
  """
    layers = []
    with slim.arg_scope([slim.batch_norm], decay=0.999):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            net = input
            layers.append(net)
            for depth, stride in zip(depths, strides):
                net = misconception_with_bypass(net, window_size, stride,
                                                depth, is_training)
                layers.append(net)
            outputs = []
            for ofunc in objective_functions:
                onet = net
                for _ in range(sub_layers - 1):
                    onet = slim.conv2d(
                        onet,
                        sub_count, [1, 1],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training})

                # Don't use batch norm on last layer, just use dropout.
                onet = slim.conv2d(onet, sub_count, [1, 1], normalizer_fn=None)
                # Global average pool over the temporal (width) dimension
                # of the [batch_size, 1, width, depth] tensor.
                n = int(onet.get_shape().dims[2])
                onet = slim.avg_pool2d(onet, [1, n], stride=[1, n])
                onet = slim.flatten(onet)
                #
                onet = slim.dropout(onet, keep_prob, is_training=is_training)
                outputs.append(ofunc.build(onet))

    return outputs, layers