Python ops module: conv2d() example source code

We have extracted the following 34 code examples from open-source Python projects to illustrate how to use ops.conv2d().
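Note that ops is not a single fixed library here: each project ships its own ops.py helper module, so the signature of conv2d differs from example to example. Many of the snippets below (opt-mmd, csgm, CausalGAN, vae-gan-tensorflow) rely on a DCGAN-style wrapper. For reference, a minimal sketch of such a helper; the 5x5 kernel, stride-2 and stddev defaults are assumptions that vary across projects:

import tensorflow as tf

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name='conv2d'):
    """Strided 2-D convolution with bias (DCGAN-style sketch)."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(conv, b)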

Project: ICGan-tensorflow | Author: zhangqianhui
def discriminate(self, x_var, y, weights, biases, reuse=False):

        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        x_var = conv_cond_concat(x_var, y1)

        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))

        conv1 = conv_cond_concat(conv1, y1)

        conv2 = lrelu(batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))

        conv2 = tf.reshape(conv2, [self.batch_size, -1])

        conv2 = tf.concat([conv2, y], 1)

        fc1 = lrelu(batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))

        fc1 = tf.concat([fc1, y], 1)
        # for D
        output = fully_connect(fc1, weights['wd'], biases['bd'])

        return tf.nn.sigmoid(output)
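The ICGan-tensorflow examples pass externally created weight and bias tensors into conv2d rather than letting the helper create its own variables. A plausible sketch of that helper, assuming stride 2 and 'SAME' padding:

import tensorflow as tf

def conv2d(x, w, b, d_h=2, d_w=2, padding='SAME'):
    # Convolve with a caller-supplied filter and add the matching bias.
    conv = tf.nn.conv2d(x, w, strides=[1, d_h, d_w, 1], padding=padding)
    return tf.nn.bias_add(conv, b)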
Project: ICGan-tensorflow | Author: zhangqianhui
def encode_z(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='enz_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='enz_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128*7*7])

        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c2, weights['e3'], biases['eb3']), scope='enz_bn3')

        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Project: CausalGAN | Author: mkocaoglu
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables
Project: CausalGAN | Author: mkocaoglu
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits,variables
Project: CausalGAN | Author: mkocaoglu
def discriminator_on_z(image, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))#16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels,variables
Project: tf-sr-zoo | Author: MLJejuCamp2017
def create_discriminator(hr_images_fake, hr_images, cfg):
    n_layers = 3
    layers = []

    input = tf.concat([hr_images_fake, hr_images], axis = 3)

    conv = slim.conv2d(input, cfg.ndf, [3,3], stride = 2, activation_fn = lrelu, scope = 'layers%d'%(0))
    layers.append(conv)

    for i in range(n_layers):
        out_channels = cfg.ndf*min(2**(i+1), 8)
        stride = 1 if i == n_layers - 1 else 2
        conv = slim.conv2d(layers[-1], out_channels, [3,3], stride = stride, activation_fn = lrelu, scope = 'layers_%d'%(i+2))
        layers.append(conv)

    conv = slim.conv2d(layers[-1], 1, [3,3], stride = 1)
    output = tf.sigmoid(conv)
    return output
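Unlike the hand-rolled helpers elsewhere on this page, slim.conv2d comes from tf.contrib.slim and creates the weights, bias and activation in a single call, while slim.arg_scope sets shared defaults for a group of calls (as in create_generator further below). A small usage sketch; images and lrelu are assumed to be defined:

import tensorflow.contrib.slim as slim

with slim.arg_scope([slim.conv2d], stride=2, activation_fn=lrelu):
    net = slim.conv2d(images, 64, [3, 3], scope='d0')  # inherits stride and activation_fn
    net = slim.conv2d(net, 128, [3, 3], scope='d1')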
Project: seglink | Author: bgshih
def _detection_classifier(self, maps, ksize, cross_links=False, scope=None):
    """
    Create a SegLink detection classifier on a feature layer
    """
    with tf.variable_scope(scope):
      seg_depth = N_SEG_CLASSES
      if cross_links:
        lnk_depth = N_LNK_CLASSES * (N_LOCAL_LINKS + N_CROSS_LINKS)
      else:
        lnk_depth = N_LNK_CLASSES * N_LOCAL_LINKS
      reg_depth = OFFSET_DIM
      map_depth = maps.get_shape()[3].value
      seg_maps = ops.conv2d(maps, map_depth, seg_depth, ksize, 1, 'SAME', scope='conv_cls')
      lnk_maps = ops.conv2d(maps, map_depth, lnk_depth, ksize, 1, 'SAME', scope='conv_lnk')
      reg_maps = ops.conv2d(maps, map_depth, reg_depth, ksize, 1, 'SAME', scope='conv_reg')
    return seg_maps, lnk_maps, reg_maps
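seglink's ops.conv2d takes explicit input and output channel counts. Judging from the call sites here and in _vgg_conv_relu further below, the assumed signature might look like the following sketch (the initializer is a guess):

import tensorflow as tf

def conv2d(x, n_in, n_out, ksize, stride=1, padding='SAME',
           relu=False, trainable=True, scope='conv'):
    # Explicit in/out channel counts; optional fused ReLU.
    with tf.variable_scope(scope):
        w = tf.get_variable('weights', [ksize, ksize, n_in, n_out], trainable=trainable,
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [n_out], trainable=trainable,
                            initializer=tf.constant_initializer(0.0))
        y = tf.nn.bias_add(
            tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=padding), b)
        return tf.nn.relu(y) if relu else y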
Project: adagan | Author: tolstikhin
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Encoder function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters / 8, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = tf.nn.relu(h0)
            h1 = ops.conv2d(opts, h0, num_filters / 4, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = tf.nn.relu(h1)
            h2 = ops.conv2d(opts, h1, num_filters / 2, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = tf.nn.relu(h2)
            h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
            h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
            h3 = tf.nn.relu(h3)
            # Already has NaNs!!
            latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
            log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')

        return latent_mean, log_latent_sigmas
Project: adagan | Author: tolstikhin
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Discriminator function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = ops.lrelu(h0)
            h1 = ops.conv2d(opts, h0, num_filters * 2, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = ops.lrelu(h1)
            h2 = ops.conv2d(opts, h1, num_filters * 4, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = ops.lrelu(h2)
            h3 = ops.linear(opts, h2, 1, scope='h3_lin')

        return h3
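adagan threads an opts dict through every layer: the kernel size comes from conv_filters_dim and the stride from d_h/d_w. A hedged sketch of the wrapper these examples assume (the 'init_std' key is hypothetical):

import tensorflow as tf

def conv2d(opts, input_, output_dim, d_h=2, d_w=2, conv_filters_dim=5,
           padding='SAME', scope='conv2d'):
    # Hyper-parameters travel in the `opts` dict; otherwise this is a
    # plain strided convolution with bias.
    k = conv_filters_dim
    with tf.variable_scope(scope):
        w = tf.get_variable('filter', [k, k, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(
                                stddev=opts.get('init_std', 0.02)))
        b = tf.get_variable('b', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding) + b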
Project: opt-mmd | Author: dougalsutherland
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0
Project: opt-mmd | Author: dougalsutherland
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0
Project: opt-mmd | Author: dougalsutherland
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0
Project: InceptionV3_TensorFlow | Author: MasazI
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
  """Yields the scope with the default parameters for inception_v3.

  Args:
    weight_decay: the weight decay for weights variables.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.

  Yields:
    an arg_scope with the parameters needed for inception_v3.
  """
  # Set weight_decay for weights in Conv and FC layers.
  with scopes.arg_scope([ops.conv2d, ops.fc],
                        weight_decay=weight_decay):
    # Set stddev, activation and parameters for batch_norm.
    with scopes.arg_scope([ops.conv2d],
                          stddev=stddev,
                          activation=tf.nn.relu,
                          batch_norm_params={
                              'decay': batch_norm_decay,
                              'epsilon': batch_norm_epsilon}) as arg_scope:
      yield arg_scope
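The excerpt omits the decorator that turns this generator into a context manager (most likely contextlib.contextmanager in the original module). Assuming that, usage would look like the following sketch, where images is assumed to be defined:

import contextlib

inception_v3_scope = contextlib.contextmanager(inception_v3_parameters)

with inception_v3_scope(weight_decay=4e-5):
    # Every ops.conv2d in this block inherits the stddev, activation
    # and batch_norm parameters configured above.
    net = ops.conv2d(images, 32, [3, 3], stride=2, scope='conv0')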
Project: vae-gan-tensorflow | Author: zhangqianhui
def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            conv1 = tf.nn.relu(conv2d(x_var, output_dim=32, name='dis_conv1'))
            conv2 = tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1', reuse=reuse))
            conv3 = tf.nn.relu(batch_normal(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2', reuse=reuse))
            conv4 = conv2d(conv3, output_dim=256, name='dis_conv4')
            middle_conv = conv4
            conv4 = tf.nn.relu(batch_normal(conv4, scope='dis_bn3', reuse=reuse))
            conv4 = tf.reshape(conv4, [self.batch_size, -1])

            fl = tf.nn.relu(batch_normal(fully_connect(conv4, output_size=256, scope='dis_fully1'), scope='dis_bn4', reuse=reuse))
            output = fully_connect(fl, output_size=1, scope='dis_fully2')

            return middle_conv, output
Project: ICGan-tensorflow | Author: zhangqianhui
def encode_y(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))

        #y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)

        return result_y
Project: seglink | Author: bgshih
def _vgg_conv_relu(self, x, n_in, n_out, scope, fc7=False, trainable=True):
    with tf.variable_scope(scope):
      if not fc7:
        conv = ops.conv2d(x, n_in, n_out, 3, trainable=trainable, relu=True)
      else:
        conv = ops.conv2d(x, n_in, n_out, 1, trainable=trainable, relu=True)
    return conv
Project: adagan | Author: tolstikhin
def compute_moments(_inputs, moments=[2, 3]):
    """From an image input, compute moments"""
    _inputs_sq = tf.square(_inputs)
    _inputs_cube = tf.pow(_inputs, 3)
    height = int(_inputs.get_shape()[1])
    width = int(_inputs.get_shape()[2])
    channels = int(_inputs.get_shape()[3])
    def ConvFlatten(x, kernel_size):
#                 w_sum = tf.ones([kernel_size, kernel_size, channels, 1]) / (kernel_size * kernel_size * channels)
        w_sum = tf.eye(num_rows=channels, num_columns=channels, batch_shape=[kernel_size * kernel_size])
        w_sum = tf.reshape(w_sum, [kernel_size, kernel_size, channels, channels])
        w_sum = w_sum / (kernel_size * kernel_size)
        sum_ = tf.nn.conv2d(x, w_sum, strides=[1, 1, 1, 1], padding='VALID')
        size = prod_dim(sum_)
        assert size == (height - kernel_size + 1) * (width - kernel_size + 1) * channels, size
        return tf.reshape(sum_, [-1, size])
    outputs = []
    for size in [3, 4, 5]:
        mean = ConvFlatten(_inputs, size)
        square = ConvFlatten(_inputs_sq, size)
        var = square - tf.square(mean)
        if 2 in moments:
            outputs.append(var)
        if 3 in moments:
            cube = ConvFlatten(_inputs_cube, size)
            skewness = cube - 3.0 * mean * var - tf.pow(mean, 3)  # Unnormalized
            outputs.append(skewness)
    return tf.concat(outputs, 1)
Project: adagan | Author: tolstikhin
def began_dec(self, opts, noise, is_training, reuse, keep_prob):
        """ Architecture reported here: https://arxiv.org/pdf/1703.10717.pdf
        """

        output_shape = self._data.data_shape
        num_units = opts['g_num_filters']
        num_layers = opts['g_num_layers']
        batch_size = tf.shape(noise)[0]

        h0 = ops.linear(
            opts, noise, num_units * 8 * 8, scope='h0_lin')
        h0 = tf.reshape(h0, [-1, 8, 8, num_units])
        layer_x = h0
        for i in xrange(num_layers):
            if i % 3 < 2:
                # Don't change resolution
                layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i)
                layer_x = tf.nn.elu(layer_x)
            else:
                if i != num_layers - 1:
                    # Upsampling by factor of 2 with NN
                    scale = 2 ** (i / 3 + 1)
                    layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
                                              scope='h%d_upsample' % i, reuse=reuse)
                    # Skip connection
                    append = ops.upsample_nn(h0, [scale * 8, scale * 8],
                                              scope='h%d_skipup' % i, reuse=reuse)
                    layer_x = tf.concat([layer_x, append], axis=3)

        last_h = ops.conv2d(opts, layer_x, output_shape[-1], d_h=1, d_w=1, scope='hlast_conv')

        if opts['input_normalize_sym']:
            return tf.nn.tanh(last_h)
        else:
            return tf.nn.sigmoid(last_h)
Project: adagan | Author: tolstikhin
def dcgan_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
        num_units = opts['e_num_filters']
        num_layers = opts['e_num_layers']
        layer_x = input_
        for i in xrange(num_layers):
            scale = 2**(num_layers-i-1)
            layer_x = ops.conv2d(opts, layer_x, num_units / scale, scope='h%d_conv' % i)

            if opts['batch_norm']:
                layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
            layer_x = tf.nn.relu(layer_x)
            if opts['dropout']:
                _keep_prob = tf.minimum(
                    1., 0.9 - (0.9 - keep_prob) * float(i + 1) / num_layers)
                layer_x = tf.nn.dropout(layer_x, _keep_prob)

            if opts['e_3x3_conv'] > 0:
                before = layer_x
                for j in range(opts['e_3x3_conv']):
                    layer_x = ops.conv2d(opts, layer_x, num_units / scale, d_h=1, d_w=1,
                                         scope='conv2d_3x3_%d_%d' % (i, j),
                                         conv_filters_dim=3)
                    layer_x = tf.nn.relu(layer_x)
                layer_x += before  # Residual connection.

        if opts['e_is_random']:
            latent_mean = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
            log_latent_sigmas = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
            return latent_mean, log_latent_sigmas
        else:
            return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
Project: adagan | Author: tolstikhin
def began_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
        num_units = opts['e_num_filters']
        assert num_units == opts['g_num_filters'], 'BEGAN requires same number of filters in encoder and decoder'
        num_layers = opts['e_num_layers']
        layer_x = ops.conv2d(opts, input_, num_units, scope='h_first_conv')
        for i in xrange(num_layers):
            if i % 3 < 2:
                if i != num_layers - 2:
                    ii = i - (i / 3)
                    scale = (ii + 1 - ii / 2)
                else:
                    ii = i - (i / 3)
                    scale = (ii - (ii - 1) / 2)
                layer_x = ops.conv2d(opts, layer_x, num_units * scale, d_h=1, d_w=1, scope='h%d_conv' % i)
                layer_x = tf.nn.elu(layer_x)
            else:
                if i != num_layers - 1:
                    layer_x = ops.downsample(layer_x, scope='h%d_maxpool' % i, reuse=reuse)
        # Tensor should be [N, 8, 8, filters] right now

        if opts['e_is_random']:
            latent_mean = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
            log_latent_sigmas = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
            return latent_mean, log_latent_sigmas
        else:
            return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
Project: vgg16.tf | Author: bgshih
def _vgg_conv_relu(self, x, n_in, n_out, scope):
    with tf.variable_scope(scope):
      conv = ops.conv2d(x, n_in, n_out, 3, 1, p='SAME')
      relu = tf.nn.relu(conv)
    return relu
Project: Conditional-Gans | Author: zhangqianhui
def dis_net(self, images, y, reuse=False):

        with tf.variable_scope("discriminator") as scope:

            if reuse:
                scope.reuse_variables()

            # mnist data's shape is (28 , 28 , 1)
            yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
            # concat
            concat_data = conv_cond_concat(images, yb)

            conv1, w1 = conv2d(concat_data, output_dim=10, name='dis_conv1')
            tf.add_to_collection('weight_1', w1)

            conv1 = lrelu(conv1)
            conv1 = conv_cond_concat(conv1, yb)
            tf.add_to_collection('ac_1', conv1)


            conv2, w2 = conv2d(conv1, output_dim=64, name='dis_conv2')
            tf.add_to_collection('weight_2', w2)

            conv2 = lrelu(batch_normal(conv2, scope='dis_bn1'))
            tf.add_to_collection('ac_2', conv2)

            conv2 = tf.reshape(conv2, [self.batch_size, -1])
            conv2 = tf.concat([conv2, y], 1)

            f1 = lrelu(batch_normal(fully_connect(conv2, output_size=1024, scope='dis_fully1'), scope='dis_bn2', reuse=reuse))
            f1 = tf.concat([f1, y], 1)

            out = fully_connect(f1, output_size=1, scope='dis_fully2')

            return tf.nn.sigmoid(out), out
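This project's conv2d returns the filter alongside the layer output so the caller can stash it in a collection, as with tf.add_to_collection('weight_1', w1) above. A sketch of that variant, with DCGAN-style defaults assumed:

import tensorflow as tf

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name='conv2d'):
    # Returns (output, filter) so callers can inspect the weights.
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(
            tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME'), b)
        return conv, w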
Project: vae-gan-tensorflow | Author: zhangqianhui
def Encode(self, x):

        with tf.variable_scope('encode') as scope:

            conv1 = tf.nn.relu(batch_normal(conv2d(x, output_dim=64, name='e_c1'), scope='e_bn1'))
            conv2 = tf.nn.relu(batch_normal(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_bn2'))
            conv3 = tf.nn.relu(batch_normal(conv2d(conv2 , output_dim=256, name='e_c3'), scope='e_bn3'))
            conv3 = tf.reshape(conv3, [self.batch_size, 256 * 8 * 8])
            fc1 = tf.nn.relu(batch_normal(fully_connect(conv3, output_size=1024, scope='e_f1'), scope='e_bn4'))
            z_mean = fully_connect(fc1 , output_size=128, scope='e_f2')
            z_sigma = fully_connect(fc1, output_size=128, scope='e_f3')

            return z_mean, z_sigma
Project: csgm | Author: AshishBora
def discriminator(hparams, x, scope_name, train, reuse):

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        d_bn1 = ops.batch_norm(name='d_bn1')
        d_bn2 = ops.batch_norm(name='d_bn2')
        d_bn3 = ops.batch_norm(name='d_bn3')

        h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

        h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
        h1 = ops.lrelu(d_bn1(h1, train=train))

        h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
        h2 = ops.lrelu(d_bn2(h2, train=train))

        h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
        h3 = ops.lrelu(d_bn3(h3, train=train))

        h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

        d_logit = h4
        d = tf.nn.sigmoid(d_logit)

    return d, d_logit
Project: csgm | Author: AshishBora
def discriminator(hparams, x, train, reuse):

    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit
Project: nn_q_learning_tensorflow | Author: EndingCredits
def cnn(self, state, input_dims, num_actions):
        w = {}
        initializer = tf.truncated_normal_initializer(0, 0.02)
        activation_fn = tf.nn.relu

        state = tf.transpose(state, perm=[0, 2, 3, 1])

        l1, w['l1_w'], w['l1_b'] = conv2d(state,
          32, [8, 8], [4, 4], initializer, activation_fn, 'NHWC', name='l1')
        l2, w['l2_w'], w['l2_b'] = conv2d(l1,
          64, [4, 4], [2, 2], initializer, activation_fn, 'NHWC', name='l2')

        shape = l2.get_shape().as_list()
        l2_flat = tf.reshape(l2, [-1, reduce(lambda x, y: x * y, shape[1:])])

        l3, w['l3_w'], w['l3_b'] = linear(l2_flat, 256, activation_fn=activation_fn, name='value_hid')


        value, w['val_w_out'], w['val_w_b'] = linear(l3, 1, name='value_out')
        V = tf.reshape(value, [-1])

        pi_, w['pi_w_out'], w['pi_w_b'] = \
            linear(l3, num_actions, activation_fn=tf.nn.softmax, name='pi_out')

        sums = tf.tile(tf.expand_dims(tf.reduce_sum(pi_, 1), 1), [1, num_actions])
        pi = pi_ / sums

        #A3C is l1 = (16, [8,8], [4,4], ReLu), l2 = (32, [4,4], [2,2], ReLu), l3 = (256, Conn, ReLu), V = (1, Conn, Lin), pi = (#act, Conn, Softmax)
        return pi, V, list(w.values())




# Adapted from github.com/devsisters/DQN-tensorflow/
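The conv2d used in cnn() above (adapted, per the comment, from devsisters/DQN-tensorflow) takes list-valued kernel and stride arguments, an initializer and a data format, and returns the activation together with its variables. A sketch under those assumptions:

import tensorflow as tf

def conv2d(x, output_dim, kernel_size, stride, initializer,
           activation_fn=tf.nn.relu, data_format='NHWC',
           padding='VALID', name='conv2d'):
    with tf.variable_scope(name):
        if data_format == 'NHWC':
            strides = [1, stride[0], stride[1], 1]
            kernel_shape = kernel_size + [int(x.get_shape()[-1]), output_dim]
        else:  # 'NCHW'
            strides = [1, 1, stride[0], stride[1]]
            kernel_shape = kernel_size + [int(x.get_shape()[1]), output_dim]
        w = tf.get_variable('w', kernel_shape, initializer=initializer)
        conv = tf.nn.conv2d(x, w, strides, padding, data_format=data_format)
        b = tf.get_variable('b', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        out = tf.nn.bias_add(conv, b, data_format)
        if activation_fn is not None:
            out = activation_fn(out)
        return out, w, b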
Project: CausalGAN | Author: mkocaoglu
def DiscriminatorCNN(image, config, reuse=None):
    '''
    Discriminator for GAN model.

    image      : batch_size x 64x64x3 image
    config     : see causal_dcgan/config.py
    reuse      : pass True if not calling for first time

    returns: probabilities(real)
           : logits(real)
           : first layer activation used to estimate z from
           : variables list
    '''
    with tf.variable_scope("discriminator",reuse=reuse) as vs:
        d_bn1 = batch_norm(name='d_bn1')
        d_bn2 = batch_norm(name='d_bn2')
        d_bn3 = batch_norm(name='d_bn3')

        if not config.stab_proj:
            h0 = lrelu(conv2d(image, config.df_dim, name='d_h0_conv'))#16,32,32,64

        else:#method to restrict disc from winning
            #I think this is equivalent to just not letting disc optimize first layer
            #and also removing nonlinearity

            #k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
            #paper used 8x8 kernel, but I'm using 5x5 because it is more similar to my architecture
            #n_projs=config.df_dim#64 instead of 32 in paper
            n_projs=config.n_stab_proj#64 instead of 32 in paper

            print("WARNING:STAB_PROJ active, using ",n_projs," projections")

            w_proj = tf.get_variable('w_proj', [5, 5, image.get_shape()[-1],n_projs],
                initializer=tf.truncated_normal_initializer(stddev=0.02),trainable=False)
            conv = tf.nn.conv2d(image, w_proj, strides=[1, 2, 2, 1], padding='SAME')

            b_proj = tf.get_variable('b_proj', [n_projs],#does nothing
                 initializer=tf.constant_initializer(0.0),trainable=False)
            h0=tf.nn.bias_add(conv,b_proj)


        h1_ = lrelu(d_bn1(conv2d(h0, config.df_dim*2, name='d_h1_conv')))#16,16,16,128

        h1 = add_minibatch_features(h1_, config.df_dim)
        h2 = lrelu(d_bn2(conv2d(h1, config.df_dim*4, name='d_h2_conv')))#16,16,16,248
        h3 = lrelu(d_bn3(conv2d(h2, config.df_dim*8, name='d_h3_conv')))
        #print('h3shape: ',h3.get_shape().as_list())
        #print('8df_dim:',config.df_dim*8)
        #dim3=tf.reduce_prod(tf.shape(h3)[1:])
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        h4 = linear(h3_flat, 1, 'd_h3_lin')

        prob=tf.nn.sigmoid(h4)

        variables = tf.contrib.framework.get_variables(vs,collection=tf.GraphKeys.TRAINABLE_VARIABLES)

    return prob, h4, h1_, variables
Project: tf-sr-zoo | Author: MLJejuCamp2017
def create_generator(hr_image_bilinear, num_channels, cfg):
    layers = []
    print(hr_image_bilinear.get_shape())
    conv = slim.conv2d(hr_image_bilinear, cfg.ngf, [3,3], stride = 2, scope = 'encoder0')
    layers.append(conv)

    layers_specs = [
        cfg.ngf*2, 
        cfg.ngf*4,
        cfg.ngf*8,
        cfg.ngf*8,
        cfg.ngf*8,
        cfg.ngf*8,
    ]
    for idx, out_channels in enumerate(layers_specs):
        with slim.arg_scope([slim.conv2d], activation_fn = lrelu, stride = 2, padding = 'VALID'):
            conv = conv2d(layers[-1], out_channels, scope = 'encoder%d'%(idx+1))
            print(conv.get_shape())
            layers.append(conv)
    ### decoder part

    layers_specs = [
        (cfg.ngf*8, 0.5),
        (cfg.ngf*8, 0.5),
        (cfg.ngf*8, 0.0),
        (cfg.ngf*4, 0.0),
        (cfg.ngf*2, 0.0),
        (cfg.ngf, 0.0)
    ]
    num_encoder_layers = len(layers)

    for decoder_layer_idx, (out_channels, dropout) in enumerate(layers_specs):
        skip_layer = num_encoder_layers - decoder_layer_idx - 1
        with slim.arg_scope([slim.conv2d], activation_fn = lrelu):
            if decoder_layer_idx == 0:
                input = layers[-1]
            else:
                input = tf.concat([layers[-1], layers[skip_layer]], axis = 3)
            output = upsample_layer(input, out_channels, mode = 'deconv')
            print(output.get_shape())
            if dropout > 0.0:
                output = tf.nn.dropout(output, keep_prob = 1 - dropout)
            layers.append(output)
    input = tf.concat([layers[-1],layers[0]], axis = 3)
    output = slim.conv2d_transpose(input, num_channels, [4,4], stride = 2, activation_fn = tf.tanh)
    return output
Project: CGAN | Author: theflashsean1
def __call__(self, input_, y):
        batch_size, y_dim = y.get_shape().as_list()
        batch_size_, height, width, c_dim = input_.get_shape().as_list()
        assert batch_size == batch_size_
        assert (self._input_size == width) and (self._input_size == height)
        h0_size = int(self._input_size / 2)
        h1_size = int(self._input_size / 4)

        with tf.variable_scope(self._name):
            yb = tf.reshape(y, shape=[-1, 1, 1, y_dim])
            # dim(x) = (100, 28, 28, 11)
            x = tf.concat([input_, yb*tf.ones([batch_size, self._input_size, self._input_size, y_dim])], axis=3)
            h0 = ops.leaky_relu(
                ops.conv2d(x, c_dim + y_dim, reuse=self._reuse, name='d_conv0'),
                slope=0.2
            )
            h0 = tf.concat([h0, yb*tf.ones([batch_size, h0_size, h0_size, y_dim])], axis=3)  # (100, 14, 14, 21)

            h1 = ops.leaky_relu(
                ops.batch_norm(
                    ops.conv2d(h0, c_dim + self._ndf, reuse=self._reuse, name='d_conv1'),
                    is_training=self._is_training,
                    reuse=self._reuse,
                    name_scope='d_bn1'
                ),
                slope=0.2
            )
            h1 = tf.reshape(h1, [batch_size, h1_size*h1_size*(c_dim+self._ndf)])
            h1 = tf.concat([h1, y], axis=1)  # (100, 28*28*(1+64)+10)

            h2 = ops.leaky_relu(
                ops.batch_norm(
                    ops.fc(h1, self._fc_dim, reuse=self._reuse, name='d_fc2'),
                    is_training=self._is_training,
                    reuse=self._reuse,
                    name_scope='d_bn2'
                ),
                slope=0.2
            )
            h2 = tf.concat([h2, y], axis=1)  # (100, 794)
            # h3 = tf.nn.sigmoid(
            h3 = ops.fc(h2, 1, reuse=self._reuse, name='d_fc3')
            # )
        self._reuse = True
        return h3  # (100, 1)
Project: adagan | Author: tolstikhin
def vgg_16(inputs,
           is_training=False,
           dropout_keep_prob=0.5,
           scope='vgg_16',
           fc_conv_padding='VALID', reuse=None):
    inputs = inputs * 255.0
    inputs -= tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
    with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc:
      end_points_collection = sc.name + '_end_points'
      end_points = {}
      # Collect outputs for conv2d, fully_connected and max_pool2d.
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                          outputs_collections=end_points_collection):
        end_points['pool0'] = inputs
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        end_points['pool1'] = net
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        end_points['pool2'] = net
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        end_points['pool3'] = net
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        end_points['pool4'] = net
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        end_points['pool5'] = net
  #       # Use conv2d instead of fully_connected layers.
  #       net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
  #       net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
  #                          scope='dropout6')
  #       net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
  #       net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
  #                          scope='dropout7')
  #       net = slim.conv2d(net, num_classes, [1, 1],
  #                         activation_fn=None,
  #                         normalizer_fn=None,
  #                         scope='fc8')
        # Convert end_points_collection into a end_point dict.
  #       end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        return net, end_points
Project: adagan | Author: tolstikhin
def ali_deconv(self, opts, noise, is_training, reuse, keep_prob):
        output_shape = self._data.data_shape

        batch_size = tf.shape(noise)[0]
        noise_size = int(noise.get_shape()[1])
        data_height = output_shape[0]
        data_width = output_shape[1]
        data_channels = output_shape[2]

        noise = tf.reshape(noise, [-1, 1, 1, noise_size])

        num_units = opts['g_num_filters']
        layer_params = []
        layer_params.append([4, 1, num_units])
        layer_params.append([4, 2, num_units / 2])
        layer_params.append([4, 1, num_units / 4])
        layer_params.append([4, 2, num_units / 8])
        layer_params.append([5, 1, num_units / 8])
        # For convolution: (n - k) / stride + 1 = s
        # For transposed: (s - 1) * stride + k = n
        layer_x = noise
        height = 1
        width = 1
        for i, (kernel, stride, channels) in enumerate(layer_params):
            height = (height - 1) * stride + kernel
            width = height
            layer_x = ops.deconv2d(
                opts, layer_x, [batch_size, height, width, channels], d_h=stride, d_w=stride,
                scope='h%d_deconv' % i, conv_filters_dim=kernel, padding='VALID')
            if opts['batch_norm']:
                layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
            layer_x = ops.lrelu(layer_x, 0.1)
        assert height == data_height
        assert width == data_width

        # Then two 1x1 convolutions.
        layer_x = ops.conv2d(opts, layer_x, num_units / 8, d_h=1, d_w=1, scope='conv2d_1x1', conv_filters_dim=1)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
        layer_x = ops.lrelu(layer_x, 0.1)
        layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1, scope='conv2d_1x1_2', conv_filters_dim=1)

        if opts['input_normalize_sym']:
            return tf.nn.tanh(layer_x)
        else:
            return tf.nn.sigmoid(layer_x)
Project: adagan | Author: tolstikhin
def ali_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
        num_units = opts['e_num_filters']
        layer_params = []
        layer_params.append([5, 1, num_units / 8])
        layer_params.append([4, 2, num_units / 4])
        layer_params.append([4, 1, num_units / 2])
        layer_params.append([4, 2, num_units])
        layer_params.append([4, 1, num_units * 2])
        # For convolution: (n - k) / stride + 1 = s
        # For transposed: (s - 1) * stride + k = n
        layer_x = input_
        height = int(layer_x.get_shape()[1])
        width = int(layer_x.get_shape()[2])
        assert height == width
        for i, (kernel, stride, channels) in enumerate(layer_params):
            height = (height - kernel) / stride + 1
            width = height
            # print((height, width))
            layer_x = ops.conv2d(
                opts, layer_x, channels, d_h=stride, d_w=stride,
                scope='h%d_conv' % i, conv_filters_dim=kernel, padding='VALID')
            if opts['batch_norm']:
                layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
            layer_x = ops.lrelu(layer_x, 0.1)
        assert height == 1
        assert width == 1

        # Then two 1x1 convolutions.
        layer_x = ops.conv2d(opts, layer_x, num_units * 2, d_h=1, d_w=1, scope='conv2d_1x1', conv_filters_dim=1)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
        layer_x = ops.lrelu(layer_x, 0.1)
        layer_x = ops.conv2d(opts, layer_x, num_units / 2, d_h=1, d_w=1, scope='conv2d_1x1_2', conv_filters_dim=1)

        if opts['e_is_random']:
            latent_mean = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
            log_latent_sigmas = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
            return latent_mean, log_latent_sigmas
        else:
            return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
Project: adagan | Author: tolstikhin
def _recon_loss_using_disc_conv_eb(self, opts, reconstructed_training, real_points, is_training, keep_prob):
        """Build an additional loss using a discriminator in X space, using Energy Based approach."""
        def copy3D(height, width, channels):
            m = np.zeros([height, width, channels, height, width, channels])
            for i in xrange(height):
                for j in xrange(width):
                    for c in xrange(channels):
                        m[i, j, c, i, j, c] = 1.0
            return tf.constant(np.reshape(m, [height, width, channels, -1]), dtype=tf.float32)

        def _architecture(inputs, reuse=None):
            dim = opts['adv_c_patches_size']
            height = int(inputs.get_shape()[1])
            width = int(inputs.get_shape()[2])
            channels = int(inputs.get_shape()[3])
            with tf.variable_scope('DISC_X_LOSS', reuse=reuse):
                num_units = opts['adv_c_num_units']
                num_layers = 1
                layer_x = inputs
                for i in xrange(num_layers):
#                     scale = 2**(num_layers-i-1)
                    layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i,
                                         conv_filters_dim=dim, padding='SAME')
#                     if opts['batch_norm']:
#                         layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
                    layer_x = ops.lrelu(layer_x, 0.1)  #tf.nn.relu(layer_x)

                copy_w = copy3D(dim, dim, channels)
                duplicated = tf.nn.conv2d(inputs, copy_w, strides=[1, 1, 1, 1], padding='SAME')
                decoded = ops.conv2d(
                    opts, layer_x, channels * dim * dim, d_h=1, d_w=1, scope="decoder",
                    conv_filters_dim=1, padding='SAME')
            reconstruction = tf.reduce_mean(tf.square(tf.stop_gradient(duplicated) - decoded), [1, 2, 3])
            assert len(reconstruction.get_shape()) == 1
            return flatten(layer_x), reconstruction


        reconstructed_embed_sg, adv_fake_layer = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
        reconstructed_embed, _ = _architecture(reconstructed_training, reuse=True)
        # Below line enforces the forward to be reconstructed_embed and backwards to NOT change the discriminator....
        crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
        real_p_embed_sg, adv_true_layer = _architecture(tf.stop_gradient(real_points), reuse=True)
        real_p_embed, _ = _architecture(real_points, reuse=True)

        adv_fake = tf.reduce_mean(adv_fake_layer)
        adv_true = tf.reduce_mean(adv_true_layer)

        adv_c_loss = tf.log(adv_true) - tf.log(adv_fake)
        emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
        emb_c_loss = tf.reduce_mean(emb_c)

        return adv_c_loss, emb_c_loss
Project: Relation-Network-Tensorflow | Author: gitlimlab
def build(self, is_train=True):

        n = self.a_dim
        conv_info = self.conv_info

        # build loss and accuracy {{{
        def build_loss(logits, labels):
            # Cross-entropy loss
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return tf.reduce_mean(loss), accuracy
        # }}}

        # Classifier: takes images as input and outputs class label [B, m]
        def C(img, q, scope='Classifier'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')
                conv_q = tf.concat([tf.reshape(conv_4, [self.batch_size, -1]), q], axis=1)
                fc_1 = fc(conv_q, 256, name='fc_1')
                fc_2 = fc(fc_1, 256, name='fc_2')
                fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
                fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
                return fc_3

        logits = C(self.img, self.q, scope='Classifier')
        self.all_preds = tf.nn.softmax(logits)
        self.loss, self.accuracy = build_loss(logits, self.a)

        # Add summaries
        def draw_iqa(img, q, target_a, pred_a):
            fig, ax = tfplot.subplots(figsize=(6, 6))
            ax.imshow(img)
            ax.set_title(question2str(q))
            ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
            return fig

        try:
            tfplot.summary.plot_many('IQA/',
                                     draw_iqa, [self.img, self.q, self.a, self.all_preds],
                                     max_outputs=3,
                                     collections=["plot_summaries"])
        except:
            pass

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/cross_entropy", self.loss)
        log.warn('Successfully loaded the model.')
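The conv2d used by this classifier takes an is_train flag and per-call strides (s_h, s_w), which suggests a convolution fused with batch normalization and a nonlinearity. A hedged sketch; the kernel size, batch-norm call and ReLU are assumptions:

import tensorflow as tf

def conv2d(input_, output_dim, is_train, k_h=5, k_w=5, s_h=2, s_w=2,
           stddev=0.02, name='conv2d'):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, s_h, s_w, 1], padding='SAME')
        # Batch norm only updates its moving statistics while training.
        bn = tf.contrib.layers.batch_norm(conv, is_training=is_train,
                                          updates_collections=None)
        return tf.nn.relu(bn)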