Python tensorflow module: image_summary() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.image_summary().
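All snippets below use the pre-1.0 API: tf.image_summary(tag, tensor, max_images=3) writes up to max_images images from a 4-D [batch, height, width, channels] tensor to TensorBoard. Around TensorFlow 1.0 the op was renamed tf.summary.image and the max_images keyword became max_outputs. As a minimal orientation sketch (the log directory and tag names here are our own placeholders, not from any of the projects below):

import numpy as np
import tensorflow as tf

# A batch of eight 28x28 grayscale images to visualize.
images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
summary_op = tf.image_summary('example_images', images, max_images=4)

with tf.Session() as sess:
    writer = tf.train.SummaryWriter('/tmp/image_summary_demo', sess.graph)
    batch = np.random.rand(8, 28, 28, 1).astype(np.float32)
    writer.add_summary(sess.run(summary_op, feed_dict={images: batch}), 0)
    writer.close()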

Project: neuro-stereo | Author: lugu
def conv_max_pool_2x2(x, conv_width, conv_height, in_depth, out_depth, name="conv"):

    with tf.name_scope(name) as scope:
        W_conv = weight_variable([conv_width, conv_height, in_depth, out_depth])
        b_conv = bias_variable([out_depth])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = max_pool_2x2(h_conv)

    with tf.name_scope("summaries") as scope:

        # TIPS: to display the 32 convolution filters, rearrange the
        # weights to look like 32 images with a transposition.
        a = tf.reshape(W_conv, [conv_width * conv_height * in_depth, out_depth])
        b = tf.transpose(a)
        c = tf.reshape(b, [out_depth, conv_width, conv_height * in_depth, 1])
        conv_image = tf.image_summary(name + " filter", c, out_depth)

        # TIPS: by looking at the weights histogram, we can see whether the
        # weights are exploding or vanishing.
        W_conv_hist = tf.histogram_summary(name + " weights", W_conv)
        b_conv_hist = tf.histogram_summary(name + " biases", b_conv)

    return h_pool
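This snippet relies on four helpers (weight_variable, bias_variable, conv2d, max_pool_2x2) that are not shown. A sketch of what they conventionally look like, following the TensorFlow MNIST tutorial rather than this project's actual code:

import tensorflow as tf

def weight_variable(shape):
    # Small truncated-normal noise breaks symmetry between filters.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    # Stride-1 convolution with SAME padding preserves spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling halves height and width.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')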
Project: easy-tensorflow | Author: khanhptnk
def run(self):
    """Run evaluation."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._eval_log_dir):
      os.makedirs(self._eval_log_dir)

    # Compute the loss function and other evaluation metrics.
    self._initialize()

    # Visualize input images in Tensorboard.
    self._summary_ops.append(tf.image_summary("Eval_Image", self._observations, max_images=5))

    # Use `slim.evaluation.evaluation_loop` to evaluate the model periodically.
    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=self._train_log_dir,
        logdir=self._eval_log_dir,
        num_evals=self._config.num_batches,
        eval_op=self._metrics_to_updates.values(),
        summary_op=tf.merge_summary(self._summary_ops),
        eval_interval_secs=self._config.eval_interval_secs)
Project: easy-tensorflow | Author: khanhptnk
def run(self):
    """Run training."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._train_log_dir):
      os.makedirs(self._train_log_dir)

    # Load data and compute loss function
    self._initialize()

    # Visualize input images in Tensorboard.
    self._summary_ops.append(tf.image_summary("Image_Train", self._observations, max_images=5))

    # Initialize optimizer.
    optimizer = tf.train.AdadeltaOptimizer(self._config.learning_rate)
    train_op = slim.learning.create_train_op(self._loss, optimizer)

    # Use `slim.learning.train` to manage training.
    slim.learning.train(train_op=train_op,
                        logdir=self._train_log_dir,
                        graph=self._graph,
                        number_of_steps=self._config.train_steps,
                        summary_op=tf.merge_summary(self._summary_ops),
                        save_summaries_secs=self._config.save_summaries_secs,
                        save_interval_secs=self._config.save_interval_secs)
Project: how_to_convert_text_to_images | Author: llSourcell
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
Project: Supply-demand-forecasting | Author: LevinJ
def get_input(self):
        # Input data.
        # Load the training, validation and test data into constants that are
        # attached to the graph.
        self.mnist = input_data.read_data_sets('data',
                                    one_hot=True,
                                    fake_data=False)
        # Input placeholders
        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
            self.y_true = tf.placeholder(tf.float32, [None, 10], name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='drop_out')
        # below is just for the sake of visualization
        with tf.name_scope('input_reshape'):
            image_shaped_input = tf.reshape(self.x, [-1, 28, 28, 1])
            tf.image_summary('input', image_shaped_input, 10)

        return
Project: Magic-Pixel | Author: zhwhong
def build_model(self):
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.input_size, self.input_size, 3], name='real_images')
        # self.inputs = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 3], name='real_images')

        try:
            self.up_inputs = tf.image.resize_images(self.inputs, self.image_shape[0], self.image_shape[1], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        except ValueError:
            # newer versions of tensorflow
            self.up_inputs = tf.image.resize_images(self.inputs, [self.image_shape[0], self.image_shape[1]], tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape, name='real_images')
        # self.images = tf.placeholder(tf.float32, [None] + self.image_shape, name='real_images')
        self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape, name='sample_images')
        # self.sample_images = tf.placeholder(tf.float32, [None] + self.image_shape, name='sample_images')

        self.G = self.generator(self.inputs)
        self.G_sum = tf.image_summary("G", self.G)
        self.g_loss = tf.reduce_mean(tf.square(self.images-self.G))
        self.g_loss_sum = tf.scalar_summary("g_loss", self.g_loss)
        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver()
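The try/except above guards against the tf.image.resize_images signature change: older releases took the target height and width as separate arguments, while later releases expect a single [height, width] list. The same guard as a standalone helper (the name resize_nn is our own; a sketch, not this project's code):

import tensorflow as tf

def resize_nn(images, height, width):
    # Nearest-neighbor resize that works across both signatures.
    try:
        return tf.image.resize_images(images, height, width,
                                      tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    except (TypeError, ValueError):
        return tf.image.resize_images(images, [height, width],
                                      tf.image.ResizeMethod.NEAREST_NEIGHBOR)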
Project: StackGAN | Author: hanzhanggit
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
Project: cnn_picture_gazebo | Author: liuyandong1988
def get_batch(image, label, batch_size, crop_size):
    # Data augmentation.
    distorted_image = tf.image.central_crop(image, 33./37.)
    distorted_image = tf.random_crop(distorted_image, [crop_size, crop_size, 3])  # random crop
    # distorted_image = tf.image.random_flip_up_down(distorted_image)  # random vertical flip
    # distorted_image = tf.image.random_brightness(distorted_image, max_delta=50)  # random brightness
    # distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)  # random contrast

    # Assemble the batch.
    # For shuffle_batch, `capacity` bounds the queue size and `min_after_dequeue` is the
    # minimum number of examples kept in the queue after each dequeue; larger values give
    # better shuffling at the cost of memory and start-up time.
    images, label_batch = tf.train.shuffle_batch([distorted_image, label], batch_size=batch_size,
                                                 num_threads=4, capacity=50000, min_after_dequeue=10000)

    # Visualize in TensorBoard.
    # tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
Project: ultrasound-nerve-segmentation-in-tensorflow | Author: loliverhennigh
def nerve_inputs(batch_size):
  """ Construct nerve input net.
  Args:
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of shape [batch_size, 420, 580, channels].
    masks: Masks. 4D tensor of shape [batch_size, 420, 580, channels].
  """

  shape = (420,580)

  tfrecord_filename = glb('../data/tfrecords/*') 
  print(tfrecord_filename)

  filename_queue = tf.train.string_input_producer(tfrecord_filename) 

  image, mask = read_data(filename_queue, shape)

  images, masks = _generate_image_label_batch(image, mask, batch_size)

  # display in tf summary page 
  tf.image_summary('images', images)
  tf.image_summary('mask', masks)

  return images, masks
Project: neuro-stereo | Author: lugu
def conv_max_pool_2x2(x, conv_width, conv_height, in_depth, out_depth, name="conv"):

    with tf.name_scope(name) as scope:
        W_conv = weight_variable([conv_width, conv_height, in_depth, out_depth])
        b_conv = bias_variable([out_depth])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = max_pool_2x2(h_conv)

    with tf.name_scope("summaries") as scope:

        # TIPS: to display the 32 convolution filters, rearrange the
        # weights to look like 32 images with a transposition.
        a = tf.reshape(W_conv, [conv_width * conv_height * in_depth, out_depth])
        b = tf.transpose(a)
        c = tf.reshape(b, [out_depth, conv_width, conv_height * in_depth, 1])
        conv_image = tf.image_summary(name + " filter", c, out_depth)

        # TIPS: by looking at the weights histogram, we can see whether the
        # weights are exploding or vanishing.
        W_conv_hist = tf.histogram_summary(name + " weights", W_conv)
        b_conv_hist = tf.histogram_summary(name + " biases", b_conv)

    return h_pool
Project: the-neural-perspective | Author: GokuMohandas
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(resized_image)
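Note that tf.image.per_image_whitening was renamed tf.image.per_image_standardization from TensorFlow 1.0 on, so under a newer runtime the last line would read:

  return tf.image.per_image_standardization(resized_image)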
Project: fast-neural-style | Author: coder-james
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(resized_image)
Project: gan-image-similarity | Author: marcbelmont
def zap_data(FLAGS, shuffle):
    files = glob(FLAGS.file_pattern)
    filename_queue = tf.train.string_input_producer(
        files,
        shuffle=shuffle,
        num_epochs=None if shuffle else 1)
    image = read_image(filename_queue, shuffle)

    # Mini batch
    num_preprocess_threads = 1 if FLAGS.debug else 4
    min_queue_examples = 100 if FLAGS.debug else 10000
    if shuffle:
        images = tf.train.shuffle_batch(
            image,
            batch_size=FLAGS.batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * FLAGS.batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images = tf.train.batch(
            image,
            FLAGS.batch_size,
            allow_smaller_final_batch=True)
    # tf.image_summary('images', images, max_images=8)
    return dict(batch=images, size=len(files))
Project: gan-image-similarity | Author: marcbelmont
def generator(z, latent_c):
    depths = [32, 64, 64, 64, 64, 64, 3]
    sizes = zip(
        np.linspace(4, IMAGE_SIZE['resized'][0], len(depths)).astype(np.int),
        np.linspace(6, IMAGE_SIZE['resized'][1], len(depths)).astype(np.int))
    with slim.arg_scope([slim.conv2d_transpose],
                        normalizer_fn=slim.batch_norm,
                        kernel_size=3):
        with tf.variable_scope("gen"):
            size = sizes.pop(0)
            net = tf.concat(1, [z, latent_c])
            net = slim.fully_connected(net, depths[0] * size[0] * size[1])
            net = tf.reshape(net, [-1, size[0], size[1], depths[0]])
            for depth in depths[1:-1] + [None]:
                net = tf.image.resize_images(
                    net, sizes.pop(0),
                    tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                if depth:
                    net = slim.conv2d_transpose(net, depth)
            net = slim.conv2d_transpose(
                net, depths[-1], activation_fn=tf.nn.tanh, stride=1, normalizer_fn=None)
            tf.image_summary("gen", net, max_images=8)
    return net
Project: the-neural-perspective | Author: johnsonc
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.image_summary('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_width,
                                                         output_height)
  tf.image_summary('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(resized_image)
Project: tfPhotoClassifier | Author: daiz713
def distorted_inputs (tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * min_fraction_of_examples_in_queue)

    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )

    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
Project: tfPhotoClassifier | Author: daiz713
def distorted_inputs (tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)

    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )

    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
Project: ml | Author: hohoins
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  # tf.image_summary('images', images)
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
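This example already uses the renamed tf.summary.image API, keeping the pre-1.0 call as a comment. When one codebase must run under both API generations, a small guard like the following sketch works (image_summary_compat is our own name, not part of this project):

import tensorflow as tf

def image_summary_compat(tag, images, max_outputs=3):
    # Prefer the post-1.0 API when it is available.
    if hasattr(tf, 'summary') and hasattr(tf.summary, 'image'):
        return tf.summary.image(tag, images, max_outputs=max_outputs)
    # Fall back to the pre-1.0 op.
    return tf.image_summary(tag, images, max_images=max_outputs)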
Project: ml | Author: hohoins
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  # tf.image_summary('images', images)
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
Project: how_to_convert_text_to_images | Author: llSourcell
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by `rows` fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
Project: how_to_convert_text_to_images | Author: llSourcell
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
Project: how_to_convert_text_to_images | Author: llSourcell
def epoch_sum_images(self, sess, n):
        images_train, _, embeddings_train, captions_train, _ =\
            self.dataset.train.next_batch(n * n, cfg.TRAIN.NUM_EMBEDDING)
        images_train = self.preprocess(images_train, n)
        embeddings_train = self.preprocess(embeddings_train, n)

        images_test, _, embeddings_test, captions_test, _ = \
            self.dataset.test.next_batch(n * n, 1)
        images_test = self.preprocess(images_test, n)
        embeddings_test = self.preprocess(embeddings_test, n)

        images = np.concatenate([images_train, images_test], axis=0)
        embeddings =\
            np.concatenate([embeddings_train, embeddings_test], axis=0)

        if self.batch_size > 2 * n * n:
            images_pad, _, embeddings_pad, _, _ =\
                self.dataset.test.next_batch(self.batch_size - 2 * n * n, 1)
            images = np.concatenate([images, images_pad], axis=0)
            embeddings = np.concatenate([embeddings, embeddings_pad], axis=0)
        feed_dict = {self.images: images,
                     self.embeddings: embeddings}
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary], feed_dict)

        # save images generated for train and test captions
        scipy.misc.imsave('%s/train.jpg' % (self.log_dir), gen_samples[0])
        scipy.misc.imsave('%s/test.jpg' % (self.log_dir), gen_samples[1])

        # pfi_train = open(self.log_dir + "/train.txt", "w")
        pfi_test = open(self.log_dir + "/test.txt", "w")
        for row in range(n):
            # pfi_train.write('\n***row %d***\n' % row)
            # pfi_train.write(captions_train[row * n])

            pfi_test.write('\n***row %d***\n' % row)
            pfi_test.write(captions_test[row * n])
        # pfi_train.close()
        pfi_test.close()

        return img_summary
Project: how_to_convert_text_to_images | Author: llSourcell
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by `rows` fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
Project: agent-trainer | Author: lopespm
def _convolutional_layer(self, input, patch_size, stride, input_channels, output_channels, bias_init_value, scope_name):
        with tf.variable_scope(scope_name) as scope:
            weights = tf.get_variable(name='weights',
                                  shape=[patch_size, patch_size, input_channels, output_channels],
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())
            biases = tf.Variable(name='biases', initial_value=tf.constant(value=bias_init_value, shape=[output_channels]))
            conv = tf.nn.conv2d(input, weights, [1, stride, stride, 1], padding='SAME')

            linear_rectification_bias = tf.nn.bias_add(conv, biases)
            output = tf.nn.relu(linear_rectification_bias, name=scope.name)

            grid_x = output_channels // 4
            grid_y = 4 * input_channels
            kernels_image_grid = self._create_kernels_image_grid(weights, (grid_x, grid_y))
            tf.image_summary(scope_name + '/features', kernels_image_grid, max_images=1)

            if "_conv1" in scope_name:
                x_min = tf.reduce_min(weights)
                x_max = tf.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)

                # to tf.image_summary format [batch_size, height, width, channels]
                weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])

                tf.image_summary(scope_name + '/features', weights_transposed[:,:,:,0:1], max_images=32)

        return output
Project: keras | Author: GeekLiB
def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf.histogram_summary(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        tf.image_summary(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
Project: facial-emotion-detection-dl | Author: dllatas
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
Project: facial-emotion-detection-dl | Author: dllatas
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images/train', images, max_images=10)
  return images, tf.reshape(label_batch, [batch_size])
Project: facial-emotion-detection-dl | Author: dllatas
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
Project: facial-emotion-detection-dl | Author: dllatas
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images, max_images=10)

  return images, tf.reshape(label_batch, [batch_size])
Project: facial-emotion-detection-dl | Author: dllatas
def generate_train_batch(label, image, batch_size=FLAGS.batch_size):
    num_preprocess_threads = 1
    min_fraction_of_examples_in_queue = 0.5
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        # capacity=4,
        min_after_dequeue=min_queue_examples
        # min_after_dequeue=1
        )
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
Project: CDBN-for-Tensorflow | Author: shygiants
def __image_summary(self, name, image, max_images):
        tf.image_summary('{}/{}'.format(self.name, name), image, max_images=max_images)
Project: dlbench | Author: hclhkbu
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 8 
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 16 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 16 * batch_size)

  # Display the training images in the visualizer.
  #tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
Project: StackGAN | Author: hanzhanggit
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by `rows` fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
Project: StackGAN | Author: hanzhanggit
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
Project: StackGAN | Author: hanzhanggit
def epoch_sum_images(self, sess, n):
        images_train, _, embeddings_train, captions_train, _ =\
            self.dataset.train.next_batch(n * n, cfg.TRAIN.NUM_EMBEDDING)
        images_train = self.preprocess(images_train, n)
        embeddings_train = self.preprocess(embeddings_train, n)

        images_test, _, embeddings_test, captions_test, _ = \
            self.dataset.test.next_batch(n * n, 1)
        images_test = self.preprocess(images_test, n)
        embeddings_test = self.preprocess(embeddings_test, n)

        images = np.concatenate([images_train, images_test], axis=0)
        embeddings =\
            np.concatenate([embeddings_train, embeddings_test], axis=0)

        if self.batch_size > 2 * n * n:
            images_pad, _, embeddings_pad, _, _ =\
                self.dataset.test.next_batch(self.batch_size - 2 * n * n, 1)
            images = np.concatenate([images, images_pad], axis=0)
            embeddings = np.concatenate([embeddings, embeddings_pad], axis=0)
        feed_dict = {self.images: images,
                     self.embeddings: embeddings}
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary], feed_dict)

        # save images generated for train and test captions
        scipy.misc.imsave('%s/train.jpg' % (self.log_dir), gen_samples[0])
        scipy.misc.imsave('%s/test.jpg' % (self.log_dir), gen_samples[1])

        # pfi_train = open(self.log_dir + "/train.txt", "w")
        pfi_test = open(self.log_dir + "/test.txt", "w")
        for row in range(n):
            # pfi_train.write('\n***row %d***\n' % row)
            # pfi_train.write(captions_train[row * n])

            pfi_test.write('\n***row %d***\n' % row)
            pfi_test.write(captions_test[row * n])
        # pfi_train.close()
        pfi_test.close()

        return img_summary
Project: StackGAN | Author: hanzhanggit
def visualize_one_superimage(self, img_var, images, rows, filename):
        stacked_img = []
        for row in range(rows):
            img = images[row * rows, :, :, :]
            row_img = [img]  # real image
            for col in range(rows):
                row_img.append(img_var[row * rows + col, :, :, :])
            # each row is 1 real image followed by `rows` fake images
            stacked_img.append(tf.concat(1, row_img))
        imgs = tf.expand_dims(tf.concat(0, stacked_img), 0)
        current_img_summary = tf.image_summary(filename, imgs)
        return current_img_summary, imgs
Project: tensorflow-ram | Author: qingzew
def _generate_image_and_label_batch(self, image, label, min_queue_examples,
                                        shuffle):
        """Construct a queued batch of images and labels.

        Args:
            image: 3-D Tensor of [height, width, 3] of type float32.
            label: 1-D Tensor of type int32.
            min_queue_examples: int32, minimum number of samples to retain
                in the queue that provides batches of examples.
            batch_size: Number of images per batch.
            shuffle: boolean indicating whether to use a shuffling queue.

        Returns:
            images: Images. 4D tensor of [batch_size, height, width, 3] size.
            labels: Labels. 1D tensor of [batch_size] size.
        """
        # Create a queue that shuffles the examples, and then
        # read 'batch_size' images + labels from the example queue.
        if shuffle:
            images, labels = tf.train.shuffle_batch(
                [image, label],
                batch_size = self.batch_size,
                num_threads = self.num_threads,
                capacity = min_queue_examples + 3 * self.batch_size,
                min_after_dequeue = min_queue_examples)
        else:
            images, labels = tf.train.batch(
                [image, label],
                batch_size = self.batch_size,
                num_threads = self.num_threads,
                capacity = min_queue_examples + 3 * self.batch_size)

        # Display the training images in the visualizer.
        tf.image_summary('images', images, max_images = 3)

        return {'images' : images, 'labels' : labels}
Project: various_residual_networks | Author: yuhui-lin
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle, smr_name):
    """Construct a queued batch of images and labels.
    Args:
        image: 3-D Tensor of [height, width, 3] of type float32.
        label: 1-D Tensor of type int32.
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.
    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)

    # Display the training images in the visualizer.
    tf.image_summary(smr_name, images, max_images=FLAGS.max_images)

    return images, tf.reshape(label_batch, [batch_size])
Project: TFFRCNN | Author: InterVideo
def build_image_summary(self):
        """
        A simple graph for writing an image summary.
        :return:
        """
        log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
        log_image_name = tf.placeholder(tf.string)
        log_image = tf.image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)
        # log_image = tf.image_summary(log_image_name, log_image_data, max_images=50)
        return log_image, log_image_data, log_image_name
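Because both the tag and the image are placeholders, the same graph node can log arbitrary numpy images at run time. A usage sketch (the session/writer setup and the `net` object are assumptions, not part of the original):

import numpy as np
import tensorflow as tf

log_image, log_image_data, log_image_name = net.build_image_summary()  # `net` is assumed
with tf.Session() as sess:
    writer = tf.train.SummaryWriter('/tmp/logs', sess.graph)
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # any HxWx3 uint8 image
    summary_str = sess.run(log_image, feed_dict={log_image_data: frame,
                                                 log_image_name: 'frame_0'})
    writer.add_summary(summary_str)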
Project: NetworkCompress | Author: luzai
def conv_layer(input, size_in, size_out, name="conv"):
    with tf.name_scope(name) as scope:
        w = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1), name="W")
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
        act = tf.nn.relu(conv + b)
        tf.summary.histogram("weights", w)
        tf.summary.histogram("bias", b)
        tf.summary.histogram("activation", act)
        # act_list=tf.split(act,size_out,axis=)
        print(act.get_shape())
        # tf.Print(act,[act],message="!!!!!")
        # tf.Print(act,[act.get_shape()],message="!!!")
        # tf.Print(act,[tf.shape(act)],message="!!!!")

        x_min = tf.reduce_min(w)
        x_max = tf.reduce_max(w)
        weights_0_to_1 = (w - x_min) / (x_max - x_min)
        weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)

        # to tf.image_summary format [batch_size, height, width, channels]
        weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
        tf.summary.image('activation', weights_transposed)
        return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")


# Add fully connected layer
Project: SLAM | Author: sanjeevkumar42
def read_rgbd_data(self, input_queue):
        # original input size
        width_original = 480
        height_original = 640

        # input size
        width = 224
        height = 224

        value_rgb = tf.read_file(input_queue[0])
        value_depth = tf.read_file(input_queue[1])

        # Decoder
        png_rgb = tf.image.decode_png(value_rgb, channels=3)
        tf.image_summary('image', png_rgb)
        png_depth = tf.image.decode_png(value_depth, channels=1)

        # Reshape
        png_rgb = tf.reshape(png_rgb, [width_original, height_original, 3])
        png_depth = tf.reshape(png_depth, [width_original, height_original, 1])

        # Resize
        png_rgb = tf.image.resize_images(png_rgb, width, height)
        png_depth = tf.image.resize_images(png_depth, width, height)

        # Normalize depth
        png_depth = png_depth * 255.0 / tf.reduce_max(png_depth)

        image = tf.concat(2, (png_rgb, png_depth))

        twist = tf.reshape(input_queue[2], [1, 1, 6])

        return tf.cast(image, tf.float32), tf.cast(twist, tf.float32)
Project: SLAM | Author: sanjeevkumar42
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
Project: tensorflow_node | Author: elggem
def image_summary(self, tag, image):
        image = image.reshape((1, image.shape[0], image.shape[1], 1)).astype(np.float32)

        image_summary_op = tf.image_summary(tag, image)
        image_summary_str = tf.Session().run(image_summary_op)

        SummaryWriter().writer.add_summary(image_summary_str, 0)
        SummaryWriter().writer.flush()

        rospy.loginfo("?? " + tag + " image plotted.")
        pass
Project: lsdc | Author: febert
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return sum
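tf.split(4, 10, cdna_kerns) uses the pre-1.0 argument order (axis, num_splits, value); from TensorFlow 1.0 on, the equivalent call puts the value first:

  cdna_kerns = tf.split(cdna_kerns, 10, axis=4)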
Project: lsdc | Author: febert
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return sum
Project: lsdc | Author: febert
def make_cdna_kerns_summary(cdna_kerns, t, suffix):

    sum = []
    cdna_kerns = tf.split(4, 10, cdna_kerns)
    for i, kern in enumerate(cdna_kerns):
        kern = tf.squeeze(kern)
        kern = tf.expand_dims(kern,-1)
        sum.append(
            tf.image_summary('step' + str(t) +'_filter'+ str(i)+ suffix, kern)
        )

    return sum
Project: TF-Net | Author: Jorba123
def image_summary(x, tensor_name=None, max_images=3):
    if tensor_name is None:
        tensor_name = x.op.name
    tf.summary.image(tensor_name, x, max_outputs=max_images)  # honor max_images (called max_outputs in the new API)
Project: pixel-rnn | Author: pby5
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
Project: deep-style-transfer | Author: albertlai
def build_loss(self, session, texture_weight=15, tv=500):
        if self.is_training:
            with tf.name_scope('loss'):     
                self.loss = self.descriptor_loss.build(session, self.generator.image_in, texture_weight)
                if tv > 0:
                    print("tv loss %d" % tv)
                    with tf.name_scope('tv_loss'):
                        batches, h, w, c = self.generator.out.get_shape().as_list()
                        x = self.generator.out[:,1:,:,:]
                        x_1 = self.generator.out[:,:(h-1),:,:]
                        y = self.generator.out[:,:,1:,:]
                        y_1 = self.generator.out[:,:,:w-1,:]
                        x_var = tf.nn.l2_loss(x - x_1)
                        y_var = tf.nn.l2_loss(y - y_1)
                        x_n = batches * (h-1) * w * c
                        y_n = batches * h * (w-1) * c
                        tv_loss = tv * (x_var/x_n + y_var/y_n)
                    self.loss = self.loss + tv_loss

            loss_summary_name = "loss"
            self.summary = tf.scalar_summary(loss_summary_name, self.loss)
            image_summary_name = "out"
            self.image_summary = tf.image_summary(image_summary_name, self.generator.out + utils.MEAN_VALUES, max_images=3)
            input_summary_name = "in"
            self.input_summary = tf.image_summary(input_summary_name, self.image + utils.MEAN_VALUES, max_images=3)

            self.merged = tf.merge_all_summaries()

            self.global_step = tf.Variable(0, name='global_step', trainable=False)

            return self.loss
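A note on the tv_loss term above: tf.nn.l2_loss(t) computes sum(t**2) / 2, so tv_loss equals tv * (sum((x - x_1)**2) / (2 * x_n) + sum((y - y_1)**2) / (2 * y_n)), i.e. half the mean squared difference between vertically and horizontally adjacent pixels. This is a standard total-variation regularizer that penalizes high-frequency noise in the generated output.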
Project: deep-style-transfer | Author: albertlai
def run_epoch(self, session, train_op, train_writer, batch_gen=None, num_iterations=NUM_ITERATIONS, output_dir="output", write_image=False):
        epoch_size = num_iterations
        start_time = time.time()
        image_skip = 1 if epoch_size < 5 else epoch_size / 5
        summary_skip = 1 if epoch_size < 25 else epoch_size / 25
        for step in range(epoch_size):
            if self.model_name == MULTISCALE:
                feed = self.add_noise_to_feed({})
            else:
                feed = {}
            batch = batch_gen.get_batch()
            feed[self.image] = batch
            if self.is_training:
                ops = [train_op, self.loss, self.merged, self.image_summary, self.input_summary, self.generator.out, self.global_step]
                _, loss, summary, image_summary, input_summary, last_out, global_step = session.run(ops, feed_dict=feed)
                if write_image and step % image_skip == 0:
                    utils.write_image(os.path.join('%s/images/valid_%d.png' % (output_dir, step)), last_out)
                if train_writer is not None:
                    if step % summary_skip == 0:
                        train_writer.add_summary(summary, global_step)
                        train_writer.flush()
                    if step % image_skip == 0:
                        train_writer.add_summary(input_summary)
                        train_writer.flush()
                        train_writer.add_summary(image_summary)
                        train_writer.flush()
            else:
                ops = self.generator.out
                last_out = session.run(ops, feed_dict=feed)
                loss = summary = image_summary = input_summary = global_step = None
        return loss, summary, image_summary, last_out, global_step