Python utils module: save_images() source code examples

We extracted the following 7 code examples from open-source Python projects to show how utils.save_images() is used.
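All of the calls below tile a batch of images into a grid and write a single PNG. As a reference point, here is a minimal sketch of the DCGAN-style save_images(images, size, image_path) helper that the ICGan-tensorflow snippets appear to rely on; the merge() helper, the use of imageio, and the assumption that inputs are NHWC arrays scaled to [-1, 1] are ours for illustration, not the project's actual utils implementation.

import numpy as np
import imageio


def merge(images, size):
    """Tile a batch of images (N, H, W, C) into a size[0] x size[1] grid."""
    h, w = images.shape[1], images.shape[2]
    grid = np.zeros((h * size[0], w * size[1], images.shape[3]), dtype=np.float32)
    for idx, image in enumerate(images):
        col = idx % size[1]
        row = idx // size[1]
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
    return grid


def save_images(images, size, image_path):
    """Assume GAN outputs in [-1, 1]; rescale to [0, 255] and write one PNG."""
    grid = merge((np.asarray(images) + 1.0) * 127.5, size)
    if grid.shape[-1] == 1:  # drop the channel axis for grayscale images (e.g. MNIST)
        grid = grid[..., 0]
    imageio.imwrite(image_path, grid.astype(np.uint8))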

Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def test(self):

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            self.saver_z.restore(sess, self.encode_z_model)
            self.saver_y.restore(sess, self.encode_y_model)

            realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50,
                                                        self.batch_size)

            output_image, label_y = sess.run([self.fake_images, self.e_y], feed_dict={self.images: realbatch_array})

            #one-hot
            #label_y = tf.arg_max(label_y, 1)

            print(label_y)

            save_images(output_image, [8, 8], './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
            save_images(realbatch_array, [8, 8], './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))

            gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
            real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)


            cv2.imshow("test_EGan", gen_img)
            cv2.imshow("Real_Image", real_img)

            cv2.waitKey(-1)

            print("Test finish!")
Project: gated-pixel-cnn    Author: jakebelew    | project source | file source
def train(dataset, network, stat, sample_dir):
  initial_step = stat.get_t()
  logger.info("Training starts on epoch {}".format(initial_step))

  train_step_per_epoch = dataset.train.num_examples / conf.batch_size
  test_step_per_epoch = dataset.test.num_examples / conf.batch_size          

  for epoch in range(initial_step, conf.max_epoch):
    start_time = time.time()

    # 1. train
    total_train_costs = []        
    for _ in xrange(train_step_per_epoch):
      images = dataset.train.next_batch(conf.batch_size)
      cost = network.test(images, with_update=True)
      total_train_costs.append(cost)

    # 2. test        
    total_test_costs = []
    for _ in xrange(test_step_per_epoch):          
      images = dataset.test.next_batch(conf.batch_size)          
      cost = network.test(images, with_update=False)
      total_test_costs.append(cost)

    avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)
    stat.on_step(avg_train_cost, avg_test_cost)

    # 3. generate samples
    images, _ = dataset.test.next_batch(conf.batch_size)
    samples, occluded = generate_from_occluded(network, images)
    util.save_images(np.concatenate((occluded, samples), axis=2),
                     dataset.height, dataset.width * 2, conf.num_generated_images, 1,
                     directory=sample_dir, prefix="epoch_%s" % epoch)

    logger.info("Epoch {}: {:.2f} seconds, avg train cost: {:.3f}, avg test cost: {:.3f}"
                .format(epoch,(time.time() - start_time), avg_train_cost, avg_test_cost))
Project: gated-pixel-cnn    Author: jakebelew    | project source | file source
def generate(network, height, width, sample_dir):
      logger.info("Image generation starts")
      samples = network.generate()
      util.save_images(samples, height, width, 10, 10, directory=sample_dir)
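The gated-pixel-cnn and ERL snippets use a different, positional signature: util.save_images(images, height, width, n_row, n_col, directory=..., prefix=...). Below is a rough sketch consistent with those calls; the filename pattern, the [0, 1] pixel-range assumption, and the grayscale NHWC layout are assumptions for illustration, not the project's actual util code.

import os
import numpy as np
import imageio


def save_images(images, height, width, n_row, n_col, directory=".", prefix="sample"):
    """Write the first n_row * n_col samples as a single n_row x n_col grid."""
    images = np.asarray(images)[: n_row * n_col]
    # The reshape also folds away a trailing channel axis of 1 (grayscale samples).
    grid = images.reshape(n_row, n_col, height, width)
    grid = grid.transpose(0, 2, 1, 3).reshape(n_row * height, n_col * width)
    grid = np.clip(grid * 255.0, 0, 255).astype(np.uint8)  # assume pixel values in [0, 1]
    path = os.path.join(directory, "%s.png" % prefix)
    imageio.imwrite(path, grid)
    return path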
Project: ERL    Author: NoListen    | project source | file source
def train(env, network, stat, sample_dir):
    initial_step = stat.get_t()
    logger.info("Training starts on epoch {}".format(initial_step))

    train_step_per_epoch = 100
    test_step_per_epoch = 10
    action_n = env.action_space.n
    for epoch in range(initial_step, conf.max_epoch):
        start_time = time.time()

        # 1. train
        total_train_costs = []
        for _ in tqdm(xrange(train_step_per_epoch)):
            images = collect_samples(conf.batch_size, env, action_n)
            cost = network.test(images, with_update=True)
            total_train_costs.append(cost)

        # 2. test
        total_test_costs = []
        for _ in tqdm(xrange(test_step_per_epoch)):
            images = collect_samples(conf.batch_size, env, action_n)
            cost = network.test(images, with_update=False)
            total_test_costs.append(cost)

        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)
        stat.on_step(avg_train_cost, avg_test_cost)

        # 3. generate samples
        images, _ = collect_samples(conf.batch_size, env, action_n)
        samples, occluded = generate_from_occluded(network, images)
        util.save_images(np.concatenate((occluded, samples), axis=2),
                         42, 42 * 2, conf.num_generated_images, 1,
                         directory=sample_dir, prefix="epoch_%s" % epoch)

        logger.info("Epoch {}: {:.2f} seconds, avg train cost: {:.3f}, avg test cost: {:.3f}"
                    .format(epoch, (time.time() - start_time), avg_train_cost, avg_test_cost))
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def train(self):

        opti_D = tf.train.AdamOptimizer(learning_rate=self.learning_rate_dis, beta1=0.5).minimize(self.loss , var_list=self.d_vars)
        opti_G = tf.train.AdamOptimizer(learning_rate=self.learning_rate_gen, beta1=0.5).minimize(self.G_fake_loss, var_list=self.g_vars)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            #self.saver.restore(sess , self.model_path)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = np.random.randint(0, 100)
                rand = 0

                while batch_num < len(self.ds_train)/self.batch_size:

                    step = step + 1
                    realbatch_array, real_y = MnistData.getNextBatch(self.ds_train, self.label_y, rand, batch_num,self.batch_size)

                    batch_z = np.random.normal(0, 1 , size=[self.batch_size, self.sample_size])

                    #optimization D
                    _,summary_str = sess.run([opti_D, summary_op], feed_dict={self.images:realbatch_array, self.z: batch_z, self.y:real_y})
                    summary_writer.add_summary(summary_str , step)
                    #optimizaiton G
                    _,summary_str = sess.run([opti_G, summary_op], feed_dict={self.images:realbatch_array, self.z: batch_z, self.y:real_y})
                    summary_writer.add_summary(summary_str , step)
                    batch_num += 1

                    if step % 1 == 0:

                        D_loss = sess.run(self.loss, feed_dict={self.images:realbatch_array, self.z: batch_z, self.y:real_y})
                        fake_loss = sess.run(self.G_fake_loss, feed_dict={self.z : batch_z, self.y:real_y})
                        print("EPOCH %d step %d: D: loss = %.7f G: loss=%.7f " % (e, step , D_loss, fake_loss))

                    if np.mod(step, 50) == 1:

                        sample_images = sess.run(self.fake_images, feed_dict={self.z: batch_z, self.y: sample_label()})
                        save_images(sample_images[0:64], [8, 8], './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        #Save the model
                        self.saver.save(sess, self.model_path)

                e += 1
                batch_num = 0

            save_path = self.saver.save(sess , self.model_path)
            print("Model saved in file: %s" % save_path)
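The sample_label() helper fed to self.y above is not shown on this page. A hypothetical stand-in that matches the 8 x 8 grid passed to save_images() would return 64 one-hot MNIST labels cycling through the digits 0-9; the real ICGan-tensorflow helper may size or order them differently.

import numpy as np


def sample_label(num=64, num_classes=10):
    """Hypothetical: one-hot labels 0, 1, ..., 9, 0, 1, ... for a 64-image sample grid."""
    labels = np.zeros((num, num_classes), dtype=np.float32)
    labels[np.arange(num), np.arange(num) % num_classes] = 1.0
    return labels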
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def train_ez(self):

        opti_EZ = tf.train.AdamOptimizer(learning_rate = 0.01, beta1 = 0.5).minimize(self.loss_z,
                                                                                      var_list=self.enz_vars)
        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)
            #summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            self.saver.restore(sess , self.model_path)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = np.random.randint(0, 100)
                rand = 0

                while batch_num < len(self.ds_train) / self.batch_size:

                    step = step + 1

                    _,label_y = MnistData.getNextBatch(self.ds_train, self.label_y, rand, batch_num,
                                                                     self.batch_size)
                    batch_z = np.random.normal(0, 1, size=[self.batch_size, self.sample_size])

                    # optimization E
                    sess.run(opti_EZ, feed_dict={self.y: label_y,self.z: batch_z})
                    batch_num += 1

                    if step % 10 == 0:

                        ez_loss = sess.run(self.loss_z, feed_dict={self.y: label_y,self.z: batch_z})
                        #summary_writer.add_summary(ez_loss, step)
                        print("EPOCH %d step %d EZ loss %.7f" % (e, step, ez_loss))

                    if np.mod(step, 50) == 0:

                        # sample_images = sess.run(self.fake_images, feed_dict={self.e_y:})
                        # save_images(sample_images[0:64], [8, 8],
                        #             './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        self.saver_z.save(sess, self.encode_z_model)

                e += 1
                batch_num = 0

            save_path = self.saver_z.save(sess, self.encode_z_model)
            print("Model saved in file: %s" % save_path)
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def train_ey(self):

        opti_EY = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5).minimize(self.loss_y,
                                                                                 var_list=self.eny_vars)
        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)
            # summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = np.random.randint(0, 100)
                rand = 0

                while batch_num < len(self.ds_train) / self.batch_size:

                    step = step + 1

                    realbatch_image, label_y = MnistData.getNextBatch(self.ds_train, self.label_y, rand, batch_num,
                                                        self.batch_size)
                    #batch_z = np.random.normal(0, 1, size=[self.batch_size, self.sample_size])

                    # optimization E
                    sess.run(opti_EY, feed_dict={self.y: label_y, self.images: realbatch_image})
                    batch_num += 1

                    if step % 10 == 0:

                        ey_loss = sess.run(self.loss_y, feed_dict={self.y: label_y, self.images:realbatch_image})
                        #summary_writer.add_summary(ez_loss, step)
                        print("EPOCH %d step %d EY loss %.7f" % (e, step, ey_loss))

                    if np.mod(step, 50) == 0:
                        # sample_images = sess.run(self.fake_images, feed_dict={self.e_y:})
                        # save_images(sample_images[0:64], [8, 8],
                        #             './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        self.saver_y.save(sess, self.encode_y_model)

                e += 1
                batch_num = 0

            save_path = self.saver_y.save(sess, self.encode_y_model)
            print("Encode Y Model saved in file: %s" % save_path)
