Python model module: Generator() example source code

We extracted the following 6 code examples from open-source Python projects to illustrate how to use model.Generator().
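
For reference, in each of these projects Generator is a network module defined in the project's own model.py, so the exact architecture differs from example to example. A minimal DCGAN-style PyTorch generator taking the same constructor arguments as the first example below (z_dim, image_size, conv_dim) might look like this sketch; it is a hypothetical illustration, not code taken from any of the listed projects.

import torch
import torch.nn as nn

class Generator(nn.Module):
    """Hypothetical DCGAN-style generator: noise vector -> 64x64 RGB image."""
    def __init__(self, z_dim=100, image_size=64, conv_dim=64):
        super(Generator, self).__init__()
        assert image_size == 64, "this sketch is hard-wired to 64x64 outputs"
        self.z_dim = z_dim
        self.net = nn.Sequential(
            nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False),        # 1x1   -> 4x4
            nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True),
            nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), # 4x4   -> 8x8
            nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True),
            nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), # 8x8   -> 16x16
            nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True),
            nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False),     # 16x16 -> 32x32
            nn.BatchNorm2d(conv_dim), nn.ReLU(True),
            nn.ConvTranspose2d(conv_dim, 3, 4, 2, 1, bias=False),                # 32x32 -> 64x64
            nn.Tanh(),                                                           # outputs in [-1, 1]
        )

    def forward(self, z):
        # z: (batch, z_dim) noise, reshaped to a 1x1 spatial map for the transposed convolutions
        return self.net(z.view(z.size(0), self.z_dim, 1, 1))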

Project: pytorch-tutorial | Author: yunjey
def build_model(self):
        """Build generator and discriminator."""
        self.generator = Generator(z_dim=self.z_dim,
                                   image_size=self.image_size,
                                   conv_dim=self.g_conv_dim)
        self.discriminator = Discriminator(image_size=self.image_size,
                                           conv_dim=self.d_conv_dim)
        self.g_optimizer = optim.Adam(self.generator.parameters(),
                                      self.lr, [self.beta1, self.beta2])
        self.d_optimizer = optim.Adam(self.discriminator.parameters(),
                                      self.lr, [self.beta1, self.beta2])

        if torch.cuda.is_available():
            self.generator.cuda()
            self.discriminator.cuda()
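
One way the modules and optimizers created by build_model() might be driven in a single vanilla-GAN training step is sketched below. real_images, the binary-cross-entropy losses, and the assumption that the discriminator returns raw logits are illustrative guesses and not part of the original project; the method is meant to live on the same solver class.

import torch
import torch.nn.functional as F

def train_step(self, real_images):
    """Hypothetical: one discriminator update followed by one generator update."""
    z = torch.randn(real_images.size(0), self.z_dim)
    if torch.cuda.is_available():
        real_images, z = real_images.cuda(), z.cuda()

    # Discriminator: push scores on real images toward 1 and on fakes toward 0.
    fake_images = self.generator(z)
    d_real = self.discriminator(real_images)
    d_fake = self.discriminator(fake_images.detach())
    d_loss = (F.binary_cross_entropy_with_logits(d_real, torch.ones_like(d_real))
              + F.binary_cross_entropy_with_logits(d_fake, torch.zeros_like(d_fake)))
    self.d_optimizer.zero_grad()
    d_loss.backward()
    self.d_optimizer.step()

    # Generator: push the discriminator's score on fakes toward 1.
    g_loss = F.binary_cross_entropy_with_logits(self.discriminator(fake_images),
                                                torch.ones_like(d_fake))
    self.g_optimizer.zero_grad()
    g_loss.backward()
    self.g_optimizer.step()
    return d_loss.item(), g_loss.item()
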
Project: DeepLearning | Author: Wanwannodao
def __init__(self, input_shape):
        self.batch_size = input_shape[0]

        self.D = Discriminator(self.batch_size)
        self.G = Generator(self.batch_size)


        self.X = tf.placeholder(shape=input_shape, dtype=tf.float32, name="X")

        self.gen_img = self.G()

        # Least-squares GAN (LSGAN) objectives: real samples -> 1, generated samples -> 0.
        self.g_loss = 0.5 * tf.reduce_mean((self.D(self.G(reuse=True)) - 1.0) ** 2)
        self.d_loss = 0.5 * (tf.reduce_mean((self.D(self.X, reuse=True) - 1.0) ** 2)
                             + tf.reduce_mean(self.D(self.G(reuse=True), reuse=True) ** 2))

        g_opt = tf.train.AdamOptimizer(learning_rate=4e-3,beta1=0.5)
        d_opt = tf.train.AdamOptimizer(learning_rate=1e-3,beta1=0.5)

        g_grads_and_vars = g_opt.compute_gradients(self.g_loss)
        d_grads_and_vars = d_opt.compute_gradients(self.d_loss)

        # Keep only the gradients that belong to the generator ("G") and
        # discriminator ("D") variable scopes, respectively.
        g_grads_and_vars = [[grad, var] for grad, var in g_grads_and_vars
                            if grad is not None and var.name.startswith("G")]
        d_grads_and_vars = [[grad, var] for grad, var in d_grads_and_vars
                            if grad is not None and var.name.startswith("D")]

        self.g_train_op = g_opt.apply_gradients(g_grads_and_vars)
        self.d_train_op = d_opt.apply_gradients(d_grads_and_vars)
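
A minimal driver loop for the least-squares GAN defined above might look like the following sketch; model (an instance of the class above), next_batch, batch_size, and the step count are assumed names used only for illustration.

import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        real_batch = next_batch(batch_size)   # real images with the same shape as input_shape
        # Alternate one discriminator update and one generator update.
        _, d_loss = sess.run([model.d_train_op, model.d_loss],
                             feed_dict={model.X: real_batch})
        _, g_loss = sess.run([model.g_train_op, model.g_loss])   # g_loss does not depend on X
        if step % 100 == 0:
            print("step %d  d_loss %.4f  g_loss %.4f" % (step, d_loss, g_loss))
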
Project: DeepLearning | Author: Wanwannodao
def __init__(self, batch_size):
        self.C = Critic(batch_size)
        self.G = Generator(batch_size)

        self.X = tf.placeholder(shape=[None, 28, 28, 1], dtype=tf.float32, name="X")
        self.p = tf.placeholder(tf.float32, name="p")

        self.gen_img = self.G()

        g_logits = self.C(self.gen_img, self.p)

        # Wasserstein objectives: the critic separates real from generated samples,
        # the generator tries to raise the critic's score on generated samples.
        self.g_loss = -tf.reduce_mean(g_logits)
        self.c_loss = tf.reduce_mean(-self.C(self.X, self.p, reuse=True) + g_logits)
        #self.g_loss = tf.reduce_mean(tf.reduce_sum(g_logits, axis=1))
        #self.c_loss = tf.reduce_mean(tf.reduce_sum(-self.C(self.X, self.p, reuse=True) + g_logits, axis=1))

        c_opt = tf.train.RMSPropOptimizer(learning_rate=5e-5)
        g_opt = tf.train.RMSPropOptimizer(learning_rate=5e-5)

        c_grads_and_vars = c_opt.compute_gradients(self.c_loss)
        g_grads_and_vars = g_opt.compute_gradients(self.g_loss)

        # The same gradient filtering, restricted to the critic ("C") and generator ("G") scopes.
        c_grads_and_vars = [[grad, var] for grad, var in c_grads_and_vars
                            if grad is not None and var.name.startswith("C")]
        g_grads_and_vars = [[grad, var] for grad, var in g_grads_and_vars
                            if grad is not None and var.name.startswith("G")]

        self.c_train_op = c_opt.apply_gradients(c_grads_and_vars)
        self.g_train_op = g_opt.apply_gradients(g_grads_and_vars)

        # Clipping the critic's weights to [-0.01, 0.01] (approximately) enforces the
        # Lipschitz constraint required by the Wasserstein formulation.
        self.w_clip = [var.assign(tf.clip_by_value(var, -0.01, 0.01))
                       for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="C")]
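
The class above implements a WGAN critic with weight clipping, so a typical driver performs several critic updates (each followed by running w_clip) per generator update. The sketch below is hypothetical; model, next_batch, batch_size, and the keep probability fed to p are assumptions for illustration.

import tensorflow as tf

n_critic = 5
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        for _ in range(n_critic):
            real_batch = next_batch(batch_size)   # shape (batch, 28, 28, 1)
            sess.run(model.c_train_op, feed_dict={model.X: real_batch, model.p: 0.5})
            sess.run(model.w_clip)                # clip critic weights to [-0.01, 0.01]
        sess.run(model.g_train_op, feed_dict={model.p: 0.5})
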
Project: samplernn-pytorch | Author: deepsound-project
def register(self, trainer):
        self.generate = Generator(trainer.model.model, trainer.cuda)
Project: chainer-pix2pix | Author: wuhuikai
def main():
    parser = argparse.ArgumentParser(description='pix2pix --- GAN for Image to Image translation')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--load_size', type=int, default=256, help='Scale image to load_size')
    parser.add_argument('--g_filter_num', type=int, default=64, help="# of filters in G's 1st conv layer")
    parser.add_argument('--d_filter_num', type=int, default=64, help="# of filters in D's 1st conv layer")
    parser.add_argument('--output_channel', type=int, default=3, help='# of output image channels')
    parser.add_argument('--n_layers', type=int, default=3, help='# of hidden layers in D')
    parser.add_argument('--list_path', default='list/val_list.txt', help='Path for test list')
    parser.add_argument('--out', default='result/test', help='Directory to output the result')
    parser.add_argument('--G_path', default='result/G.npz', help='Path for pretrained G')
    args = parser.parse_args()

    if not os.path.isdir(args.out):
        os.makedirs(args.out)

    # Set up GAN G
    G = Generator(args.g_filter_num, args.output_channel)
    serializers.load_npz(args.G_path, G)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        G.to_gpu()                               # Copy the model to the GPU

    with open(args.list_path) as f:
        imgs = f.readlines()

    total = len(imgs)
    for idx, img_path in enumerate(imgs):
        print('{}/{} ...'.format(idx+1, total))

        img_path = img_path.strip().split(' ')[-1]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)[:, :, ::-1]  # BGR -> RGB
        h, w, _ = img.shape
        img = np.asarray(Image.fromarray(img).resize((args.load_size, args.load_size), resample=Image.NEAREST), dtype=np.float32)
        img = np.transpose(img, (2, 0, 1))

        A = data_process([img], device=args.gpu, volatile='on')
        B = np.squeeze(output2img(G(A, test=True, dropout=False)))

        out_path = os.path.join(args.out, os.path.basename(img_path).replace('gtFine_labelIds', 'leftImg8bit'))
        Image.fromarray(B).resize((w, h), resample=Image.BILINEAR).save(out_path)
Project: Conditional-GAN | Author: m516825
def train(self):
        batch_num = self.data.length//self.FLAGS.batch_size if self.data.length%self.FLAGS.batch_size==0 else self.data.length//self.FLAGS.batch_size + 1

        print("Start training WGAN...\n")

        for t in range(self.FLAGS.iter):

            d_cost = 0
            g_cost = 0

            for d_ep in range(self.d_epoch):

                img, tags, _, w_img, w_tags = self.data.next_data_batch(self.FLAGS.batch_size)
                z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)

                feed_dict = {
                    self.seq:tags,
                    self.img:img,
                    self.z:z,
                    self.w_seq:w_tags,
                    self.w_img:w_img
                }

                _, loss = self.sess.run([self.d_updates, self.d_loss], feed_dict=feed_dict)

                d_cost += loss/self.d_epoch

            z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)
            feed_dict = {
                self.img:img,
                self.w_seq:w_tags,
                self.w_img:w_img,
                self.seq:tags,
                self.z:z
            }

            _, loss, step = self.sess.run([self.g_updates, self.g_loss, self.global_step], feed_dict=feed_dict)

            current_step = tf.train.global_step(self.sess, self.global_step)

            g_cost = loss

            if current_step % self.FLAGS.display_every == 0:
                print("Epoch {}, Current_step {}".format(self.data.epoch, current_step))
                print("Discriminator loss :{}".format(d_cost))
                print("Generator loss     :{}".format(g_cost))
                print("---------------------------------")

            if current_step % self.FLAGS.checkpoint_every == 0:
                path = self.saver.save(self.sess, self.checkpoint_prefix, global_step=current_step)
                print ("\nSaved model checkpoint to {}\n".format(path))

            if current_step % self.FLAGS.dump_every == 0:
                self.eval(current_step)
                print("Dump test image")