Python generator module: Generator() example source code

The following 20 code examples, extracted from open-source Python projects, illustrate how to use generator.Generator().
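Before the project excerpts, a minimal self-contained toy (invented here, not taken from any project below) illustrates the pattern the examples share: construct a Generator from some configuration, then call a generate-style method. The class body, vocab, and seq_len are assumptions for illustration only.

import random

# Toy sketch only; each project below defines its own generator.Generator
# with a different constructor signature, but all follow this
# construct-then-generate shape.
class Generator:
    def __init__(self, vocab, seq_len=5):
        self.vocab = vocab        # characters to sample from
        self.seq_len = seq_len    # length of each generated string

    def generate(self):
        # Sample seq_len characters uniformly at random from the vocabulary.
        return ''.join(random.choice(self.vocab) for _ in range(self.seq_len))

gen = Generator(vocab='abcde', seq_len=8)
print(gen.generate())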

Project: CharacterGAN | Author: liamb315
def sample_generator(args, num_samples = 10):
    with open(os.path.join(args.save_dir_GAN, 'config.pkl')) as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir_GAN, 'real_beer_vocab.pkl')) as f:
        chars, vocab = cPickle.load(f)
    generator = Generator(saved_args, is_training = False, batch = True)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # for i in range(num_samples):
            #     print 'Review',i,':', generator.generate(sess, chars, vocab, args.n, args.prime), '\n'

            return generator.generate_batch(sess, saved_args, chars, vocab)
Project: CharacterGAN | Author: liamb315
def generate_samples(generator, args, sess, num_samples=500):
    '''Generate samples from the current version of the GAN'''
    samples = []

    with open(os.path.join(args.save_dir_GAN, 'config.pkl')) as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir_GAN, args.vocab_file)) as f:
        chars, vocab = cPickle.load(f)

    logging.debug('Loading GAN parameters to Generator...')
    gen_vars = [v for v in tf.all_variables() if v.name.startswith('sampler/')]
    gen_dict = {}
    for v in gen_vars:
        # Key:    op.name in GAN Checkpoint file
        # Value:  Local generator Variable 
        gen_dict[v.op.name.replace('sampler/','')] = v
    gen_saver = tf.train.Saver(gen_dict)
    ckpt = tf.train.get_checkpoint_state(args.save_dir_GAN)
    if ckpt and ckpt.model_checkpoint_path:
        gen_saver.restore(sess, ckpt.model_checkpoint_path)

    for _ in xrange(num_samples / args.batch_size):
        samples.append(generator.generate_samples(sess, saved_args, chars, vocab, args.n))
    return samples
Project: enjoliver | Author: JulienBalestra
def _init_discovery(self):
        if EC.extra_selectors:
            extra_selectors = "&".join(["%s=%s" % (k, v) for k, v in EC.extra_selectors.items()])
        else:
            extra_selectors = ""
        extra_md = {
            "etc_hosts": EC.etc_hosts,
            "extra_selectors": extra_selectors,
            "coreos_install_base_url": EC.coreos_install_base_url,
        }
        if EC.lldp_image_url:
            logger.debug("adding lldp_image_url: %s" % EC.lldp_image_url)
            extra_md.update({"lldp_image_url": EC.lldp_image_url})
        gen = generator.Generator(
            api_uri=self.api_uri,
            profile_id="discovery",
            name="discovery",
            ignition_id="%s.yaml" % self.ignition_dict["discovery"],
            matchbox_path=self.matchbox_path,
            extra_metadata=extra_md,
            pxe_redirect=True
        )
        gen.dumps()
Project: TFGPy | Author: tomkren
def test_d2(self):
        return  # early return disables this test
        for goal, gamma, max_k in [d_general_even_parity()]:  # d1(), d2(), d3()
            g = Generator(gamma, normalizator=normalization.Normalizator)

            for k in range(1, max_k + 1):
                g_num = g.get_num(k, goal)
                print(g_num)
Project: TFGPy | Author: tomkren
def check_skeletons(tester):
    for goal, gamma, max_k in [d1(), d2(), d3()]:
        log('goal:', goal)
        # gamma.add_internal_pair()  # TODO: remove entirely once this works
        g = Generator(gamma)
        for k in range(1, max_k+1):
            log(' k:', k)
            check_successors(tester, g, k, goal)
Project: TFGPy | Author: tomkren
def separate_error_404():
    # seed = random.randint(0, sys.maxsize)
    seed = 7669612278400467845
    random.seed(seed)
    print(seed)

    goal, gamma, max_k = d3()
    gene = Generator(gamma)
    hax_k = 3
    hax_typ = parse_typ(('_P_', 4, (5, '->', (6, '->', 7))))
    hax_tree = gene.gen_one(hax_k, hax_typ)
    print(hax_tree.typ)
Project: TFGPy | Author: tomkren
def separate_error_404_sub():

    goal, gamma, max_k = d3()
    gene = Generator(gamma)
    k = 1
    n = 4
    typ = parse_typ((1, '->', (2, '->', 3)))
    tree = gene.subs(k, typ, n)
    print(tree.typ)
Project: TFGPy | Author: tomkren
def run_gen_basic(domain_raw, size, verbose=False):
    goal, gamma, raw_fitness, count_evals, cache = domain_raw()
    gen = generator.Generator(gamma)
    random.seed(5)
    indiv = gen.gen_one(size, goal)
    assert indiv is not None
    istr = indiv.eval_str()
    ifit = raw_fitness(indiv)
    if verbose:
        print(istr)
        print(ifit)
Project: bibcat | Author: KnowledgeLinks
def run(self):
        """Runs Collection Generator"""
        result = requests.post(self.triplestore_url,
                data={"query": GET_AVAILABLE_COLLECTIONS,
                      "format": "json"})
        if result.status_code > 399:
            raise WorkError("Failed to run sparql query")

        bindings = result.json().get('results').get('bindings')
        for row in bindings:
            instance_uri = rdflib.URIRef(row.get('instance').get('value'))
            org_uri = rdflib.URIRef(row.get('org').get('value'))
            item_uri = rdflib.URIRef(row.get('item').get('value'))
            label = rdflib.Literal(row.get('label').get('value'))
            #! Should check for language in label
            collections = self.__handle_collections__(
                instance=instance_uri, 
                item=item_uri,
                organization=org_uri, 
                rdfs_label=label)
            # Now remove existing BNode's properties from the BF Instance
            delete_result = requests.post(
                self.triplestore_url,
                data=DELETE_COLLECTION_BNODE.format(instance_uri),
                headers={"Content-Type": "application/sparql-update"})
            if delete_result.status_code > 399:
                raise WorkError("Cannot Delete Collection blank nodes for {}\n{}".format(
                    instance_uri, delete_result.text))
Project: LSTM-Generative-and-Discriminative | Author: mattweidman
def test_generator():
    g = Generator(10, list_of_chars)
    seq_len = 150
    num_examples = 10
    chr_seqs = g.generate(seq_len, num_examples)
    for seq in chr_seqs:
        print(seq)
Project: LSTM-Generative-and-Discriminative | Author: mattweidman
def test_discriminator():

    # parameters
    file_name = "animals.txt"
    genr_hidden_size = 10
    disr_hidden_size = 11
    num_epochs = 20
    lr = 1
    alpha = 0.9
    batch_size = 100

    # load data
    char_list = dataloader.get_char_list(file_name)
    X_actual = dataloader.load_data(file_name)
    num_examples = X_actual.shape[0]
    seq_len = X_actual.shape[1]

    # generate
    genr = Generator(genr_hidden_size, char_list)
    X_generated = genr.generate_tensor(seq_len, num_examples)

    # train discriminator
    disr = Discriminator(len(char_list), disr_hidden_size)
    disr.train_RMS(X_actual, X_generated, num_epochs, lr, alpha, batch_size,
        print_progress=True)

    # print discriminator output
    outp = disr.discriminate(np.concatenate((X_actual, X_generated), axis=0))
    print(outp)

    # evaluate discriminator
    accuracy = disr.accuracy(X_actual, X_generated)
    print("accuracy: ", accuracy)
Project: LSTM-Generative-and-Discriminative | Author: mattweidman
def test_generator_training():

    # parameters
    file_name = "animals.txt"
    genr_hidden_size = 10
    disr_hidden_size = 3
    num_epochs_d = 20
    num_epochs_g = 20
    lr = 1
    alpha = 0.9
    batch_size = 100

    # load data
    char_list = dataloader.get_char_list(file_name)
    X_actual = dataloader.load_data(file_name)
    num_examples = X_actual.shape[0]
    seq_len = X_actual.shape[1]

    # generate
    genr_input = np.random.randn(num_examples, len(char_list))
    genr = Generator(genr_hidden_size, char_list)
    X_generated = genr.generate_tensor(seq_len, num_examples, genr_input)

    # train discriminator
    disr = Discriminator(len(char_list), disr_hidden_size)
    disr.train_RMS(X_actual, X_generated, num_epochs_d, lr, alpha, batch_size)

    # evaluate discriminator
    accuracy = disr.accuracy(X_actual, X_generated)
    print("accuracy: ", accuracy)

    # train generator
    genr.train_RMS(genr_input, seq_len, disr, num_epochs_g, 1, lr, alpha,
        batch_size, print_progress=True)

    # evaluate discriminator again
    X_generated = genr.generate_tensor(seq_len, num_examples, genr_input)
    accuracy = disr.accuracy(X_actual, X_generated)
    print("accuracy: ", accuracy)
Project: LSTM-Generative-and-Discriminative | Author: mattweidman
def __init__(self, g_hidden_size, d_hidden_size, char_list):
        self.char_list = char_list
        self.generator = Generator(g_hidden_size, char_list)
        self.discriminator = Discriminator(len(char_list), d_hidden_size)

    # X_actual: input data from the dataset (not generated)
    # n_epochs: total epochs to train the entire network
    # g_epochs: how long to train the generator each epoch
    # d_epochs: how long to train the discriminator each epoch
    # g_initial_lr, g_multiplier: generator RMSprop parameters
    # d_initial_lr, d_multiplier: discriminator RMSprop parameters
    # g_batch_size, d_batch_size: batch sizes for generator and discriminator
    # num_displayed: if print_progress is True, this is how many example words
    #   to display; set this to None to display all examples
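A hypothetical usage sketch for the wrapper above: only the constructor and the parameter comments appear in this excerpt, so the class name (GAN here) and the train(...) call shape are assumptions, and dataloader is the project's own module.

# Hypothetical sketch based on the parameter comments above; the class
# name GAN and the train(...) signature are assumptions, since the method
# body is not shown in this excerpt.
char_list = dataloader.get_char_list("animals.txt")
X_actual = dataloader.load_data("animals.txt")

gan = GAN(g_hidden_size=10, d_hidden_size=11, char_list=char_list)
gan.train(X_actual,
          n_epochs=5, g_epochs=20, d_epochs=20,
          g_initial_lr=1, g_multiplier=0.9,
          d_initial_lr=1, d_multiplier=0.9,
          g_batch_size=100, d_batch_size=100,
          num_displayed=10)  # None would display all examples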
Project: streetview | Author: ydnaandy123
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session(config=tf.ConfigProto(
              allow_soft_placement=True, log_device_placement=False)) as sess:
        if FLAGS.dataset == 'mnist':
            assert False  # the mnist dataset is not handled by this script
        dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    sample_size = 64,
                    z_dim = 8192,
                    d_label_smooth = .25,
                    generator_target_prob = .75 / 2.,
                    out_stddev = .075,
                    out_init_b = - .45,
                    image_shape=[FLAGS.image_width, FLAGS.image_width, 3],
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir,
                    sample_dir=FLAGS.sample_dir,
                    generator=Generator(),
                    train_func=train, discriminator_func=discriminator,
                    build_model_func=build_model, config=FLAGS,
                    devices=["gpu:0", "gpu:1", "gpu:2", "gpu:3"] #, "gpu:4"]
                    )

        if FLAGS.is_train:
            print "TRAINING"
            dcgan.train(FLAGS)
            print "DONE TRAINING"
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
Project: pix2pix | Author: takat0m0
def __init__(self,  z_dim, batch_size):
        self.batch_size = batch_size
        self.z_dim = z_dim

        # -- generator -----
        self.gen = Generator([64, 128, 256, 512, 512, 512, 512, 512],
                             [512, 512, 512, 512, 256, 128, 64],
                             256, 256, 3)

        # -- discriminator --
        self.disc = Discriminator([64, 128, 256, 512])

        # -- learning parms ---
        self.lr = 0.0002
        self.Lambda = 100.0
Project: TFGPy | Author: tomkren
def test_d(self):
        for goal, gamma, max_k in [d_general_even_parity(), d1(), d2(), d3()]:
            g = Generator(gamma, normalizator=normalization.NormalizatorNop)
            gnf = Generator(gamma, normalizator=normalization.Normalizator)
            gNC = Generator(gamma, normalizator=normalization.NormalizatorNop, cache=CacheNop)
            gnfNC = Generator(gamma, normalizator=normalization.Normalizator, cache=CacheNop)

            res = []
            for k in range(1, max_k + 1):
                # check static generator
                s_num = get_num(gamma, k, goal)
                s_trees = set(tr.tree for tr in ts(gamma, k, goal, 0))
                self.assertEqual(s_num, len(s_trees))
                for t in s_trees:
                    self.assertTrue(t.is_well_typed(gamma))

                # check generator
                g_num = g.get_num(k, goal)
                self.assertEqual(s_num, g_num)
                res.append(g_num)
                #print(g_num)

                # check generator in nf
                self.assertEqual(s_num, gnf.get_num(k, goal))
                for i in range(10):
                    t = gnf.gen_one(k, goal)
                    if s_num == 0:
                        self.assertIsNone(t)
                    else:
                        self.assertTrue(t.is_well_typed(gamma))

                # check generator without cache
                self.assertEqual(s_num, gNC.get_num(k, goal))

                # check generator in nf without cache
                self.assertEqual(s_num, gnfNC.get_num(k, goal))

            # second run should have the same results
            # but it should be much faster
            start = time.time()
            for k in range(1, max_k + 1):
                g_num = g.get_num(k, goal)
                self.assertEqual(res[k - 1], g_num)
            end = time.time()
            self.assertLess(end - start, REALLY_SHORT_TIME)
Project: TFGPy | Author: tomkren
def separate_error_bad_smart_expansion_2017_02_28():
    print('Separating error: bad_expansion_2017_02_28')
    problem_goal, problem_gamma, _ = d3()
    gene = Generator(problem_gamma)
    problem_k = 5
    skel_0 = UnfinishedLeaf(problem_goal)

    set_log_printing(True)

    def succ(sk, path=None, is_smart=True, goal_typ=None):
        t = time.time()
        if is_smart:
            next_sks = sk.successors_smart(gene, problem_k)
        else:
            next_sks = sk.successors(gene, problem_k, goal_typ)
        log_expansion(sk, next_sks, t)
        if not path:
            return next_sks
        else:
            i = path[0]
            path = path[1:]
            next_one = next_sks[i]
            print('  i=', i, 'selected:', next_one)
            return succ(next_one, path, is_smart, goal_typ) if path else next_one

    bug_path_1 = [0, 0, 0, 2, 0, 0]  # (((k (? ?)) ?) ?)
    bug_path_2 = [0, 0, 0, 2, 0, 0]

    skel = succ(skel_0, bug_path_1, False, problem_goal)
    print(skel)
    print()

    seed = 42
    random.seed(seed)
    print('seed:', seed)
    tree = gene.gen_one_uf(skel, problem_k, problem_goal)
    log(str(tree))
    log('is_well_typed:', tree.is_well_typed(gene.gamma))

    print()

    skel = succ(skel_0, bug_path_2)
    print(skel)
    print()
Project: CycleGAN-TensorFlow | Author: vanhuyz
def __init__(self,
               X_train_file='',
               Y_train_file='',
               batch_size=1,
               image_size=256,
               use_lsgan=True,
               norm='instance',
               lambda1=10.0,
               lambda2=10.0,
               learning_rate=2e-4,
               beta1=0.5,
               ngf=64
              ):
    """
    Args:
      X_train_file: string, X tfrecords file for training
      Y_train_file: string, Y tfrecords file for training
      batch_size: integer, batch size
      image_size: integer, image size
      lambda1: float, weight for forward cycle loss (X->Y->X)
      lambda2: float, weight for backward cycle loss (Y->X->Y)
      use_lsgan: boolean
      norm: 'instance' or 'batch'
      learning_rate: float, initial learning rate for Adam
      beta1: float, momentum term of Adam
      ngf: number of gen filters in first conv layer
    """
    self.lambda1 = lambda1
    self.lambda2 = lambda2
    self.use_lsgan = use_lsgan
    use_sigmoid = not use_lsgan
    self.batch_size = batch_size
    self.image_size = image_size
    self.learning_rate = learning_rate
    self.beta1 = beta1
    self.X_train_file = X_train_file
    self.Y_train_file = Y_train_file

    self.is_training = tf.placeholder_with_default(True, shape=[], name='is_training')

    self.G = Generator('G', self.is_training, ngf=ngf, norm=norm, image_size=image_size)
    self.D_Y = Discriminator('D_Y',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)
    self.F = Generator('F', self.is_training, norm=norm, image_size=image_size)
    self.D_X = Discriminator('D_X',
        self.is_training, norm=norm, use_sigmoid=use_sigmoid)

    self.fake_x = tf.placeholder(tf.float32,
        shape=[batch_size, image_size, image_size, 3])
    self.fake_y = tf.placeholder(tf.float32,
        shape=[batch_size, image_size, image_size, 3])
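A hedged instantiation sketch for the model above: the tfrecords paths are placeholders, and the graph built in __init__ would normally be driven by the project's training loop, which is omitted here.

# Instantiation sketch; the paths below are placeholders, not files from
# the project.
cycle_gan = CycleGAN(
    X_train_file='data/X.tfrecords',   # placeholder path
    Y_train_file='data/Y.tfrecords',   # placeholder path
    batch_size=1,
    image_size=256,
    use_lsgan=True,
    norm='instance')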
Project: dcgan-tfslim | Author: mqtlam
def __init__(self, sess, FLAGS):
        """Initialization.

        Args:
            sess: TensorFlow session
            FLAGS: flags object
        """
        # initialize variables
        self.sess = sess
        self.f = FLAGS

        # inputs: real (training) images
        images_shape = [self.f.output_size, self.f.output_size, self.f.c_dim]
        self.real_images = tf.placeholder(tf.float32,
            [None] + images_shape, name="real_images")

        # inputs: z (noise)
        self.z = tf.placeholder(tf.float32, [None, self.f.z_dim], name='z')

        # initialize models
        generator = Generator(FLAGS)
        discriminator = Discriminator(FLAGS)

        # generator network
        self.G = generator(self.z)
        # discriminator network for real images
        self.D_real, self.D_real_logits = discriminator(self.real_images)
        # discriminator network for fake images
        self.D_fake, self.D_fake_logits = discriminator(self.G, reuse=True)

        # losses
        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_real_logits,
                labels=tf.ones_like(self.D_real))
            )
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_fake_logits,
                labels=tf.zeros_like(self.D_fake))
            )
        self.d_loss = self.d_loss_real + self.d_loss_fake
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.D_fake_logits,
                labels=tf.ones_like(self.D_fake))
            )

        # create summaries
        self.__create_summaries()

        # organize variables
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if "d/" in var.name]
        self.g_vars = [var for var in t_vars if "g/" in var.name]

        # saver
        self.saver = tf.train.Saver()
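For reference, the losses assembled above are the standard sigmoid cross-entropy GAN objective: with all-ones labels on real images and all-zeros labels on generated images, d_loss minimizes -E[log D(x)] - E[log(1 - D(G(z)))], while g_loss trains the generator against all-ones labels on its own samples, i.e. the non-saturating objective -E[log D(G(z))].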
Project: CharacterGAN | Author: liamb315
def train_generator(args, load_recent=True):
    '''Train the generator via the classical approach'''
    logging.debug('Batcher...')
    batcher   = Batcher(args.data_dir, args.batch_size, args.seq_length)

    logging.debug('Vocabulary...')
    with open(os.path.join(args.save_dir_gen, 'config.pkl'), 'w') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.save_dir_gen, 'real_beer_vocab.pkl'), 'w') as f:
        cPickle.dump((batcher.chars, batcher.vocab), f)

    logging.debug('Creating generator...')
    generator = Generator(args, is_training = True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())

        if load_recent:
            ckpt = tf.train.get_checkpoint_state(args.save_dir_gen)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

        for epoch in xrange(args.num_epochs):
            # Anneal learning rate
            new_lr = args.learning_rate * (args.decay_rate ** epoch)
            sess.run(tf.assign(generator.lr, new_lr))
            batcher.reset_batch_pointer()
            state = generator.initial_state.eval()

            for batch in xrange(batcher.num_batches):
                start = time.time()
                x, y  = batcher.next_batch()
                feed  = {generator.input_data: x, generator.targets: y, generator.initial_state: state}
                # train_loss, state, _ = sess.run([generator.cost, generator.final_state, generator.train_op], feed)
                train_loss, _ = sess.run([generator.cost, generator.train_op], feed)
                end   = time.time()

                print '{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}' \
                    .format(epoch * batcher.num_batches + batch,
                        args.num_epochs * batcher.num_batches,
                        epoch, train_loss, end - start)

                if (epoch * batcher.num_batches + batch) % args.save_every == 0:
                    checkpoint_path = os.path.join(args.save_dir_gen, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step = epoch * batcher.num_batches + batch)
                    print 'Generator model saved to {}'.format(checkpoint_path)