Python tensorflow module: initialize_all_variables() code examples

From open-source Python projects, we extracted the following 50 code examples illustrating how to use tensorflow.initialize_all_variables(). Note that this op has been deprecated since TensorFlow 0.12 in favor of tf.global_variables_initializer(); several examples below handle both APIs.

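All of the snippets below target the TensorFlow 1.x graph API, where variables must be explicitly initialized in a session before they are read. As a point of reference, here is a minimal, self-contained sketch of that pattern (the variable w is purely illustrative); it prefers the modern tf.global_variables_initializer() and falls back to the deprecated op on very old TensorFlow, the same compatibility trick the cloudml-samples example below uses:

import tensorflow as tf

w = tf.Variable(tf.zeros([2, 2]), name="w")

# Prefer the modern initializer; fall back for TensorFlow < 0.12.
try:
    init_op = tf.global_variables_initializer()
except AttributeError:
    init_op = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init_op)  # run the init op before reading any variable
    print(sess.run(w))
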
Project: tf-image-interpreter | Author: ThoughtWorksInc
def test_vgg():
  vgg = Vgg16()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    for v in tf.get_collection(tf.GraphKeys.VARIABLES):
      print_op = tf.Print(v, [v], message=v.name, first_n=10)
      sess.run(print_op)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()

      print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))
Project: tf-image-interpreter | Author: ThoughtWorksInc
def main():
  roidb = RoiDb('val.txt', 2007)
  batch_gen = BatchGenerator(roidb)

  image_tensor = tf.placeholder(dtype=tf.float32)
  scale_tensor = tf.placeholder(dtype=tf.float32)
  bboxes_tensor = tf.placeholder(dtype=tf.float32)
  p_op = tf.Print(image_tensor, [tf.shape(image_tensor), scale_tensor, bboxes_tensor])

  sess = tf.Session()
  init = tf.initialize_all_variables()
  sess.run(init)

  coord = tf.train.Coordinator()
  queue_threads = queue_runner.start_queue_runners(sess, coord=coord)

  for i in range(10):
    if coord.should_stop():
      break
    image, scale, bboxes = batch_gen.next_batch()

    sess.run([p_op], feed_dict={image_tensor: image, scale_tensor: scale, bboxes_tensor:bboxes})

  coord.request_stop()
  coord.join(queue_threads)
Project: tf-image-interpreter | Author: ThoughtWorksInc
def test_rpn():
  vgg = Vgg16()
  rpn = RpnNet()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    rpn.build(vgg.conv5_3, None)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()
      feature_shape = tf.shape(rpn.rpn_cls_score_reshape)
      print_feat_shape = tf.Print(feature_shape, [feature_shape], summarize=5)
      sess.run(print_feat_shape, feed_dict={image_tensor: image})

      # print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))
Project: Seq2Seq-Tensorflow | Author: keon
def build_model(self):
        self.build_memory()

        self.W = tf.Variable(tf.random_normal([self.edim, self.nwords], stddev=self.init_std))
        z = tf.matmul(self.hid[-1], self.W)

        self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=self.target)

        self.lr = tf.Variable(self.current_lr)
        self.opt = tf.train.GradientDescentOptimizer(self.lr)

        params = [self.A, self.B, self.C, self.T_A, self.T_B, self.W]
        grads_and_vars = self.opt.compute_gradients(self.loss,params)
        clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], self.max_grad_norm), gv[1]) \
                                   for gv in grads_and_vars]

        inc = self.global_step.assign_add(1)
        with tf.control_dependencies([inc]):
            self.optim = self.opt.apply_gradients(clipped_grads_and_vars)

        tf.initialize_all_variables().run()
        self.saver = tf.train.Saver()
Project: how_to_convert_text_to_images | Author: llSourcell
def build_model(self, sess):
        self.init_opt()
        sess.run(tf.initialize_all_variables())

        if len(self.model_path) > 0:
            print("Reading model parameters from %s" % self.model_path)
            restore_vars = tf.all_variables()
            # all_vars = tf.all_variables()
            # restore_vars = [var for var in all_vars if
            #                 var.name.startswith('g_') or
            #                 var.name.startswith('d_')]
            saver = tf.train.Saver(restore_vars)
            saver.restore(sess, self.model_path)

            istart = self.model_path.rfind('_') + 1
            iend = self.model_path.rfind('.')
            counter = self.model_path[istart:iend]
            counter = int(counter)
        else:
            print("Created model with fresh parameters.")
            counter = 0
        return counter
Project: how_to_convert_text_to_images | Author: llSourcell
def build_model(self, sess):
        self.init_opt()

        sess.run(tf.initialize_all_variables())
        if len(self.model_path) > 0:
            print("Reading model parameters from %s" % self.model_path)
            all_vars = tf.trainable_variables()
            # all_vars = tf.all_variables()
            restore_vars = []
            for var in all_vars:
                if var.name.startswith('g_') or var.name.startswith('d_'):
                    restore_vars.append(var)
                    # print(var.name)
            saver = tf.train.Saver(restore_vars)
            saver.restore(sess, self.model_path)

            istart = self.model_path.rfind('_') + 1
            iend = self.model_path.rfind('.')
            counter = self.model_path[istart:iend]
            counter = int(counter)
        else:
            print("Created model with fresh parameters.")
            counter = 0
        return counter
Project: lm | Author: rafaljozefowicz
def test_lm(self):
        hps = get_test_hparams()

        with tf.variable_scope("model"):
            model = LM(hps)

        with self.test_session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()

            loss = 1e5
            for i in range(50):
                x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
                loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
                print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
                if np.isnan(loss):
                    print("NaN detected")
                    break

            self.assertLess(loss, 1.0)
Project: Learning-to-navigate-without-a-map | Author: ToniRV
def __init__(self, sess, state_size, action_size,
                 batch_size, tau, learning_rate):
        """Init critic network."""
        self.sess = sess
        self.batch_size = batch_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.action_size = action_size

        K.set_session(sess)

        self.model, self.action, self.state = \
            self.create_critic_network(state_size, action_size)
        self.target_model, self.target_action, self.target_state = \
            self.create_critic_network(state_size, action_size)
        self.action_grads = tf.gradients(self.model.output, self.action)
        self.sess.run(tf.initialize_all_variables())
Project: Supply-demand-forecasting | Author: LevinJ
def run_graph(self):
        logging.debug("computeGraph")
        with tf.Session(graph=self.graph) as sess:
            tf.initialize_all_variables().run()
            logging.debug("Initialized")
            for step in range(1, self.num_steps + 1):
                summary, _ , train_loss, train_metrics= sess.run([self.merged, self.train_step, self.loss, self.accuracy], feed_dict=self.feed_dict("train"))
                self.train_writer.add_summary(summary, step)

                if step % 100 == 0:
                    summary, validation_loss, validation_metrics = sess.run([self.merged, self.loss, self.accuracy], feed_dict=self.feed_dict("validation"))
                    self.test_writer.add_summary(summary, step)
#                     loss_train = sess.run(self.loss, feed_dict=self.feed_dict("validation_wholetrain"))
                    logging.info("Step {}/{}, train/test: {:.3f}/{:.3f}, train/test loss: {:.3f}/{:.3f}".format(step, self.num_steps, train_metrics, validation_metrics,\
                                                                                                                train_loss, validation_loss))
                    if self.get_stop_decisision(step, -validation_metrics):
                        logging.info("stop here due to early stopping")
                        return 

#                     y_pred = sess.run(self.y_pred, feed_dict=self.feed_dict("validation"))
#                     logging.info("validation mape :{:.3f}".format(mean_absolute_percentage_error(self.y_validation.reshape(-1), y_pred.reshape(-1))))
        return
Project: tensorflow-srgan | Author: olgaliak
def test_model(train_data):
    td = train_data

    summaries = tf.summary.merge_all()
    td.sess.run(tf.initialize_all_variables())
    start_time = time.time()

    batch = 0
    done = False

    while not done:
        print("Processing batch {0}".format(batch))
        test_feature, test_label = td.sess.run([td.features, td.labels])

        # Show progress with test features
        feed_dict = {td.gene_minput: test_feature}
        gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dict)
        _save_output(td, test_feature, test_label, gene_output, batch, 'out')
        batch += 1

    print('Finished testing!')
Project: cloudml-samples | Author: GoogleCloudPlatform
def export(self, last_checkpoint, output_dir):
    """Builds a prediction graph and xports the model.

    Args:
      last_checkpoint: The latest checkpoint from training.
      output_dir: Path to the folder to be used to output the model.
    """
    logging.info('Exporting prediction graph to %s', output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
      # Build and save prediction meta graph and trained variable values.
      self.build_prediction_graph()
      # Remove this try/except once TensorFlow 0.12 is standard.
      try:
        init_op = tf.global_variables_initializer()
      except AttributeError:
        init_op = tf.initialize_all_variables()
      sess.run(init_op)
      trained_saver = tf.train.Saver()
      trained_saver.restore(sess, last_checkpoint)
      saver = tf.train.Saver()
      saver.export_meta_graph(filename=os.path.join(output_dir, 'export.meta'))
      saver.save(
          sess, os.path.join(output_dir, 'export'), write_meta_graph=False)
Project: tf-translate | Author: chrislit
def self_test():
  """Test the translation model."""
  with tf.Session() as sess:
    print("Self-test for neural translation model.")
    # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
    model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                       5.0, 32, 0.3, 0.99, num_samples=8)
    sess.run(tf.initialize_all_variables())

    # Fake data set for both the (3, 3) and (6, 6) bucket.
    data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
    for _ in xrange(5):  # Train the fake model for 5 steps.
      bucket_id = random.choice([0, 1])
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          data_set, bucket_id)
      model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                 bucket_id, False)
Project: 3d-DenseNet | Author: frankgu
def _initialize_session(self):
    """Initialize session, variables, saver"""
    config = tf.ConfigProto()
    # restrict model GPU memory utilization to min required
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    # parse "major.minor" from the version string, e.g. "0.12.1" -> 0.12
    tf_ver = float('.'.join(tf.__version__.split('.')[:2]))
    if tf_ver <= 0.10:
      self.sess.run(tf.initialize_all_variables())
      logswriter = tf.train.SummaryWriter
    else:
      self.sess.run(tf.global_variables_initializer())
      logswriter = tf.summary.FileWriter
    self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)
    self.summary_writer = logswriter(self.logs_path, self.sess.graph)

Project: PyTorchDemystified | Author: hhsecond
def __initialize(self):
        sess = tf.Session()
        loss = tf.Variable(0.0, name="loss", trainable=False)
        acc = tf.Variable(0.0, name="accuracy", trainable=False)
        loss_summary = tf.summary.scalar("loss", loss)
        acc_summary = tf.summary.scalar("accuracy", acc)
        summary_op = tf.summary.merge([loss_summary, acc_summary])
        summary_writer = tf.summary.FileWriter(self.summary_dir, sess.graph)
        tf.train.Saver(tf.all_variables())
        sess.run(tf.initialize_all_variables())

        self.sess = sess
        self.summary_op = summary_op
        self.summary_writer = summary_writer
        self.loss = loss
        self.acc = acc
Project: OpenAI_Challenges | Author: AlwaysLearningDeeper
def train_neural_network(x):
    prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
Project: isbi2017-part3 | Author: learningtitans
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v2(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.initialize_all_variables().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Project: isbi2017-part3 | Author: learningtitans
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v2(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: isbi2017-part3 | Author: learningtitans
def testUnknownBatchSize(self):
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, _ = self._resnet_small(inputs, num_classes,
                                     global_pool=global_pool,
                                     scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))
Project: isbi2017-part3 | Author: learningtitans
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs,
                                     None,
                                     global_pool=global_pool,
                                     output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
Project: isbi2017-part3 | Author: learningtitans
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs,
                                     None,
                                     global_pool=global_pool,
                                     output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
Project: isbi2017-part3 | Author: learningtitans
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v3(inputs, num_classes)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_7c']
      feed_dict = {inputs: input_np}
      tf.initialize_all_variables().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
Project: isbi2017-part3 | Author: learningtitans
def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Project: isbi2017-part3 | Author: learningtitans
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_v4(train_inputs, num_classes)
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_v4(eval_inputs,
                                         num_classes,
                                         is_training=False,
                                         reuse=True)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.initialize_all_variables())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: isbi2017-part3 | Author: learningtitans
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v1(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.initialize_all_variables().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Project: isbi2017-part3 | Author: learningtitans
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: generating_sequences | Author: PFCM
def test_argmax_and_embed():
    """Ensure argmax_and_embed works without projection"""
    embedding = tf.get_variable('embedding', [3, 20])
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]]))

    loop_fn = helpers.argmax_and_embed(embedding, output_projection=None)
    correct = tf.nn.embedding_lookup(embedding, [1])

    result = loop_fn(data, 0)

    # get ready to see if it's right
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, b = sess.run([result, correct])

    assert np.all(a == b)
Project: generating_sequences | Author: PFCM
def test_sample_and_embed():
    """Ensure sample_and_embed works without projection"""
    embedding = tf.get_variable('embedding', [3, 20])
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]]))

    loop_fn = helpers.sample_and_embed(embedding, 1., output_projection=None)
    result = loop_fn(data, 0)

    # get ready to see if does indeed pick out one item
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embed_mat = sess.run([result, embedding])

    found = False
    for row in embed_mat:
        if np.all(row == a):
            found = True

    assert found
Project: generating_sequences | Author: PFCM
def test_argmax_and_embed_with_projection():
    """Ensure argmax_and_embed works with projection"""
    embedding = tf.get_variable('embedding', [10, 11])
    proj = (tf.get_variable('weights', [3, 10]),
            tf.get_variable('biases', [10]))
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]],
                                                         dtype=np.float32))
    loop_fn = helpers.argmax_and_embed(embedding, output_projection=proj)

    # we don't know what the correct answer is now because it's randomly
    # projected, so let's get what we need to do it by hand
    correct_projection = tf.nn.bias_add(tf.matmul(data, proj[0]), proj[1])

    result = loop_fn(data, 0)

    # get ready to see if it's right
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embedding, projection = sess.run(
        [result, embedding, correct_projection])

    argmax_p = np.argmax(projection)

    assert np.all(embedding[argmax_p] == a)
Project: generating_sequences | Author: PFCM
def test_sample_and_embed_with_projection():
    """Ensure sample_and_embed works with projection"""
    embedding = tf.get_variable('embedding', [10, 11])
    proj = (tf.get_variable('weights', [3, 10]),
            tf.get_variable('biases', [10]))
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]],
                                                         dtype=np.float32))

    loop_fn = helpers.sample_and_embed(embedding, 1., output_projection=proj)
    result = loop_fn(data, 0)

    # get ready to see if does indeed pick out one item
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embed_mat = sess.run([result, embedding])

    found = False
    for row in embed_mat:
        if np.all(row == a):
            found = True

    assert found
Project: tf-seq2seq-mod | Author: knok
def create_model(session, forward_only):
  """Create translation model and initialize or load parameters in session."""
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  model = seq2seq_model.Seq2SeqModel(
      FLAGS.en_vocab_size,
      FLAGS.fr_vocab_size,
      _buckets,
      FLAGS.size,
      FLAGS.num_layers,
      FLAGS.max_gradient_norm,
      FLAGS.batch_size,
      FLAGS.learning_rate,
      FLAGS.learning_rate_decay_factor,
      use_lstm = FLAGS.use_lstm,
      forward_only=forward_only,
      dtype=dtype)
  ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    model.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("Created model with fresh parameters.")
    session.run(tf.initialize_all_variables())
  return model
Project: tf-seq2seq-mod | Author: knok
def self_test():
  """Test the translation model."""
  with tf.Session() as sess:
    print("Self-test for neural translation model.")
    # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
    model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                       5.0, 32, 0.3, 0.99, num_samples=8)
    sess.run(tf.initialize_all_variables())

    # Fake data set for both the (3, 3) and (6, 6) bucket.
    data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
    for _ in xrange(5):  # Train the fake model for 5 steps.
      bucket_id = random.choice([0, 1])
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          data_set, bucket_id)
      model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                 bucket_id, False)
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def __init__(self,sess,state_dim,action_dim):
        self.time_step = 0
        self.sess = sess
        # create q network
        self.state_input,\
        self.action_input,\
        self.q_value_output,\
        self.net = self.create_q_network(state_dim,action_dim)

        # create target q network (the same structure with q network)
        self.target_state_input,\
        self.target_action_input,\
        self.target_q_value_output,\
        self.target_update = self.create_target_q_network(state_dim,action_dim,self.net)

        self.create_training_method()

        # initialization
        self.sess.run(tf.initialize_all_variables())

        self.update_target()
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def __init__(self,sess,state_dim,action_dim):

        self.sess = sess
        self.state_dim = state_dim
        self.action_dim = action_dim
        # create actor network
        self.state_input,self.action_output,self.net = self.create_network(state_dim,action_dim)

        # create target actor network
        self.target_state_input,self.target_action_output,self.target_update,self.target_net = self.create_target_network(state_dim,action_dim,self.net)

        # define training rules
        self.create_training_method()

        self.sess.run(tf.initialize_all_variables())

        self.update_target()
        #self.load_network()
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def create_net(self, shape):
        hidden_size = 64
        print(shape)
        self.x = tf.placeholder(tf.float32, shape=[None, shape], name="x")
        self.y = tf.placeholder(tf.float32, shape=[None], name="y")

        weight_init = tf.random_uniform_initializer(-0.05, 0.05)
        bias_init = tf.constant_initializer(0)

        with tf.variable_scope("VF"):
            h1 = tf.nn.relu(fully_connected(self.x, shape, hidden_size, weight_init, bias_init, "h1"))
            h2 = tf.nn.relu(fully_connected(h1, hidden_size, hidden_size, weight_init, bias_init, "h2"))
            h3 = fully_connected(h2, hidden_size, 1, weight_init, bias_init, "h3")
        self.net = tf.reshape(h3, (-1,))
        l2 = tf.nn.l2_loss(self.net - self.y)
        self.train = tf.train.AdamOptimizer().minimize(l2)
        self.session.run(tf.initialize_all_variables())
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def __init__(self,sess,state_dim,action_dim):
        self.time_step = 0
        self.sess = sess
        # create q network
        self.state_input,\
        self.action_input,\
        self.q_value_output,\
        self.net = self.create_q_network(state_dim,action_dim,"cbeh")

        # create target q network (the same structure with q network)
        self.target_state_input,\
        self.target_action_input,\
        self.target_q_value_output,\
        self.target_update = self.create_target_q_network(state_dim,action_dim,self.net,"ctare")

        self.create_training_method()

        # initialization
        self.sess.run(tf.initialize_all_variables())

        self.update_target()
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def __init__(self,sess,state_dim,action_dim):

        self.sess = sess
        self.state_dim = state_dim
        self.action_dim = action_dim
        # create actor network

        self.state_input,self.action_output,self.net = self.create_network(state_dim,action_dim,"beh")

        # create target actor network
        self.target_state_input,self.target_action_output,self.target_update,self.target_net = self.create_target_network(state_dim,action_dim,self.net)

        # define training rules
        self.create_training_method()

        self.sess.run(tf.initialize_all_variables())

        self.update_target()
        #self.load_network()
Project: hyperchamber | Author: 255BITS
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus, 
                 learning_rate=0.001, batch_size=100):
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size

        # tf Graph input
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])

        # Create autoencoder network
        self._create_network()
        # Define loss function based variational upper-bound and 
        # corresponding optimizer
        self._create_loss_optimizer()

        # Initializing the tensor flow variables
        init = tf.initialize_all_variables()

        # Launch the session
        self.sess = tf.InteractiveSession()
        self.sess.run(init)
Project: deep-news-summarization | Author: hengluchang
def create_model(session, forward_only):

  """Create model and initialize or load parameters"""
  model = seq2seq_model.Seq2SeqModel( gConfig['enc_vocab_size'], gConfig['dec_vocab_size'], _buckets, gConfig['hidden_units'], gConfig['num_layers'], gConfig['max_gradient_norm'], gConfig['batch_size'], gConfig['learning_rate'], gConfig['learning_rate_decay_factor'], forward_only=forward_only)

  if 'pretrained_model' in gConfig:
      model.saver.restore(session,gConfig['pretrained_model'])
      return model

  ckpt = tf.train.get_checkpoint_state(gConfig['working_directory'])
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    model.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("Created model with fresh parameters.")
    session.run(tf.initialize_all_variables())
  return model
Project: automatic-portrait-tf | Author: Corea
def main():
    model_filename = '../fcn8s-heavy-pascal.mat'
    input_image_filename = '../cat.jpg'

    caffe_mat = np.load(model_filename)
    image = build_image(input_image_filename)
    net = build_fcn8s(caffe_mat, image)
    feed_dict = {
        net['input']: image
    }

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        result = sess.run(tf.argmax(net['score'], dimension=3),
                          feed_dict=feed_dict)

    save_image(result)
Project: probabilistic_line_search | Author: ProbabilisticNumerics
def setUp(self):    
    # Set up model
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.float32, shape=[None, 10])
    W_fc1 = weight_variable([784, 1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(X, W_fc1) + b_fc1)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    losses = -tf.reduce_sum(y*tf.log(h_fc2), reduction_indices=[1])

    self.loss = tf.reduce_mean(losses)
    self.batch_size = tf.cast(tf.gather(tf.shape(losses), 0), tf.float32)
    self.var_list = [W_fc1, b_fc1, W_fc2, b_fc2]
    self.X = X
    self.y = y

    self.sess = tf.Session()
    self.sess.run(tf.initialize_all_variables())

    self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
Project: variational-text-tensorflow | Author: carpedm20
def train(self, config):
    start_time = time.time()

    merged_sum = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)

    tf.initialize_all_variables().run()
    self.load(self.checkpoint_dir)

    for epoch in range(self.epoch):
      epoch_loss = 0.

      for idx, x in enumerate(self.reader.next_batch()):
        step = epoch * self.reader.batch_cnt + idx  # global step for logging/saving; `step` was undefined in the original
        _, loss, e_loss, g_loss, summary_str = self.sess.run(
            [self.optim, self.loss, self.e_loss, self.g_loss, merged_sum], feed_dict={self.x: x})

        epoch_loss += loss
        if idx % 10 == 0:
          print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.8f, e_loss: %.8f, g_loss: %.8f" \
              % (epoch, idx, self.reader.batch_cnt, time.time() - start_time, loss, e_loss, g_loss))

        if idx % 2 == 0:
          writer.add_summary(summary_str, step)

        if idx != 0 and idx % 1000 == 0:
          self.save(self.checkpoint_dir, step)
Project: variational-text-tensorflow | Author: carpedm20
def initialize(self, log_dir="./logs"):
    self.merged_sum = tf.merge_all_summaries()
    self.writer = tf.train.SummaryWriter(log_dir, self.sess.graph_def)

    tf.initialize_all_variables().run()
    self.load(self.checkpoint_dir)

    start_iter = self.step.eval()
Project: visual-search | Author: GYXie
def extract_feature(imgs):
    x, fc6 = initModel()
    # init = tf.initialize_all_variables()
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    return sess.run(fc6, feed_dict={x: imgs})
Project: tf_rnnlm | Author: Ubiqus
def _run(self):
    m, mvalid, mtest = self.train_model, self.validation_model, self.test_model
    config = self.config
    data = self.data
    params = self.params

    init_op = tf.initialize_all_variables()
    with tf.Session() as session:
      session.run(init_op)

      print("Starting training from epoch %d using %s loss" % (config.epoch, m.loss_fct))

      while config.epoch <= config.max_max_epoch:
        i = config.epoch
        lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("\nEpoch: %d Learning rate: %.3f" % (i, session.run(m.lr)))
        train_perplexity = run_epoch(session, m,
          data.train,
          eval_op=m.train_op,
          verbose=True,
          opIO=self.io,
          log_rate=params.log_rate,
          save_rate=params.save_rate)
        print("Epoch: %d Train Perplexity: %.3f" % (i, train_perplexity))

        print("Validation using %s loss" % mvalid.loss_fct)
        valid_perplexity = run_epoch(session, mvalid, data.valid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i, valid_perplexity))

        config.step = 0
        config.epoch += 1
        config.save()

        self.io.save_checkpoint(session, "ep_%d.ckpt" % config.epoch)
Project: encore.ai | Author: dyelax
def save(artist, model_path, num_save):
    sample_save_dir = c.get_dir('../save/samples/')
    sess = tf.Session()

    print(artist)

    data_reader = DataReader(artist)
    vocab = data_reader.get_vocab()

    print('Init model...')
    model = LSTMModel(sess,
                      vocab,
                      c.BATCH_SIZE,
                      c.SEQ_LEN,
                      c.CELL_SIZE,
                      c.NUM_LAYERS,
                      test=True)

    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())

    saver.restore(sess, model_path)
    print('Model restored from ' + model_path)

    artist_save_dir = c.get_dir(join(sample_save_dir, artist))
    for i in range(num_save):
        print(i)

        path = join(artist_save_dir, str(i) + '.txt')
        sample = model.generate()
        processed_sample = process_sample(sample)

        with open(path, 'w') as f:
            f.write(processed_sample)
Project: ddpg-aigym | Author: stevenpjg
def __init__(self,num_states,num_actions):
        self.g=tf.Graph()
        with self.g.as_default():
            self.sess = tf.InteractiveSession()


            #actor network model parameters:
            self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a,\
            self.actor_state_in, self.actor_model = self.create_actor_net(num_states, num_actions)


            #target actor network model parameters:
            self.t_W1_a, self.t_B1_a, self.t_W2_a, self.t_B2_a, self.t_W3_a, self.t_B3_a,\
            self.t_actor_state_in, self.t_actor_model = self.create_actor_net(num_states, num_actions)

            #cost of actor network:
            self.q_gradient_input = tf.placeholder("float",[None,num_actions]) #gets input from action_gradient computed in critic network file
            self.actor_parameters = [self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a]
            self.parameters_gradients = tf.gradients(self.actor_model,self.actor_parameters,-self.q_gradient_input)#/BATCH_SIZE) 
            self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(zip(self.parameters_gradients,self.actor_parameters))  
            #initialize all tensor variable parameters:
            self.sess.run(tf.initialize_all_variables())    

            #To make sure actor and target have same intial parmameters copy the parameters:
            # copy target parameters
            self.sess.run([
                self.t_W1_a.assign(self.W1_a),
                self.t_B1_a.assign(self.B1_a),
                self.t_W2_a.assign(self.W2_a),
                self.t_B2_a.assign(self.B2_a),
                self.t_W3_a.assign(self.W3_a),
                self.t_B3_a.assign(self.B3_a)])

            self.update_target_actor_op = [
                self.t_W1_a.assign(TAU*self.W1_a+(1-TAU)*self.t_W1_a),
                self.t_B1_a.assign(TAU*self.B1_a+(1-TAU)*self.t_B1_a),
                self.t_W2_a.assign(TAU*self.W2_a+(1-TAU)*self.t_W2_a),
                self.t_B2_a.assign(TAU*self.B2_a+(1-TAU)*self.t_B2_a),
                self.t_W3_a.assign(TAU*self.W3_a+(1-TAU)*self.t_W3_a),
                self.t_B3_a.assign(TAU*self.B3_a+(1-TAU)*self.t_B3_a)]
Project: lstm-poetry | Author: dvictor
def train():
    cleanup.cleanup()
    c.save(c.work_dir)

    data_loader = TextLoader(c.work_dir, c.batch_size, c.seq_length)
    with open(os.path.join(c.work_dir, 'chars_vocab.pkl'), 'wb') as f:
        cPickle.dump((data_loader.chars, data_loader.vocab), f)

    model = Model(c.rnn_size, c.num_layers, len(data_loader.chars), c.grad_clip, c.batch_size, c.seq_length)

    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        for e in range(c.num_epochs):
            sess.run(tf.assign(model.lr, c.learning_rate * (c.decay_rate ** e)))
            data_loader.reset_batch_pointer()
            state = model.initial_state.eval()
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
                end = time.time()
                print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            c.num_epochs * data_loader.num_batches,
                            e, train_loss, end - start))
                if (e * data_loader.num_batches + b) % c.save_every == 0:
                    checkpoint_path = os.path.join(c.work_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
Project: rlflow | Author: tpbarron
def __init__(self,
                 env,
                 policy,
                 episode_len=100,
                 discount=False,
                 optimizer='sgd'):

        raise NotImplementedError

        self.env = env
        self.policy = policy
        self.episode_len = episode_len
        self.discount = discount

        self.states = tf.placeholder(tf.float32, shape=(None, 4))
        self.actions = tf.placeholder(tf.float32, shape=(None, 2))
        self.rewards = tf.placeholder(tf.float32, shape=(None))
        self.probs = self.policy.model(self.states)

        self.action_probs = tf.mul(self.probs, self.actions)
        self.reduced_action_probs = tf.reduce_sum(self.action_probs, reduction_indices=[1])
        self.logprobs = tf.log(self.reduced_action_probs)
        self.eligibility = self.logprobs * self.rewards
        self.L = -tf.reduce_sum(self.eligibility)

        # fisher matrix
        self.F = tf.mul(self.logprobs, tf.transpose(self.logprobs))



        # TODO: gen optimizer based on param
        self.opt = tf.train.AdamOptimizer(0.005).minimize(self.L)

        # do gradient update separately so do apply custom function to gradients?
        # self.grads_and_vars = self.opt.compute_gradients(self.L)
        # self.apply_grads = self.opt.apply_gradients(self.grads_and_vars)

        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
Project: TF-Genetic | Author: thepropterhoc
def __init__(self, layerDimensions=[], netDimensions=[], validActivationFunctions=[]):

        self.layerDimensions = layerDimensions

        self.x = tf.placeholder(tf.float32, [None, netDimensions[0]])
        previousActivation = self.x

        for idx in range(len(layerDimensions)):
            currentLayer = layerDimensions[idx]
            thisActivation = None
            for functionIndex in range(len(currentLayer)):
                inDim, outDim = currentLayer[functionIndex]
                thisW = tf.Variable(tf.random_normal([inDim, outDim]))
                thisB = tf.Variable(tf.random_normal([outDim]))
                thisFunction = validActivationFunctions[functionIndex]
                newTensor = thisFunction(tf.matmul(previousActivation, thisW) + thisB)
                thisActivation = newTensor if thisActivation is None else tf.concat(1, [thisActivation, newTensor])

            previousActivation = thisActivation

        self.predictedOutput = previousActivation
        self.y_ = tf.placeholder(tf.float32, [None, netDimensions[-1]])
        cross_entropy = tf.reduce_mean(tf.square(self.predictedOutput - self.y_))
        self.train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

        init = tf.initialize_all_variables()
        self.sess = tf.Session(config=tf.ConfigProto(
            inter_op_parallelism_threads=4,
                        intra_op_parallelism_threads=4
        ))
        self.sess.run(init)
Project: CNN-MNIST | Author: m516825
def train(self):

        data = Data(self.train_dat, self.train_lab)
        batch_num = self.length // self.batch_size if self.length % self.batch_size == 0 else self.length // self.batch_size + 1

        model = self.add_model()

        with self.sess as sess:

            tf.initialize_all_variables().run()

            for ite in range(self.iterations):
                print "Iteration {}".format(ite)
                cost = 0.
                pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=batch_num).start()
                for i in range(batch_num):
                    batch_x, batch_y = data.next_batch(self.batch_size)

                    c, _ = self.sess.run([model['loss'], model['optimizer']], feed_dict={model['train_x']:batch_x, model['train_y']:batch_y, model['p_keep_dens']:0.75})

                    cost += c / batch_num
                    pbar.update(i+1)
                pbar.finish()

                print ">>cost: {}".format(cost)

                t_acc, d_acc = self.eval(model, 3000)
                # early stop
                if t_acc >= 0.995 and d_acc >= 0.995:
                    break

            self.predict(model)