Python tensorflow module: no_op() code examples

From open-source Python projects, we have extracted the following 50 code examples illustrating how to use tensorflow.no_op().
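In short, tf.no_op() returns an Operation that performs no computation when run. Its value lies in where it sits in the graph: as a named anchor for control dependencies, as a placeholder where an op is required (for example, one branch of tf.cond), or as a cheap fetch for session.run. A minimal sketch of the control-dependency pattern, assuming TensorFlow 1.x graph mode (counter and step are illustrative names):

import tensorflow as tf

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 1)

# The no_op computes nothing itself, but fetching it forces its control
# dependencies (here, the increment) to run first.
with tf.control_dependencies([increment]):
    step = tf.no_op(name='step')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(step)            # runs `increment` via the control dependency
    print(sess.run(counter))  # -> 1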

Project: tfplus    Author: renmengye    | Project source | File source
def build_all(self, param_avg=False):
        """Build all nodes."""
        if self._has_built_all:
            raise Exception('Only call build_all or build_eval once.')
        self._has_built_all = True
        with tf.device(self.get_device_fn()):
            with tf.variable_scope(self.name):
                inp_var = self.build_input()
                output_var = self.build(inp_var)
                loss_var = self.build_loss(inp_var, output_var)
                train_step = self.build_optim(loss_var)
                if param_avg:
                    ema_op, avg_var = self.get_average_var()
                    self._avg_var = avg_var
                    with tf.control_dependencies([train_step, ema_op]):
                        train_step = tf.no_op(name='train_step')
                self.register_var('train_step', train_step)
        return self
Project: Saliency_Detection_Convolutional_Autoencoder    Author: arthurmeyer    | Project source | File source
def train(self, loss, global_step):
    """
    Return a training step for the tensorflow graph

    Args:
      loss                   : loss to run SGD on
      global_step            : the current training step
    """

    opt = tf.train.AdamOptimizer(self.learning_rate)
    grads = opt.compute_gradients(loss)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    variable_averages = tf.train.ExponentialMovingAverage(self.moving_avg_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
      train_op = tf.no_op(name='train')

    return train_op
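A note on the pattern above: tf.no_op(name='train') carries no computation of its own; the control_dependencies context guarantees that fetching it runs both the gradient update and the moving-average update, giving callers a single, stable op name for the whole training step.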
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
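Worth noting in these deployment tests: the no_op c ends up with the empty device ('') because the device chooser returned by variables_device() only pins variable ops to the parameter servers; a stateless op like no_op falls through with no explicit placement.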
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: terngrad    Author: wenwei202    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: terngrad    Author: wenwei202    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: YOLO2TensorFlow    Author: PaulChongPeng    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: YOLO2TensorFlow    Author: PaulChongPeng    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: RecurrentHighwayNetworks    Author: julian121266    | Project source | File source
def evaluate_mc(data_path, dataset, load_model, mc_steps, seed):
  """Evaluate the model on the given data using MC averaging."""
  ex.commands['print_config']()
  print("MC Evaluation of model:", load_model)
  assert mc_steps > 0
  reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)

  config = get_config()
  val_config = deepcopy(config)
  test_config = deepcopy(config)
  test_config.batch_size = test_config.num_steps = 1
  with tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      _ = Model(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      _ = Model(is_training=False, config=val_config)
      mtest = Model(is_training=False, config=test_config)
    session.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    saver.restore(session, load_model)

    print("Testing on non-batched Test ...")
    test_perplexity = run_mc_epoch(seed, session, mtest, test_data, tf.no_op(), test_config, mc_steps, verbose=True)
    print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
Project: human-pose-estimation-by-deep-learning    Author: HYPJUDY    | Project source | File source
def train_op(self, total_loss, global_step):
        self._loss_summary(total_loss)

        optimizer = tf.train.AdamOptimizer()
        grads = optimizer.compute_gradients(total_loss)

        apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)

        variable_averages = tf.train.ExponentialMovingAverage(
                self.moving_average_decay, global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())

        with tf.control_dependencies([apply_gradient_op, variable_averages_op]):
            train_op = tf.no_op(name="train")

        return train_op
Project: deep-attention-text-classifier-tf    Author: krayush07    | Project source | File source
def run_test(session, test_obj, dict_obj):
    start_time = time.time()

    print("Starting test computation\n")
    test_loss = run_epoch(session, tf.no_op(), test_obj, dict_obj)

    curr_time = time.time()
    print('1 epoch run takes ' + str(((curr_time - start_time) / 60)) + ' minutes.')

# def main():
#     session, test_obj = init_test()
#     dict_obj = set_dict.Dictionary()
#     run_test(session, test_obj, dict_obj)
#
#
# if __name__ == "__main__":
#     main()
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: tefla    Author: openAGI    | Project source | File source
def _setup_model_loss(self, keep_moving_averages=False, num_classes=10):
        self.learning_rate = tf.placeholder(
            tf.float32, shape=[], name="learning_rate_placeholder")
        optimizer = self._optimizer(self.learning_rate, optname=self.cnf.get(
            'optname', 'momentum'), **self.cnf.get('opt_kwargs', {'decay': 0.9}))
        self.grads_and_vars, self.training_loss = self._process_towers_grads(
            optimizer, self.model, is_classification=self.classification, loss_type=self.loss_type)

        if self.clip_norm and not self.clip_by_global_norm:
            self.grads_and_vars = self._clip_grad_norms(
                self.grads_and_vars, max_norm=self.norm_threshold)
        apply_gradients_op = optimizer.apply_gradients(self.grads_and_vars)
        if keep_moving_averages:
            variables_averages_op = self._moving_averages_op()
            with tf.control_dependencies([apply_gradients_op, variables_averages_op]):
                self.train_op = tf.no_op(name='train_op')
        else:
            self.train_op = apply_gradients_op
Project: chinese-char-rnn    Author: indiejoseph    | Project source | File source
def run_epochs(sess, x, y, model, is_training=True):
  start = time.time()
  feed = {model.input_data: x, model.targets: y, model.is_training: is_training}

  if is_training:
    extra_op = model.train_op
  else:
    extra_op = tf.no_op()

  fetchs = {"loss": model.loss,
            "extra_op": extra_op}

  res = sess.run(fetchs, feed)
  end = time.time()

  return res, end - start
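This train/eval switch is a recurring pattern: fetching tf.no_op() in place of the training op lets one run loop serve both modes. A condensed sketch, assuming a model object that exposes train_op and loss tensors (names illustrative):

import tensorflow as tf

def run_step(sess, model, feed, is_training):
    # In eval mode, swap the optimizer step for a no-op so the fetch
    # structure stays identical in both modes.
    extra_op = model.train_op if is_training else tf.no_op()
    loss, _ = sess.run([model.loss, extra_op], feed)
    return loss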
Project: a3c    Author: siemanko    | Project source | File source
def build_graph(reuse):
    with tf.variable_scope('model', reuse=reuse):
        x = tf.placeholder(tf.float32, shape=[None, 784])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        keep_prob = tf.placeholder(tf.float32)

        y_conv = forward(x, keep_prob)

        cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))

        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))

        num_correct = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))

        no_op = tf.no_op()

        return x, y_, keep_prob, train_step, num_correct, no_op
Project: a3c    Author: siemanko    | Project source | File source
def accuracy(session, graphs, data_iter, num_threads, train=False):
    num_total   = 0
    num_correct = 0

    def process_batch(batch_x, batch_y):
        nonlocal num_correct
        nonlocal num_total
        with graphs.lease() as g:
            input_placeholder, output_placeholder, keep_prob_placeholder, train_step_f, num_correct_f, no_op = g
            batch_num_correct, _ = session.run(
                [num_correct_f, train_step_f if train else no_op],
                {
                    input_placeholder:     batch_x,
                    output_placeholder:    batch_y,
                    keep_prob_placeholder: 0.5 if train else 1.0,
                })
            num_correct += batch_num_correct
            num_total   += len(batch_x)

    with BlockOnFullThreadPool(max_workers=num_threads, queue_size=num_threads // 2) as pool:
        for i, (batch_x, batch_y) in enumerate(data_iter):
            pool.submit(process_batch, batch_x, batch_y)
        pool.shutdown(wait=True)

    return float(num_correct) / float(num_total)
Project: lsdc    Author: febert    | Project source | File source
def test_stop_based_on_last_step(self):
    h = basic_session_run_hooks.StopAtStepHook(last_step=10)
    with tf.Graph().as_default():
      global_step = tf.contrib.framework.get_or_create_global_step()
      no_op = tf.no_op()
      h.begin()
      with tf.Session() as sess:
        mon_sess = monitored_session._HookedSession(sess, [h])
        sess.run(tf.assign(global_step, 5))
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(tf.assign(global_step, 9))
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(tf.assign(global_step, 10))
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
        sess.run(tf.assign(global_step, 11))
        mon_sess._should_stop = False
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
Project: lsdc    Author: febert    | Project source | File source
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
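The tf.cond above works because both branches yield Operations: shortlist_insert returns a tf.group, and tf.no_op supplies the "else do nothing" branch. A standalone sketch of the same idiom, assuming TensorFlow 1.x (flag and v are illustrative names):

import tensorflow as tf

v = tf.Variable(0)
flag = tf.placeholder(tf.bool, shape=[])

# tf.no_op (the function itself, not a call) is a valid branch callable;
# the true branch wraps its update in tf.group so both branches return
# an Operation.
maybe_update = tf.cond(flag, lambda: tf.group(tf.assign_add(v, 1)), tf.no_op)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(maybe_update, {flag: True})   # v becomes 1
    sess.run(maybe_update, {flag: False})  # v stays 1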
Project: lsdc    Author: febert    | Project source | File source
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
Project: segmentation-models    Author: desimone    | Project source | File source
def testPS(self):
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=1, num_ps_tasks=1)

        self.assertDeviceEqual(deploy_config.clone_device(0), '/job:worker')
        self.assertEqual(deploy_config.clone_scope(0), '')
        self.assertDeviceEqual(deploy_config.optimizer_device(),
                               '/job:worker/device:CPU:0')
        self.assertDeviceEqual(deploy_config.inputs_device(),
                               '/job:worker/device:CPU:0')
        with tf.device(deploy_config.variables_device()):
            a = tf.Variable(0)
            b = tf.Variable(0)
            c = tf.no_op()
            d = slim.variable(
                'a', [], caching_device=deploy_config.caching_device())
        self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(a.device, a.value().device)
        self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(b.device, b.value().device)
        self.assertDeviceEqual(c.device, '')
        self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(d.value().device, '')
Project: segmentation-models    Author: desimone    | Project source | File source
def testVariablesPS(self):
        deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

        with tf.device(deploy_config.variables_device()):
            a = tf.Variable(0)
            b = tf.Variable(0)
            c = tf.no_op()
            d = slim.variable(
                'a', [], caching_device=deploy_config.caching_device())

        self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(a.device, a.value().device)
        self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
        self.assertDeviceEqual(b.device, b.value().device)
        self.assertDeviceEqual(c.device, '')
        self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
        self.assertDeviceEqual(d.value().device, '')
Project: tensorlm    Author: batzner    | Project source | File source
def _sample_step(self, session, inputs, update_state=True):
        """Feeds batch inputs to the model and returns the batch output ids.

        Args:
            session (tf.Session): The TF session to run the operations in.
            inputs (np.ndarray): A batch of inputs. Must have the shape (batch_size, num_timesteps)
                and contain only integers. The batch size and number of timesteps are determined
                dynamically, so the shape of inputs can vary between calls of this function.
            update_state (bool): If True, the LSTM's memory state will be updated after feeding the
                batch inputs, so that the LSTM will use this state before the next feed of inputs.
                If this function gets called during training, make sure to call it between
                on_pause_training and will_resume_training. Thus, the training's memory state will
                be frozen before and unfrozen after this function call.

        Returns:
            np.ndarray: A batch of outputs with the same shape and data type as the inputs
                parameter.
        """
        # Feed the input
        feed_dict = {self._inputs: inputs}
        runs = [self._logits, self._update_state_op if update_state else tf.no_op()]

        # Get the output
        logits, _ = session.run(runs, feed_dict=feed_dict)
        return np.argmax(logits, axis=2)
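One caveat with the fetch list above: tf.no_op() is called on every invocation with update_state=False, so each such call adds a new (tiny) node to the default graph. Creating a single no-op once at build time and reusing it would avoid unbounded graph growth in long sampling loops.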
Project: CNN-LSTM-Caption-Generator    Author: mosessoh    | Project source | File source
def run_epoch(self, session, train_op):
        total_steps = sum(1 for x in train_data_iterator(self.train_captions, self.train_caption_id2sentence, self.train_caption_id2image_id, self.train_image_id2feature, self.config))
        total_loss = []
        if not train_op:
            train_op = tf.no_op()
        start = time.time()

        for step, (sentences, images, targets) in enumerate(train_data_iterator(self.train_captions, self.train_caption_id2sentence, self.train_caption_id2image_id, self.train_image_id2feature, self.config)):

            feed = {self._sent_placeholder: sentences,
                    self._img_placeholder: images,
                    self._targets_placeholder: targets,
                    self._dropout_placeholder: self.config.keep_prob}
            loss, _ = session.run([self.loss, train_op], feed_dict=feed)
            total_loss.append(loss)

            if (step % 50) == 0:
                print('%d/%d: loss = %.2f time elapsed = %d' % (step, total_steps, np.mean(total_loss), time.time() - start))

        print('Total time: %ds' % (time.time() - start))
        return total_loss
Project: Embarrassingly-Parallel-Image-Classification    Author: Azure    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: Embarrassingly-Parallel-Image-Classification    Author: Azure    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: the-neural-perspective    Author: GokuMohandas    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: the-neural-perspective    Author: GokuMohandas    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: MobileNet    Author: Zehaos    | Project source | File source
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: MobileNet    Author: Zehaos    | Project source | File source
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def _add_train_graph(self):
    """Define the training operation."""
    mc = self.mc

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    lr = tf.train.exponential_decay(mc.LEARNING_RATE,
                                    self.global_step,
                                    mc.DECAY_STEPS,
                                    mc.LR_DECAY_FACTOR,
                                    staircase=True)

    tf.summary.scalar('learning_rate', lr)

    _add_loss_summaries(self.loss)

    opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=mc.MOMENTUM)
    grads_vars = opt.compute_gradients(self.loss, tf.trainable_variables())

    with tf.variable_scope('clip_gradient') as scope:
      for i, (grad, var) in enumerate(grads_vars):
        grads_vars[i] = (tf.clip_by_norm(grad, mc.MAX_GRAD_NORM), var)

    apply_gradient_op = opt.apply_gradients(grads_vars, global_step=self.global_step)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    for grad, var in grads_vars:
      if grad is not None:
        tf.summary.histogram(var.op.name + '/gradients', grad)

    with tf.control_dependencies([apply_gradient_op]):
      self.train_op = tf.no_op(name='train')
Project: skiprnn-2017-telecombcn    Author: imatge-upc    | Project source | File source
def split_rnn_outputs(model, rnn_outputs):
    """
    Split the output of dynamic_rnn into the actual RNN outputs and the state update gate
    """
    if using_skip_rnn(model):
        return rnn_outputs.h, rnn_outputs.state_gate
    else:
        return rnn_outputs, tf.no_op()
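Here tf.no_op() stands in for a tensor that plain RNN cells simply do not produce; returning it keeps the caller's unpacking code uniform, though fetching it from session.run yields None rather than a gate value.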
Project: seq2seq    Author: google    | Project source | File source
def test_sampling(self):
    hook = hooks.TrainSampleHook(
        params={"every_n_steps": 10}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())

    global_step = tf.contrib.framework.get_or_create_global_step()
    no_op = tf.no_op()
    hook.begin()
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      sess.run(tf.tables_initializer())

      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(no_op)

      outfile = os.path.join(self.sample_dir, "samples_000000.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 0",
                      readfile.read().decode("utf-8"))

      # Should not trigger for step 9
      sess.run(tf.assign(global_step, 9))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000009.txt")
      self.assertFalse(os.path.exists(outfile))

      # Should trigger for step 10
      sess.run(tf.assign(global_step, 10))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000010.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 10",
                      readfile.read().decode("utf-8"))
Project: pointnet    Author: charlesq34    | Project source | File source
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
  """ Batch normalization on convolutional maps and beyond...
  Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow

  Args:
      inputs:        Tensor, k-D input of shape [..., C] (e.g. BC, BHWC or BDHWC)
      is_training:   boolean tf.Variable, true indicates training phase
      scope:         string, variable scope
      moments_dims:  a list of ints, indicating dimensions for moments calculation
      bn_decay:      float or float tensor variable, controlling moving average weight
  Return:
      normed:        batch-normalized maps
  """
  with tf.variable_scope(scope) as sc:
    num_channels = inputs.get_shape()[-1].value
    beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
                       name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
                        name='gamma', trainable=True)
    batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
    decay = bn_decay if bn_decay is not None else 0.9
    ema = tf.train.ExponentialMovingAverage(decay=decay)
    # Operator that maintains moving averages of variables.
    ema_apply_op = tf.cond(is_training,
                           lambda: ema.apply([batch_mean, batch_var]),
                           lambda: tf.no_op())

    # Update moving average and return current batch's avg and var.
    def mean_var_with_update():
      with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)

    # ema.average returns the Variable holding the average of var.
    mean, var = tf.cond(is_training,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
  return normed
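The tf.cond(is_training, ..., lambda: tf.no_op()) above makes the moving-average update conditional at graph-execution time: the EMA apply op runs only when is_training is true, while the no-op branch keeps tf.cond satisfied during inference.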
Project: benchmarks    Author: tensorflow    | Project source | File source
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
      name_prefix: prefix for the shared_name of the queue ops.
      enqueue_after_list: ops that must run (as control dependencies) before
        the enqueues.

    Returns:
      an op that should be used as a control dependency before starting the
      next step.
    """
    self.sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[(
        self.sync_queue_counter % len(self.sync_queue_devices))]):
      sync_queues = [
          tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                       shared_name='%s%s' % (name_prefix, i))
          for i in range(self.num_workers)]
      queue_ops = []
      # For each other worker, add an entry in a queue, signaling that it can
      # finish this step.
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops)
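The tf.no_op() appended for the worker's own index keeps queue_ops aligned one entry per worker: every worker enqueues a token into every queue except its own, then drains num_workers - 1 tokens from its own queue, so the grouped op completes only once all workers reach the barrier.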
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def _create_train_ops(self, *_):
        tf.no_op(name='train_op_1')
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def _create_train_ops(self, *_):
        tf.no_op(name='train_op_1')
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def _create_train_ops(self, *_):
        tf.no_op(name='train_op_1')
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def _create_train_ops(self, *_):
        tf.no_op(name='train_op_1')
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def _create_train_ops(self, *_):
        tf.no_op(name='train_op_1')
Project: fold    Author: tensorflow    | Project source | File source
def test_create_from_flags(self):
    tf.flags.FLAGS.mode = plan.Plan.mode_keys.TRAIN
    tf.flags.FLAGS.truncate_examples = 3
    tf.flags.FLAGS.num_multiprocess_processes = 4
    tf.flags.FLAGS.master = 'foo'
    tf.flags.FLAGS.batches_per_epoch = 123
    foo = tf.get_variable('foo', [], tf.float32, tf.constant_initializer(4))
    p = plan.Plan.create_from_flags(_setup_plan(
        compiler=block_compiler.Compiler.create(blocks.Scalar()),
        losses={'foo': foo},
        examples=xrange(5)))
    self.assertEqual(p.num_multiprocess_processes, 4)
    self.assertEqual(p.master, 'foo')
    self.assertEqual(p.batches_per_epoch, 123)
    self.assertEqual(p.compute_summaries, True)
    self.assertEqual(p.is_chief_trainer, True)
    self.assertEqual(p.logdir, os.path.join('/tmp/', 'plan', 'run_0', 'train'))
    self.assertEqual(p.rundir, os.path.join('/tmp/', 'plan', 'run_0'))
    self.assertEqual(p.plandir, os.path.join('/tmp/', 'plan'))
    self.assertEqual([0, 1, 2], list(p.examples))
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertEqual(4, p.loss_total.eval())
      sess.run(p.train_op)  # should make loss smaller
      self.assertLess(p.loss_total.eval(), 4)

    tf.flags.FLAGS.num_multiprocess_processes = 0
    tf.flags.FLAGS.task = 42
    train_op = tf.no_op()
    p = plan.Plan.create_from_flags(_setup_plan(
        compiler=block_compiler.Compiler.create(blocks.Scalar()),
        losses={'foo': tf.constant(3.14)},
        train_op=train_op,
        examples=xrange(5)))
    self.assertEqual(p.num_multiprocess_processes, 0)
    self.assertEqual(p.compute_summaries, False)
    self.assertEqual(p.is_chief_trainer, False)
    self.assertEqual(p.train_op, train_op)
Project: fold    Author: tensorflow    | Project source | File source
def test_create_from_params(self):
    params = plan.plan_default_params()
    params.update({
        'mode': plan.Plan.mode_keys.TRAIN,
        'truncate_examples': 3,
        'num_multiprocess_processes': 4,
        'master': 'foo',
        'batches_per_epoch': 123})
    foo = tf.get_variable('foo', [], tf.float32, tf.constant_initializer(4))
    p = plan.Plan.create_from_params(_setup_plan(
        compiler=block_compiler.Compiler.create(blocks.Scalar()),
        losses={'foo': foo},
        examples=xrange(5)), params)
    self.assertEqual(p.num_multiprocess_processes, 4)
    self.assertEqual(p.master, 'foo')
    self.assertEqual(p.batches_per_epoch, 123)
    self.assertEqual(p.compute_summaries, True)
    self.assertEqual(p.is_chief_trainer, True)
    self.assertEqual(p.logdir, os.path.join('/tmp/', 'plan', 'run_0', 'train'))
    self.assertEqual(p.rundir, os.path.join('/tmp/', 'plan', 'run_0'))
    self.assertEqual(p.plandir, os.path.join('/tmp/', 'plan'))
    self.assertEqual([0, 1, 2], list(p.examples))
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertEqual(4, p.loss_total.eval())
      sess.run(p.train_op)  # should make loss smaller
      self.assertLess(p.loss_total.eval(), 4)

    tf.flags.FLAGS.num_multiprocess_processes = 0
    tf.flags.FLAGS.task = 42
    train_op = tf.no_op()
    p = plan.Plan.create_from_flags(_setup_plan(
        compiler=block_compiler.Compiler.create(blocks.Scalar()),
        losses={'foo': tf.constant(3.14)},
        train_op=train_op,
        examples=xrange(5)))
    self.assertEqual(p.num_multiprocess_processes, 0)
    self.assertEqual(p.compute_summaries, False)
    self.assertEqual(p.is_chief_trainer, False)
    self.assertEqual(p.train_op, train_op)
Project: fold    Author: tensorflow    | Project source | File source
def test_assert_runnable(self):
    p = plan.TrainPlan()
    self.assertRaisesWithLiteralMatch(
        ValueError, 'at least one loss is required', p.assert_runnable)
    p.losses['foo'] = tf.constant(42.0)
    self.assertRaisesWithLiteralMatch(
        ValueError, 'compiler is required', p.assert_runnable)
    p.compiler = block_compiler.Compiler.create(blocks.Scalar())
    self.assertRaisesWithLiteralMatch(
        RuntimeError, 'finalize_stats() has not been called', p.assert_runnable)
    p.finalize_stats()
    self.assertRaisesWithLiteralMatch(
        ValueError, 'logdir is required', p.assert_runnable)
    p.logdir = '/tmp/'
    self.assertRaisesWithLiteralMatch(
        ValueError, 'train_op is required', p.assert_runnable)
    p.train_op = tf.no_op()
    self.assertRaisesWithLiteralMatch(
        ValueError, 'batch_size is required', p.assert_runnable)
    p.batch_size = 10
    self.assertRaisesWithLiteralMatch(
        ValueError, 'either examples or batches_per_epoch is required',
        p.assert_runnable)
    p.examples = xrange(2)
    p.assert_runnable()
    p.examples = None
    self.assertRaises(ValueError, p.assert_runnable)
    p.batches_per_epoch = 42
    p.assert_runnable()
Project: fold    Author: tensorflow    | Project source | File source
def test_dequeue(self):
    p = plan.TrainPlan()
    p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
    p.is_chief_trainer = True
    p.batch_size = 3
    p.batches_per_epoch = 2
    p.queue_capacity = 12
    p.num_dequeuers = 1
    p.ps_tasks = 1
    q = p._create_queue(0)
    p._setup_dequeuing([q])
    input_batch = list(p.compiler.build_loom_inputs([7])) * 3
    q_enqueue = q.enqueue_many([input_batch * 4])
    p.losses['foo'], = p.compiler.output_tensors
    p.train_op = tf.no_op()
    p.finalize_stats()
    p.logdir = self.get_temp_dir()
    p.epochs = 2
    p.print_file = six.StringIO()
    init_op = tf.global_variables_initializer()
    sv = p.create_supervisor()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(q_enqueue)
      p.run(sv, sess)
    expected = '\n'.join(['running train',
                          'train_size: 6',
                          'epoch:    1 train[loss: 7.000e+00]',
                          'epoch:    2 train[loss: 7.000e+00]',
                          'final model saved in file: %s' % p.logdir])
    log_str = p.print_file.getvalue()
    self.assertIn(expected, log_str)
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testMultiGPUPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)

    self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker/device:GPU:0')
    self.assertDeviceEqual(deploy_config.clone_device(1),
                           '/job:worker/device:GPU:1')
    self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
    self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
Project: AMS    Author: EthanTaylor2    | Project source | File source
def run_train():
    fout = open('inf.txt','w+')

    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth=True 

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:    
        with tf.device('/gpu:0'):
        #if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, 
                                                        test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test,FLAGS.data_root_dir,TEST_SIZE,is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver,FLAGS.saveModelPath,train_model)
            print ('start: ',last_epoch + 1)

            test_accury_1,test_accury_5,test_loss = run_epoch(sess,test_config.keep_prob, fout,test_config.batch_size, train_model, data_test, tf.no_op(),2,test_writer,istraining=False) 
            info = "Final: Test accury(top 1): %.4f Test accury(top 5): %.4f Loss %.4f" % (test_accury_1,test_accury_5,test_loss)
            print (info)
            fout.write(info + '\n')
            fout.flush()



            test_writer.close()

            print("Training step is compeleted!") 
            fout.close()
Project: AMS    Author: EthanTaylor2    | Project source | File source
def run_train():
    fout = open('inf.txt','w+')

    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth=True 



    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:    
        with tf.device('/gpu:3'):
        #if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, 
                                                        test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test,FLAGS.data_root_dir,TEST_SIZE,is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver,FLAGS.saveModelPath,train_model)
            print ('start: ',last_epoch + 1)

            test_accury_1,test_accury_5,test_loss = run_epoch(sess,test_config.keep_prob, fout,test_config.batch_size, train_model, data_test, tf.no_op(),2,test_writer,istraining=False) 
            info = "Final: Test accury(top 1): %.3f Test accury(top 5): %.3f Loss %.3f" % (test_accury_1,test_accury_5,test_loss)
            print (info)
            fout.write(info + '\n')
            fout.flush()



            test_writer.close()

            print("Training step is compeleted!") 
            fout.close()
Project: AMS    Author: EthanTaylor2    | Project source | File source
def run_train():
    fout = open('inf.txt','w+')

    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth=True 

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:    
        with tf.device('/gpu:0'):
        #if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, 
                                                        test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test,FLAGS.data_root_dir,TEST_SIZE,is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver,FLAGS.saveModelPath,train_model)
            print ('start: ',last_epoch + 1)

            test_accury_1,test_accury_5,test_loss = run_epoch(sess,test_config.keep_prob, fout,test_config.batch_size, train_model, data_test, tf.no_op(),2,test_writer,istraining=False) 
            info = "Final: Test accury(top 1): %.4f Test accury(top 5): %.4f Loss %.4f" % (test_accury_1,test_accury_5,test_loss)
            print (info)
            fout.write(info + '\n')
            fout.flush()



            test_writer.close()

            print("Training step is compeleted!") 
            fout.close()
Project: AMS    Author: EthanTaylor2    | Project source | File source
def run_train():
    fout = open('inf.txt','w+')

    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth=True 



    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:    
        with tf.device('/gpu:3'):
        #if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, 
                                                        test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test,FLAGS.data_root_dir,TEST_SIZE,is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver,FLAGS.saveModelPath,train_model)
            print ('start: ',last_epoch + 1)

            test_accury_1,test_accury_5,test_loss = run_epoch(sess,test_config.keep_prob, fout,test_config.batch_size, train_model, data_test, tf.no_op(),2,test_writer,istraining=False) 
            info = "Final: Test accury(top 1): %.3f Test accury(top 5): %.3f Loss %.3f" % (test_accury_1,test_accury_5,test_loss)
            print (info)
            fout.write(info + '\n')
            fout.flush()



            test_writer.close()

            print("Training step is compeleted!") 
            fout.close()
Project: baselines    Author: openai    | Project source | File source
def apply_stats(self, statsUpdates):
        """ compute stats and update/apply the new stats to the running average
        """

        def updateAccumStats():
            if self._full_stats_init:
                return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
            else:
                return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))

        def updateRunningAvgStats(statsUpdates, fac_iter=1):
            # return tf.cond(tf.greater_equal(self.factor_step,
            # tf.convert_to_tensor(fac_iter)), lambda:
            # tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
            return tf.group(*self._apply_stats(statsUpdates))

        if self._async_stats:
            # asynchronous stats update
            update_stats = self._apply_stats(statsUpdates)

            queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
                                 item.get_shape() for item in update_stats])
            enqueue_op = queue.enqueue(update_stats)

            def dequeue_stats_op():
                return queue.dequeue()
            self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
            update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
                0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
        else:
            # synchronous stats update
            update_stats_op = tf.cond(tf.greater_equal(
                self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
        self._update_stats_op = update_stats_op
        return update_stats_op
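Both tf.cond calls in this last example use tf.no_op as the fallback callable: in the asynchronous path nothing is dequeued while the queue is empty, and in the full-stats-init path stat accumulation is skipped until sgd_step passes the cold-start threshold. In each case no_op supplies the required "do nothing" branch without adding any computation.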