Python tensorflow module: assign_add() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.assign_add().
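
Every example below relies on the same primitive: tf.assign_add(ref, value) adds value to a mutable variable in place, and the returned tensor evaluates to the variable's value after the update. A minimal self-contained sketch of that behavior (TF 1.x graph mode; the counter variable here is illustrative, not taken from any of the projects below):

import tensorflow as tf

counter = tf.Variable(0, name='counter', trainable=False)
increment = tf.assign_add(counter, 1)  # evaluates to the post-update value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # 1
    print(sess.run(increment))  # 2

Running the returned op repeatedly is the step-counter pattern most of the snippets below build on.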

Project: baselines    Author: openai    | Project source | File source
def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt(tf.maximum(tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 1e-2))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | Project source | File source
def model(self, input_vectors, input_gene, input_variation, output_label, batch_size,
              embedding_size=EMBEDDINGS_SIZE,
              output_classes=9):
        logits, targets = doc2vec_prediction_model(input_vectors, input_gene, input_variation,
                                                   output_label, batch_size,
                                                   is_training=False, embedding_size=embedding_size,
                                                   output_classes=output_classes)

        loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
        self.global_step = training_util.get_or_create_global_step()
        global_step_increase = tf.assign_add(self.global_step, 1)
        self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                            trainable=False)
        self.accumulated_loss = tf.assign_add(self.accumulated_loss, tf.reduce_sum(loss))
        self.prediction = tf.nn.softmax(logits)
        self.metrics = metrics.single_label(self.prediction, targets, moving_average=False)
        steps = tf.cast(global_step_increase, dtype=tf.float32)
        tf.summary.scalar('loss', self.accumulated_loss / (steps * batch_size))
        return None
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | Project source | File source
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # model
        with slim.arg_scope(self.text_classification_model.model_arg_scope()):
            outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                           gene, variation, output_classes,
                                                           embeddings=embeddings,
                                                           batch_size=batch_size,
                                                           training=False)
        # loss
        targets = self.text_classification_model.targets(expected_labels, output_classes)
        loss = self.text_classification_model.loss(targets, outputs)
        self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                            trainable=False)
        self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
        step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
        step_increase = tf.assign_add(step, 1)
        self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
        tf.summary.scalar('loss', self.loss)
        # metrics
        self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
        return None
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | Project source | File source
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # global step
        self.global_step = training_util.get_or_create_global_step()
        self.global_step = tf.assign_add(self.global_step, 1)
        # model
        with tf.control_dependencies([self.global_step]):
            with slim.arg_scope(self.text_classification_model.model_arg_scope()):
                self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                                    gene, variation, output_classes,
                                                                    embeddings=embeddings,
                                                                    batch_size=batch_size,
                                                                    training=False)
        # restore only the trainable variables
        self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
        return self.outputs
Project: ngraph    Author: NervanaSystems    | Project source | File source
def test_ref_assign_add(self):
        # Currently ngraph and tf have different assign semantics:
        # eval(ng.assign(a, 1)) returns None, but eval(tf.assign(a, 1)) returns
        # a, which is 1.
        # TODO: fix this test after assign op / user_deps are fixed in ngraph
        # TODO: double assignments fail

        # tf placeholder
        a = tf.Variable(tf.constant(np.random.randn(2, 3), name="a"))
        b = tf.Variable(tf.constant(np.random.randn(2, 3), name="b"))
        init_op = tf.global_variables_initializer()
        a_update = tf.assign_add(a, b)

        # test
        tf_result = self.tf_run(a_update, tf_init_op=init_op)
        ng_result = self.ng_run(a)
        ng.testing.assert_allclose(tf_result, ng_result)
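
The return-value semantics described in the comment above are easy to check directly on the TF side; a minimal sketch (variable name illustrative): both tf.assign and tf.assign_add evaluate to the variable's post-update value.

import tensorflow as tf

a = tf.Variable(0.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.assign(a, 1.0)))      # 1.0 -- the updated value, not None
    print(sess.run(tf.assign_add(a, 1.0)))  # 2.0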
Project: relaax    Author: deeplearninc    | Project source | File source
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):  # "DilatedLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = tf.split(state, 2, axis=1)
            concat = self._linear([inputs, h], 4 * self._num_units, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(concat, 4, axis=1)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

            # update relevant cores
            timestep = tf.assign_add(self._timestep, 1)
            core_to_update = tf.mod(timestep, self._cores)

            updated_h = self._hold_mask[core_to_update] * h + self._dilated_mask[core_to_update] * new_h

            return updated_h, tf.concat([new_c, updated_h], axis=1)
Project: Super_TF    Author: Dhruv-Mohan    | Project source | File source
def construct_renorm_dict(self, Rmax, Dmax, R_Iter, D_Iter):
        rmax = tf.Variable(1.0, trainable=False, name='Rmax', dtype=tf.float32)
        rmin = tf.Variable(0.99, trainable=False, name='Rmin', dtype=tf.float32)
        dmax = tf.Variable(0.0, trainable=False, name='Dmax', dtype=tf.float32)
        update_rmax = tf.cond(self.global_step < R_Iter, self.assign_add(rmax, 1, Rmax, R_Iter), self.make_noop).op
        update_dmax = tf.cond(self.global_step < D_Iter, self.assign_add(dmax, 0, Dmax, D_Iter), self.make_noop).op
        update_rmin = tf.cond(self.global_step < R_Iter, self.assign_inv(rmin, rmax), self.make_noop).op

        tf.summary.scalar('rmax', rmax)
        tf.summary.scalar('rmin', rmin)
        tf.summary.scalar('dmax', dmax)

        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_rmax)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_dmax)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_rmin)

        self.renorm_dict = {'rmax': rmax, 'rmin': 0.0, 'dmax': dmax}
Project: tfdnn-kaldi    Author: dreaming-dog    | Project source | File source
def buildForwardGraph(self, batch_size, discriminative=False):
        """

        :param batch_size: Minibatch size. Currently unused. Using None.
        :param discriminative: True for discriminative pretraining (creates a graph with zero hidden layers). Default \
        value: False (creates a graph with the specified hidden layers)
        """
        with tf.variable_scope('forward_variables', reuse=False):
            self.input = tf.placeholder(tf.float32, (None, self.input_dim), 'input_nodes')
            self.output = tf.placeholder(tf.float32, (None, self.output_dim), 'output_nodes')
            inpt = self.input
            if not discriminative:
                inpt = self.__buildFullGraph__()
                self.layers.append(LinearLayer(self.layer_dims[-2], self.layer_dims[-1], inpt,
                                               str(len(self.layer_dims) - 2) + 'layerNet_output'))
            else:
                self.layers.append(
                    LinearLayer(self.layer_dims[0], self.layer_dims[-1], inpt, '0layerNet_output'))
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self.step_incr = tf.assign_add(self.global_step, 1)
Project: lsdc    Author: febert    | Project source | File source
def test_recovery(self):
    logdir = self._test_dir('test_recovery')
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Use a hook to save the model every step.  It also saves it at
      # the end.
      hooks = [basic_session_run_hooks.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))
Project: lsdc    Author: febert    | Project source | File source
def test_retry_on_aborted_error(self):
    # Tests that we silently retry on abort.  Note that this does not test
    # recovery as we do not use a CheckpointSaver in this test.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, tf.errors.AbortedError(None, None, 'Abort'))
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError.  The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
Project: lsdc    Author: febert    | Project source | File source
def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, StopIteration)
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)
      self.assertTrue(session.should_stop())
Project: lsdc    Author: febert    | Project source | File source
def test_regular_exception_pass_through_run(self):
    # Tests that regular exceptions just pass through a "with
    # MonitoredSession" block and set the session in stop mode.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(0, session.run(gstep))
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # This triggers the hook and raises the exception
          session.run(do_step)
          # We should not hit this
          self.assertFalse(True)
      self.assertTrue(hook.raised)
      self.assertTrue(session.should_stop())
Project: lsdc    Author: febert    | Project source | File source
def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions in "with body" are seen outside.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
Project: lsdc    Author: febert    | Project source | File source
def test_train_loss(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_summaries(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.scalar_summary('loss', loss_op)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_op,
          steps=1)
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir,
                             expected_graphs=[g],
                             expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)


# TODO(ispir): remove following tests after deprecated train.
Project: lsdc    Author: febert    | Project source | File source
def test_train_max_steps_is_not_incremental(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=15)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)
Project: lsdc    Author: febert    | Project source | File source
def test_train_summaries(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.scalar_summary('loss', loss_op)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          steps=1)
      self.assertEqual(2.0, loss)
      self._assert_summaries(
          self._output_dir,
          expected_graphs=[g],
          expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_chief_monitor(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.scalar_summary('loss', loss_op)
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          supervisor_is_chief=True, steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'All monitors must have been active.')
      self.assertTrue(chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'All monitors must have a step.')
Project: lsdc    Author: febert    | Project source | File source
def test_capture_variable(self):
    monitor = learn.monitors.CaptureVariable(
        var_name='my_assign_add:0', every_n=8, first_n=2)
    with tf.Graph().as_default() as g, self.test_session(g):
      var = tf.Variable(0.0, name='my_var')
      var.initializer.run()
      tf.assign_add(var, 1.0, name='my_assign_add')
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      self.assertEqual({
          0: 1.0,
          1: 2.0,
          2: 3.0,
          10: 4.0,
          18: 5.0,
          26: 6.0,
          29: 7.0,
      }, monitor.values)
Project: lsdc    Author: febert    | Project source | File source
def _get_train_ops(self, features, _):
    (_,
     _,
     losses,
     training_op) = clustering_ops.KMeans(
         self._parse_tensor_or_dict(features),
         self._num_clusters,
         self._training_initial_clusters,
         self._distance_metric,
         self._use_mini_batch,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
     ).training_graph()
    incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
    self._loss = tf.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], self._loss)
    return training_op, self._loss
Project: lsdc    Author: febert    | Project source | File source
def test_train(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=1)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir, writer, expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def])
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_loss(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, writer, expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def])
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_summaries(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_op,
          steps=1)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir, writer,
                             expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def],
                             expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_override_saver(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      saver = tf.test.mock.Mock()
      tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=1)
      self.assertEqual(2.0, loss)
      self._assert_ckpt(self._output_dir, False)
      self.assertTrue(saver.build.called)
      self.assertEqual(1, saver.save.call_count)


# TODO(ispir): remove following tests after deprecated train.
Project: lsdc    Author: febert    | Project source | File source
def test_train_max_steps_is_not_incremental(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=15)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)
Project: lsdc    Author: febert    | Project source | File source
def test_train_loss(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op,
          loss_op=loss_var.value(), steps=6)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_summaries(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          steps=1)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(
          self._output_dir,
          expected_graphs=[g],
          expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)
Project: lsdc    Author: febert    | Project source | File source
def test_train_chief_monitor(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          supervisor_is_chief=True, steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'All monitors must have been active.')
      self.assertTrue(chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'All monitors must have a step.')
Project: lsdc    Author: febert    | Project source | File source
def test_capture_variable(self):
    monitor = learn.monitors.CaptureVariable(
        var_name='my_assign_add:0', every_n=8, first_n=2)
    with tf.Graph().as_default() as g, self.test_session(g):
      var = tf.Variable(0.0, name='my_var')
      var.initializer.run()
      tf.assign_add(var, 1.0, name='my_assign_add')
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      self.assertEqual({
          0: 1.0,
          1: 2.0,
          2: 3.0,
          10: 4.0,
          18: 5.0,
          26: 6.0,
          29: 7.0,
      }, monitor.values)
Project: lsdc    Author: febert    | Project source | File source
def _get_train_ops(self, features, _):
    (_,
     _,
     losses,
     training_op) = clustering_ops.KMeans(
         self._parse_tensor_or_dict(features),
         self._num_clusters,
         self._training_initial_clusters,
         self._distance_metric,
         self._use_mini_batch,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
     ).training_graph()
    incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
    self._loss = tf.reduce_sum(losses)
    tf.scalar_summary('loss/raw', self._loss)
    training_op = with_dependencies([training_op, incr_step], self._loss)
    return training_op, self._loss
Project: Sing_Par    Author: wanghm92    | Project source | File source
def _dense_moving_average(self, x_tm1, a_t, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[])
    t = tf.assign_add(tm1, 1)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t = beta_t * (1-beta**tm1) / (1-beta**t)
    else:
      beta_t = tm1 / t
    b_t = tf.assign(b_tm1, beta_t*b_tm1)
    b_t = tf.assign_add(b_t, (1-beta_t)*a_t)
    return b_t, t

  #=============================================================
Project: openai-rl    Author: morgangiraud    | Project source | File source
def tabular_learning_with_lr(init_lr, decay_steps, Qs_t, states_t, actions_t, targets):
    reusing_scope = tf.get_variable_scope().reuse

    state_action_pairs = tf.stack([states_t, actions_t], 1)
    estimates = tf.gather_nd(Qs_t, state_action_pairs)
    err_estimates = targets - estimates
    loss = tf.reduce_mean(err_estimates)

    global_step = tf.Variable(0, trainable=False, name="global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
    lr = tf.train.exponential_decay(tf.constant(init_lr, dtype=tf.float32), global_step, decay_steps, 0.5, staircase=True)
    if reusing_scope is False:
        tf.summary.scalar('lr', lr)
    inc_global_step = global_step.assign_add(1)
    with tf.control_dependencies([inc_global_step]):
        updates = lr * err_estimates
        train_op = tf.scatter_nd_add(Qs_t, state_action_pairs, updates)

    return loss, train_op
Project: Parser-v1    Author: tdozat    | Project source | File source
def _dense_moving_average(self, x_tm1, a_t, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[])
    t = tf.assign_add(tm1, 1)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t = beta_t * (1-beta**tm1) / (1-beta**t)
    else:
      beta_t = tm1 / t
    b_t = tf.assign(b_tm1, beta_t*b_tm1)
    b_t = tf.assign_add(b_t, (1-beta_t)*a_t)
    return b_t, t

  #=============================================================
Project: acktr    Author: emansim    | Project source | File source
def apply_gradients(self, grads):
        coldOptim = tf.train.MomentumOptimizer(
            self._cold_lr * (1. - self._momentum), self._momentum)

        def coldSGDstart():
            sgd_step_op = tf.assign_add(self.sgd_step, 1)
            coldOptim_op = coldOptim.apply_gradients(grads)
            if KFAC_DEBUG:
                with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                    sgd_step_op = tf.Print(
                        sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
            return tf.group(*[sgd_step_op, coldOptim_op])

        kfacOptim_op, qr = self.apply_gradients_kfac(grads)

        def warmKFACstart():
            return kfacOptim_op

        return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
Project: Tensorflow_Practice    Author: Ram81    | Project source | File source
def model(features, labels, mode):

    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)

    y = W * features['x'] + b

    # loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))

    # training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss), tf.assign_add(global_step, 1))

    # ModelFnOps connects the subgraphs we built
    return tf.contrib.learn.ModelFnOps(mode=mode, predictions=y, loss=loss, train_op=train)
Project: rascal-tensorflow    Author: stayrascal    | Project source | File source
def model(features, labels, mode, params):
    with tf.device("/cpu:0"):
        # Build a linear model and predict values
        W = tf.get_variable("W", [1], dtype=tf.float64)
        b = tf.get_variable("b", [1], dtype=tf.float64)
        y = W * features[:, 0] + b
        # Loss sub-graph
        loss = tf.reduce_sum(tf.square(y - labels))
        # Training sub-graph
        global_step = tf.train.get_global_step()
        optimizer = tf.train.GradientDescentOptimizer(0.01)
        train = tf.group(optimizer.minimize(loss),
                         tf.assign_add(global_step, 1))
        # ModelFnOps connects subgraphs we built to the
        # appropriate functionality.
        return tf.contrib.learn.estimators.model_fn.ModelFnOps(
            mode=mode, predictions=y,
            loss=loss,
            train_op=train)
Project: UnstableParser    Author: tdozat    | Project source | File source
def _dense_moving_average(self, x_tm1, a_t, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[])
    t = tf.assign_add(tm1, 1)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t = beta_t * (1-beta**tm1) / (1-beta**t)
    else:
      beta_t = tm1 / t
    b_t = tf.assign(b_tm1, beta_t*b_tm1)
    b_t = tf.assign_add(b_t, (1-beta_t)*a_t)
    return b_t, t

  #=============================================================
Project: gymmeforce    Author: lgvaz    | Project source | File source
def __init__(self, env_config, grad_clip_norm=None, log_dir='logs/examples',
                 **kwargs):
        self.env_config = env_config
        self.grad_clip_norm = grad_clip_norm
        self.log_dir = log_dir
        self.placeholders = {}
        self.training_op = None
        self.merged = None
        self._saver = None
        self._writer = None
        self.callbacks = []

        self.global_step_sy = tf.Variable(1, name='global_step', trainable=False)
        placeholders_config = {'add_to_global_step': [[], tf.int32]}
        self._create_placeholders(placeholders_config)
        self.increase_global_step_op = tf.assign_add(
            self.global_step_sy,
            self.placeholders['add_to_global_step'],
            name='increase_global_step')
Project: deep_rl    Author: domluna    | Project source | File source
def __init__(self,
            graph,
            exploration_steps,
            total_steps,
            gamma,
            a3c_update_interval,
            action_sampler):
        """
        graph should have the placeholders called "states", "actions",
        and "returns". It should also have operations called "loss_op", "train_op",
        "probs", and "value".
        """

        self.graph = graph
        self.gamma = gamma
        self.a3c_update_interval = a3c_update_interval
        self.action_sampler = action_sampler

        self.T = graph.get_collection("global_step")[0]
        self.exploration_steps = exploration_steps
        self.total_steps = total_steps
        self.incr_T = tf.assign_add(self.T, 1)
Project: EWC    Author: stokesj    | Project source | File source
def create_fisher_ops(self):
        self.fisher_diagonal = self.bias_shaped_variables(name='bias_grads2', c=0.0, trainable=False) +\
                               self.weight_shaped_variables(name='weight_grads2', c=0.0, trainable=False)

        self.fisher_accumulate_op = [tf.assign_add(f1, f2) for f1, f2 in zip(self.fisher_diagonal, self.fisher_minibatch)]
        scale = 1 / float(self.ewc_batches * self.ewc_batch_size)
        self.fisher_full_batch_average_op = [tf.assign(var, scale * var) for var in self.fisher_diagonal]
        self.fisher_zero_op = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.fisher_diagonal]
Project: fold    Author: tensorflow    | Project source | File source
def test_some_initialized(self):
    with self.test_session() as sess:
      x = tf.Variable(tf.zeros([]))
      self.assertEqual([x], tdc._init_uninitialized(sess))
      self.assertEqual(0, sess.run(x))
      y = tf.assign_add(x, 1)
      self.assertEqual([], tdc._init_uninitialized(sess))
      self.assertEqual(1, sess.run(y))
      self.assertEqual([], tdc._init_uninitialized(sess))
      # If we had done initialize_all_variables we'd see 1.
      self.assertEqual(2, sess.run(y))
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def loss(loss_value):
  """Calculates aggregated mean loss."""
  total_loss = tf.Variable(0.0, False)
  loss_count = tf.Variable(0, False)
  total_loss_update = tf.assign_add(total_loss, loss_value)
  loss_count_update = tf.assign_add(loss_count, 1)
  loss_op = total_loss / tf.cast(loss_count, tf.float32)
  return [total_loss_update, loss_count_update], loss_op
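
The helper above returns the accumulation ops separately from the read op. A hedged usage sketch, assuming the loss() definition above and a scalar per-batch loss fed through a placeholder (names and values illustrative):

import tensorflow as tf

batch_loss = tf.placeholder(tf.float32, shape=[])
update_ops, mean_loss_op = loss(batch_loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for value in [4.0, 2.0, 3.0]:  # hypothetical per-batch loss values
        sess.run(update_ops, feed_dict={batch_loss: value})
    print(sess.run(mean_loss_op))  # 3.0 -- total 9.0 over 3 batches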
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def accuracy(logits, labels):
  """Calculates aggregated accuracy."""
  is_correct = tf.nn.in_top_k(logits, labels, 1)
  correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
  incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
  correct_count = tf.Variable(0, False)
  incorrect_count = tf.Variable(0, False)
  correct_count_update = tf.assign_add(correct_count, correct)
  incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
  accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
      correct_count + incorrect_count, tf.float32)
  return [correct_count_update, incorrect_count_update], accuracy_op
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def loss(loss_value):
  """Calculates aggregated mean loss."""
  total_loss = tf.Variable(0.0, False)
  loss_count = tf.Variable(0, False)
  total_loss_update = tf.assign_add(total_loss, loss_value)
  loss_count_update = tf.assign_add(loss_count, 1)
  loss_op = total_loss / tf.cast(loss_count, tf.float32)
  return [total_loss_update, loss_count_update], loss_op
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def accuracy(logits, labels):
  """Calculates aggregated accuracy."""
  is_correct = tf.nn.in_top_k(logits, labels, 1)
  correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
  incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
  correct_count = tf.Variable(0, False)
  incorrect_count = tf.Variable(0, False)
  correct_count_update = tf.assign_add(correct_count, correct)
  incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
  accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
      correct_count + incorrect_count, tf.float32)
  return [correct_count_update, incorrect_count_update], accuracy_op
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def loss(loss_value):
  """Calculates aggregated mean loss."""
  total_loss = tf.Variable(0.0, False)
  loss_count = tf.Variable(0, False)
  total_loss_update = tf.assign_add(total_loss, loss_value)
  loss_count_update = tf.assign_add(loss_count, 1)
  loss_op = total_loss / tf.cast(loss_count, tf.float32)
  return [total_loss_update, loss_count_update], loss_op
Project: keras    Author: GeekLiB    | Project source | File source
def update_add(x, increment):
    return tf.assign_add(x, increment)
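
The Keras backend function above is a thin wrapper over tf.assign_add; a hedged usage sketch (variable and values illustrative):

import tensorflow as tf

x = tf.Variable(10.0)
op = update_add(x, 5.0)  # the wrapper defined above

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(op))  # 15.0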
Project: baselines    Author: openai    | Project source | File source
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
        updateOps = []
        # obtain the stats var list
        for stats_var in statsUpdates:
            stats_new = statsUpdates[stats_var]
            if accumulate:
                # simple superbatch averaging
                update_op = tf.assign_add(
                    stats_var, accumulateCoeff * stats_new, use_locking=True)
            else:
                # exponential running averaging
                update_op = tf.assign(
                    stats_var, stats_var * self._stats_decay, use_locking=True)
                update_op = tf.assign_add(
                    update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
            updateOps.append(update_op)

        with tf.control_dependencies(updateOps):
            stats_step_op = tf.assign_add(self.stats_step, 1)

        if KFAC_DEBUG:
            stats_step_op = (tf.Print(stats_step_op,
                                      [tf.convert_to_tensor('step:'),
                                       self.global_step,
                                       tf.convert_to_tensor('fac step:'),
                                       self.factor_step,
                                       tf.convert_to_tensor('sgd step:'),
                                       self.sgd_step,
                                       tf.convert_to_tensor('Accum:'),
                                       tf.convert_to_tensor(accumulate),
                                       tf.convert_to_tensor('Accum coeff:'),
                                       tf.convert_to_tensor(accumulateCoeff),
                                       tf.convert_to_tensor('stat step:'),
                                       self.stats_step, updateOps[0], updateOps[1]]))
        return [stats_step_op, ]
Project: baselines    Author: openai    | Project source | File source
def applyStatsEigen(self, eigen_list):
        updateOps = []
        print(('updating %d eigenvalue/vectors' % len(eigen_list)))
        for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
            stats_eigen_var = self.eigen_reverse_lookup[mark]
            updateOps.append(
                tf.assign(stats_eigen_var, tensor, use_locking=True))

        with tf.control_dependencies(updateOps):
            factor_step_op = tf.assign_add(self.factor_step, 1)
            updateOps.append(factor_step_op)
            if KFAC_DEBUG:
                updateOps.append(tf.Print(tf.constant(
                    0.), [tf.convert_to_tensor('updated kfac factors')]))
        return updateOps
Project: baselines    Author: openai    | Project source | File source
def apply_gradients(self, grads):
        coldOptim = tf.train.MomentumOptimizer(
            self._cold_lr, self._momentum)

        def coldSGDstart():
            sgd_grads, sgd_var = zip(*grads)

            if self.max_grad_norm != None:
                sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads,self.max_grad_norm)

            sgd_grads = list(zip(sgd_grads,sgd_var))

            sgd_step_op = tf.assign_add(self.sgd_step, 1)
            coldOptim_op = coldOptim.apply_gradients(sgd_grads)
            if KFAC_DEBUG:
                with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                    sgd_step_op = tf.Print(
                        sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
            return tf.group(*[sgd_step_op, coldOptim_op])

        kfacOptim_op, qr = self.apply_gradients_kfac(grads)

        def warmKFACstart():
            return kfacOptim_op

        return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr