Python tensorflow.python.ops.control_flow_ops module: group() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.control_flow_ops.group().
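
Before the project samples, a minimal, self-contained sketch of the call itself (the variable names are illustrative and not taken from any project below): control_flow_ops.group() accepts any number of ops or tensors and returns a single Operation that, when run, executes all of its inputs. It produces no output value of its own, which is why the optimizer examples below return it directly as their training op. TF1 graph mode is assumed, matching the era of the snippets.

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops, state_ops

counter = tf.Variable(0.0)
total = tf.Variable(0.0)
increment = state_ops.assign_add(counter, 1.0)   # op 1: counter += 1
accumulate = state_ops.assign_add(total, 2.0)    # op 2: total += 2
# One Operation that runs both assignments when executed; it has no output value.
update_op = control_flow_ops.group(increment, accumulate, name="update_all")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_op)
    print(sess.run([counter, total]))  # [1.0, 2.0]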

Project: DNGPU | Author: LUMII-Syslab
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        # clip gradient so that each value exceeds its previous maximum by no more than clip_multiplier
        if self.clip_gradients:
            clipVal = v * clip_multiplier_t + clip_epsilon_t
            grad = clip_ops.clip_by_value(grad, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t

        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)
        # v := max(beta2 * v , abs(grad))
        v_t = state_ops.assign(v, math_ops.maximum(beta2_t * v, math_ops.abs(grad)), use_locking=self._use_locking)
        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use bias-correction term for the first moment; it does not give observable benefit
        var_update = state_ops.assign_sub(var, lr_t * m_t / (v_t+epsilon_t), use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, v_t, m_t])
Project: lsdc | Author: febert
def insert(self, keys, values, name=None):
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
Project: lsdc | Author: febert
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(
          cancel_pending_enqueues, "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(
          cancel_pending_enqueues, "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
Project: lsdc | Author: febert
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      variables.initialize_local_variables()
      data_flow_ops.initialize_all_tables()
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(init_op=control_flow_ops.group(
          variables.initialize_local_variables(),
          data_flow_ops.initialize_all_tables()),
                  default_graph_signature=default_graph_signature,
                  named_graph_signatures=named_graph_signatures,
                  assets_collection=ops.get_collection(
                      ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
Project: lsdc | Author: febert
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    with ops.name_scope(name, self._name) as name:
      update_op = self._opt.apply_gradients(
          grads_and_vars, global_step=global_step)
      clip_update_ops = []
      with ops.control_dependencies([update_op]):
        for grad, var in grads_and_vars:
          if grad is None or var not in self._vars_to_clip_dims:
            continue
          with ops.name_scope("clip_" + var.op.name):
            if isinstance(grad, ops.Tensor):
              clip_update_ops.append(self._clip_dense(var))
            else:
              clip_update_ops.append(self._clip_sparse(grad, var))

      # In case no var was clipped, still need to run the update_op.
      return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)
Project: lsdc | Author: febert
def insert(self, keys, values, name=None):
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
Project: lsdc | Author: febert
def _extract_metric_update_ops(self, eval_dict):
    """Separate update operations from metric value operations."""
    update_ops = []
    value_ops = {}
    for name, metric_ops in six.iteritems(eval_dict):
      if isinstance(metric_ops, (list, tuple)):
        if len(metric_ops) == 2:
          value_ops[name] = metric_ops[0]
          update_ops.append(metric_ops[1])
        else:
          logging.warning(
              'Ignoring metric {}. It returned a list|tuple with len {}, '
              'expected 2'.format(name, len(metric_ops)))
          value_ops[name] = metric_ops
      else:
        value_ops[name] = metric_ops

    if update_ops:
      update_ops = control_flow_ops.group(*update_ops)
    else:
      update_ops = None

    return update_ops, value_ops
Project: lsdc | Author: febert
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      variables.local_variables_initializer()
      data_flow_ops.initialize_all_tables()
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(init_op=control_flow_ops.group(
          variables.local_variables_initializer(),
          data_flow_ops.initialize_all_tables()),
                  default_graph_signature=default_graph_signature,
                  named_graph_signatures=named_graph_signatures,
                  assets_collection=ops.get_collection(
                      ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
Project: lsdc | Author: febert
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    train_op = self._optimizer.apply_gradients(
        grads_and_vars, global_step=global_step, name=name)
    var_list = [x[1] for x in grads_and_vars if x[0] is not None]
    self._variable_map = {}
    if self._sequential_update:
      with ops.control_dependencies([train_op]):
        ma_op = self._ema.apply(var_list)
    else:
      ma_op = self._ema.apply(var_list)

    for v in var_list:
      v_avg = self._ema.average(v)
      self._variable_map[v.op.name] = v_avg
      self._variable_map[v_avg.op.name] = v
    return control_flow_ops.group(train_op, ma_op, name="train_with_avg")
Project: lsdc | Author: febert
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    with ops.name_scope(name, self._name) as name:
      update_op = self._opt.apply_gradients(
          grads_and_vars, global_step=global_step)
      clip_update_ops = []
      with ops.control_dependencies([update_op]):
        for grad, var in grads_and_vars:
          if grad is None or var not in self._vars_to_clip_dims:
            continue
          with ops.name_scope("clip_" + var.op.name):
            if isinstance(grad, ops.Tensor):
              clip_update_ops.append(self._clip_dense(var))
            else:
              clip_update_ops.append(self._clip_sparse(grad, var))

      # In case no var was clipped, still need to run the update_op.
      return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)
Project: ndm | Author: jurcicek
def _apply_dense(self, grad, var):
        lr = (self._lr_t *
              math_ops.sqrt(1 - self._beta2_power)
              / (1 - self._beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - self._beta1_t)
        m_t = m * self._beta1_t
        m_t = m_t + m_scaled_g_values
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = tf.pow(grad, 2) * (1 - self._beta2_t)
        v_t = v * self._beta2_t
        v_t = v_t + v_scaled_g_values
        v_sqrt = tf.pow(v_t, self._pow_t)
        var_update = state_ops.assign_sub(var,
                                          lr * m_t / (v_sqrt + self._epsilon_t),
                                          use_locking=self._use_locking)
        # regularization
        var_update = state_ops.assign_sub(var_update,
                                          self._dense_regularization * var,
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t])
Project: iaf | Author: openai
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = state_ops.assign_sub(var, lr_t * g_t)
        return control_flow_ops.group(*[var_update, m_t, v_t])
Project: Optimization | Author: tdozat
def clear_slots(self, var_list=None):
    """"""

    updates = []
    if var_list is None:
      var_list = variables.trainable_variables()
    for var in var_list:
      if self._mu > 0:
        m = self.get_slot(var, 'm')
        updates.append(state_ops.assign(m, m*0, use_locking=self._use_locking))
        tm1_m = self.get_slot(var, 'm/tm1')
        updates.append(state_ops.assign(tm1_m, tm1_m*0, use_locking=self._use_locking))
      if self._ups > 0:
        v = self.get_slot(var, 'v')
        updates.append(state_ops.assign(v, v*0, use_locking=self._use_locking))
        tm1_v = self.get_slot(var, 'v/tm1')
        updates.append(state_ops.assign(tm1_v, tm1_v*0, use_locking=self._use_locking))
    return control_flow_ops.group(*updates)
Project: Optimization | Author: tdozat
def clear_slots(self, var_list=None):
    """"""

    updates = []
    if var_list is None:
      var_list = variables.trainable_variables()
    for var in var_list:
      if self._mu > 0:
        m = self.get_slot(var, 'm')
        updates.append(state_ops.assign(m, m*0, use_locking=self._use_locking))
        tm1_m = self.get_slot(var, 'm/tm1')
        updates.append(state_ops.assign(tm1_m, tm1_m*0, use_locking=self._use_locking))
      if self._ups > 0:
        v = self.get_slot(var, 'v')
        updates.append(state_ops.assign(v, v*0, use_locking=self._use_locking))
        tm1_v = self.get_slot(var, 'v/tm1')
        updates.append(state_ops.assign(tm1_v, tm1_v*0, use_locking=self._use_locking))
    return control_flow_ops.group(*updates)

#***************************************************************
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def insert(self, keys, values, name=None):
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def test_train_loss(self):
    with ops.Graph().as_default() as g, self.test_session(g):
      variables_lib.create_global_step()
      loss_var = variables_lib.local_variable(10.0)
      train_op = control_flow_ops.group(
          state_ops.assign_add(variables_lib.get_global_step(), 1),
          state_ops.assign_add(loss_var, -1.0))
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      self.assertEqual(4.0, loss)
      self._assert_summaries(
          self._output_dir,
          writer,
          expected_graphs=[g],
          expected_meta_graphs=None)
      self._assert_ckpt(self._output_dir, True)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _combine_train(self, all_model_fn_ops, train_op_fn):
    """Combines list of ModelFnOps for training.

    Args:
      all_model_fn_ops: list of ModelFnOps for the individual heads.
      train_op_fn: Function to create train op. See `create_model_fn_ops`
          documentation for more details.

    Returns:
      ModelFnOps that combines all the heads.
    """
    losses = []
    additional_train_ops = []
    for m in all_model_fn_ops:
      losses.append(m.loss)
      additional_train_ops.append(m.train_op)
    loss = self._loss_combiner(losses)

    train_op = train_op_fn(loss)
    train_op = control_flow_ops.group(train_op, *additional_train_ops)
    return model_fn.ModelFnOps(
        mode=model_fn.ModeKeys.TRAIN,
        loss=loss,
        train_op=train_op)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _train_op(loss,
              labels,
              train_op_fn,
              centered_bias=None,
              logits_dimension=None,
              loss_fn=None):
  """Returns op for the training step."""
  if centered_bias is not None:
    centered_bias_step = _centered_bias_step(centered_bias, logits_dimension,
                                             labels, loss_fn)
  else:
    centered_bias_step = None
  with ops.name_scope(None, "train_op", (loss, labels)):
    train_op = train_op_fn(loss)
    if centered_bias_step is not None:
      train_op = control_flow_ops.group(train_op, centered_bias_step)
    return train_op
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _extract_metric_update_ops(self, eval_dict):
    """Separate update operations from metric value operations."""
    update_ops = []
    value_ops = {}
    for name, metric_ops in six.iteritems(eval_dict):
      if isinstance(metric_ops, (list, tuple)):
        if len(metric_ops) == 2:
          value_ops[name] = metric_ops[0]
          update_ops.append(metric_ops[1])
        else:
          logging.warning(
              'Ignoring metric {}. It returned a list|tuple with len {}, '
              'expected 2'.format(name, len(metric_ops)))
          value_ops[name] = metric_ops
      else:
        value_ops[name] = metric_ops

    if update_ops:
      update_ops = control_flow_ops.group(*update_ops)
    else:
      update_ops = None

    return update_ops, value_ops
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      variables.local_variables_initializer()
      data_flow_ops.tables_initializer()
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(init_op=control_flow_ops.group(
          variables.local_variables_initializer(),
          data_flow_ops.tables_initializer()),
                  default_graph_signature=default_graph_signature,
                  named_graph_signatures=named_graph_signatures,
                  assets_collection=ops.get_collection(
                      ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return state_ops.scatter_update(factor[0], indices, values).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = math_ops.cast(assignments, dtypes.int32)
      sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
                                                    num_shards)
      sharded_values = data_flow_ops.dynamic_partition(values, assignments,
                                                       num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(
            state_ops.scatter_update(factor[i], sharded_ids[i], sharded_values[
                i]))
      return control_flow_ops.group(*updates)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def benchmarkTfRNNLSTMBlockCellTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/gpu:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units)  # pylint: disable=cell-var-from-loop

        multi_cell = core_rnn_cell_impl.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
                          (config_name, self._GetConfigDesc(config)))
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    train_op = self._optimizer.apply_gradients(
        grads_and_vars, global_step=global_step, name=name)
    var_list = [x[1] for x in grads_and_vars if x[0] is not None]
    self._variable_map = {}
    if self._sequential_update:
      with ops.control_dependencies([train_op]):
        ma_op = self._ema.apply(var_list)
    else:
      ma_op = self._ema.apply(var_list)

    for v in var_list:
      v_avg = self._ema.average(v)
      self._variable_map[v.op.name] = v_avg
      self._variable_map[v_avg.op.name] = v
    return control_flow_ops.group(train_op, ma_op, name="train_with_avg")
Project: cloudml-samples | Author: GoogleCloudPlatform
def main_op():
  init_local = variables.local_variables_initializer()
  init_tables = lookup_ops.tables_initializer()
  return control_flow_ops.group(init_local, init_tables)
Project: DNGPU | Author: LUMII-Syslab
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        v_slice = array_ops.gather(v, grad.indices)

        # clip gradient so that each value exceeds its previous maximum by no more than clip_multiplier
        clipped_values = grad.values
        if self.clip_gradients:
            clipVal = v_slice * clip_multiplier_t + clip_epsilon_t
            clipped_values = clip_ops.clip_by_value(grad.values, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t_values = beta1_t * array_ops.gather(m, grad.indices) + (1 - beta1_t) * clipped_values
        m_t = state_ops.scatter_update(m, grad.indices, m_t_values, use_locking=self._use_locking)

        # v := max(beta2 * v , abs(grad))
        v_t_values = math_ops.maximum(beta2_t * v_slice, math_ops.abs(clipped_values))
        v_t = state_ops.scatter_update(v, grad.indices, v_t_values, use_locking=self._use_locking)

        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use bias-correction term for the first moment; it does not give observable benefit
        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * m_t_values / (v_t_values + epsilon_t),
                                           use_locking=self._use_locking)
        return control_flow_ops.group(var_update, v_t, m_t)
Project: deep-learning | Author: lbkchen
def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.
    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss
Project: LIE | Author: EmbraceLife
def __init__(self, inputs, outputs, updates=None, name=None,
                   **session_kwargs):
        updates = updates or []
        if not isinstance(inputs, (list, tuple)):
          raise TypeError('`inputs` to a TensorFlow backend function '
                          'should be a list or tuple.')
        if not isinstance(outputs, (list, tuple)):
          raise TypeError('`outputs` of a TensorFlow backend function '
                          'should be a list or tuple.')
        if not isinstance(updates, (list, tuple)):
          raise TypeError('`updates` in a TensorFlow backend function '
                          'should be a list or tuple.')
        self.inputs = list(inputs)
        self.outputs = list(outputs)
        with ops.control_dependencies(self.outputs):
          updates_ops = []
          for update in updates:
            if isinstance(update, tuple):
              p, new_p = update
              updates_ops.append(state_ops.assign(p, new_p))
            else:
              # assumed already an op
              updates_ops.append(update)
          self.updates_op = control_flow_ops.group(*updates_ops)
        self.name = name
        self.session_kwargs = session_kwargs
Project: LIE | Author: EmbraceLife
def save_weights(self, filepath, overwrite=True):
            """Dumps all layer weights to a HDF5 file.

            The weight file has:
                - `layer_names` (attribute), a list of strings
                    (ordered names of model layers).
                - For every layer, a `group` named `layer.name`
                    - For every such layer group, a group attribute `weight_names`,
                        a list of strings
                        (ordered names of weights tensor of the layer).
                    - For every weight in the layer, a dataset
                        storing the weight value, named after the weight tensor.

            Arguments:
                filepath: String, path to the file to save the weights to.
                overwrite: Whether to silently overwrite any existing file at the
                    target location, or provide the user with a manual prompt.

            Raises:
                ImportError: If h5py is not available.
            """
            if h5py is None:
              raise ImportError('`save_weights` requires h5py.')
            # If file exists and should not be overwritten:
            if not overwrite and os.path.isfile(filepath):
              proceed = ask_to_proceed_with_overwrite(filepath)
              if not proceed:
                return
            f = h5py.File(filepath, 'w')
            save_weights_to_hdf5_group(f, self.layers)
            f.flush()
            f.close()

          # by_name = True, most used in transfer learning
Project: lsdc | Author: febert
def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    _assert_float32(features)
    _assert_float32(labels)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss
Project: lsdc | Author: febert
def _get_local_init_op():
  local_init_op = _get_first_op_from_collection(
      ops.GraphKeys.LOCAL_INIT_OP)
  if local_init_op is None:
    op_list = [variables.initialize_local_variables(),
               data_flow_ops.initialize_all_tables()]
    if op_list:
      local_init_op = control_flow_ops.group(*op_list)
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  return local_init_op
Project: lsdc | Author: febert
def _default_local_init_op():
    return control_flow_ops.group(variables.initialize_local_variables(),
                                  data_flow_ops.initialize_all_tables())
Project: lsdc | Author: febert
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Project: lsdc | Author: febert
def update_weights(self, train_op):
    """Updates the model weights.

    This function must be called on at least one worker after `minimize`.
    In distributed training this call can be omitted on non-chief workers to
    speed up training.

    Args:
      train_op: The operation returned by the `minimize` call.

    Returns:
      An Operation that updates the model weights.
    """
    with ops.control_dependencies([train_op]):
      update_ops = []
      # Copy over unshrinked weights to user provided variables.
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var, slot_var in zip(self._variables[name],
                                 self._slots['unshrinked_' + name]):
          update_ops.append(var.assign(slot_var))

    # Apply proximal step.
    with ops.control_dependencies(update_ops):
      update_ops = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          with ops.device(var.device):
            # pylint: disable=protected-access
            update_ops.append(
                gen_sdca_ops._sdca_shrink_l1(
                    self._convert_n_to_tensor(
                        [var], as_ref=True),
                    l1=self._symmetric_l1_regularization(),
                    l2=self._symmetric_l2_regularization()))
      return control_flow_ops.group(*update_ops)
Project: lsdc | Author: febert
def _train_op(self, features, labels, train_op_fn, logits):
    """Returns op for the training step."""
    loss = self._training_loss(features, labels, logits)
    train_op = train_op_fn(loss)

    if self._enable_centered_bias:
      centered_bias_step = [_centered_bias_step(
          self.logits_dimension,
          self._centered_bias_weight_collection,
          labels,
          self._train_loss_fn)]
      train_op = control_flow_ops.group(train_op, *centered_bias_step)

    return train_op
Project: lsdc | Author: febert
def _train_op(self, features, labels, train_op_fn, logits):
    """Returns op for the training step."""
    loss = self._training_loss(features, labels, logits)
    train_op = train_op_fn(loss)

    if self._enable_centered_bias:
      centered_bias_step = [_centered_bias_step(
          self.logits_dimension,
          self._centered_bias_weight_collection,
          labels,
          self._train_loss_fn)]
      train_op = control_flow_ops.group(train_op, *centered_bias_step)

    return train_op
Project: lsdc | Author: febert
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Project: Machine-Learning | Author: sfeng15
def _AddShardedRestoreOps(self, filename_tensor, per_device,
                            restore_sequentially, reshape):
    """Add Ops to save variables from multiple devices.

    Args:
      filename_tensor: Tensor for the path of the file to load.
      per_device: A list of (device, _VarToSave) pairs, as
        returned by _GroupByDevices().
      restore_sequentially: True if we want to restore variables sequentially
        within a shard.
      reshape: True if we want to reshape loaded tensors to the shape of
        the corresponding variable.

    Returns:
      An Operation that restores the variables.
    """
    sharded_restores = []
    for shard, (device, vars_to_save) in enumerate(per_device):
      with ops.device(device):
        sharded_restores.append(self._AddRestoreOps(
            filename_tensor,
            vars_to_save,
            restore_sequentially,
            reshape,
            preferred_shard=shard,
            name="restore_shard"))
    return control_flow_ops.group(*sharded_restores, name="restore_all")
Project: ndm | Author: jurcicek
def _apply_sparse(self, grad, var):
        lr = (self._lr_t *
              math_ops.sqrt(1 - self._beta2_power)
              / (1 - self._beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad.values * (1 - self._beta1_t)
        m_t = state_ops.assign(m, m * self._beta1_t,
                               use_locking=self._use_locking)

        m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                                    use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad.values * grad.values) * (1 - self._beta2_t)
        v_t = state_ops.assign(v, v * self._beta2_t, use_locking=self._use_locking)
        v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                                    use_locking=self._use_locking)
        v_sqrt = tf.pow(v_t, self._pow_t)
        var_update = state_ops.assign_sub(var,
                                          lr * m_t / (v_sqrt + self._epsilon_t),
                                          use_locking=self._use_locking)
        # regularization
        var_update = state_ops.assign_sub(var_update,
                                          self._sparse_regularization * var,
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t])
Project: ndm | Author: jurcicek
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.device(self._beta1_power.device):
                update_beta1 = self._beta1_power.assign(
                        self._beta1_power * self._beta1_t,
                        use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                        self._beta2_power * self._beta2_t,
                        use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                      name=name_scope)
Project: ndm | Author: jurcicek
def _apply_dense(self, grad, var):
        lr = (self._lr_t *
              math_ops.sqrt(1 - self._beta2_power)
              / (1 - self._beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - self._beta1_t)
        m_t = m * self._beta1_t
        m_t = m_t + m_scaled_g_values
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        #######################################################################################
        v_scaled_g_values = tf.pow(grad - m_t, 2) * (1 - self._beta2_t)
        #######################################################################################
        v_t = v * self._beta2_t
        v_t = v_t + v_scaled_g_values
        v_sqrt = tf.pow(v_t, self._pow_t)
        var_update = state_ops.assign_sub(var,
                                          lr * m_t / (v_sqrt + self._epsilon_t),
                                          use_locking=self._use_locking)
        # regularization
        var_update = state_ops.assign_sub(var_update,
                                          self._dense_regularization * var,
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t])
Project: ndm | Author: jurcicek
def _apply_sparse(self, grad, var):
        lr = (self._lr_t *
              math_ops.sqrt(1 - self._beta2_power)
              / (1 - self._beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad.values * (1 - self._beta1_t)
        m_t = state_ops.assign(m, m * self._beta1_t,
                               use_locking=self._use_locking)

        m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                                    use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad.values * grad.values) * (1 - self._beta2_t)
        v_t = state_ops.assign(v, v * self._beta2_t, use_locking=self._use_locking)
        v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                                    use_locking=self._use_locking)
        v_sqrt = tf.pow(v_t, self._pow_t)
        var_update = state_ops.assign_sub(var,
                                          lr * m_t / (v_sqrt + self._epsilon_t),
                                          use_locking=self._use_locking)
        # regularization
        var_update = state_ops.assign_sub(var_update,
                                          self._sparse_regularization * var,
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t])
Project: pydatalab | Author: googledatalab
def export(self, last_checkpoint, output_dir):
    """Builds a prediction graph and xports the model.

    Args:
      last_checkpoint: Path to the latest checkpoint file from training.
      output_dir: Path to the folder to be used to output the model.
    """
    logging.info('Exporting prediction graph to %s', output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
      # Build and save prediction meta graph and trained variable values.
      inputs, outputs = self.build_prediction_graph()
      signature_def_map = {
        'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
      }
      init_op = tf.global_variables_initializer()
      sess.run(init_op)
      self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                   last_checkpoint)
      init_op_serving = control_flow_ops.group(
          variables.local_variables_initializer(),
          tf.tables_initializer())

      builder = saved_model_builder.SavedModelBuilder(output_dir)
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.SERVING],
          signature_def_map=signature_def_map,
          legacy_init_op=init_op_serving)
      builder.save(False)
Project: polyaxon | Author: polyaxon
def _extract_metric_update_ops(eval_dict):
        """Separate update operations from metric value operations."""
        update_ops = []
        value_ops = {}
        # Sort metrics lexicographically so graph is identical every time.
        for name, metric_ops in sorted(six.iteritems(eval_dict)):
            value_ops[name] = metric_ops[0]
            update_ops.append(metric_ops[1])

        if update_ops:
            update_op = control_flow_ops.group(*update_ops)
        else:
            update_op = None

        return update_op, value_ops
Project: Optimization | Author: tdozat
def _finish(self, update_ops, steps_and_params, name_scope):
    """"""

    return control_flow_ops.group(*update_ops, name=name_scope)

  #=============================================================
Project: Optimization | Author: tdozat
def _finish(self, update_ops, steps_and_params, name_scope):
    """"""

    return control_flow_ops.group(*update_ops, name=name_scope)

  #=============================================================
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def update_weights(self, train_op):
    """Updates the model weights.

    This function must be called on at least one worker after `minimize`.
    In distributed training this call can be omitted on non-chief workers to
    speed up training.

    Args:
      train_op: The operation returned by the `minimize` call.

    Returns:
      An Operation that updates the model weights.
    """
    with ops.control_dependencies([train_op]):
      update_ops = []
      # Copy over unshrinked weights to user provided variables.
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var, slot_var in zip(self._variables[name],
                                 self._slots['unshrinked_' + name]):
          update_ops.append(var.assign(slot_var))

    # Apply proximal step.
    with ops.control_dependencies(update_ops):
      update_ops = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          with ops.device(var.device):
            # pylint: disable=protected-access
            update_ops.append(
                gen_sdca_ops._sdca_shrink_l1(
                    self._convert_n_to_tensor(
                        [var], as_ref=True),
                    l1=self._symmetric_l1_regularization(),
                    l2=self._symmetric_l2_regularization()))
      return control_flow_ops.group(*update_ops)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _get_local_init_op():
  local_init_op = _get_first_op_from_collection(
      ops.GraphKeys.LOCAL_INIT_OP)
  if local_init_op is None:
    op_list = [variables.local_variables_initializer(),
               data_flow_ops.tables_initializer()]
    if op_list:
      local_init_op = control_flow_ops.group(*op_list)
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  return local_init_op
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def restore(self, restored_tensors, restored_shapes):
    weights = restored_tensors[:len(restored_tensors) // 2]
    biases = restored_tensors[len(restored_tensors) // 2:]
    params = self._canonical_to_params(weights, biases)
    if not isinstance(params, tuple):
      params = (params,)
    assign_ops = [
        state_ops.assign(
            variable, param, validate_shape=False)
        for variable, param in zip(self._variables, params)
    ]
    return control_flow_ops.group(*assign_ops)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def benchmarkTfRNNLSTMTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/gpu:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)

        cell = lambda: core_rnn_cell_impl.LSTMCell(  # pylint: disable=cell-var-from-loop
            num_units=num_units, initializer=initializer, state_is_tuple=True)
        multi_cell = core_rnn_cell_impl.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm %s %s" %
                          (config_name, self._GetConfigDesc(config)))
Project: deep-learning | Author: lbkchen
def training_graph(self, input_data, input_labels, data_spec=None,
                     epoch=None, **tree_kwargs):
    """Constructs a TF graph for training a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      input_labels: A tensor or placeholder for labels associated with
        input_data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.
      epoch: A tensor or placeholder for the epoch the training data comes from.
      **tree_kwargs: Keyword arguments passed to each tree's training_graph.

    Returns:
      The last op in the random forest training graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    tree_graphs = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        seed = self.params.base_random_seed
        if seed != 0:
          seed += i
        # If using bagging, randomly select some of the input.
        tree_data = input_data
        tree_labels = input_labels
        if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement.  Consider
          # also allowing sampling with replacement as an option.
          batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
          r = random_ops.random_uniform(batch_size, seed=seed)
          mask = math_ops.less(
              r, array_ops.ones_like(r) * self.params.bagging_fraction)
          gather_indices = array_ops.squeeze(
              array_ops.where(mask), squeeze_dims=[1])
          # TODO(thomaswc): Calculate out-of-bag data and labels, and store
          # them for use in calculating statistics later.
          tree_data = array_ops.gather(input_data, gather_indices)
          tree_labels = array_ops.gather(input_labels, gather_indices)
        if self.params.bagged_features:
          tree_data = self._bag_features(i, tree_data)

        initialization = self.trees[i].tree_initialization()

        with ops.control_dependencies([initialization]):
          tree_graphs.append(
              self.trees[i].training_graph(
                  tree_data, tree_labels, seed, data_spec=data_spec,
                  epoch=([0] if epoch is None else epoch),
                  **tree_kwargs))

    return control_flow_ops.group(*tree_graphs, name='train')