Python tensorflow module: Operation() source-code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.Operation().
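For orientation before the project examples: in TensorFlow 1.x's graph model every node is a tf.Operation, and each tensor is an output of exactly one operation. A minimal sketch of that relationship (not taken from any project below):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0, name='x')   # tf.Tensor 'x:0', output of op 'x'
    y = tf.add(x, x, name='y')       # tf.Tensor 'y:0', output of op 'y'

op = g.get_operation_by_name('y')    # the tf.Operation itself
assert isinstance(op, tf.Operation)
assert y.op is op                    # a tensor points back to its producing op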

Project: tfutils    Author: neuroailab    | project source | file source
def apply_gradients(self, grads_and_vars, global_step=None):
        """Apply gradients to model variables specified in `grads_and_vars`.

        `apply_gradients` returns an op that calls
        `tf.train.Optimizer.apply_gradients` and then zeros the gradient
        variables stored in `self.grads_and_vars`.

        Args:
            grads_and_vars (list): List of ``(gradient, variable)`` pairs to apply.
            global_step (None, optional): tensorflow global_step variable.

        Returns:
            (tf.Operation): Applies gradient update to model followed by an
                internal gradient zeroing operation to `self.grads_and_vars`.

        """
        self.mini_flag = tf.assign(self.mini_flag, tf.constant([0], dtype=tf.float32))
        # grads_and_vars = self.aggregate_gradients(grads_and_vars, method='average')
        with tf.control_dependencies([self.mini_flag]):
            optimize = self._optimizer.apply_gradients(grads_and_vars,
                                                       global_step=global_step)
        #return [optimize, self.zero_grad()]
        return optimize
Project: spark-deep-learning    Author: databricks    | project source | file source
def get_op(tfobj_or_name, graph):
    """
    Get a :py:class:`tf.Operation` object.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the operation;
                  this argument is required, and the operation must exist in it.
    """
    graph = validated_graph(graph)
    _assert_same_graph(tfobj_or_name, graph)
    if isinstance(tfobj_or_name, tf.Operation):
        return tfobj_or_name
    name = tfobj_or_name
    if isinstance(tfobj_or_name, tf.Tensor):
        name = tfobj_or_name.name
    if not isinstance(name, six.string_types):
        raise TypeError('invalid op request for [type {}] {}'.format(type(name), name))
    _op_name = op_name(name, graph=None)
    op = graph.get_operation_by_name(_op_name)
    err_msg = 'cannot locate op {} in the current graph, got [type {}] {}'
    assert isinstance(op, tf.Operation), err_msg.format(_op_name, type(op), op)
    return op
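A hedged usage sketch (assuming the module's `validated_graph` and `_assert_same_graph` helpers pass plain strings through): both op names and tensor names resolve to the same operation.

g = tf.Graph()
with g.as_default():
    tf.constant(0.0, name='c')

assert get_op('c', g) is get_op('c:0', g)   # 'c:0' is stripped to the op name 'c'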
Project: spark-deep-learning    Author: databricks    | project source | file source
def get_tensor(tfobj_or_name, graph):
    """
    Get a :py:class:`tf.Tensor` object

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the tensor;
                  this argument is required, and the tensor must exist in it.
    """
    graph = validated_graph(graph)
    _assert_same_graph(tfobj_or_name, graph)
    if isinstance(tfobj_or_name, tf.Tensor):
        return tfobj_or_name
    name = tfobj_or_name
    if isinstance(tfobj_or_name, tf.Operation):
        name = tfobj_or_name.name
    if not isinstance(name, six.string_types):
        raise TypeError('invalid tensor request for {} of {}'.format(name, type(name)))
    _tensor_name = tensor_name(name, graph=None)
    tnsr = graph.get_tensor_by_name(_tensor_name)
    err_msg = 'cannot locate tensor {} in the current graph, got [type {}] {}'
    assert isinstance(tnsr, tf.Tensor), err_msg.format(_tensor_name, type(tnsr), tnsr)
    return tnsr
Project: deep-learning    Author: lbkchen    | project source | file source
def testTrainingConstructionClassificationSparse(self):
    input_data = tf.SparseTensor(
        indices=[[0, 0], [0, 3],
                 [1, 0], [1, 7],
                 [2, 1],
                 [3, 9]],
        values=[-1.0, 0.0,
                -1., 2.,
                1.,
                -2.0],
        shape=[4, 10])
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=10, num_trees=10, max_nodes=1000,
        split_after_samples=25).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: MinervaSc2    Author: phraust1612    | project source | file source
def get_copy_var_ops(*, dest_scope_name: str, src_scope_name: str) -> List[tf.Operation]:
    """Creates TF operations that copy weights from `src_scope` to `dest_scope`
    Args:
        dest_scope_name (str): Destination weights (copy to)
        src_scope_name (str): Source weight (copy from)
    Returns:
        List[tf.Operation]: Update operations are created and returned
    """
    # Copy variables src_scope to dest_scope
    op_holder = []

    src_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
    dest_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)

    for src_var, dest_var in zip(src_vars, dest_vars):
        op_holder.append(dest_var.assign(src_var.value()))

    return op_holder

# returns pysc2.env.environment.TimeStep after end of the game
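A usage sketch of the DQN-style target-network sync this enables (scope and variable names hypothetical):

with tf.variable_scope('main'):
    tf.get_variable('w', shape=[4, 4])
with tf.variable_scope('target'):
    tf.get_variable('w', shape=[4, 4])

copy_ops = get_copy_var_ops(dest_scope_name='target', src_scope_name='main')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(copy_ops)   # target/w now holds the same values as main/w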
Project: lsdc    Author: febert    | project source | file source
def testTrainingConstructionClassificationSparse(self):
    input_data = tf.SparseTensor(
        indices=[[0, 0], [0, 3],
                 [1, 0], [1, 7],
                 [2, 1],
                 [3, 9]],
        values=[-1.0, 0.0,
                -1., 2.,
                1.,
                -2.0],
        shape=[4, 10])
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=10, num_trees=10, max_nodes=1000,
        split_after_samples=25).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: lsdc    Author: febert    | project source | file source
def tf_num_params(x):
  """Number of parameters in a TensorFlow subgraph.

  Args:
      x: root of the subgraph (Tensor, Operation)

  Returns:
      Total number of elements found in all Variables
      in the subgraph.
  """

  if isinstance(x, tf.Tensor):
    shape = x.get_shape()
    x = x.op
  if x.type == "Variable":
    return shape.num_elements()
  totals = [tf_num_params(y) for y in x.inputs]
  return sum(totals)
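Note that the helper matches the op type string "Variable", which fits the TensorFlow version this project vendors; on TF 1.x graphs variables typically appear as "VariableV2", so the check may need adjusting there. A usage sketch under that caveat:

w = tf.Variable(tf.zeros([10, 20]), name='w')
y = tf.matmul(tf.zeros([1, 10]), w)
# Walks y's inputs; each Variable reached contributes shape.num_elements().
print(tf_num_params(y))   # 200, provided the variable op type matches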
Project: lsdc    Author: febert    | project source | file source
def tf_parameter_iter(x):
  """Iterate over the left branches of a graph and yield sizes.

  Args:
      x: root of the subgraph (Tensor, Operation)

  Yields:
      A triple of name, number of params, and shape.
  """

  while True:
    if isinstance(x, tf.Tensor):
      shape = x.get_shape().as_list()
      x = x.op
    else:
      shape = ""
    left, right = tf_left_split(x)
    totals = [tf_num_params(y) for y in right]
    total = sum(totals)
    yield x.name, total, shape
    if left is None: break
    x = left
Project: atari-rl    Author: brendanator    | project source | file source
def required_feeds(cls, tensor):
    if hasattr(tensor, 'required_feeds'):
      # Return cached result
      return tensor.required_feeds
    else:
      # Get feeds required by all inputs
      if isinstance(tensor, list):
        input_tensors = tensor
      else:
        op = tensor if isinstance(tensor, tf.Operation) else tensor.op
        input_tensors = list(op.inputs) + list(op.control_inputs)

      from networks import inputs
      feeds = inputs.RequiredFeeds()
      for input_tensor in input_tensors:
        feeds = feeds.merge(cls.required_feeds(input_tensor))

      # Cache results
      if not isinstance(tensor, list):
        tensor.required_feeds = feeds

      return feeds
Project: tensorlm    Author: batzner    | project source | file source
def _build_optimizer(self):
        """Based on the loss tensor, build an optimizer that minimizes the loss.

        This function returns an optimizer operation that updates the model's trainable parameters
        by determining the loss's gradients w.r.t. each of the trainable parameters. Specifically,
        RMSProp is used to minimize the loss. The gradients are clipped to the max_gradient_norm to
        prevent too drastic updates of the trainable parameters. See also tf.clip_by_global_norm

        Returns:
            tf.Operation: An operation that updates the model's trainable parameters.
        """

        # Clip the gradients
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._loss, tvars), self.max_gradient_norm)

        # Optimize the variables
        optimizer = tf.train.RMSPropOptimizer(self._learning_rate)
        return optimizer.apply_gradients(zip(grads, tvars))
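As a side note, tf.clip_by_global_norm also returns the pre-clipping global norm, which is convenient to monitor (a sketch assuming `loss` and `max_norm` are defined):

tvars = tf.trainable_variables()
grads, global_norm = tf.clip_by_global_norm(tf.gradients(loss, tvars), max_norm)
tf.summary.scalar('grad_global_norm', global_norm)   # log the unclipped norm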
Project: auDeep    Author: auDeep    | project source | file source
def optimizer(self) -> tf.Operation:
        """
        Creates the optimization operation used for training the autoencoder.

        Gradient clipping of values outside [-2, 2] is automatically applied to prevent exploding gradients.

        Returns
        -------
        tf.Operation
            The optimization operation used for training the autoencoder
        """
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        gvs = optimizer.compute_gradients(self.loss)

        with tf.variable_scope("clip_gradients"):
            capped_gvs = [(grad, var) if grad is None else (tf.clip_by_value(grad, -2., 2.), var) for grad, var in
                          gvs]

        train_op = optimizer.apply_gradients(capped_gvs)

        tf.add_to_collection("train_op", train_op)

        return train_op
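Because the op is stored in a collection, it can be fetched back by name later, e.g. after reloading the graph (compare the `train_op` property further below):

train_op = tf.get_collection("train_op")[0]   # retrieves the op added above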
Project: pruning_with_tensorflow    Author: ex4sperans    | project source | file source
def test_shapes(self):

        input_size = 20
        n_classes = 5
        layer_sizes = [5, 10]

        network = network_dense.FullyConnectedClassifier(input_size=input_size,
                                                         n_classes=n_classes,
                                                         layer_sizes=layer_sizes,
                                                         model_path='temp',
                                                         verbose=False)

        self.assertEqual(network.logits.get_shape().as_list(), [None, 5])
        self.assertEqual(network.loss.get_shape().as_list(), [])
        self.assertIsInstance(network.train_op, tf.Operation)

        shapes = [[20, 5], [5, 10], [10, 5]]
        for v, shape in zip(network.weight_matrices, shapes):
            self.assertEqual(v.get_shape().as_list(), shape)
Project: pruning_with_tensorflow    Author: ex4sperans    | project source | file source
def _create_optimizer(self,
                          loss: tf.Tensor,
                          learning_rate: Union[tf.Tensor, float],
                          momentum: Union[tf.Tensor, float],
                          threshold: float) -> tf.Operation:

        if threshold is not None:
            return self._create_optimizer_sparse(loss=loss,
                                                 threshold=threshold,
                                                 learning_rate=learning_rate,
                                                 momentum=momentum)
        with tf.variable_scope('optimizer'):

            optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                                   momentum=momentum,
                                                   name='optimizer')
            self.global_step = tf.Variable(0, trainable=False)
            train_op = optimizer.minimize(loss,
                                          global_step=self.global_step,
                                          name='train_op')

            return train_op
Project: pruning_with_tensorflow    Author: ex4sperans    | project source | file source
def _create_optimizer_sparse(self,
                                 loss: tf.Tensor,
                                 threshold: float,
                                 learning_rate: Union[tf.Tensor, float],
                                 momentum: Union[tf.Tensor, float]) -> tf.Operation:

        with tf.variable_scope('optimizer'):

            optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                                   momentum=momentum,
                                                   name='optimizer')
            self.global_step = tf.Variable(0, trainable=False)
            grads_and_vars = optimizer.compute_gradients(loss)
            grads_and_vars_sparse = self._apply_prune_on_grads(grads_and_vars,
                                                               threshold)
            train_op = optimizer.apply_gradients(grads_and_vars_sparse,
                                                 global_step=self.global_step,
                                                 name='train_op')

            return train_op
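`_apply_prune_on_grads` is not shown in this excerpt; a plausible sketch of magnitude-based pruning consistent with the call site (hypothetical, not the project's exact code):

def _apply_prune_on_grads(self, grads_and_vars, threshold):
    # Zero each gradient wherever the corresponding weight's magnitude is
    # below the threshold, so pruned weights are never updated again.
    pruned = []
    for grad, var in grads_and_vars:
        if grad is not None:
            mask = tf.cast(tf.abs(var) >= threshold, grad.dtype)
            grad = grad * mask
        pruned.append((grad, var))
    return pruned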
Project: dqn-tensorflow    Author: DongjunLee    | project source | file source
def get_copy_var_ops(*, dest_scope_name: str, src_scope_name: str) -> List[tf.Operation]:
    """Creates TF operations that copy weights from `src_scope` to `dest_scope`
    Args:
        dest_scope_name (str): Destination weights (copy to)
        src_scope_name (str): Source weight (copy from)
    Returns:
        List[tf.Operation]: Update operations are created and returned
    """
    # Copy variables src_scope to dest_scope
    op_holder = []

    src_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
    dest_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)

    for src_var, dest_var in zip(src_vars, dest_vars):
        op_holder.append(dest_var.assign(src_var.value()))

    return op_holder
Project: tfutils    Author: neuroailab    | project source | file source
def train_loop(sess, train_targets, num_minibatches=1, **loop_params):
    """Define default minibatch training loop.

    A training loop that performs minibatching with ``num_minibatches``
    minibatches.

    Args:
        sess (tf.Session): Current tensorflow session.
        train_targets (list of dict): Target operations to be evaluated by ``sess.run``.
            By default, ``base.train_from_params`` inserts the following
            targets to facilitate minibatching:
            * ``__grads__`` (tf.Operation): Accumulates and stores gradients.
            * ``optimizer`` (tf.Operation): Applies and zeros gradients.
        num_minibatches (int): number of minibatches to use.
        **loop_params (mapping): additional, user-defined kwargs to
            be used in the training loop.

    Returns:
        dict: A dictionary containing train targets evaluated by the session.

    """
    assert all([required in targets for targets in train_targets
                for required in ['__grads__', 'optimizer']])

    # Perform minibatching
    num_minibatches = int(num_minibatches)
    for _ in range(num_minibatches - 1):
        # Accumulate gradients for all but the last minibatch
        sess.run([target['__grads__'] for target in train_targets])

    # Compute final targets (includes zeroing gradient accumulator variable)

    return sess.run(train_targets)
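For readers unfamiliar with the `__grads__`/`optimizer` pattern, a generic TF1 sketch of gradient accumulation (names hypothetical; tfutils builds its actual targets in `base.train_from_params`, and `loss` and `optimizer` are assumed to exist):

tvars = tf.trainable_variables()
accum = [tf.Variable(tf.zeros_like(v), trainable=False) for v in tvars]

# '__grads__': accumulate one minibatch's gradients into the buffers
# (assumes every variable receives a gradient).
grads = tf.gradients(loss, tvars)
accum_op = tf.group(*[a.assign_add(g) for a, g in zip(accum, grads)])

# 'optimizer': apply the accumulated gradients, then zero the buffers.
apply_op = optimizer.apply_gradients(zip(accum, tvars))
with tf.control_dependencies([apply_op]):
    train_op = tf.group(*[a.assign(tf.zeros_like(a)) for a in accum])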
Project: spark-deep-learning    Author: databricks    | project source | file source
def get_shape(tfobj_or_name, graph):
    """
    Return the shape of the tensor as a list

    :param graph: tf.Graph, a TensorFlow Graph object
    :param tfobj_or_name: either a tf.Tensor, tf.Operation or a name to either
    """
    graph = validated_graph(graph)
    _shape = get_tensor(tfobj_or_name, graph).get_shape().as_list()
    return [-1 if x is None else x for x in _shape]
Project: spark-deep-learning    Author: databricks    | project source | file source
def tensor_name(tfobj_or_name, graph=None):
    """
    Derive the :py:class:`tf.Tensor` name from a :py:class:`tf.Operation` or :py:class:`tf.Tensor`
    object, or its name.
    If a name is provided and the graph is not, we will derive the tensor name based on
    TensorFlow's naming convention.
    If the input is a TensorFlow object, or the graph is given, we also check that
    the tensor exists in the associated graph.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the tensor.
                  By default (``graph=None``) this argument is not required.
    """
    if graph is not None:
        return get_tensor(tfobj_or_name, graph).name
    if isinstance(tfobj_or_name, six.string_types):
        # If input is a string, assume it is a name and infer the corresponding tensor name.
        # WARNING: this depends on TensorFlow's tensor naming convention
        name = tfobj_or_name
        name_parts = name.split(":")
        assert len(name_parts) <= 2, name_parts
        if len(name_parts) < 2:
            name += ":0"
        return name
    elif hasattr(tfobj_or_name, 'graph'):
        return get_tensor(tfobj_or_name, tfobj_or_name.graph).name
    else:
        raise TypeError('invalid tf.Tensor name query type {}'.format(type(tfobj_or_name)))
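The convention in action (illustrative names):

tensor_name('fc1/weights')     # -> 'fc1/weights:0' (output 0 is implied)
tensor_name('fc1/weights:0')   # -> 'fc1/weights:0' (already a tensor name)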
Project: spark-deep-learning    Author: databricks    | project source | file source
def op_name(tfobj_or_name, graph=None):
    """
    Derive the :py:class:`tf.Operation` name from a :py:class:`tf.Operation` or
    :py:class:`tf.Tensor` object, or its name.
    If a name is provided and the graph is not, we will derive the operation name based on
    TensorFlow's naming convention.
    If the input is a TensorFlow object, or the graph is given, we also check that
    the operation exists in the associated graph.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the operation.
                  By default (``graph=None``) this argument is not required.
    """
    if graph is not None:
        return get_op(tfobj_or_name, graph).name
    if isinstance(tfobj_or_name, six.string_types):
        # If input is a string, assume it is a name and infer the corresponding operation name.
        # WARNING: this depends on TensorFlow's operation naming convention
        name = tfobj_or_name
        name_parts = name.split(":")
        assert len(name_parts) <= 2, name_parts
        return name_parts[0]
    elif hasattr(tfobj_or_name, 'graph'):
        return get_op(tfobj_or_name, tfobj_or_name.graph).name
    else:
        raise TypeError('invalid tf.Operation name query type {}'.format(type(tfobj_or_name)))
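And the inverse direction (illustrative name):

op_name('fc1/weights:0')   # -> 'fc1/weights' (the ':0' output suffix is dropped)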
Project: spark-deep-learning    Author: databricks    | project source | file source
def validated_input(tfobj_or_name, graph):
    """
    Validate and return the input name usable by GraphFunction

    :param graph: tf.Graph, a TensorFlow Graph object
    :param tfobj_or_name: either a tf.Tensor, tf.Operation or a name to either
    """
    graph = validated_graph(graph)
    name = op_name(tfobj_or_name, graph)
    op = graph.get_operation_by_name(name)
    assert 'Placeholder' == op.type, \
        ('input must be Placeholder, but got', op.type)
    return name
Project: dataset    Author: analysiscenter    | project source | file source
def predict(self, fetches=None, feed_dict=None):      # pylint: disable=arguments-differ
        """ Get predictions on the data provided

        Parameters
        ----------
        fetches : tuple, list
            a sequence of `tf.Operation` and/or `tf.Tensor` to calculate
        feed_dict : dict
            input data, where key is a placeholder name and value is a numpy value

        Returns
        -------
        Calculated values of tensors in `fetches` in the same structure

        Notes
        -----
        The only difference between `predict` and `train` is that `train` also executes a `train_step` operation,
        which involves calculating and applying gradients and thus changing model weights.

        See also
        --------
        `Tensorflow Session run <https://www.tensorflow.org/api_docs/python/tf/Session#run>`_
        """
        with self.graph.as_default():
            _feed_dict = self._fill_feed_dict(feed_dict, is_training=False)
            _fetches = self._fill_fetches(fetches, default='predictions')
            output = self.session.run(_fetches, _feed_dict)
        return self._fill_output(output, _fetches)
Project: deep-learning    Author: lbkchen    | project source | file source
def testTrainingConstructionClassification(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=2, num_trees=10, max_nodes=1000,
        split_after_samples=25).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: deep-learning    Author: lbkchen    | project source | file source
def testTrainingConstructionRegression(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=2, num_trees=10, max_nodes=1000,
        split_after_samples=25, regression=True).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: lsdc    Author: febert    | project source | file source
def test_defaults_empty_graph(self):
    with tf.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      tf.Variable(1, name='my_var')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
      self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
      with self.test_session() as sess:
        self.assertTrue(b'my_var' in sess.run(scaffold.ready_op))
        sess.run([scaffold.init_op, scaffold.local_init_op])
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))
Project: lsdc    Author: febert    | project source | file source
def test_defaults_no_variables(self):
    with tf.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      tf.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
      self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
Project: lsdc    Author: febert    | project source | file source
def testTrainingConstructionClassification(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=2, num_trees=10, max_nodes=1000,
        split_after_samples=25).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: lsdc    Author: febert    | project source | file source
def testTrainingConstructionRegression(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=2, num_trees=10, max_nodes=1000,
        split_after_samples=25, regression=True).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
Project: lsdc    Author: febert    | project source | file source
def tf_structure(x, include_shapes=False, finished=None):
  """A postfix expression summarizing the TF graph.

  This is intended to be used as part of test cases to
  check for gross differences in the structure of the graph.
  The resulting string is not invertible or unambiguous
  and cannot be used to reconstruct the graph accurately.

  Args:
      x: a tf.Tensor or tf.Operation
      include_shapes: include shapes in the output string
      finished: a set of ops that have already been output

  Returns:
      A string representing the structure as a string of
      postfix operations.
  """
  if finished is None:
    finished = set()
  if isinstance(x, tf.Tensor):
    shape = x.get_shape().as_list()
    x = x.op
  else:
    shape = []
  if x in finished:
    return " <>"
  finished |= {x}
  result = ""
  if not _truncate_structure(x):
    for y in x.inputs:
      result += tf_structure(y, include_shapes, finished)
  if include_shapes:
    result += " %s" % (shape,)
  if x.type != "Identity":
    name = SHORT_NAMES.get(x.type, x.type.lower())
    result += " " + name
  return result
Project: lsdc    Author: febert    | project source | file source
def tf_print(x, depth=0, finished=None, printer=print):
  """A simple print function for a TensorFlow graph.

  Args:
      x: a tf.Tensor or tf.Operation
      depth: current printing depth
      finished: set of nodes already output
      printer: print function to use

  Returns:
      Total number of parameters found in the
      subtree.
  """

  if finished is None:
    finished = set()
  if isinstance(x, tf.Tensor):
    shape = x.get_shape().as_list()
    x = x.op
  else:
    shape = ""
  if x.type == "Identity":
    x = x.inputs[0].op
  if x in finished:
    printer("%s<%s> %s %s" % ("  "*depth, x.name, x.type, shape))
    return
  finished |= {x}
  printer("%s%s %s %s" % ("  "*depth, x.name, x.type, shape))
  if not _truncate_structure(x):
    for y in x.inputs:
      tf_print(y, depth+1, finished, printer=printer)
Project: lsdc    Author: febert    | project source | file source
def tf_parameter_summary(x, printer=print, combine=True):
  """Summarize parameters by depth.

  Args:
      x: root of the subgraph (Tensor, Operation)
      printer: print function for output
      combine: combine layers by top-level scope
  """
  seq = tf_parameter_iter(x)
  if combine: seq = _combine_filter(seq)
  seq = reversed(list(seq))
  for name, total, shape in seq:
    printer("%10d %-20s %s" % (total, name, shape))
Project: tensorlm    Author: batzner    | project source | file source
def get_state_update_op(state_variables, new_states):
    """Returns an operation to update an LSTM's state variables.

    See get_state_variables() for more info.

    Args:
        state_variables (tuple[tf.contrib.rnn.LSTMStateTuple]): The LSTM's state variables.
        new_states (tuple[tf.contrib.rnn.LSTMStateTuple]): The new values for the state variables.
            new_states may have state tuples with state sizes < max_batch_size. Then, only the first
            rows of the corresponding state variables will be updated.

    Returns:
        tf.Operation: An operation that updates the LSTM's state variables.
    """

    # Add an operation to update the train states with the last state tensors.
    update_ops = []
    for state_variable, new_state in zip(state_variables, new_states):
        # new_state[0] might be smaller than state_variable[0], because state_variable[0]
        # contains max_batch_size entries.

        # Get the update indices for both states in the tuple
        update_indices = (tf.range(0, tf.shape(new_state[0])[0]),
                          tf.range(0, tf.shape(new_state[1])[0]))
        update_ops.extend([
            tf.scatter_update(state_variable[0], update_indices[0], new_state[0]),
            tf.scatter_update(state_variable[1], update_indices[1], new_state[1])
        ])
    return tf.tuple(update_ops)
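`get_state_variables()` is referenced but not shown in this excerpt; a sketch consistent with the docstring (the project's actual helper may differ):

def get_state_variables(max_batch_size, cell):
    # Keep each layer's (c, h) state in non-trainable variables so it
    # persists across session.run calls.
    state_variables = []
    for state_c, state_h in cell.zero_state(max_batch_size, tf.float32):
        state_variables.append(tf.contrib.rnn.LSTMStateTuple(
            tf.Variable(state_c, trainable=False),
            tf.Variable(state_h, trainable=False)))
    return tuple(state_variables)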
Project: tensorlm    Author: batzner    | project source | file source
def get_state_reset_op(state_variables, cell, max_batch_size):
    """Returns an operation to set each variable in a list of LSTMStateTuples to zero.

    See get_state_variables() for more info.

    Args:
        state_variables (tuple[tf.contrib.rnn.LSTMStateTuple]): The LSTM's state variables.
        cell (tf.contrib.rnn.MultiRNNCell): A MultiRNNCell consisting of multiple LSTMCells.
        max_batch_size (int): The maximum size of batches that will be fed to the LSTMCell.

    Returns:
        tf.Operation: An operation that sets the LSTM's state to zero.
    """
    zero_states = cell.zero_state(max_batch_size, tf.float32)
    return get_state_update_op(state_variables, zero_states)
Project: auDeep    Author: auDeep    | project source | file source
def optimize(self,
                 loss: tf.Tensor,
                 learning_rate: float) -> tf.Operation:
        with tf.variable_scope("optimize"):
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

            return optimizer.minimize(loss, name="train_op")
Project: auDeep    Author: auDeep    | project source | file source
def train_op(self) -> tf.Operation:
        """
        Returns an operation for performing a single training step.

        Returns
        -------
        tf.Operation
            An operation for performing a single training step
        """
        return self.graph.get_collection("train_op")[0]
Project: odin    Author: imito    | project source | file source
def get_operation_footprint(op):
  """ Trace back the inputs of given Op and record all:
  * placholders
  * variables
  * ops
  Those are related to given op.

  The final footprint is concatenated string of all variables,
  placeholders, constants, and Ops

  Note
  ----
  This is just a best-effort attempt to create a short identifier for a
  tensorflow Op.
  """
  if not isinstance(op, tf.Operation) and hasattr(op, 'op'):
    op = op.op
  var = []
  placeholder = []
  const = []
  ops = [op.type]
  inputs = list(op._inputs)
  while len(inputs) > 0:
    i = inputs.pop()
    o = i.op
    ops.append(o.type)
    if o.type == "VariableV2":
      var.append(i)
    elif o.type == "Placeholder":
      placeholder.append(i)
    elif o.type == "Const":
      const.append(i)
    inputs = list(o._inputs) + inputs
  return ':'.join([get_normalized_name(v) for v in var]) + '|' +\
         ':'.join([get_normalized_name(p) for p in placeholder]) + '|' +\
         ':'.join([get_normalized_name(c) for c in const]) + '|' +\
         ':'.join([j.split(':')[0] for j in ops])
Project: odin    Author: imito    | project source | file source
def __init__(self, inputs, outputs, updates=[], defaults={},
               training=None):
    self.training = training
    # ====== validate input ====== #
    if isinstance(inputs, Mapping):
      self.inputs_name = inputs.keys()
      inputs = inputs.values()
    elif not isinstance(inputs, (tuple, list)):
      inputs = [inputs]
    self.inputs = flatten_list(inputs, level=None)
    if not hasattr(self, 'inputs_name'):
      self.inputs_name = [i.name.split(':')[0] for i in self.inputs]
    # ====== defaults ====== #
    defaults = dict(defaults)
    self.defaults = defaults
    # ====== validate outputs ====== #
    return_list = True
    if not isinstance(outputs, (tuple, list)):
      outputs = (outputs,)
      return_list = False
    self.outputs = flatten_list(list(outputs), level=None)
    self.return_list = return_list
    # ====== validate updates ====== #
    if isinstance(updates, Mapping):
      updates = updates.items()
    with tf.control_dependencies(self.outputs):
      # create updates ops
      if not isinstance(updates, tf.Operation):
        updates_ops = []
        for update in updates:
          if isinstance(update, (tuple, list)):
            p, new_p = update
            updates_ops.append(tf.assign(p, new_p))
          else: # assumed already an assign op
            updates_ops.append(update)
        self.updates_ops = tf.group(*updates_ops)
      else: # already a tensorflow Op
        self.updates_ops = updates
Project: ADD-GAN    Author: zblasingame    | project source | file source
def reset_weights(self):
        """Returns a TensorFlow operation to resets TensorFlow weights
        so the model can be used again.

        Returns:
            list [tf.Operation]: List of operations to reassign weights.
        """

        weights = [entry['weights'] for entry in self.network]
        weights.extend([entry['biases'] for entry in self.network])

        return [weight.assign(tf.random_normal(weight.get_shape(), stddev=0.1))
                for weight in weights]
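A hypothetical usage, e.g. re-initializing between cross-validation folds:

sess.run(model.reset_weights())   # reassigns every weight and bias in place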
Project: dvae    Author: dojoteef    | project source | file source
def device_fn(device):
    """ Returns a function that given a tf.Operation it returns what device to put it on """
    def function(operation):
        """ Given a tf.Operation returns what device to put it on """
        if operation.type in _CPU_OPERATIONS:
            return '/cpu:0'
        else:
            return device

    return function
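tf.device accepts such a function, so the pinning logic applies to every op created inside the block (a sketch; `_CPU_OPERATIONS` is defined elsewhere in the module):

with tf.device(device_fn('/gpu:0')):
    # Ops whose type is in _CPU_OPERATIONS land on /cpu:0, the rest on /gpu:0.
    logits = tf.matmul(tf.zeros([1, 8]), tf.zeros([8, 2]))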
Project: SSD-Keras_Tensorflow    Author: jedol    | project source | file source
def fit_generator(sess, num_iter, operations=[], batch_generator=None, inputs=[], outputs={},
                  static_inputs={}, print_interval=100):
    if not isinstance(operations, list):
        if isinstance(operations, tuple):
            operations = list(operations)
        else:
            assert isinstance(operations, tf.Operation)
            operations = [operations]
    if not isinstance(inputs, list) and not isinstance(inputs, tuple):
        assert isinstance(inputs, tf.Tensor)
        inputs = [inputs]
    if not isinstance(outputs, dict):
        assert isinstance(outputs, tf.Tensor)
        outputs = {'loss': outputs}

    output_names = list(outputs.keys())
    output_tensors = list(outputs.values())

    tic = ti.default_timer()
    for step in range(1, num_iter + 1):
        feed_dict = dict()
        if batch_generator is not None:
            feed_dict.update(dict(zip(inputs, next(batch_generator))))
        feed_dict.update(static_inputs)

        if step % print_interval == 0:
            output_values = sess.run(operations + output_tensors, feed_dict=feed_dict)[len(operations):]

            toc = ti.default_timer()
            eta = (toc - tic) / step * (num_iter - step)
            log = '[Step: {}/{} ETA: {:.0f}s]'.format(step, num_iter, eta)

            for output_name, output_value in zip(output_names, output_values):
                log += ' {}: {:.4f}'.format(output_name, output_value)

            print(log)
        else:
            _ = sess.run(operations, feed_dict=feed_dict)
    toc = ti.default_timer()
    log = '[Step: {}/{} Elapsed: {:.0f}s]'.format(step, num_iter, toc - tic)
    print(log)
Project: tensorflow-extenteten    Author: raviqqe    | project source | file source
def with_dependencies(dependencies, tensor):
    """
    This function is partially documented on tensorflow.org,
    but it cannot be found in the library itself.
    """
    with tf.control_dependencies(dependencies):
        if isinstance(tensor, tf.Tensor):
            return tf.identity(tensor)
        elif isinstance(tensor, tf.Operation):
            return tf.group(tensor)

        raise ValueError("{} must be tf.Tensor or tf.Operation."
                         .format(tensor))
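A usage sketch: evaluating the returned tensor also runs the dependencies first.

counter = tf.Variable(0, trainable=False)
bump = tf.assign_add(counter, 1)
x = with_dependencies([bump], tf.constant(3.0))   # fetching x also increments counter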
Project: cxflow-tensorflow    Author: Cognexa    | project source | file source
def _create_train_ops(self, dependencies: List[List[tf.Operation]], optimizer_config: Optional[dict]) -> None:
        """
        Create the train ops for training. In order to handle incomplete batches, there must be one train op for
        each possible number of non-empty towers. E.g. for 2-GPU training, one must define 2 train ops, for 1 and
        2 active towers respectively. The train ops must be named ``train_op_1``, ``train_op_2`` etc.,
        wherein the suffixed number stands for the number of towers.

        By default the train ops are constructed in the following way:
            - optimizer is created from the ``model.optimizer`` configuration dict
            - REGULARIZATION_LOSSES collection is summed into ``regularization_loss``
            - gradients minimizing the respective tower losses and ``regularization_loss`` are computed
            - for each number of non-empty towers
                - gradients of the respective towers are averaged and applied

        To implement a custom behavior, override this method and create your own op named as :py:attr:`TRAIN_OP_NAME`.

        .. code-block:: yaml
            :caption: example optimizer config

            model:
                optimizer:
                    class: RMSPropOptimizer
                    learning_rate: 0.001

        :param dependencies: a list of dependent operations (e.g. batch normalization updates) for each number of towers
        :param optimizer_config: optimizer configuration dict
        """
        if optimizer_config is None:
            raise ValueError('Optimizer config was not specified although it is required for creating the train op. '
                             'Please specify the configuration in `model.optimizer`.')
        grads_and_vars = []
        optimizer = create_optimizer(optimizer_config)
        regularization_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        regularization_loss = tf.reduce_sum(tf.stack(regularization_losses))
        if regularization_losses:
            logging.info('\tAdding regularization losses')
            logging.debug('\tRegularization losses: %s', [var.name for var in regularization_losses])
        for tower in self._towers:
            with tower:
                grads_and_vars.append(optimizer.compute_gradients(tf.reduce_mean(tower.loss) + regularization_loss))
        for i in range(len(self._towers)):
            with tf.control_dependencies(dependencies[i]):
                optimizer.apply_gradients(average_gradients(grads_and_vars[:(i + 1)]),
                                          name=BaseModel.TRAIN_OP_NAME + '_{}'.format(i + 1))
Project: dataset    Author: analysiscenter    | project source | file source
def train(self, fetches=None, feed_dict=None, use_lock=False):   # pylint: disable=arguments-differ
        """ Train the model with the data provided

        Parameters
        ----------
        fetches : tuple, list
            a sequence of `tf.Operation` and/or `tf.Tensor` to calculate
        feed_dict : dict
            input data, where key is a placeholder name and value is a numpy value
        use_lock : bool
            if True, the whole train step is locked, thus allowing for multithreading.

        Returns
        -------
        Calculated values of tensors in `fetches` in the same structure

        See also
        --------
        `Tensorflow Session run <https://www.tensorflow.org/api_docs/python/tf/Session#run>`_
        """
        with self.graph.as_default():
            _feed_dict = self._fill_feed_dict(feed_dict, is_training=True)
            if fetches is None:
                _fetches = tuple()
            else:
                _fetches = self._fill_fetches(fetches, default=None)

            if use_lock:
                self._train_lock.acquire()

            _all_fetches = []
            if self.train_step:
                _all_fetches += [self.train_step]
            if _fetches is not None:
                _all_fetches += [_fetches]
            if len(_all_fetches) > 0:
                _, output = self.session.run(_all_fetches, feed_dict=_feed_dict)
            else:
                output = None

            if use_lock:
                self._train_lock.release()

            return self._fill_output(output, _fetches)