Python tensorflow.python.framework.ops module: name_scope() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.ops.name_scope().
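All of the snippets below rely on the three-argument form ops.name_scope(name, default_name, values): a caller-supplied name overrides default_name, and values lets TensorFlow check that the listed tensors come from the current graph. A minimal sketch of the pattern (not taken from any of the projects below; TF 1.x graph mode and the function name scaled_add are illustrative assumptions):

import tensorflow as tf
from tensorflow.python.framework import ops

def scaled_add(x, y, name=None):
  # Ops created here are grouped under `name`, or "ScaledAdd" if name is None.
  with ops.name_scope(name, "ScaledAdd", [x, y]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    return tf.add(2.0 * x, y, name=scope)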

Project: Question-Answering    Author: MurtyShikhar
def hardmax(logits, name=None):
  """Returns batched one-hot vectors.

  The depth index containing the `1` is that of the maximum logit value.

  Args:
    logits: A batch tensor of logit values.
    name: Name to use when creating ops.
  Returns:
    A batched one-hot tensor.
  """
  with ops.name_scope(name, "Hardmax", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    if logits.get_shape()[-1].value is not None:
      depth = logits.get_shape()[-1].value
    else:
      depth = array_ops.shape(logits)[-1]
    return array_ops.one_hot(
        math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
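A quick usage sketch for the snippet above (values and the TF 1.x session API are illustrative assumptions):

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
one_hot = hardmax(logits)
with tf.Session() as sess:
  print(sess.run(one_hot))  # [[0. 1. 0.]
                            #  [1. 0. 0.]]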
Project: antgo    Author: jianzfb
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Ensures precision is increasing in reverse order.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
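A hedged usage sketch; the precision/recall values are illustrative, and the project-local tfe_math.cummax is assumed to be importable:

import tensorflow as tf

precision = tf.constant([1.0, 1.0, 0.67, 0.75])
recall = tf.constant([0.25, 0.5, 0.5, 0.75])
ap = average_precision_voc12(precision, recall)
with tf.Session() as sess:
  print(sess.run(ap))  # scalar float64 AP estimate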
Project: antgo    Author: jianzfb
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulative sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # Interpolate precision at the 11 recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
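In formula terms, the snippet implements the standard 11-point interpolated AP:

$$\mathrm{AP} = \frac{1}{11} \sum_{t \in \{0,\,0.1,\,\dots,\,1.0\}} \max_{\tilde{r} \ge t} p(\tilde{r})$$

where p(r̃) is the precision at recall r̃; the appended (0., np.inf) sentinel pair guarantees the mask is never empty at any threshold.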
Project: SSD_tensorflow_VOC    Author: LevinJ
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Ensures precision is increasing in reverse order.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
Project: SSD_tensorflow_VOC    Author: LevinJ
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulative sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # Interpolate precision at the 11 recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
Project: lsdc    Author: febert
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    A scalar tensor: the sum of all the reduced input tensors.

  Raises:
    ValueError: if `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
  if len(tensors) == 1:
    return tensors[0]
  with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
    return math_ops.add_n(tensors, name=scope)
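A usage sketch with hypothetical tensors; each input is first reduced to a scalar, then the scalars are summed:

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # reduce_sum -> 10.0
b = tf.constant([5.0, 5.0])                # reduce_sum -> 10.0
total = reduce_sum_n([a, b], name='total')
with tf.Session() as sess:
  print(sess.run(total))  # 20.0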
Project: lsdc    Author: febert
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    Boolean scalar `Tensor`: whether `actual_tensor` has the expected rank and shape.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
Project: lsdc    Author: febert
def _assert_shape_op(expected_shape, actual_tensor):
  """Asserts actual_tensor's shape is expected_shape.

  Args:
    expected_shape: List of integers defining the expected shape, or tensor of
        same.
    actual_tensor: Tensor to test.
  Returns:
    New assert tensor.
  """
  with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
    actual_shape = array_ops.shape(actual_tensor, name='actual')
    is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
    return control_flow_ops.Assert(
        is_shape, [
            'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
            expected_shape,
            actual_shape
        ], name=scope)
Project: lsdc    Author: febert
def get_score_function_with_advantage(advantage_fn=None,
                                      name="ScoreFunctionWithAdvantage"):
  """Score function estimator with advantage function.

  Args:
    advantage_fn: callable that takes the `DistributionTensor` and the
      downstream `loss` and returns a `Tensor` advantage
      (e.g. `loss - baseline`).
    name: name to prepend ops with.

  Returns:
    Callable score function estimator that takes the `DistributionTensor`, the
    sampled `value`, and the downstream `loss`, and uses the provided advantage.
  """

  def score_function_with_advantage(dist_tensor, value, loss):
    with ops.name_scope(name, values=[value, loss]):
      advantage = advantage_fn(dist_tensor, loss)
      advantage = array_ops.stop_gradient(advantage)
      return dist_tensor.distribution.log_prob(value) * advantage

  return score_function_with_advantage
Project: lsdc    Author: febert
def get_score_function_with_baseline(baseline_fn=None, name="ScoreFunction"):
  """Score function estimator with baseline function.

  Args:
    baseline_fn: callable that takes the `DistributionTensor` and the downstream
      `loss` and returns a `Tensor` baseline to be subtracted from the `loss`.
      If None, defaults to `get_mean_baseline`, which is an EMA of the loss.
    name: name to prepend ops with.

  Returns:
    Callable score function estimator that takes the `DistributionTensor`, the
    sampled `value`, and the downstream `loss`, and subtracts the provided
    `baseline` from the `loss`.
  """
  if baseline_fn is None:
    baseline_fn = get_mean_baseline()

  def score_function_with_baseline(dist_tensor, value, loss):
    with ops.name_scope(name):
      b = baseline_fn(dist_tensor, loss)
      return score_function(dist_tensor, value, loss, b)

  return score_function_with_baseline
Project: lsdc    Author: febert
def loss(self, final_loss, name="Loss"):
    # Return a loss based on final_loss and the distribution. Returns
    # None if pathwise derivatives are supported, if the loss_fn
    # was explicitly set to None, or if the value type is MeanValue.
    if self._loss_fn is None:
      return None

    if (self._dist.is_continuous and self._dist.is_reparameterized and
        not self._value_type.stop_gradient):
      # Can perform pathwise-derivative on this one; no additional loss needed.
      return None

    with ops.name_scope(self.name, values=[final_loss]):
      with ops.name_scope(name):
        if (self._value_type.stop_gradient or
            isinstance(self._value_type, SampleAndReshapeValue) or
            isinstance(self._value_type, SampleValue)):
          return self._loss_fn(self, self._value, final_loss)
        elif isinstance(self._value_type, MeanValue):
          return None  # MeanValue generally provides its own gradient
        else:
          raise TypeError("Unrecognized Distribution Value Type: %s",
                          self._value_type)
Project: lsdc    Author: febert
def hinge_loss(logits, target, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    target: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `target` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, target]) as scope:
    logits.get_shape().assert_is_compatible_with(target.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    target = math_ops.to_float(target)
    all_ones = array_ops.ones_like(target)
    labels = math_ops.sub(2 * target, all_ones)
    losses = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
    return losses
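An illustrative call (values assumed); note that the 0/1 targets are remapped internally to -1/1 before computing max(0, 1 - y * logits):

import tensorflow as tf

logits = tf.constant([0.5, -2.0, 3.0])
target = tf.constant([1.0, 0.0, 1.0])
losses = hinge_loss(logits, target)
with tf.Session() as sess:
  print(sess.run(losses))  # [0.5 0.  0. ]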
Project: lsdc    Author: febert
def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               num_shards=1,
               name='ShardedMutableHashTable'):
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      super(_ShardedMutableHashTable, self).__init__(key_dtype, value_dtype,
                                                     scope)
      table_shards = []
      for i in range(num_shards):
        table_shards.append(lookup_ops.MutableHashTable(
            key_dtype=key_dtype,
            value_dtype=value_dtype,
            default_value=default_value,
            name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access
Project: lsdc    Author: febert
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.mul(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i], array_ops.expand_dims(
            dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
Project: lsdc    Author: febert
def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
Project: lsdc    Author: febert
def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.

    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    with name_scope('sdca/approximate_duality_gap'):
      _, values_list = self._hashtable.export_sharded()
      shard_sums = []
      for values in values_list:
        with ops.device(values.device):
          shard_sums.append(
              math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
      summed_values = math_ops.add_n(shard_sums)

      primal_loss = summed_values[1]
      dual_loss = summed_values[2]
      example_weights = summed_values[3]
      # Note: we return NaN if there are no weights or all weights are 0, e.g.
      # if no examples have been processed
      return (primal_loss + dual_loss + self._l1_loss() +
              (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
             ) / example_weights
Project: lsdc    Author: febert
def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.

    Args:
      examples: Examples to compute loss on.

    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.
    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(['example_labels', 'example_weights',
                           'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = convert_to_tensor(examples['example_weights'])
      return ((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
              math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
              self.unregularized_loss(examples))
Project: lsdc    Author: febert
def prefetch_op(self):
    """The op used to prefetch new data into the state saver.

    Running it once enqueues one new input example into the state saver.
    The first time this gets called, it additionally creates the prefetch_op.
    Subsequent calls simply return the previously created `prefetch_op`.

    It should be run in a separate thread via e.g. a `QueueRunner`.

    Returns:
      An `Operation` that performs prefetching.
    """
    if not self._prefetch_op:
      with ops.name_scope(None), ops.name_scope(
          self._scope, values=[self._barrier.barrier_ref]):
        self._create_prefetch_op()
    return self._prefetch_op
Project: lsdc    Author: febert
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.

    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(
          cancel_pending_enqueues, "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(
          cancel_pending_enqueues, "FIFOClose")
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
Project: lsdc    Author: febert
def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python array.
      name: A name for the operation (optional).
    """
    with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(values,
                                           dtype=value_dtype,
                                           name="values")
      self._name = scope

    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)
Project: lsdc    Author: febert
def export(self, name=None):
    """Returns tensors of all keys and values in the table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A pair of tensors with the first tensor containing all keys and the
        second tensors containing all values in the table.
    """
    with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
                        [self._table_ref]) as name:
      # pylint: disable=protected-access
      exported_keys, exported_values = gen_data_flow_ops._lookup_table_export(
          self._table_ref,
          self._key_dtype,
          self._value_dtype,
          name=name)
      # pylint: enable=protected-access

    exported_values.set_shape(exported_keys.get_shape().concatenate(
        self._value_shape))
    return exported_keys, exported_values
Project: lsdc    Author: febert
def sum_regularizer(regularizer_list, scope=None):
  """Returns a function that applies the sum of multiple regularizers.

  Args:
    regularizer_list: A list of regularizers to apply.
    scope: An optional scope name

  Returns:
    A function with signature `sum_reg(weights)` that applies the
    sum of all the input regularizers.
  """
  regularizer_list = [reg for reg in regularizer_list if reg is not None]
  if not regularizer_list:
    return None

  def sum_reg(weights):
    """Applies the sum of all the input regularizers."""
    with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
      regularizer_tensors = [reg(weights) for reg in regularizer_list]
      return math_ops.add_n(regularizer_tensors, name=name)

  return sum_reg
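A hedged sketch of combining two stock regularizers with the helper above, assuming the TF 1.x tf.contrib.layers regularizer factories:

import tensorflow as tf
from tensorflow.contrib.layers import l1_regularizer, l2_regularizer

combined = sum_regularizer([l1_regularizer(0.01), l2_regularizer(0.01)])
weights = tf.get_variable('w', shape=[10, 10])
penalty = combined(weights)  # one scalar: l1 penalty + l2 penalty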
Project: lsdc    Author: febert
def _sample_n(self, n, seed=None):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.base_distribution.sample_n(n=n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if lower_cutoff is not None:
        result_so_far = math_ops.select(result_so_far < lower_cutoff,
                                        lower_cutoff * ones, result_so_far)

      if upper_cutoff is not None:
        result_so_far = math_ops.select(result_so_far > upper_cutoff,
                                        upper_cutoff * ones, result_so_far)

      return result_so_far
Project: lsdc    Author: febert
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
    return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +
                                          nn.softplus(-b.logits)) +
            math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                           nn.softplus(b.logits)))
Project: lsdc    Author: febert
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.mu, n_b.mu]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    s_a_squared = math_ops.square(n_a.sigma)
    s_b_squared = math_ops.square(n_b.sigma)
    ratio = s_a_squared / s_b_squared
    return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared) +
            half * (ratio - one - math_ops.log(ratio)))
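The closed form being computed is the textbook KL divergence between two univariate Gaussians:

$$\mathrm{KL}\big(\mathcal{N}(\mu_a, \sigma_a^2)\,\|\,\mathcal{N}(\mu_b, \sigma_b^2)\big) = \frac{(\mu_a - \mu_b)^2}{2\sigma_b^2} + \frac{1}{2}\left(\frac{\sigma_a^2}{\sigma_b^2} - 1 - \log\frac{\sigma_a^2}{\sigma_b^2}\right)$$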
Project: lsdc    Author: febert
def inv_quadratic_form_on_vectors(
      self, x, name="inv_quadratic_form_on_vectors"):
    """Compute the quadratic form: `x^T A^{-1} x` where `x` is a batch vector.

    `x` is a batch vector with compatible shape if

    ```
    self.shape = [N1,...,Nn] + [k, k]
    x.shape = [M1,...,Mm] + [N1,...,Nn] + [k]
    ```

    Args:
      x: `Tensor` with compatible batch vector shape and same `dtype` as self.
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[M1,...,Mm] + [N1,...,Nn]` and same `dtype`
        as `self`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[x] + self.inputs):
        x = ops.convert_to_tensor(x, name="x")
        return self._inv_quadratic_form_on_vectors(x)
Project: lsdc    Author: febert
def rank(self, name="rank"):
    """Tensor rank.  Equivalent to `tf.rank(A)`.  Will equal `n + 2`.

    If this operator represents the batch matrix `A` with
    `A.shape = [N1,...,Nn, k, k]`, the `rank` is `n + 2`.

    Args:
      name:  A name scope to use for ops added by this method.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self.inputs):
        return array_ops.size(self.shape())
Project: lsdc    Author: febert
def batch_shape(self, name="batch_shape"):
    """Shape of batches associated with this operator.

    If this operator represents the batch matrix `A` with
    `A.shape = [N1,...,Nn, k, k]`, the `batch_shape` is `[N1,...,Nn]`.

    Args:
      name:  A name scope to use for ops added by this method.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self.inputs):
        return array_ops.slice(self.shape(), [0], [self.rank() - 2])
Project: lsdc    Author: febert
def vector_space_dimension(self, name="vector_space_dimension"):
    """Dimension of vector space on which this acts.  The `k` in `R^k`.

    If this operator represents the batch matrix `A` with
    `A.shape = [N1,...,Nn, k, k]`, the `vector_space_dimension` is `k`.

    Args:
      name:  A name scope to use for ops added by this method.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self.inputs):
        return array_ops.gather(self.shape(), self.rank() - 1)
Project: lsdc    Author: febert
def matmul(self, x, transpose_x=False, name="matmul"):
    """Left (batch) matmul `x` by this matrix:  `Ax`.

    `x` is a batch matrix with compatible shape if

    ```
    self.shape = [N1,...,Nn] + [k, k]
    x.shape = [N1,...,Nn] + [k, r]
    ```

    Args:
      x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
        this `Operator`.
      transpose_x: If `True`, `x` is transposed before multiplication.
      name:  A name to give this `Op`.

    Returns:
      A result equivalent to `tf.batch_matmul(self.to_dense(), x)`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[x] + self.inputs):
        x = ops.convert_to_tensor(x, name="x")
        return self._dispatch_based_on_batch(
            self._batch_matmul, self._matmul, x=x, transpose_x=transpose_x)
Project: lsdc    Author: febert
def sqrt_matmul(self, x, transpose_x=False, name="sqrt_matmul"):
    """Left (batch) matmul `x` by a sqrt of this matrix: `Sx` where `A = S S^T`.

    `x` is a batch matrix with compatible shape if

    ```
    self.shape = [N1,...,Nn] + [k, k]
    x.shape = [N1,...,Nn] + [k, r]
    ```

    Args:
      x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
        this `Operator`.
      transpose_x: If `True`, `x` is transposed before multiplication.
      name:  A name scope to use for ops added by this method.

    Returns:
      A result equivalent to `tf.batch_matmul(self.sqrt_to_dense(), x)`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[x] + self.inputs):
        x = ops.convert_to_tensor(x, name="x")
        return self._dispatch_based_on_batch(
            self._batch_sqrt_matmul, self._sqrt_matmul, x=x,
            transpose_x=transpose_x)
Project: lsdc    Author: febert
def extract_batch_shape(x, num_event_dims, name="extract_batch_shape"):
  """Extract the batch shape from `x`.

  Assuming `x.shape = batch_shape + event_shape`, where `event_shape` has
  `num_event_dims` dimensions, this `Op` returns the batch shape `Tensor`.

  Args:
    x: `Tensor` with rank at least `num_event_dims`.  If rank is not high enough
      this `Op` will fail.
    num_event_dims:  `int32` scalar `Tensor`.  The number of trailing dimensions
      in `x` to be considered as part of `event_shape`.
    name:  A name to prepend to created `Ops`.

  Returns:
    batch_shape:  `1-D` `int32` `Tensor`
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    return array_ops.slice(
        array_ops.shape(x), [0], [array_ops.rank(x) - num_event_dims])
Project: lsdc    Author: febert
def __init__(self, matrix, verify_pd=True, name="OperatorPDFull"):
    """Initialize an OperatorPDFull.

    Args:
      matrix:  Shape `[N1,...,Nb, k, k]` tensor with `b >= 0`, `k >= 1`.  The
        last two dimensions should be `k x k` symmetric positive definite
        matrices.
      verify_pd: Whether to check that `matrix` is symmetric positive definite.
        If `verify_pd` is `False`, correct behavior is not guaranteed.
      name:  A name to prepend to all ops created by this class.
    """
    with ops.name_scope(name):
      with ops.name_scope("init", values=[matrix]):
        matrix = ops.convert_to_tensor(matrix)
        # Check symmetric here.  Positivity will be verified by checking the
        # diagonal of the Cholesky factor inside the parent class.  The Cholesky
        # factorization linalg_ops.cholesky() does not always fail for non PSD
        # matrices, so don't rely on that.
        if verify_pd:
          matrix = distribution_util.assert_symmetric(matrix)
        chol = linalg_ops.cholesky(matrix)
        super(OperatorPDFull, self).__init__(chol, verify_pd=verify_pd)
Project: lsdc    Author: febert
def _get_identity_operator(self, v):
    """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
    with ops.name_scope("get_identity_operator", values=[v]):
      if v.get_shape().is_fully_defined():
        v_shape = v.get_shape().as_list()
        v_batch_shape = v_shape[:-2]
        r = v_shape[-1]
        id_shape = v_batch_shape + [r, r]
      else:
        v_shape = array_ops.shape(v)
        v_rank = array_ops.rank(v)
        v_batch_shape = array_ops.slice(v_shape, [0], [v_rank - 2])
        r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
        id_shape = array_ops.concat(0, (v_batch_shape, [r, r]))
      return operator_pd_identity.OperatorPDIdentity(
          id_shape, v.dtype, verify_pd=self._verify_pd)
Project: lsdc    Author: febert
def __init__(self, chol, verify_pd=True, name="OperatorPDCholesky"):
    """Initialize an OperatorPDCholesky.

    Args:
      chol:  Shape `[N1,...,Nn, k, k]` tensor with `n >= 0`, `k >= 1`, and
        positive diagonal elements.  The strict upper triangle of `chol` is
        never used, and the user may set these elements to zero, or ignore them.
      verify_pd: Whether to check that `chol` has positive diagonal (this is
        equivalent to it being a Cholesky factor of a symmetric positive
        definite matrix).  If `verify_pd` is `False`, correct behavior is not
        guaranteed.
      name:  A name to prepend to all ops created by this class.
    """
    self._verify_pd = verify_pd
    self._name = name
    with ops.name_scope(name):
      with ops.name_scope("init", values=[chol]):
        self._chol = self._check_chol(chol)
Project: lsdc    Author: febert
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    A scalar tensor: the sum of all the reduced input tensors.

  Raises:
    ValueError: if `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
  if len(tensors) == 1:
    return tensors[0]
  with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
    return math_ops.add_n(tensors, name=scope)
Project: lsdc    Author: febert
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    Boolean scalar `Tensor`: whether `actual_tensor` has the expected rank and shape.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
Project: skiprnn-2017-telecombcn    Author: imatge-upc
def _binary_round(x):
    """
    Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
    using the straight-through estimator for the gradient.

    Based on http://r2rt.com/binary-stochastic-neurons-in-tensorflow.html

    :param x: input tensor
    :return: y=round(x) with gradients defined by the identity mapping (y=x)
    """
    g = tf.get_default_graph()

    with ops.name_scope("BinaryRound") as name:
        with g.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
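A sketch of what the gradient override buys (illustrative values; TF 1.x graph mode assumed): the forward pass rounds, but gradients flow as if y = x:

import tensorflow as tf

x = tf.constant([0.2, 0.7])
y = _binary_round(x)          # forward: [0., 1.]
grad = tf.gradients(y, x)[0]  # straight-through: [1., 1.] rather than [0., 0.]
with tf.Session() as sess:
  print(sess.run([y, grad]))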
Project: seq2seq    Author: google
def initialize(self, name=None):
    with ops.name_scope(name, "%sInitialize" % type(self).__name__):
      (finished, next_inputs) = self._initialize_fn()
      if self._batch_size is None:
        self._batch_size = array_ops.size(finished)
    return (finished, next_inputs)
Project: seq2seq    Author: google
def sample(self, time, outputs, state, name=None):
    with ops.name_scope(
        name, "%sSample" % type(self).__name__, (time, outputs, state)):
      return self._sample_fn(time=time, outputs=outputs, state=state)
Project: seq2seq    Author: google
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(
        name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
      return self._next_inputs_fn(
          time=time, outputs=outputs, state=state, sample_ids=sample_ids)
Project: seq2seq    Author: google
def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)
Project: seq2seq    Author: google
def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
      finished = math_ops.equal(0, self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
      return (finished, next_inputs)
Project: seq2seq    Author: google
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      def read_from_ta(inp):
        return inp.read(next_time)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(read_from_ta, self._input_tas))
      return (finished, next_inputs, state)
Project: seq2seq    Author: google
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
Project: seq2seq    Author: google
def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      select_sample_noise = random_ops.random_uniform(
          [self.batch_size], seed=self._scheduling_seed)
      select_sample = (self._sampling_probability > select_sample_noise)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          array_ops.tile([-1], [self.batch_size]))
Project: seq2seq    Author: google
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Project: seq2seq    Author: google
def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
                        [time, outputs, state]):
      sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
      return math_ops.cast(
          sampler.sample(sample_shape=self.batch_size, seed=self._seed),
          dtypes.bool)
Project: tensorflow_seq2seq_chatbot    Author: higepon
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
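An illustrative call with two timesteps, batch size 2, and a 3-symbol vocabulary (shapes, not values, are the point; the old positional cross-entropy API this project targets is assumed):

import tensorflow as tf

logits = [tf.random_normal([2, 3]) for _ in range(2)]
targets = [tf.constant([0, 1]), tf.constant([2, 0])]
weights = [tf.ones([2]), tf.ones([2])]
log_perps = sequence_loss_by_example(logits, targets, weights)  # shape [2]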
Project: tensorflow_seq2seq_chatbot    Author: higepon
def sequence_loss(logits, targets, weights,
                  average_across_timesteps=True, average_across_batch=True,
                  softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits, batch-collapsed.

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    average_across_batch: If set, divide the returned cost by the batch size.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, defaults to "sequence_loss".

  Returns:
    A scalar float Tensor: The average log-perplexity per symbol (weighted).

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  with ops.name_scope(name, "sequence_loss", logits + targets + weights):
    cost = math_ops.reduce_sum(sequence_loss_by_example(
        logits, targets, weights,
        average_across_timesteps=average_across_timesteps,
        softmax_loss_function=softmax_loss_function))
    if average_across_batch:
      batch_size = array_ops.shape(targets[0])[0]
      return cost / math_ops.cast(batch_size, dtypes.float32)
    else:
      return cost