Python tensorflow.python.ops.array_ops module: identity() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.identity().
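
Before the per-project examples, here is a minimal sketch of the op itself (a hedged illustration assuming the TF 1.x graph/session API; `tf.identity` is the public alias of `array_ops.identity`):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# identity returns a tensor with the same content as its input; it is
# typically used to rename a tensor or to anchor control dependencies.
y = tf.identity(x, name='x_copy')

with tf.Session() as sess:
    print(sess.run(y))  # [1. 2. 3.]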

Project: LIE    Author: EmbraceLife    | project source | file source
def eye(size, dtype=None, name=None):
  """Instantiates an identity matrix and returns it.

  Arguments:
      size: Integer, number of rows/columns.
      dtype: String, data type of the returned Keras variable.
      name: String, name of the returned Keras variable.

  Returns:
      A Keras variable, an identity matrix.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.eye(3)
      >>> K.eval(kvar)
      array([[ 1.,  0.,  0.],
             [ 0.,  1.,  0.],
             [ 0.,  0.,  1.]], dtype=float32)
  ```
  """
  return variable(np.eye(size), dtype, name)

Project: lsdc    Author: febert    | project source | file source
def identity(labeled_tensor, name=None):
  """The identity op.

  See tf.identity.

  Args:
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    The tensor.
  """
  with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
    return LabeledTensor(
        array_ops.identity(labeled_tensor.tensor, name=scope),
        labeled_tensor.axes)


# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testExplicitStochasticTensors(self):
    with self.test_session() as sess:
      mu = constant_op.constant([0.0, 0.1, 0.2])
      sigma = constant_op.constant([1.1, 1.2, 1.3])
      with st.value_type(st.SampleValue()):
        dt1 = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
        dt2 = st.StochasticTensor(NormalNotParam(loc=mu, scale=sigma))
        loss = math_ops.square(array_ops.identity(dt1)) + 10. + dt2

        sl_all = sg.surrogate_loss([loss])
        sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1])
        sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2])

        dt1_term = dt1.distribution.log_prob(dt1) * loss
        dt2_term = dt2.distribution.log_prob(dt2) * loss

        self.assertAllClose(*sess.run(
            [sl_all, sum([loss, dt1_term, dt2_term])]))
        self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])]))
        self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])]))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def _testSingleAllReduce(self, sess, np_type, nccl_fn, numpy_accumulation_fn):
    for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
      shape = (3, 4)
      np_ans = None
      tensors = []
      for d in devices:
        with ops.device(d):
          t = ((np.random.random_sample(shape) - .5) * 1024).astype(np_type)
          if np_ans is None:
            np_ans = t
          else:
            np_ans = numpy_accumulation_fn(np_ans, t)
          tensors.append(array_ops.identity(t))

      all_reduce_tensors = nccl_fn(tensors)

      # Test shape inference.
      for r in all_reduce_tensors:
        self.assertEqual(shape, r.get_shape())

      # Test execution and results.
      nccl_results = sess.run(all_reduce_tensors)
      for r in nccl_results:
        self.assertAllClose(r, np_ans)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def state_tuple_to_dict(state):
  """Returns a dict containing flattened `state`.

  Args:
    state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
    have the same rank and agree on all dimensions except the last.

  Returns:
    A dict containing the `Tensor`s that make up `state`. The keys of the dict
    are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
    in a depth-first traversal of `state`.
  """
  with ops.name_scope('state_tuple_to_dict'):
    flat_state = nest.flatten(state)
    state_dict = {}
    for i, state_component in enumerate(flat_state):
      state_name = _get_state_name(i)
      state_value = (None if state_component is None else array_ops.identity(
          state_component, name=state_name))
      state_dict[state_name] = state_value
  return state_dict
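
The `array_ops.identity` call above exists mainly to give each flattened state component a predictable name. A minimal sketch of that naming pattern with the public API (the name `state_0` is illustrative, not taken from this project):

import tensorflow as tf

state = tf.zeros([2, 4])
# identity creates a new op with an explicit, stable name, so the tensor
# can later be retrieved via graph.get_tensor_by_name('state_0:0').
named_state = tf.identity(state, name='state_0')
print(named_state.name)  # state_0:0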
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def identity(labeled_tensor, name=None):
  """The identity op.

  See tf.identity.

  Args:
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    The tensor.
  """
  with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
    return LabeledTensor(
        array_ops.identity(
            labeled_tensor.tensor, name=scope),
        labeled_tensor.axes)


# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testDomainErrorExceptions(self):

    class MyDistException(normal.Normal):
      pass

    # Register a KL method that unconditionally returns NaN values
    @kullback_leibler.RegisterKL(MyDistException, MyDistException)
    # pylint: disable=unused-argument,unused-variable
    def _kl(a, b, name=None):
      return array_ops.identity([float("nan")])

    # pylint: disable=unused-argument,unused-variable

    with self.test_session():
      a = MyDistException(loc=0.0, scale=1.0)
      kl = kullback_leibler.kl(a, a)
      with self.assertRaisesOpError(
          "KL calculation between .* and .* returned NaN values"):
        kl.eval()
      kl_ok = kullback_leibler.kl(a, a, allow_nan=True)
      self.assertAllEqual([float("nan")], kl_ok.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testAssertCloseIntegerDtype(self):
    x = [1, 5, 10, 15, 20]
    y = x
    z = [2, 5, 10, 15, 20]
    with self.test_session():
      with ops.control_dependencies([distribution_util.assert_close(x, y)]):
        array_ops.identity(x).eval()

      with ops.control_dependencies([distribution_util.assert_close(y, x)]):
        array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(x, z)]):
          array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(y, z)]):
          array_ops.identity(y).eval()
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testAssertCloseNonIntegerDtype(self):
    x = np.array([1., 5, 10, 15, 20], dtype=np.float32)
    y = x + 1e-8
    z = [2., 5, 10, 15, 20]
    with self.test_session():
      with ops.control_dependencies([distribution_util.assert_close(x, y)]):
        array_ops.identity(x).eval()

      with ops.control_dependencies([distribution_util.assert_close(y, x)]):
        array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(x, z)]):
          array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(y, z)]):
          array_ops.identity(y).eval()
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testAssertCloseEpsilon(self):
    x = [0., 5, 10, 15, 20]
    # x != y
    y = [0.1, 5, 10, 15, 20]
    # x = z
    z = [1e-8, 5, 10, 15, 20]
    with self.test_session():
      with ops.control_dependencies([distribution_util.assert_close(x, z)]):
        array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(x, y)]):
          array_ops.identity(x).eval()

      with self.assertRaisesOpError("Condition x ~= y"):
        with ops.control_dependencies([distribution_util.assert_close(y, z)]):
          array_ops.identity(y).eval()
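
The three tests above all rely on the same TF 1.x idiom: evaluating `array_ops.identity(x)` inside a `control_dependencies` block forces the assertion to run first. A minimal standalone sketch of that idiom:

import tensorflow as tf

x = tf.constant([1.0, 2.0])
assert_positive = tf.Assert(tf.reduce_all(x > 0), [x])
with tf.control_dependencies([assert_positive]):
    # identity adds a new op inside the context, so fetching checked_x
    # always triggers the assertion before returning the value of x.
    checked_x = tf.identity(x)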
Project: qrn    Author: uwnlp    | project source | file source
def state_saving_rnn(cell, inputs, state_saver, state_name,
                     sequence_length=None, scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.
  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size].
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: The name to use with the state_saver.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "RNN".
  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      state is the final state
  Raises:
    TypeError: If "cell" is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  initial_state = state_saver.state(state_name)
  (outputs, state) = rnn(cell, inputs, initial_state=initial_state,
                         sequence_length=sequence_length, scope=scope)
  save_state = state_saver.save_state(state_name, state)
  with ops.control_dependencies([save_state]):
    outputs[-1] = array_ops.identity(outputs[-1])

  return (outputs, state)
Project: Question-Answering    Author: MurtyShikhar    | project source | file source
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._initial_cell_state is not None:
        cell_state = self._initial_cell_state
      else:
        cell_state = self._cell.zero_state(batch_size, dtype)
      error_message = (
          "When calling zero_state of AttentionWrapper %s: " % self._base_name +
          "Non-matching batch sizes between the memory "
          "(encoder output) and the requested batch size.  Are you using "
          "the BeamSearchDecoder?  If so, make sure your encoder output has "
          "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
          "the batch_size= argument passed to zero_state is "
          "batch_size * beam_width.")
      with ops.control_dependencies(
          [check_ops.assert_equal(batch_size,
                                  self._attention_mechanism.batch_size,
                                  message=error_message)]):
        cell_state = nest.map_structure(
            lambda s: array_ops.identity(s, name="checked_cell_state"),
            cell_state)
      if self._alignment_history:
        alignment_history = tensor_array_ops.TensorArray(
            dtype=dtype, size=0, dynamic_size=True)
      else:
        alignment_history = ()
      return AttentionWrapperState(
          cell_state=cell_state,
          time=array_ops.zeros([], dtype=dtypes.int32),
          attention=_zero_state_tensors(self._attention_size, batch_size,
                                        dtype),
          alignments=self._attention_mechanism.initial_alignments(
              batch_size, dtype),
          alignment_history=alignment_history)
Project: Clairvoyante    Author: aquaskyline    | project source | file source
def dropout_selu(x, rate, alpha= -1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        # Bernoulli mask: floor(keep_prob + U[0, 1)) is 1 with probability
        # keep_prob and 0 otherwise.
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        # Dropped units are set to alpha (SELU's negative saturation value)
        # rather than to zero.
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        # The affine correction a * ret + b restores the fixed-point mean and
        # variance of the self-normalizing (SELU) activation distribution.
        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x))
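
A hedged usage sketch for the helper above, assuming the module-level imports of the original file are in scope and a TF 1.x session is used:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
is_training = tf.placeholder(tf.bool, [])
# When is_training evaluates to False, smart_cond falls through to the
# array_ops.identity(x) branch, i.e. a pure pass-through.
y = dropout_selu(x, rate=0.1, training=is_training)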
Project: LIE    Author: EmbraceLife    | project source | file source
def identity(x):
      """Returns a tensor with the same content as the input tensor.

      Arguments:
          x: The input tensor.

      Returns:
          A tensor of the same shape, type and content.
      """
      return array_ops.identity(x)
Project: LIE    Author: EmbraceLife    | project source | file source
def __call__(self, shape, dtype=None):
  if len(shape) != 2 or shape[0] != shape[1]:
    raise ValueError('Identity matrix initializer can only be used '
                     'for 2D square matrices.')
  else:
    return self.gain * np.identity(shape[0])
Project: LIE    Author: EmbraceLife    | project source | file source
def identity_block(input_tensor, kernel_size, filters, stage, block):
  """The identity block is the block that has no conv layer at the shortcut.

  Arguments:
      input_tensor: input tensor
      kernel_size: default 3, the kernel size of the middle conv layer
          of the main path
      filters: list of integers, the filters of the 3 conv layers of the
          main path
      stage: integer, current stage label, used for generating layer names
      block: 'a', 'b'..., current block label, used for generating layer names

  Returns:
      Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  if K.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
  x = Activation('relu')(x)

  x = Conv2D(
      filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
  x = Activation('relu')(x)

  x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

  x = layers.add([x, input_tensor])
  x = Activation('relu')(x)
  return x
Project: lsdc    Author: febert    | project source | file source
def value_type(dist_value_type):
  """Creates a value type context for any StochasticTensor created within.

  Typical usage:

  ```
  with sg.value_type(sg.MeanValue(stop_gradients=True)):
    dt = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
  ```

  In the example above, `dt.value()` (or equivalently, `tf.identity(dt)`) will
  be the mean value of the Normal distribution, i.e., `mu` (possibly
  broadcasted to the shape of `sigma`).  Furthermore, because the `MeanValue`
  was marked with `stop_gradients=True`, this value will have been wrapped
  in a `stop_gradients` call to disable any possible backpropagation.

  Args:
    dist_value_type: An instance of `MeanValue`, `SampleAndReshapeValue`, or
      any other stochastic value type.

  Yields:
    A context for `StochasticTensor` objects that controls the
    value created when they are initialized.

  Raises:
    TypeError: if `dist_value_type` is not an instance of a stochastic value
      type.
  """
  if not isinstance(dist_value_type, _StochasticValueType):
    raise TypeError("dist_value_type must be a Distribution Value Type")
  thread_id = threading.current_thread().ident
  stack = _STOCHASTIC_VALUE_STACK[thread_id]
  if stack:
    stack[-1].pushed_above(dist_value_type)
  stack.append(dist_value_type)
  yield
  stack.pop()
  if stack:
    stack[-1].popped_above(dist_value_type)
Project: lsdc    Author: febert    | project source | file source
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)),
          [string_ops.string_join(
              ["Tensor %s should be a multiple of: " % value.name,
               string_ops.as_string(multiple_of),
               ", but saw value: ",
               string_ops.as_string(value),
               ". Consider setting pad=True."])])]):
    new_value = array_ops.identity(
        value, name="multiple_of_checked")
    return new_value
Project: lsdc    Author: febert    | project source | file source
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.

  Args:
    value: A Tensor, possibly with shape associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).

  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its rank.  If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.

  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_rank, array_ops.rank(value)),
          [string_ops.string_join(
              ["Rank of tensor %s should be: " % value.name,
               string_ops.as_string(expected_rank),
               ", shape received:"]),
           array_ops.shape(value)])]):
    new_value = array_ops.identity(value, name="rank_checked")
    if isinstance(expected_rank, ops.Tensor):
      expected_rank_value = tensor_util.constant_value(expected_rank)
      if expected_rank_value is not None:
        expected_rank = int(expected_rank_value)
    if not isinstance(expected_rank, ops.Tensor):
      try:
        new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
      except ValueError as e:
        raise ValueError("Rank check failed for %s: %s"
                         % (value.name, str(e)))
    return new_value
Project: lsdc    Author: febert    | project source | file source
def _base_inference(self, data, data_spec=None):
    """Returns an op that performs inference without a softmax."""
    inference_result = self._do_layer_inference(self.layers[0], data)

    for layer in self.layers[1:]:
      inference_result = self._do_layer_inference(layer, inference_result)

    output_size = 1 if self.is_regression else self.params.num_classes
    output = layers.fully_connected(
        inference_result, output_size, activation_fn=array_ops.identity)

    return output
Project: lsdc    Author: febert    | project source | file source
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self.p)
Project: lsdc    Author: febert    | project source | file source
def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _DIVERGENCES.get((type(dist_a), type(dist_b)), None)
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % ((type(dist_a).__name__, type(dist_b).__name__)))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False).  Values:"
             % (dist_a.name, dist_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")
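
A hedged usage sketch for the NaN check above, mirroring the `mu`/`sigma` constructor arguments used elsewhere in this listing (later TF releases renamed them to `loc`/`scale`):

a = distributions.Normal(mu=0.0, sigma=1.0)
b = distributions.Normal(mu=1.0, sigma=2.0)
kl_t = kl(a, b)                   # identity-wrapped "checked_kl"; fails on NaN
kl_ok = kl(a, b, allow_nan=True)  # raw KL; the identity-wrapped check is skipped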
Project: lsdc    Author: febert    | project source | file source
def __init__(self,
               lam,
               validate_args=False,
               allow_nan_stats=True,
               name="Poisson"):
    """Construct Poisson distributions.

    Args:
      lam: Floating point tensor, the rate parameter of the
        distribution(s). `lam` must be positive.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `lam > 0` as well as inputs to pmf computations are non-negative
        integers. If validate_args is `False`, then `pmf` computations might
        return `NaN`, but can be evaluated at any real value.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
    with ops.name_scope(name, values=[lam]) as ns:
      with ops.control_dependencies([check_ops.assert_positive(lam)] if
                                    validate_args else []):
        self._lam = array_ops.identity(lam, name="lam")
        super(Poisson, self).__init__(
            dtype=self._lam.dtype,
            parameters={"lam": self._lam},
            is_continuous=False,
            is_reparameterized=False,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            name=ns)
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self.lam)
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    return array_ops.identity(self.mu)
Project: lsdc    Author: febert    | project source | file source
def _sqrt_to_dense(self):
    chol = array_ops.matrix_band_part(self._chol, -1, 0)
    return array_ops.identity(chol)
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self._mean_val)
Project: lsdc    Author: febert    | project source | file source
def __init__(self,
               mu,
               cov,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalCov"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
    which determines the covariance.

    Args:
      mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
      cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `cov` are different dtypes.
    """
    with ops.name_scope(name) as ns:
      with ops.name_scope("init", values=[mu] + cov.inputs):
        self._mu = array_ops.identity(mu, name="mu")
        self._cov = cov
        self._validate_args = validate_args  # Needed by _assert_valid_mu.
        self._mu = self._assert_valid_mu(self._mu)
        super(_MultivariateNormalOperatorPD, self).__init__(
            dtype=self._mu.dtype,
            parameters={"mu": self._mu, "cov": self._cov},
            is_reparameterized=True,
            is_continuous=True,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            name=ns)
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    return array_ops.identity(self._mu)
Project: lsdc    Author: febert    | project source | file source
def _clip_dense(self, var):
    with self._maybe_colocate_with(var):
      updated_var_value = array_ops.identity(var.ref())
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking)
Project: lsdc    Author: febert    | project source | file source
def get_mean_baseline(ema_decay=0.99, name=None):
  """ExponentialMovingAverage baseline.

  Args:
    ema_decay: decay rate for the ExponentialMovingAverage.
    name: name for variable scope of the ExponentialMovingAverage.

  Returns:
    Callable baseline function that takes the `StochasticTensor` (unused) and
    the downstream `loss`, and returns an EMA of the loss.
  """

  def mean_baseline(_, loss):
    with vs.variable_scope(name, default_name="MeanBaseline"):
      reduced_loss = math_ops.reduce_mean(loss)

      ema = training.ExponentialMovingAverage(decay=ema_decay)
      update_op = ema.apply([reduced_loss])

      with ops.control_dependencies([update_op]):
        # Using `identity` causes an op to be added in this context, which
        # triggers the update. Removing the `identity` means nothing is updated.
        baseline = array_ops.identity(ema.average(reduced_loss))

      return baseline

  return mean_baseline
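
The comment inside `mean_baseline` describes a general TF 1.x rule: only ops created inside a `control_dependencies` block depend on the listed ops. A minimal standalone sketch:

import tensorflow as tf

counter = tf.Variable(0)
increment = tf.assign_add(counter, 1)

with tf.control_dependencies([increment]):
    # identity is created inside the context, so fetching `value` always
    # runs `increment` first. Fetching `counter` directly would not,
    # because that op predates the context.
    value = tf.identity(counter)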
Project: lsdc    Author: febert    | project source | file source
def value_type(dist_value_type):
  """Creates a value type context for any StochasticTensor created within.

  Typical usage:

  ```
  with sg.value_type(sg.MeanValue(stop_gradients=True)):
    st = sg.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
  ```

  In the example above, `st.value()` (or equivalently, `tf.identity(st)`) will
  be the mean value of the Normal distribution, i.e., `mu` (possibly
  broadcasted to the shape of `sigma`).  Furthermore, because the `MeanValue`
  was marked with `stop_gradients=True`, this value will have been wrapped
  in a `stop_gradients` call to disable any possible backpropagation.

  Args:
    dist_value_type: An instance of `MeanValue`, `SampleValue`, or
      any other stochastic value type.

  Yields:
    A context for `StochasticTensor` objects that controls the
    value created when they are initialized.

  Raises:
    TypeError: if `dist_value_type` is not an instance of a stochastic value
      type.
  """
  if not isinstance(dist_value_type, _StochasticValueType):
    raise TypeError("dist_value_type must be a Distribution Value Type")
  thread_id = threading.current_thread().ident
  stack = _STOCHASTIC_VALUE_STACK[thread_id]
  if stack:
    stack[-1].pushed_above(dist_value_type)
  stack.append(dist_value_type)
  yield
  stack.pop()
  if stack:
    stack[-1].popped_above(dist_value_type)
Project: lsdc    Author: febert    | project source | file source
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)),
          [string_ops.string_join(
              ["Tensor %s should be a multiple of: " % value.name,
               string_ops.as_string(multiple_of),
               ", but saw value: ",
               string_ops.as_string(value),
               ". Consider setting pad=True."])])]):
    new_value = array_ops.identity(
        value, name="multiple_of_checked")
    return new_value
Project: lsdc    Author: febert    | project source | file source
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.

  Args:
    value: A Tensor, possibly with shape associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).

  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its rank.  If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.

  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_rank, array_ops.rank(value)),
          [string_ops.string_join(
              ["Rank of tensor %s should be: " % value.name,
               string_ops.as_string(expected_rank),
               ", shape received:"]),
           array_ops.shape(value)])]):
    new_value = array_ops.identity(value, name="rank_checked")
    if isinstance(expected_rank, ops.Tensor):
      expected_rank_value = tensor_util.constant_value(expected_rank)
      if expected_rank_value is not None:
        expected_rank = int(expected_rank_value)
    if not isinstance(expected_rank, ops.Tensor):
      try:
        new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
      except ValueError as e:
        raise ValueError("Rank check failed for %s: %s"
                         % (value.name, str(e)))
    return new_value
Project: lsdc    Author: febert    | project source | file source
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: array_ops.identity(inputs)
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
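
A usage sketch for the identity branch above (assuming TF 1.x, where this function is exposed as `tf.contrib.layers.dropout`):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 64])
is_training = tf.placeholder(tf.bool, [])
# With a tensor predicate, smart_cond emits a tf.cond whose false branch
# is array_ops.identity(inputs), returning the inputs unchanged.
outputs = tf.contrib.layers.dropout(
    inputs, keep_prob=0.5, is_training=is_training)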
Project: lsdc    Author: febert    | project source | file source
def training_loss(self, features, labels, data_spec=None,
                    name='training_loss'):
    return array_ops.identity(
        self._get_loss(features, labels, data_spec=data_spec), name=name)
Project: lsdc    Author: febert    | project source | file source
def _base_inference(self, data, data_spec=None):
    """Returns an op that performs inference without a softmax."""
    inference_result = self._do_layer_inference(self.layers[0], data)

    for layer in self.layers[1:]:
      inference_result = self._do_layer_inference(layer, inference_result)

    output_size = 1 if self.is_regression else self.params.num_classes
    output = layers.fully_connected(
        inference_result, output_size, activation_fn=array_ops.identity)

    return output
Project: lsdc    Author: febert    | project source | file source
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
Project: lsdc    Author: febert    | project source | file source
def transpose(labeled_tensor, axis_order=None, name=None):
  """Permute a tensor's axes.

  See tf.transpose.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. By default, the
      order of axes is reversed.
    name: Optional op name.

  Returns:
    The permuted tensor.

  Raises:
    ValueError: If axis_order isn't a permutation of the existing axes.
  """
  with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    original_order = list(labeled_tensor.axes.keys())
    if axis_order is None:
      axis_order = list(reversed(original_order))
    elif sorted(axis_order) != sorted(original_order):
      raise ValueError(
          'The new axis order must have the same names as the original axes, '
          'but the new order is %r while the original order is %r' %
          (axis_order, original_order))

    axis_names = list(labeled_tensor.axes.keys())
    permutation = [axis_names.index(n) for n in axis_order]

    # Note: TensorFlow doesn't copy data for the identity transpose.
    transpose_tensor = array_ops.transpose(labeled_tensor.tensor,
                                           permutation,
                                           name=scope)

    permuted_axes = [labeled_tensor.axes[n] for n in axis_order]

    return LabeledTensor(transpose_tensor, permuted_axes)
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self.p)
Project: lsdc    Author: febert    | project source | file source
def __init__(self,
               lam,
               validate_args=False,
               allow_nan_stats=True,
               name="Poisson"):
    """Construct Poisson distributions.

    Args:
      lam: Floating point tensor, the rate parameter of the
        distribution(s). `lam` must be positive.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `lam > 0` as well as inputs to pmf computations are non-negative
        integers. If validate_args is `False`, then `pmf` computations might
        return `NaN`, but can be evaluated at any real value.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[lam]) as ns:
      with ops.control_dependencies([check_ops.assert_positive(lam)] if
                                    validate_args else []):
        self._lam = array_ops.identity(lam, name="lam")
    super(Poisson, self).__init__(
        dtype=self._lam.dtype,
        is_continuous=False,
        is_reparameterized=False,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._lam],
        name=ns)
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self.lam)
Project: lsdc    Author: febert    | project source | file source
def _variance(self):
    return array_ops.identity(self.lam)
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    return array_ops.identity(self.mu)
Project: lsdc    Author: febert    | project source | file source
def _sqrt_to_dense(self):
    chol = array_ops.matrix_band_part(self._chol, -1, 0)
    return array_ops.identity(chol)
Project: lsdc    Author: febert    | project source | file source
def _batch_shape(self):
    # Use identity to inherit the caller's "name" scope.
    return array_ops.identity(self._batch_shape_val)
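
The comment above relies on `identity` being created under the caller's active name scope. A minimal sketch of that behavior:

import tensorflow as tf

shape_val = tf.constant([3, 2])
with tf.name_scope('MyDistribution'):
    # The identity op is created here, so its name carries the caller's
    # scope even though shape_val was defined outside of it.
    batch_shape = tf.identity(shape_val)
print(batch_shape.name)  # MyDistribution/Identity:0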
Project: lsdc    Author: febert    | project source | file source
def _mean(self):
    return array_ops.identity(self._mu)
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    return array_ops.identity(self._mu)
Project: Sing_Par    Author: wanghm92    | project source | file source
def state_saving_rnn(cell, inputs, state_saver, state_name, sequence_length=None, scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, cell.input_size].
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: The name to use with the state_saver.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "RNN".

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      state is the final state

  Raises:
    TypeError: If "cell" is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  initial_state = state_saver.state(state_name)
  (outputs, state) = rnn(cell, inputs, initial_state=initial_state,
                         sequence_length=sequence_length, scope=scope)
  save_state = state_saver.save_state(state_name, state)
  with ops.control_dependencies([save_state]):
    outputs[-1] = array_ops.identity(outputs[-1])

  return (outputs, state)