Python tensorflow.python.ops.math_ops module: range() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.range().

Project: tensorflow_end2end_speech_recognition    Author: hirofumi0810    | Project source | File source
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.
    Retains as much of the static shape information as possible.
    Args:
        x: A tensor of rank 2 or higher.
    Returns:
        x transposed along the first two dimensions.
    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t
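
Below is a minimal, hypothetical usage sketch (assuming TensorFlow 1.x graph mode, where `tf.placeholder` exists, and the imports used by the snippet above); the time and depth sizes are made up for illustration:

```python
import tensorflow as tf

# [batch, time, depth] with an unknown batch size.
x = tf.placeholder(tf.float32, shape=[None, 20, 512])
x_t = _transpose_batch_time(x)
print(x_t.get_shape())  # (20, ?, 512) -- static shape retained where known
```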
Project: LIE    Author: EmbraceLife    | Project source | File source
def count_params(x):
  """Returns the number of scalars in a Keras variable.

  Arguments:
      x: Keras variable.

  Returns:
      Integer, the number of scalars in `x`.

  Example:
  ```python
      >>> kvar = K.zeros((2,3))
      >>> K.count_params(kvar)
      6
      >>> K.eval(kvar)
      array([[ 0.,  0.,  0.],
             [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  shape = x.get_shape()
  return np.prod([shape[i]._value for i in range(len(shape))])

Project: LIE    Author: EmbraceLife    | Project source | File source
def get_test_data(train_samples,
                  test_samples,
                  input_shape,
                  num_classes):
  """Generates test data to train a model on.

  Arguments:
    train_samples: Integer, how many training samples to generate.
    test_samples: Integer, how many test samples to generate.
    input_shape: Tuple of integers, shape of the inputs.
    num_classes: Integer, number of classes for the data and targets.

  Returns:
    A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  num_sample = train_samples + test_samples
  templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
  y = np.random.randint(0, num_classes, size=(num_sample,))
  x = np.zeros((num_sample,) + input_shape)
  for i in range(num_sample):
    x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
  return ((x[:train_samples], y[:train_samples]),
          (x[train_samples:], y[train_samples:]))
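
A hypothetical call, to make the return structure concrete (assumes `numpy` is imported as `np`, as in the snippet; shapes are chosen arbitrarily):

```python
(x_train, y_train), (x_test, y_test) = get_test_data(
    train_samples=200, test_samples=50, input_shape=(10,), num_classes=3)
print(x_train.shape, y_train.shape)  # (200, 10) (200,)
print(x_test.shape, y_test.shape)    # (50, 10) (50,)
```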
Project: LIE    Author: EmbraceLife    | Project source | File source
def convert_kernel(kernel):
  """Converts a Numpy kernel matrix from Theano format to TensorFlow format.

  Also works reciprocally, since the transformation is its own inverse.

  Arguments:
      kernel: Numpy array (3D, 4D or 5D).

  Returns:
      The converted kernel.

  Raises:
      ValueError: in case of invalid kernel shape or invalid data_format.
  """
  kernel = np.asarray(kernel)
  if not 3 <= kernel.ndim <= 5:
    raise ValueError('Invalid kernel shape:', kernel.shape)
  # Reverse (flip) every axis except the last two (input/output channels).
  slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
  no_flip = (slice(None, None), slice(None, None))
  slices[-2:] = no_flip
  # Index with a tuple: modern NumPy rejects a list of slices here.
  return np.copy(kernel[tuple(slices)])
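
A quick sanity check of the self-inverse property, with a made-up 4D kernel shape:

```python
import numpy as np

kernel_th = np.random.random((3, 3, 64, 128))  # hypothetical 4D conv kernel
kernel_tf = convert_kernel(kernel_th)
# The spatial axes are flipped; the last two (in/out channel) axes are not,
# so converting twice recovers the original kernel.
assert np.allclose(convert_kernel(kernel_tf), kernel_th)
```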
Project: LIE    Author: EmbraceLife    | Project source | File source
# Module-level imports. (`from __future__ import ...` is only legal at the top
# of a module, so these imports cannot live inside a function as the original
# snippet suggested.)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import threading

import numpy as np
from six.moves import range  # pylint: disable=redefined-builtin

from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.python.platform import tf_logging as logging

# pylint: disable=g-import-not-at-top
Project: LIE    Author: EmbraceLife    | Project source | File source
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape).as_list()
  param_shape = input_shape[1:]
  self.param_broadcast = [False] * len(param_shape)
  if self.shared_axes is not None:
    for i in self.shared_axes:
      param_shape[i - 1] = 1
      self.param_broadcast[i - 1] = True
  self.alpha = self.add_weight(
      shape=param_shape,
      name='alpha',
      initializer=self.alpha_initializer,
      regularizer=self.alpha_regularizer,
      constraint=self.alpha_constraint)
  # Set input spec
  axes = {}
  if self.shared_axes:
    for i in range(1, len(input_shape)):
      if i not in self.shared_axes:
        axes[i] = input_shape[i]
  self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
  self.built = True
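
A pure-Python sketch of the parameter-shape logic above, with a hypothetical image-like input (batch, height, width, channels) and `shared_axes=[1, 2]`:

```python
input_shape = [None, 32, 32, 64]   # hypothetical [batch, H, W, C]
shared_axes = [1, 2]               # share alpha across H and W
param_shape = input_shape[1:]
for i in shared_axes:
    param_shape[i - 1] = 1
print(param_shape)                 # [1, 1, 64] -- one alpha per channel
```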
Project: LIE    Author: EmbraceLife    | Project source | File source
def compute_output_shape(self, input_shape):
  if input_shape[0] is None:
    output_shape = None
  else:
    output_shape = input_shape[0][1:]
  for i in range(1, len(input_shape)):
    if input_shape[i] is None:
      shape = None
    else:
      shape = input_shape[i][1:]
    output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
  batch_sizes = [s[0] for s in input_shape if s is not None]
  batch_sizes = set(batch_sizes)
  batch_sizes -= set([None])
  if len(batch_sizes) == 1:
    output_shape = (list(batch_sizes)[0],) + output_shape
  else:
    output_shape = (None,) + output_shape
  return output_shape
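
The batch-size reconciliation at the end can be illustrated in plain Python (shapes are hypothetical):

```python
input_shape = [(32, 10), (None, 10), (32, 10)]  # hypothetical input shapes
batch_sizes = set(s[0] for s in input_shape if s is not None) - {None}
print(batch_sizes)  # {32} -- exactly one known size, so 32 is propagated
```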
Project: LIE    Author: EmbraceLife    | Project source | File source
def build(self, input_shape):
  # Used purely for shape validation.
  if not isinstance(input_shape, list):
    raise ValueError('`Concatenate` layer should be called '
                     'on a list of inputs')
  if all([shape is None for shape in input_shape]):
    return
  reduced_inputs_shapes = [
      tensor_shape.TensorShape(shape).as_list() for shape in input_shape
  ]
  shape_set = set()
  for i in range(len(reduced_inputs_shapes)):
    del reduced_inputs_shapes[i][self.axis]
    shape_set.add(tuple(reduced_inputs_shapes[i]))
  if len(shape_set) > 1:
    raise ValueError('`Concatenate` layer requires '
                     'inputs with matching shapes '
                     'except for the concat axis. '
                     'Got inputs shapes: %s' % (input_shape))
  self.built = True
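
A plain-Python sketch of the validation: dropping the concat axis from each (hypothetical) shape must leave a single distinct shape:

```python
axis = -1
shapes = [[None, 8, 16], [None, 8, 32]]  # hypothetical inputs to Concatenate
reduced = [list(s) for s in shapes]
for r in reduced:
    del r[axis]
print(set(tuple(r) for r in reduced))  # {(None, 8)} -- one entry, shapes match
```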
Project: LIE    Author: EmbraceLife    | Project source | File source
def call(self, inputs):
  x1 = inputs[0]
  x2 = inputs[1]
  if isinstance(self.axes, int):
    if self.axes < 0:
      axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
    else:
      axes = [self.axes] * 2
  else:
    axes = []
    for i in range(len(self.axes)):
      if self.axes[i] < 0:
        axes.append(self.axes[i] % K.ndim(inputs[i]))
      else:
        axes.append(self.axes[i])
  if self.normalize:
    x1 = K.l2_normalize(x1, axis=axes[0])
    x2 = K.l2_normalize(x2, axis=axes[1])
  output = K.batch_dot(x1, x2, axes)
  return output
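
The negative-axis handling reduces to Python's modulo on the tensor rank; a toy illustration with hypothetical ranks:

```python
ndim_x1, ndim_x2 = 3, 3   # hypothetical ranks of the two inputs
axes = -1                 # a single negative axis
print([axes % ndim_x1, axes % ndim_x2])  # [2, 2] -- the last axis of each input
```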
Project: lsdc    Author: febert    | Project source | File source
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
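
A minimal sketch of the mask it produces (assumes TensorFlow 1.x with a session; the lengths are illustrative):

```python
import tensorflow as tf

lengths = tf.constant([1, 3, 2])
masks = _lengths_to_masks(lengths, max_length=4)
with tf.Session() as sess:
    print(sess.run(masks))
    # [[1. 0. 0. 0.]
    #  [1. 1. 1. 0.]
    #  [1. 1. 0. 0.]]
```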
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               num_shards=1,
               name='ShardedMutableHashTable'):
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      super(_ShardedMutableHashTable, self).__init__(key_dtype, value_dtype,
                                                     scope)
      table_shards = []
      for i in range(num_shards):
        table_shards.append(lookup_ops.MutableHashTable(
            key_dtype=key_dtype,
            value_dtype=value_dtype,
            default_value=default_value,
            name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access
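
The per-shard naming scheme is plain string formatting; for example, with three hypothetical shards:

```python
name, num_shards = 'ShardedMutableHashTable', 3
print(['%s-%d-of-%d' % (name, i + 1, num_shards) for i in range(num_shards)])
# ['ShardedMutableHashTable-1-of-3', 'ShardedMutableHashTable-2-of-3',
#  'ShardedMutableHashTable-3-of-3']
```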
Project: lsdc    Author: febert    | Project source | File source
def insert(self, keys, values, name=None):
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
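
How `dynamic_partition` routes keys to shards can be seen in isolation (TF 1.x; the keys and shard indices are made up):

```python
import tensorflow as tf

keys = tf.constant([10, 11, 12, 13])
shard_indices = tf.constant([0, 1, 0, 1])  # e.g. the result of _shard_indices
key_shards = tf.dynamic_partition(keys, shard_indices, num_partitions=2)
with tf.Session() as sess:
    print(sess.run(key_shards))  # [array([10, 12]), array([11, 13])]
```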
Project: lsdc    Author: febert    | Project source | File source
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.mul(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i], array_ops.expand_dims(
            dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
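
The sparse term is a per-example sum; `segment_sum` can be tried standalone (TF 1.x; the numbers are made up):

```python
import tensorflow as tf

weighted = tf.constant([0.5, 0.25, 1.0])   # weight * feature_value per entry
example_indices = tf.constant([0, 0, 1])   # owning example of each entry
with tf.Session() as sess:
    print(sess.run(tf.segment_sum(weighted, example_indices)))  # [0.75 1.]
```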
Project: lsdc    Author: febert    | Project source | File source
def _padding_mask(sequence_lengths, padded_length):
  """Creates a mask used for calculating losses with padded input.

  Args:
    sequence_lengths: a `Tensor` of shape `[batch_size]` containing the unpadded
      length of each sequence.
    padded_length: a scalar `Tensor` indicating the length of the sequences
      after padding.
  Returns:
    A boolean `Tensor` M of shape `[batch_size, padded_length]` where
    `M[i, j] == True` when `sequence_lengths[i] > j`.
  """
  range_tensor = math_ops.range(padded_length)
  return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                       array_ops.expand_dims(sequence_lengths, 1))
Project: lsdc    Author: febert    | Project source | File source
def __new__(cls, source_column, boundaries):
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError("source_column must be an instance of _RealValuedColumn. "
                      "source_column: {}".format(source_column))

    if not isinstance(boundaries, list) or not boundaries:
      raise ValueError("boundaries must be a non-empty list. "
                       "boundaries: {}".format(boundaries))

    # We allow bucket boundaries to be monotonically increasing
    # (i.e. a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        raise ValueError("boundaries must be a sorted list. "
                         "boundaries: {}".format(boundaries))
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])

    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))
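
The de-duplication loop in plain Python, with a hypothetical boundary list containing one duplicate:

```python
boundaries = [0.0, 1.0, 1.0, 2.5]
sanitized = []
for a, b in zip(boundaries, boundaries[1:]):
    if a == b:
        continue          # drop the duplicate boundary
    elif a < b:
        sanitized.append(a)
    else:
        raise ValueError('boundaries must be a sorted list.')
sanitized.append(boundaries[-1])
print(sanitized)  # [0.0, 1.0, 2.5]
```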
Project: lsdc    Author: febert    | Project source | File source
def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0,
                                             stddev=1,
                                             dtype=self.dtype,
                                             seed=seed)

    correlated_samples = self._cov.sqrt_matmul(white_samples)

    # Move the last dimension to the front
    perm = array_ops.concat(0, (
        array_ops.pack([array_ops.rank(correlated_samples) - 1]),
        math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples
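
The permutation built above just rotates the sample dimension to the front; in plain Python, for a hypothetical rank:

```python
rank = 3                                  # hypothetical rank of the samples
perm = [rank - 1] + list(range(rank - 1))
print(perm)  # [2, 0, 1] -- the trailing sample dimension n becomes leading
```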
Project: lsdc    Author: febert    | Project source | File source
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
Project: lsdc    Author: febert    | Project source | File source
def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               empty_key,
               num_shards=1,
               name='ShardedMutableHashTable'):
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      super(_ShardedMutableDenseHashTable, self).__init__(key_dtype,
                                                          value_dtype, scope)
      table_shards = []
      for i in range(num_shards):
        table_shards.append(
            lookup_ops.MutableDenseHashTable(
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                default_value=default_value,
                empty_key=empty_key,
                name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access
Project: lsdc    Author: febert    | Project source | File source
def insert(self, keys, values, name=None):
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
Project: lsdc    Author: febert    | Project source | File source
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.mul(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i], array_ops.expand_dims(
            dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
Project: lsdc    Author: febert    | Project source | File source
def padding_mask(sequence_lengths, padded_length):
  """Creates a mask used for calculating losses with padded input.

  Args:
    sequence_lengths: A `Tensor` of shape `[batch_size]` containing the unpadded
      length of each sequence.
    padded_length: A scalar `Tensor` indicating the length of the sequences
      after padding.
  Returns:
    A boolean `Tensor` M of shape `[batch_size, padded_length]` where
    `M[i, j] == True` when `sequence_lengths[i] > j`.
  """
  range_tensor = math_ops.range(padded_length)
  return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                       array_ops.expand_dims(sequence_lengths, 1))
Project: lsdc    Author: febert    | Project source | File source
def __new__(cls, source_column, boundaries):
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError("source_column must be an instance of _RealValuedColumn. "
                      "source_column: {}".format(source_column))

    if not isinstance(boundaries, list) or not boundaries:
      raise ValueError("boundaries must be a non-empty list. "
                       "boundaries: {}".format(boundaries))

    # We allow bucket boundaries to be monotonically increasing
    # (i.e. a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        raise ValueError("boundaries must be a sorted list. "
                         "boundaries: {}".format(boundaries))
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])

    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))
Project: lsdc    Author: febert    | Project source | File source
def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0.,
                                             stddev=1.,
                                             dtype=self.dtype,
                                             seed=seed)

    correlated_samples = self._cov.sqrt_matmul(white_samples)

    # Move the last dimension to the front
    perm = array_ops.concat(0, (
        array_ops.pack([array_ops.rank(correlated_samples) - 1]),
        math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples
Project: LSTM-CRF-For-Named-Entity-Recognition    Author: zpppy    | Project source | File source
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """

  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))

  return masks
Project: polyaxon    Author: polyaxon    | Project source | File source
def transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.

    Retains as much of the static shape information as possible.

    Args:
        x: A tensor of rank 2 or higher.

    Returns:
        x transposed along the first two dimensions.

    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(tensor_shape.TensorShape([
        x_static_shape[1].value, x_static_shape[0].value]).concatenate(x_static_shape[2:]))
    return x_t
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               empty_key,
               num_shards=1,
               name='ShardedMutableHashTable'):
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      super(ShardedMutableDenseHashTable, self).__init__(key_dtype,
                                                         value_dtype, scope)
      table_shards = []
      for i in range(num_shards):
        table_shards.append(
            lookup.MutableDenseHashTable(
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                default_value=default_value,
                empty_key=empty_key,
                name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def insert(self, keys, values, name=None):
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testBasic(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAlmostEqual(1.65, sess.run(mean), 5)
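
The expected value is just the running mean over every element enqueued; checking the arithmetic in plain Python:

```python
vals = [0, 1, -4.2, 9.1, 6.5, 0, -3.2, 4.0]  # the four enqueued 1x2 vectors
print(sum(vals) / len(vals))  # ~1.65, matching the assertAlmostEqual above
```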
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)

      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test2dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)

      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(2,))
      _enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testBasic(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testMultiDimensional(self):
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
          shape=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
          shape=(2, 2, 2))
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(variables.local_variables_initializer())
      for _ in range(2):
        sess.run(update_op)
      self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
    accuracy, update_op = metrics.streaming_accuracy(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_accuracy = accuracy.eval()
      for _ in range(10):
        self.assertEqual(initial_accuracy, accuracy.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_precision = precision.eval()
      for _ in range(10):
        self.assertEqual(initial_precision, precision.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_recall = recall.eval()
      for _ in range(10):
        self.assertEqual(initial_recall, recall.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_specificity = specificity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_sensitivity = sensitivity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
                                                              labels,
                                                              thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
                                                         thresholds)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates, then verify idempotency.
      sess.run([prec_op, rec_op])
      initial_prec = prec.eval()
      initial_rec = rec.eval()
      for _ in range(10):
        sess.run([prec_op, rec_op])
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

  # TODO(nsilberman): fix tests (passing but incorrect).
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_average_precision_some_labels_out_of_range(self):
    """Tests that labels outside the [0, n_classes) range are ignored."""
    labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
    labels = np.array([labels_ex1], dtype=np.int64)
    predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
    predictions = (predictions_ex1,)
    predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
    precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
    avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
                         (precision_ex1[1] + precision_ex1[3]) / 4)
    for i in xrange(4):
      k = i + 1
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k, expected=precision_ex1[i])
      self._test_streaming_sparse_precision_at_top_k(
          (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
      self._test_sparse_average_precision_at_k(
          predictions, labels, k, expected=[avg_precision_ex1[i]])
      self._test_streaming_sparse_average_precision_at_k(
          predictions, labels, k, expected=avg_precision_ex1[i])
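
The precision figures follow from counting hits among the top-k predicted classes; with the data above, classes 3 and 0 (at ranks 2 and 4) are the only in-range labels hit:

```python
top_k = (5, 3, 6, 0, 1, 2)          # predicted classes, best first
valid_labels = {0, 1, 2, 3, 4}      # -1 and 7 fall outside [0, n_classes)
hits = 0
for k, c in enumerate(top_k[:4], start=1):
    hits += c in valid_labels
    print(k, hits / k)  # 1: 0.0, 2: 0.5, 3: 0.333..., 4: 0.5
```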
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_three_labels_at_k5_no_predictions(self):
    predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                   [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    top_k_predictions = [
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ]
    sparse_labels = _binary_2d_label_to_sparse_value(
        [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
    dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

    for labels in (sparse_labels, dense_labels):
      # Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
      for class_id in (-1, 1, 3, 8, 10):
        self._test_streaming_sparse_precision_at_k(
            predictions, labels, k=5, expected=NAN, class_id=class_id)
        self._test_streaming_sparse_precision_at_top_k(
            top_k_predictions, labels, expected=NAN, class_id=class_id)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_3d_nan(self):
    predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                    [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
                   [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
                    [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
    top_k_predictions = [[
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ], [
        [5, 7, 2, 9, 6],
        [9, 4, 6, 2, 0],
    ]]
    labels = _binary_3d_label_to_sparse_value(
        [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
         [[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])

    # Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
    for class_id in (-1, 1, 3, 8, 10):
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=5, expected=NAN, class_id=class_id)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=NAN, class_id=class_id)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_3d_nan(self):
    predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                    [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
                   [[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
                    [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
    sparse_labels = _binary_3d_label_to_sparse_value(
        [[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
         [[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
    dense_labels = np.array(
        [[[2, 7, 8], [1, 2, 5]], [
            [1, 2, 5],
            [2, 7, 8],
        ]], dtype=np.int64)

    for labels in (sparse_labels, dense_labels):
      # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
      for class_id in (0, 3, 4, 6, 9, 10):
        self._test_streaming_sparse_recall_at_k(
            predictions, labels, k=5, expected=NAN, class_id=class_id)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_absolute_error(predictions,
                                                             labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    normalizer = random_ops.random_normal((10, 3), seed=3)
    error, update_op = metrics.streaming_mean_relative_error(predictions,
                                                             labels, normalizer)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_squared_error(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_root_mean_squared_error(predictions,
                                                                 labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    labels = random_ops.random_normal((10, 3), seed=2)
    predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
    cov, update_op = metrics.streaming_covariance(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_cov = cov.eval()
      for _ in range(10):
        self.assertEqual(initial_cov, cov.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testVars(self):
    metrics.streaming_pearson_correlation(
        predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
            [10, 10]),
        labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
    _assert_local_variables(self, (
        'pearson_r/covariance/comoment:0',
        'pearson_r/covariance/count:0',
        'pearson_r/covariance/mean_label:0',
        'pearson_r/covariance/mean_prediction:0',
        'pearson_r/variance_labels/count:0',
        'pearson_r/variance_labels/comoment:0',
        'pearson_r/variance_labels/mean_label:0',
        'pearson_r/variance_labels/mean_prediction:0',
        'pearson_r/variance_predictions/comoment:0',
        'pearson_r/variance_predictions/count:0',
        'pearson_r/variance_predictions/mean_label:0',
        'pearson_r/variance_predictions/mean_prediction:0',))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testValueTensorIsIdempotent(self):
    labels = random_ops.random_normal((10, 3), seed=2)
    predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
    pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
                                                                 labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_r = pearson_r.eval()
      for _ in range(10):
        self.assertEqual(initial_r, pearson_r.eval())