Python tensorflow.python.ops.math_ops module: minimum() example source code

We have extracted the following 15 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.math_ops.minimum().
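
Before the project snippets, here is a minimal usage sketch. It is not taken from any of the projects below; TensorFlow 1.x graph mode and illustrative values are assumed. math_ops.minimum() computes the element-wise minimum of two broadcast-compatible tensors.

import tensorflow as tf
from tensorflow.python.ops import math_ops

# Hypothetical inputs; minimum() broadcasts like other binary math ops.
x = tf.constant([1.0, 5.0, 3.0])
y = tf.constant([4.0, 2.0, 3.0])
z = math_ops.minimum(x, y)  # element-wise -> [1.0, 2.0, 3.0]

with tf.Session() as sess:
    print(sess.run(z))  # [1. 2. 3.]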

Project: LIE    Author: EmbraceLife    | Project source | File source
def min(x, axis=None, keepdims=False):
      """Minimum value in a tensor.

      Arguments:
          x: A tensor or variable.
          axis: An integer, the axis to find minimum values.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1. If `keepdims` is `True`,
              the reduced dimension is retained with length 1.

      Returns:
          A tensor with minimum values of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
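
For reference, the wrapper above is a thin layer over the reduction op. A minimal sketch of the direct call (TensorFlow 1.x assumed, where the keep_dims spelling is still accepted):

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 0.5]])
m = tf.reduce_min(x, axis=1, keep_dims=True)  # shape (2, 1) -> [[1.0], [0.5]]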
Project: lsdc    Author: febert    | Project source | File source
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # this looks more complex than necessary, but importantly it keeps
    # error_ratio in the numerator so we can't divide by zero:
    factor = math_ops.maximum(
        1 / ifactor,
        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
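
The maximum/minimum pair clamps the growth factor, so per call the step can grow by at most ifactor and shrink by at most dfactor. A plain-NumPy illustration with hypothetical values (not part of the lsdc project):

import numpy as np

def optimal_step_size_np(last_step, error_ratio, safety=0.9,
                         ifactor=10.0, dfactor=0.2, order=5):
    exponent = 1.0 / order
    # error_ratio stays in the numerator, so there is no division by zero
    factor = np.maximum(1 / ifactor,
                        np.minimum(error_ratio ** exponent / safety,
                                   1 / dfactor))
    return last_step / factor

print(optimal_step_size_np(0.1, 0.0))  # 1.0  -> growth capped at ifactor (10x)
print(optimal_step_size_np(0.1, 1e6))  # 0.02 -> shrink capped at dfactor (0.2x)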
Project: lsdc    Author: febert    | Project source | File source
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean
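
The math_ops.minimum(decay, n / (n + 1.)) clamp lowers the effective decay while global_step is small, so the moving averages track early values closely before settling at the configured decay. A plain-Python illustration with hypothetical values:

decay = 0.99
for n in [1.0, 10.0, 1000.0]:
    print(n, min(decay, n / (n + 1.0)))
# 1.0    -> 0.5     (fast adaptation at the beginning)
# 10.0   -> 0.909...
# 1000.0 -> 0.99    (n/(n+1) exceeds decay, so the clamp returns decay)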
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # this looks more complex than necessary, but importantly it keeps
    # error_ratio in the numerator so we can't divide by zero:
    factor = math_ops.maximum(
        1 / ifactor,
        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()

    self.ops = [
        ('igamma', None, math_ops.igamma, core.igamma),
        ('igammac', None, math_ops.igammac, core.igammac),
        ('zeta', None, math_ops.zeta, core.zeta),
        ('polygamma', None, math_ops.polygamma, core.polygamma),
        ('maximum', None, math_ops.maximum, core.maximum),
        ('minimum', None, math_ops.minimum, core.minimum),
        ('squared_difference', None, math_ops.squared_difference,
         core.squared_difference),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
Project: HyperGAN    Author: 255BITS    | Project source | File source
def resize_audio_with_crop_or_pad(image, target_height, target_width,
                                  dynamic_shape=False):
  image = tf.convert_to_tensor(image, name='audio')
  original_height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  if target_height <= 0:
    raise ValueError('target_height must be > 0.')

  if dynamic_shape:
    max_ = math_ops.maximum
    min_ = math_ops.minimum
  else:
    max_ = max
    min_ = min

  height_diff = target_height - original_height
  offset_crop_height = max_(-height_diff // 2, 0)
  offset_pad_height = max_(height_diff // 2, 0)

  # Maybe crop if needed.
  cropped = crop_to_1d_bounding_box(image, offset_crop_height,
                                 min_(target_height, original_height),
                                 dynamic_shape=dynamic_shape)

  # Maybe pad if needed.
  resized = pad_to_1d_bounding_box(cropped, offset_pad_height,
                                   target_height,
                                   dynamic_shape=dynamic_shape)

  if resized.get_shape().ndims is None:
    raise ValueError('resized contains no shape.')
  if not resized.get_shape()[0].is_compatible_with(target_height):
    raise ValueError('resized height is not correct.')
  return resized
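
The dynamic_shape switch above picks the graph ops math_ops.minimum/math_ops.maximum when the height is only known at run time, and the Python builtins when it is a static int. A short sketch of that dispatch (TensorFlow 1.x assumed, hypothetical values):

import tensorflow as tf
from tensorflow.python.ops import math_ops

dynamic_shape = True
min_ = math_ops.minimum if dynamic_shape else min

height = tf.shape(tf.zeros([7, 2]))[0]  # height known only at run time
clipped = min_(height, 5)               # graph op; the builtin min() cannot compare a tensor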


Project: LIE    Author: EmbraceLife    | Project source | File source
def argmin(x, axis=-1):
      """Returns the index of the minimum value along an axis.

      Arguments:
          x: Tensor or variable.
          axis: axis along which to perform the reduction.

      Returns:
          A tensor.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.argmin(x, axis)
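
A quick companion sketch (TensorFlow 1.x assumed, illustrative values): argmin returns the index of the minimum rather than the value itself.

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 2.0]])
idx = tf.argmin(x, axis=-1)  # -> [1]: the minimum value 1.0 sits at index 1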
Project: LIE    Author: EmbraceLife    | Project source | File source
def minimum(x, y):
      """Element-wise minimum of two tensors.

      Arguments:
          x: Tensor or variable.
          y: Tensor or variable.

      Returns:
          A tensor.
      """
      return math_ops.minimum(x, y)
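
Because math_ops.minimum broadcasts, the wrapper also works as an upper-bound clip against a scalar. A minimal sketch with hypothetical values:

import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([[1.0, 4.0], [6.0, 2.0]])
upper = tf.constant(3.0)              # scalar broadcasts over `a`
clipped = math_ops.minimum(a, upper)  # [[1., 3.], [3., 2.]]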
Project: LIE    Author: EmbraceLife    | Project source | File source
def make_sampling_table(size, sampling_factor=1e-5):
          """Generates a word rank-based probabilistic sampling table.

          This generates an array where the ith element
          is the probability that a word of rank i would be sampled,
          according to the sampling distribution used in word2vec.

          The word2vec formula is:
              p(word) = min(1, sqrt(word.frequency/sampling_factor) /
              (word.frequency/sampling_factor))

          We assume that the word frequencies follow Zipf's law (s=1) to derive
          a numerical approximation of frequency(rank):
             frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
              where gamma is the Euler-Mascheroni constant.

          Arguments:
              size: int, number of possible words to sample.
              sampling_factor: the sampling factor in the word2vec formula.

          Returns:
              A 1D Numpy array of length `size` where the ith entry
              is the probability that a word of rank i should be sampled.
          """
          gamma = 0.577
          rank = np.array(list(range(size)))
          rank[0] = 1
          inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
          f = sampling_factor * inv_fq

          return np.minimum(1., f / np.sqrt(f))
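
A usage sketch for the function above (assumes numpy is imported as np; values are illustrative): since inv_fq grows with rank, rarer words receive higher keep probabilities, capped at 1.

table = make_sampling_table(size=10)
print(table.shape)           # (10,)
print(table[1] < table[9])   # True: the rarer rank-9 word is kept more often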
Project: lsdc    Author: febert    | Project source | File source
def num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
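
A sketch of the dense-tensor path above with hypothetical shapes (TensorFlow 1.x assumed): every row of a [batch, num_labels] tensor gets min(num_labels, k) relevant entries.

import tensorflow as tf

labels = tf.zeros([4, 3], dtype=tf.int64)      # num_labels = 3
num_rel = tf.minimum(tf.shape(labels)[-1], 5)  # k = 5 -> 3
out = tf.fill(tf.shape(labels)[:-1], num_rel)  # shape [4], all entries 3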
Project: lsdc    Author: febert    | Project source | File source
def num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(
        labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(
        labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
Project: LIE    Author: EmbraceLife    | Project source | File source
def _obtain_input_shape(input_shape, default_size, min_size, data_format,
                                include_top):
          """Internal utility to compute/validate an ImageNet model's input shape.

          Arguments:
              input_shape: either None (will return the default network input shape),
                  or a user-provided shape to be validated.
              default_size: default input width/height for the model.
              min_size: minimum input width/height accepted by the model.
              data_format: image data format to use.
              include_top: whether the model is expected to
                  be linked to a classifier via a Flatten layer.

          Returns:
              An integer shape tuple (may include None entries).

          Raises:
              ValueError: in case of invalid argument values.
          """
          if data_format == 'channels_first':
            default_shape = (3, default_size, default_size)
          else:
            default_shape = (default_size, default_size, 3)
          if include_top:
            if input_shape is not None:
              if input_shape != default_shape:
                raise ValueError('When setting `include_top=True`, '
                                 '`input_shape` should be ' + str(default_shape) + '.')
            input_shape = default_shape
          else:
            if data_format == 'channels_first':
              if input_shape is not None:
                if len(input_shape) != 3:
                  raise ValueError('`input_shape` must be a tuple of three integers.')
                if input_shape[0] != 3:
                  raise ValueError('The input must have 3 channels; got '
                                   '`input_shape=' + str(input_shape) + '`')
                if ((input_shape[1] is not None and input_shape[1] < min_size) or
                    (input_shape[2] is not None and input_shape[2] < min_size)):
                  raise ValueError('Input size must be at least ' + str(min_size) + 'x'
                                   + str(min_size) + ', got '
                                   '`input_shape=' + str(input_shape) + '`')
              else:
                input_shape = (3, None, None)
            else:
              if input_shape is not None:
                if len(input_shape) != 3:
                  raise ValueError('`input_shape` must be a tuple of three integers.')
                if input_shape[-1] != 3:
                  raise ValueError('The input must have 3 channels; got '
                                   '`input_shape=' + str(input_shape) + '`')
                if ((input_shape[0] is not None and input_shape[0] < min_size) or
                    (input_shape[1] is not None and input_shape[1] < min_size)):
                  raise ValueError('Input size must be at least ' + str(min_size) + 'x'
                                   + str(min_size) + ', got '
                                   '`input_shape=' + str(input_shape) + '`')
              else:
                input_shape = (None, None, 3)
          return input_shape
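
A hypothetical call sketch for the validator above (the argument values are illustrative, not from the LIE project):

shape = _obtain_input_shape(input_shape=(224, 224, 3),
                            default_size=224,
                            min_size=48,
                            data_format='channels_last',
                            include_top=False)
print(shape)  # (224, 224, 3): passes the channel and min-size checks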
Project: lsdc    Author: febert    | Project source | File source
def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, warmup=10, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    warmup: Until the resulting tensor has been evaluated `warmup`
      times, the resampling method uses the true mean over all calls
      as its weight estimate, rather than a decayed mean.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.

  """
  # Algorithm: Just compute rates as weights/mean_weight *
  # overall_rate. This way the average weight corresponds to the
  # overall rate, and a weight twice the average has twice the rate,
  # etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with decay
    # adjusted (by also maintaining an invocation count) during the
    # warmup period so that at the beginning, there aren't too many
    # zeros mixed in, throwing the average off.

    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      count_so_far = variable_scope.get_local_variable(
          'resample_count', initializer=0)

      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean', initializer=0.0)

      count = count_so_far.assign_add(1)
      real_decay = math_ops.minimum(
          math_ops.truediv((count - 1), math_ops.minimum(count, warmup)),
          mean_decay)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, real_decay, zero_debias=False)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
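
The real_decay expression clamps the decay with math_ops.minimum so that during warm-up the moving average is the true mean of all calls so far; only after warmup evaluations does mean_decay take over. A plain-Python illustration with hypothetical values:

mean_decay, warmup = 0.999, 10
for count in [1.0, 5.0, 100.0]:
    print(count, min((count - 1) / min(count, warmup), mean_decay))
# 1.0   -> 0.0    (first call: the average is just the batch mean)
# 5.0   -> 0.8    (decay (n-1)/n keeps a true running mean)
# 100.0 -> 0.999  (past warm-up, the configured mean_decay wins)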