Python tensorflow.python.ops.math_ops module, reduce_max() code examples

We extracted the following 10 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.math_ops.reduce_max().
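As a quick orientation before the project samples, here is a minimal sketch of the call itself (the values are made up; newer TensorFlow versions spell the arguments axis/keepdims instead of reduction_indices/keep_dims):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1.0, 5.0, 3.0],
                 [4.0, 2.0, 6.0]])

global_max = math_ops.reduce_max(x)                      # scalar: 6.0
col_max = math_ops.reduce_max(x, reduction_indices=[0])  # [4.0, 5.0, 6.0]
row_max = math_ops.reduce_max(x, reduction_indices=[1])  # [5.0, 6.0]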

Project: LIE    Author: EmbraceLife    | Project source | File source
def max(x, axis=None, keepdims=False):
      """Maximum value in a tensor.

      Arguments:
          x: A tensor or variable.
          axis: An integer, the axis along which to find maximum values.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1. If `keepdims` is `True`,
              the reduced dimension is retained with length 1.

      Returns:
          A tensor with maximum values of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.reduce_max(x, reduction_indices=axis, keep_dims=keepdims)
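
A small illustration of the keepdims behaviour described in the docstring above (a sketch with made-up values):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1.0, 5.0, 3.0],
                 [4.0, 2.0, 6.0]])  # shape (2, 3)

# keep_dims=False drops the reduced axis: result shape (2,).
flat_max = math_ops.reduce_max(x, reduction_indices=1, keep_dims=False)

# keep_dims=True retains it with length 1: result shape (2, 1),
# which broadcasts cleanly against the original tensor.
kept_max = math_ops.reduce_max(x, reduction_indices=1, keep_dims=True)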
Project: lsdc    Author: febert    | Project source | File source
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_max(values, reduction_indices=[0])
Project: dynamic-coattention-network    Author: marshmelloX    | Project source | File source
def maxout(inputs,
           num_units,
           axis=None,
           outputs_collections=None,
           scope=None):
  """Adds a maxout op which is a max pooling performed in filter/channel
  dimension. This can also be used after fully-connected layers to reduce
  number of features.
  Args:
    inputs: A Tensor on which maxout will be performed
    num_units: Specifies how many features will remain after max pooling in
      the channel dimension. The number of channels must be a multiple of
      `num_units`.
    axis: The dimension where max pooling will be performed. Default is the
      last dimension.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.
  Returns:
    A `Tensor` representing the results of the pooling operation.
  Raises:
    ValueError: If the number of features is not a multiple of `num_units`.
  """
  with ops.name_scope(scope, 'MaxOut', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape().as_list()
    if axis is None:
      # Assume that channel is the last dimension
      axis = -1
    num_channels = shape[axis]
    if num_channels % num_units:
      raise ValueError('number of features({}) is not '
                       'a multiple of num_units({})'.format(
                           num_channels, num_units))
    shape[axis] = -1
    shape += [num_channels // num_units]
    outputs = math_ops.reduce_max(gen_array_ops.reshape(inputs, shape), -1,
                                  keep_dims=False)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
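
To make the reshape-then-reduce trick concrete, here is a hedged usage sketch (the shape numbers are invented for illustration):

import tensorflow as tf

# inputs: (batch=8, height=4, width=4, channels=12) with num_units=3.
# maxout reshapes to (8, 4, 4, 3, 4) -- three units of four channels
# each -- and the reduce_max over the last axis leaves (8, 4, 4, 3).
inputs = tf.random_normal([8, 4, 4, 12])
outputs = maxout(inputs, num_units=3)  # shape: (8, 4, 4, 3)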
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  return math_ops.reduce_max(values, reduction_indices=[0])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_name(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_max', result_lt.name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_max(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testReduceMax(self):

    def reference_max(inp, axis):
      """Wrapper around np.amax that returns -infinity for an empty input."""
      if inp.shape[axis] == 0:
        return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
      return np.amax(inp, axis)

    self._testReduction(math_ops.reduce_max, reference_max, np.float32,
                        self.FLOAT_DATA)
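
Why the wrapper exists: np.amax raises on a zero-size reduction axis, so the reference substitutes -inf, the identity element of max. A standalone sketch:

import numpy as np

def reference_max(inp, axis):
    """np.amax, but returning -infinity instead of raising on empty input."""
    if inp.shape[axis] == 0:
        return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
    return np.amax(inp, axis)

empty = np.zeros((0, 3), dtype=np.float32)
print(reference_max(empty, 0))  # [-inf -inf -inf]; np.amax would raise here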
Project: opinatt    Author: epochx    | Project source | File source
def seq_labeling_decoder_linear(decoder_inputs, num_decoder_symbols,
                                scope=None, sequence_length=None, dtype=tf.float32):
  with tf.variable_scope(scope or "non-attention_RNN"):

    decoder_outputs = list()

    # copy over logits once out of sequence_length
    if decoder_inputs[0].get_shape().ndims != 1:
      (fixed_batch_size, output_size) = decoder_inputs[0].get_shape().with_rank(2)
    else:
      fixed_batch_size = decoder_inputs[0].get_shape().with_rank_at_least(1)[0]

    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      batch_size = tf.shape(decoder_inputs[0])[0]
    if sequence_length is not None:
      sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length is not None:  # Prepare variables
      zero_logit = tf.zeros(
        tf.stack([batch_size, num_decoder_symbols]), decoder_inputs[0].dtype)
      zero_logit.set_shape(
        tensor_shape.TensorShape([fixed_batch_size.value, num_decoder_symbols]))
      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    for time, input_ in enumerate(decoder_inputs):
      # if time == 0:
      #  hidden_state = zero_state(num_decoder_symbols, batch_size)
      if time > 0: tf.get_variable_scope().reuse_variables()
      # pylint: disable=cell-var-from-loop
      # call_cell = lambda: cell(input_, state)
      generate_logit = lambda: _linear(decoder_inputs[time], num_decoder_symbols, True)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        logit = _step(
          time, sequence_length, min_sequence_length, max_sequence_length, zero_logit, generate_logit)
      else:
        logit = generate_logit()
      decoder_outputs.append(logit)

  return decoder_outputs
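
The helper _step is not part of this snippet. Judging from its call site, it presumably short-circuits work once time moves past the sequence bounds, in the spirit of TensorFlow's internal _rnn_step. A hypothetical minimal version (an assumption, not the project's actual helper) could look like:

import tensorflow as tf

def _step(time, sequence_length, min_sequence_length, max_sequence_length,
          zero_logit, generate_logit):
  # Hypothetical sketch. Past every sequence's length, emit zeros; before
  # any sequence has ended, just compute the logit; in between, select
  # per example with tf.where.
  def _select():
    return tf.where(time >= sequence_length, zero_logit, generate_logit())

  return tf.cond(time >= max_sequence_length,
                 lambda: zero_logit,
                 lambda: tf.cond(time < min_sequence_length,
                                 generate_logit, _select))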
Project: joint-slu-lm    Author: HadoopIt    | Project source | File source
def generate_sequence_output(encoder_outputs,
                             encoder_state,
                             num_decoder_symbols,
                             sequence_length,
                             num_heads=1,
                             dtype=dtypes.float32,
                             use_attention=True,
                             loop_function=None,
                             scope=None,
                             DNN_at_output=False,
                             forward_only=False):
  with variable_scope.variable_scope(scope or "non-attention_RNN"):
    attention_encoder_outputs = list()
    sequence_attention_weights = list()

    # copy over logits once out of sequence_length
    if encoder_outputs[0].get_shape().ndims != 1:
      (fixed_batch_size, output_size) = encoder_outputs[0].get_shape().with_rank(2)
    else:
      fixed_batch_size = encoder_outputs[0].get_shape().with_rank_at_least(1)[0]

    if fixed_batch_size.value: 
      batch_size = fixed_batch_size.value
    else:
      batch_size = array_ops.shape(encoder_outputs[0])[0]
    if sequence_length is not None:
      sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length is not None:  # Prepare variables
      zero_logit = array_ops.zeros(
          array_ops.pack([batch_size, num_decoder_symbols]), encoder_outputs[0].dtype)
      zero_logit.set_shape(
          tensor_shape.TensorShape([fixed_batch_size.value, num_decoder_symbols]))
      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    for time, input_ in enumerate(encoder_outputs):
      if time > 0: variable_scope.get_variable_scope().reuse_variables()

      # pylint: disable=cell-var-from-loop
      if not DNN_at_output:
        generate_logit = lambda: linear_transformation(encoder_outputs[time], output_size, num_decoder_symbols)
      else:
        generate_logit = lambda: multilayer_perceptron(encoder_outputs[time], output_size, 200, num_decoder_symbols, forward_only=forward_only)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        logit = _step(
            time, sequence_length, min_sequence_length, max_sequence_length, zero_logit, generate_logit)
      else:
        logit = generate_logit()
      attention_encoder_outputs.append(logit)
    if DNN_at_output:  
      regularizers = get_multilayer_perceptron_regularizers()
    else:
      regularizers = get_linear_transformation_regularizers()
  return attention_encoder_outputs, sequence_attention_weights, regularizers
Project: lsdc    Author: febert    | Project source | File source
def _calculate_acceptance_probabilities(init_probs, target_probs):
  """Calculate the per-class acceptance rates.

  Args:
    init_probs: The class probabilities of the data.
    target_probs: The desired class proportion in minibatches.
  Returns:
    A list of the per-class acceptance probabilities.

  This method is based on solving the following analysis:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i is the rate the rejection sampler should *accept* class i
  Let t_i is the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i (1-a_i))
    = 1 - sum_i(p_i * a_i)    using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)           using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)  using F from above
  ```

  Note that the following constraints hold:

  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:

  ```
  a_i = (t_i / p_i) / max_i[t_i / p_i]
  ```
  """
  # Make list of t_i / p_i.
  ratio_l = target_probs / init_probs

  # Replace NaNs with 0s.
  ratio_l = math_ops.select(math_ops.is_nan(ratio_l),
                            array_ops.zeros_like(ratio_l),
                            ratio_l)

  # Calculate list of acceptance probabilities.
  max_ratio = math_ops.reduce_max(ratio_l)
  return ratio_l / max_ratio
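
A quick numeric sanity check of the closed form, with made-up class proportions:

import numpy as np

init_probs = np.array([0.5, 0.3, 0.2])       # p_i: class mix in the data
target_probs = np.array([1/3., 1/3., 1/3.])  # t_i: desired uniform mix

ratio = target_probs / init_probs            # [0.667, 1.111, 1.667]
accept = ratio / ratio.max()                 # [0.4,   0.667, 1.000]
# The rarest class is always accepted; the most common one only 40% of the time.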
