Python tensorflow.python.ops.math_ops module, maximum() example source code

We extracted the following 20 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.math_ops.maximum().
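
As a quick orientation before the project examples, here is a minimal sketch of the op itself (TF 1.x-era graph mode, using the same internal imports as the snippets below):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

x = constant_op.constant([1.0, 4.0, 3.0])
y = constant_op.constant([2.0, 2.0, 2.0])
z = math_ops.maximum(x, y)  # element-wise maximum -> [2.0, 4.0, 3.0]
# Broadcasting also works, e.g. math_ops.maximum(x, 2.0).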

Project: DNGPU    Author: LUMII-Syslab    | project source | file source
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        # clip the gradient so that each value is at most clip_multiplier times its running maximum (plus clip_epsilon)
        if self.clip_gradients:
            clipVal = v * clip_multiplier_t + clip_epsilon_t
            grad = clip_ops.clip_by_value(grad, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t

        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)
        # v := max(beta2 * v , abs(grad))
        v_t = state_ops.assign(v, math_ops.maximum(beta2_t * v, math_ops.abs(grad)), use_locking=self._use_locking)
        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use bias-correction term for the first moment; it does not give observable benefit
        var_update = state_ops.assign_sub(var, lr_t * m_t / (v_t+epsilon_t), use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, v_t, m_t])
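
The `v := max(beta2 * v, |grad|)` line above is the exponentially weighted infinity norm from AdaMax. A plain-NumPy sketch of one dense update step, with hypothetical values:

import numpy as np

lr, beta1, beta2, eps = 0.01, 0.9, 0.999, 1e-8   # hypothetical hyperparameters
m, v, var, grad = 0.0, 0.0, 1.0, 0.5             # hypothetical state

m = beta1 * m + (1. - beta1) * grad        # first moment
v = np.maximum(beta2 * v, np.abs(grad))    # infinity-norm accumulator
var -= lr * m / (v + eps)                  # no first-moment bias correction, as above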
Project: LIE    Author: EmbraceLife    | project source | file source
def max(x, axis=None, keepdims=False):
      """Maximum value in a tensor.

      Arguments:
          x: A tensor or variable.
          axis: An integer, the axis to find maximum values.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1. If `keepdims` is `True`,
              the reduced dimension is retained with length 1.

      Returns:
          A tensor with maximum values of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.reduce_max(x, reduction_indices=axis, keep_dims=keepdims)
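
A quick illustration of the `keepdims` behaviour, using the same TF 1.x reduction arguments as the snippet (a sketch, not part of the project):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

x = constant_op.constant([[1., 5., 3.], [4., 2., 6.]])
math_ops.reduce_max(x, reduction_indices=1, keep_dims=False)  # shape (2,)  -> [5., 6.]
math_ops.reduce_max(x, reduction_indices=1, keep_dims=True)   # shape (2, 1) -> [[5.], [6.]]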
Project: lsdc    Author: febert    | project source | file source
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # this looks more complex than necessary, but importantly it keeps
    # error_ratio in the numerator so we can't divide by zero:
    factor = math_ops.maximum(
        1 / ifactor,
        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
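
Here `maximum`/`minimum` clamp the factor to [1/ifactor, 1/dfactor], so the step can grow by at most ifactor (10x) and shrink by at most dfactor (0.2x). A plain-Python sketch with hypothetical error ratios:

safety, ifactor, dfactor, order = 0.9, 10.0, 0.2, 5
for error_ratio in (1e-6, 1.0, 50.0):            # hypothetical error ratios
    factor = max(1 / ifactor, min(error_ratio ** (1 / order) / safety, 1 / dfactor))
    print(error_ratio, 1 / factor)               # step multiplier: 10.0, 0.9, ~0.41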
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # this looks more complex than necessary, but importantly it keeps
    # error_ratio in the numerator so we can't divide by zero:
    factor = math_ops.maximum(
        1 / ifactor,
        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()

    self.ops = [
        ('igamma', None, math_ops.igamma, core.igamma),
        ('igammac', None, math_ops.igammac, core.igammac),
        ('zeta', None, math_ops.zeta, core.zeta),
        ('polygamma', None, math_ops.polygamma, core.polygamma),
        ('maximum', None, math_ops.maximum, core.maximum),
        ('minimum', None, math_ops.minimum, core.minimum),
        ('squared_difference', None, math_ops.squared_difference,
         core.squared_difference),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def _get_sharding_func(size, num_shards):
    """Create sharding function for scatter update."""

    def func(ids):
      if num_shards == 1:
        return None, ids
      else:
        ids_per_shard = size // num_shards
        extras = size % num_shards
        assignments = math_ops.maximum(ids // (ids_per_shard + 1),
                                       (ids - extras) // ids_per_shard)
        new_ids = array_ops.where(assignments < extras,
                                  ids % (ids_per_shard + 1),
                                  (ids - extras) % ids_per_shard)
        return assignments, new_ids

    return func
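
The `maximum` here picks the correct shard when the first `extras` shards hold one extra id. A NumPy sketch with hypothetical sizes (10 ids over 3 shards, so shard sizes are 4, 3, 3):

import numpy as np

size, num_shards = 10, 3                     # hypothetical sizes
ids = np.arange(size)
ids_per_shard = size // num_shards           # 3
extras = size % num_shards                   # 1
assignments = np.maximum(ids // (ids_per_shard + 1),
                         (ids - extras) // ids_per_shard)
new_ids = np.where(assignments < extras,
                   ids % (ids_per_shard + 1),
                   (ids - extras) % ids_per_shard)
# assignments -> [0 0 0 0 1 1 1 2 2 2]; new_ids -> [0 1 2 3 0 1 2 0 1 2]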
Project: DNGPU    Author: LUMII-Syslab    | project source | file source
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        v_slice = array_ops.gather(v, grad.indices)

        # clip the gradient so that each value is at most clip_multiplier times its running maximum (plus clip_epsilon)
        clipped_values = grad.values
        if self.clip_gradients:
            clipVal = v_slice * clip_multiplier_t + clip_epsilon_t
            clipped_values = clip_ops.clip_by_value(grad.values, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t_values = beta1_t * array_ops.gather(m, grad.indices) + (1 - beta1_t) * clipped_values
        m_t = state_ops.scatter_update(m, grad.indices, m_t_values, use_locking=self._use_locking)

        # v := max(beta2 * v , abs(grad))
        v_t_values = math_ops.maximum(beta2_t * v_slice, math_ops.abs(clipped_values))
        v_t = state_ops.scatter_update(v, grad.indices, v_t_values, use_locking=self._use_locking)

        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use bias-correction term for the first moment; it does not give observable benefit
        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * m_t_values / (v_t_values + epsilon_t),
                                           use_locking=self._use_locking)
        return control_flow_ops.group(var_update, v_t, m_t)
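
The sparse path only touches the gathered rows. A NumPy sketch of the `scatter_update` semantics for the `v` slot, with hypothetical indices and gradient values:

import numpy as np

beta2 = 0.999
v = np.zeros(5)                                   # slot variable
indices = np.array([1, 3])                        # hypothetical sparse indices
grad_values = np.array([0.5, -2.0])               # hypothetical gradient values
v[indices] = np.maximum(beta2 * v[indices], np.abs(grad_values))
# v -> [0.0, 0.5, 0.0, 2.0, 0.0]; rows 0, 2, 4 are untouched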
Project: HyperGAN    Author: 255BITS    | project source | file source
def resize_audio_with_crop_or_pad(image, target_height, target_width,
                                  dynamic_shape=False):
  image = tf.convert_to_tensor(image, name='audio')
  original_height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  if target_height <= 0:
    raise ValueError('target_height must be > 0.')

  if dynamic_shape:
    max_ = math_ops.maximum
    min_ = math_ops.minimum
  else:
    max_ = max
    min_ = min

  height_diff = target_height - original_height
  offset_crop_height = max_(-height_diff // 2, 0)
  offset_pad_height = max_(height_diff // 2, 0)

  # Maybe crop if needed.
  cropped = crop_to_1d_bounding_box(image, offset_crop_height,
                                    min_(target_height, original_height),
                                    dynamic_shape=dynamic_shape)

  # Maybe pad if needed.
  resized = pad_to_1d_bounding_box(cropped, offset_pad_height,
                                   target_height,
                                   dynamic_shape=dynamic_shape)

  if resized.get_shape().ndims is None:
    raise ValueError('resized contains no shape.')
  if not resized.get_shape()[0].is_compatible_with(target_height):
    raise ValueError('resized height is not correct.')
  return resized
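
The crop/pad offsets come from the sign of `height_diff`; a quick arithmetic sketch with hypothetical static sizes:

original_height, target_height = 100, 60         # hypothetical sizes
height_diff = target_height - original_height    # -40 -> needs cropping
offset_crop_height = max(-height_diff // 2, 0)   # 20: crop 20 rows off the start
offset_pad_height = max(height_diff // 2, 0)     # 0: no padding needed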


Project: LIE    Author: EmbraceLife    | project source | file source
def argmax(x, axis=-1):
      """Returns the index of the maximum value along an axis.

      Arguments:
          x: Tensor or variable.
          axis: axis along which to perform the reduction.

      Returns:
          A tensor.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.argmax(x, axis)
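
A quick sketch of the reduction (hypothetical values):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

x = constant_op.constant([[1., 9., 3.], [7., 2., 5.]])
math_ops.argmax(x, 1)   # index of the row-wise maximum -> [1, 0]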
Project: LIE    Author: EmbraceLife    | project source | file source
def maximum(x, y):
      """Element-wise maximum of two tensors.

      Arguments:
          x: Tensor or variable.
          y: Tensor or variable.

      Returns:
          A tensor.
      """
      return math_ops.maximum(x, y)
Project: LIE    Author: EmbraceLife    | project source | file source
def squared_hinge(y_true, y_pred):
      return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Project: LIE    Author: EmbraceLife    | project source | file source
def hinge(y_true, y_pred):
      return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)
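
Both losses build on the same `maximum(1 - y_true * y_pred, 0)` margin term. A NumPy sketch with hypothetical labels in {-1, +1}:

import numpy as np

y_true = np.array([1., -1., 1.])                 # hypothetical labels
y_pred = np.array([0.8, 0.3, -0.5])              # hypothetical predictions
margin = np.maximum(1. - y_true * y_pred, 0.)    # [0.2, 1.3, 1.5]
hinge = margin.mean()                            # 1.0
squared_hinge = (margin ** 2).mean()             # ~1.327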
Project: LIE    Author: EmbraceLife    | project source | file source
def evaluate_generator(self,
                             generator,
                             steps,
                             max_q_size=10,
                             workers=1,
                             pickle_safe=False):
        """Evaluates the model on a data generator.

        The generator should return the same kind of data
        as accepted by `test_on_batch`.

        Arguments:
            generator: Generator yielding tuples (inputs, targets)
                or (inputs, targets, sample_weights)
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
            max_q_size: maximum size for the generator queue
            workers: maximum number of processes to spin up
            pickle_safe: if True, use process based threading.
                Note that because this implementation
                relies on multiprocessing, you should not pass
                non picklable arguments to the generator
                as they can't be passed easily to children processes.

        Returns:
            Scalar test loss (if the model has no metrics)
            or list of scalars (if the model computes other metrics).
            The attribute `model.metrics_names` will give you
            the display labels for the scalar outputs.

        Raises:
            RuntimeError: if the model was never compiled.
        """
        if self.model is None:
          raise RuntimeError('The model needs to be compiled ' 'before being used.')
        return self.model.evaluate_generator(
            generator,
            steps,
            max_q_size=max_q_size,
            workers=workers,
            pickle_safe=pickle_safe)
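
A hedged usage sketch; `model` and the batch shapes are hypothetical:

import numpy as np

def batch_generator():
    while True:                                  # loop forever; `steps` bounds evaluation
        x = np.random.rand(32, 10)               # hypothetical inputs
        y = np.random.rand(32, 1)                # hypothetical targets
        yield x, y

# loss = model.evaluate_generator(batch_generator(), steps=50, workers=2)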
Project: LIE    Author: EmbraceLife    | project source | file source
def predict_generator(self,
                            generator,
                            steps,
                            max_q_size=10,
                            workers=1,
                            pickle_safe=False,
                            verbose=0):
        """Generates predictions for the input samples from a data generator.

        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        Arguments:
            generator: generator yielding batches of input samples.
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
            max_q_size: maximum size for the generator queue
            workers: maximum number of processes to spin up
            pickle_safe: if True, use process based threading.
                Note that because this implementation
                relies on multiprocessing, you should not pass
                non picklable arguments to the generator
                as they can't be passed easily to children processes.
            verbose: verbosity mode, 0 or 1.

        Returns:
            A Numpy array of predictions.
        """
        if self.model is None:
          self.build()
        return self.model.predict_generator(
            generator,
            steps,
            max_q_size=max_q_size,
            workers=workers,
            pickle_safe=pickle_safe,
            verbose=verbose)
Project: LIE    Author: EmbraceLife    | project source | file source
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
          lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.int_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):

          m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
          u_t = K.maximum(self.beta_2 * u, K.abs(g))
          p_t = p - lr_t * m_t / (u_t + self.epsilon)

          self.updates.append(K.update(m, m_t))
          self.updates.append(K.update(u, u_t))

          new_p = p_t
          # apply constraints
          if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
          self.updates.append(K.update(p, new_p))
        return self.updates
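
Unlike the DNGPU optimizer above, this Keras Adamax does apply first-moment bias correction, folded into the learning rate via `lr_t = lr / (1 - beta_1**t)`. A quick numeric sketch (hypothetical values):

lr, beta_1 = 0.002, 0.9                          # hypothetical hyperparameters
for t in (1, 10, 100):
    print(t, lr / (1. - beta_1 ** t))            # 0.02, ~0.00307, ~0.002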
Project: LIE    Author: EmbraceLife    | project source | file source
def _merge_function(self, inputs):
            output = inputs[0]
            for i in range(1, len(inputs)):
              output = K.maximum(output, inputs[i])
            return output
Project: LIE    Author: EmbraceLife    | project source | file source
def maximum(inputs, **kwargs):
          """Functional interface to the `Maximum` layer.

          Arguments:
              inputs: A list of input tensors (at least 2).
              **kwargs: Standard layer keyword arguments.

          Returns:
              A tensor, the element-wise maximum of the inputs.
          """
          return Maximum(**kwargs)(inputs)
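
A hedged wiring sketch; `Input` is assumed to be the usual Keras-style placeholder layer and is not part of the snippet above:

a = Input(shape=(16,))          # hypothetical input layers
b = Input(shape=(16,))
merged = maximum([a, b])        # element-wise maximum, same shape as the inputs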
Project: self-supervision    Author: gustavla    | project source | file source
def resize_to_ensure_size(image, target_height, target_width):
    height, width, _ = _ImageDimensions(image, static_only=False)

    # Do not preserve aspect ratio; the guard below is disabled, so the
    # resize always runs:
    # if height < target_height or width < target_width:
    image4 = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(
        image4, [tf.maximum(height, target_height), tf.maximum(width, target_width)])
    return image[0]
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def Clip(x):
  """Clips x to the range [-1., 1.]."""
  return math_ops.maximum(math_ops.minimum(x, 1.), -1.)
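
The same min/max composition in NumPy, for reference (a sketch):

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 2.0])
np.maximum(np.minimum(x, 1.0), -1.0)   # -> [-1., -0.5, 0., 1.]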
Project: LIE    Author: EmbraceLife    | project source | file source
def parameterized_truncated_normal(shape,
                                   means=0.0,
                                   stddevs=1.0,
                                   minvals=-2.0,
                                   maxvals=2.0,
                                   dtype=dtypes.float32,
                                   seed=None,
                                   name=None):
  """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    means: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the truncated normal distribution.
    minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
      the truncated normal distribution.
    maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
      the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See
      @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
  with ops.name_scope(name, "parameterized_truncated_normal",
                      [shape, means, stddevs, minvals, maxvals]) as name:
    shape_tensor = _ShapeTensor(shape)
    means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
    stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
    minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
    maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._parameterized_truncated_normal(
        shape_tensor,
        means_tensor,
        stddevs_tensor,
        minvals_tensor,
        maxvals_tensor,
        seed=seed1,
        seed2=seed2)
    return rnd
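
A hypothetical call of the function defined above, sampling a (2, 3) tensor from a normal truncated to [0, 1]:

samples = parameterized_truncated_normal(
    shape=[2, 3], means=0.5, stddevs=0.2, minvals=0.0, maxvals=1.0, seed=42)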