Python tensorflow.python.ops.math_ops module: abs() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.abs().

Project: DNGPU    Author: LUMII-Syslab    | Project Source | File Source
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        # clip gradient so that each value exceeds its previous maximum by no more than clip_multiplier
        if self.clip_gradients:
            clipVal = v * clip_multiplier_t + clip_epsilon_t
            grad = clip_ops.clip_by_value(grad, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t

        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)
        # v := max(beta2 * v , abs(grad))
        v_t = state_ops.assign(v, math_ops.maximum(beta2_t * v, math_ops.abs(grad)), use_locking=self._use_locking)
        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use a bias-correction term for the first moment; it gives no observable benefit
        var_update = state_ops.assign_sub(var, lr_t * m_t / (v_t + epsilon_t), use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, v_t, m_t])
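
The update above replaces Adam's moving average of squared gradients with a running maximum of absolute gradients. A minimal sketch of one dense step in plain Python (hypothetical hyperparameter values, clipping omitted):

lr, beta1, beta2, eps = 0.01, 0.9, 0.999, 1e-8  # hypothetical values
var, m, v, grad = 10.0, 0.0, 0.0, 2.0
m = beta1 * m + (1 - beta1) * grad    # m := beta1 * m + (1 - beta1) * g_t
v = max(beta2 * v, abs(grad))         # v := max(beta2 * v, |g_t|)
var -= lr * m / (v + eps)             # no bias correction, as noted above
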
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def he_normal(seed=None):
      """He normal initializer.

      It draws samples from a truncated normal distribution centered on 0
      with `stddev = sqrt(2 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='normal', seed=seed)
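
As a quick check of the formula, the stddev for a weight tensor with a hypothetical fan_in of 512 works out to:

import math
fan_in = 512                      # hypothetical number of input units
stddev = math.sqrt(2.0 / fan_in)  # = 0.0625
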
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def he_uniform(seed=None):
      """He uniform variance scaling initializer.

      It draws samples from a uniform distribution within [-limit, limit]
      where `limit` is `sqrt(6 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='uniform', seed=seed)


    # Compatibility aliases

    # pylint: disable=invalid-name
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def test_normal_entropy_sample_form_gets_approximate_answer(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist.entropy()

      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed
      # to pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def test_default_entropy_falls_back_on_sample_if_analytic_not_available(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      # NormalNoEntropy is like a Normal, but does not have .entropy method, so
      # we are forced to fall back on sample entropy.
      dist_no_entropy = NormalNoEntropy(loc=1.11, scale=2.22)
      dist_yes_entropy = distributions.Normal(loc=1.11, scale=2.22)

      mc_entropy = entropy.entropy_shannon(
          dist_no_entropy, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist_yes_entropy.entropy()

      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed
      # to pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def testNoGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def test_log_abs_det(self):
    self._maybe_skip("log_abs_det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            mat_log_abs_det = math_ops.log(
                math_ops.abs(linalg_ops.matrix_determinant(mat)))
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det],
                feed_dict=feed_dict)
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _sqrt_log_det_core(self, diag_chol_c):
    """Finish computation of Sqrt[Log[Det]]."""
    # Complete computation of ._log_det and ._batch_log_det, after the initial
    # Cholesky factor has been taken with the appropriate batch/non-batch method.

    # det(M + VDV^T) = det(D^{-1} + V^T M^{-1} V) * det(D) * det(M)
    #                = det(C) * det(D) * det(M)
    # Multiply by 2 here because this is the log-det of the Cholesky factor of C
    log_det_c = 2 * math_ops.reduce_sum(
        math_ops.log(math_ops.abs(diag_chol_c)),
        reduction_indices=[-1])
    # Add together to get Log[det(M + VDV^T)], the Log-det of the updated square
    # root.
    log_det_updated_sqrt = (
        log_det_c + self._diag_operator.log_det() + self._operator.log_det())
    return log_det_updated_sqrt
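
The identity in the comment is the generalized matrix determinant lemma, det(M + VDV^T) = det(D^{-1} + V^T M^{-1} V) * det(D) * det(M); a small NumPy check with hypothetical matrices:

import numpy as np

rng = np.random.default_rng(1)
m = 2.0 * np.eye(3)                                # M
v = rng.standard_normal((3, 2))                    # V
d = np.diag([0.5, 1.5])                            # D
c = np.linalg.inv(d) + v.T @ np.linalg.inv(m) @ v  # C = D^{-1} + V^T M^{-1} V
lhs = np.linalg.det(m + v @ d @ v.T)
rhs = np.linalg.det(c) * np.linalg.det(d) * np.linalg.det(m)
assert np.isclose(lhs, rhs)
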
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusScale"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df, scale]) as ns:
      super(StudentTWithAbsDfSoftplusScale, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def testNumericOps(self):
    for dtype in self.numeric_types:
      self._testUnary(
          math_ops.abs,
          np.array([[2, -1]], dtype=dtype),
          expected=np.array([[2, 1]], dtype=dtype))

      self._testUnary(
          math_ops.negative,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[1, -1]], dtype=dtype))

      self._testUnary(
          math_ops.square,
          np.array([[-2, 3]], dtype=dtype),
          expected=np.array([[4, 9]], dtype=dtype))

      self._testUnary(
          array_ops.zeros_like,
          np.array([[4, 3], [2, 1]], dtype=dtype),
          expected=np.array([[0, 0], [0, 0]], dtype=dtype))
Project: DNGPU    Author: LUMII-Syslab    | Project Source | File Source
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        v_slice = array_ops.gather(v, grad.indices)

        # clip gradient so that each value exceeds its previous maximum by no more than clip_multiplier
        clipped_values = grad.values
        if self.clip_gradients:
            clipVal = v_slice * clip_multiplier_t + clip_epsilon_t
            clipped_values = clip_ops.clip_by_value(grad.values, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t_values = beta1_t * array_ops.gather(m, grad.indices) + (1 - beta1_t) * clipped_values
        m_t = state_ops.scatter_update(m, grad.indices, m_t_values, use_locking=self._use_locking)

        # v := max(beta2 * v , abs(grad))
        v_t_values = math_ops.maximum(beta2_t * v_slice, math_ops.abs(clipped_values))
        v_t = state_ops.scatter_update(v, grad.indices, v_t_values, use_locking=self._use_locking)

        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use a bias-correction term for the first moment; it gives no observable benefit
        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * m_t_values / (v_t_values + epsilon_t),
                                           use_locking=self._use_locking)
        return control_flow_ops.group(var_update, v_t, m_t)
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def abs(x):
      """Element-wise absolute value.

      Arguments:
          x: Tensor or variable.

      Returns:
          A tensor.
      """
      return math_ops.abs(x)
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def mean_absolute_error(y_true, y_pred):
      return K.mean(K.abs(y_pred - y_true), axis=-1)
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def mean_absolute_percentage_error(y_true, y_pred):
      # Like MAE, but expressed as a percentage of the true values; sometimes easier to interpret.
      diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
      return 100. * K.mean(diff, axis=-1)
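
A NumPy rendering of the same computation, with hypothetical values and a small constant standing in for K.epsilon():

import numpy as np

y_true = np.array([2.0, 4.0])
y_pred = np.array([1.0, 5.0])
eps = 1e-7  # stands in for K.epsilon()
diff = np.abs((y_true - y_pred) / np.clip(np.abs(y_true), eps, None))
mape = 100.0 * np.mean(diff)  # (0.5 + 0.25) / 2 * 100 = 37.5
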
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def __call__(self, x):
        regularization = 0.
        if self.l1:
          regularization += K.sum(self.l1 * K.abs(x))
        if self.l2:
          regularization += K.sum(self.l2 * K.square(x))
        return regularization
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def call(self, inputs, mask=None):
            pos = K.relu(inputs)
            if K.backend() == 'theano':
              neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
                     (inputs - K.abs(inputs)) * 0.5)
            else:
              neg = -self.alpha * K.relu(-inputs)
            return pos + neg
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def _object_list_uid(object_list):
          object_list = _to_list(object_list)
          return ', '.join([str(abs(id(x))) for x in object_list])
Project: lsdc    Author: febert    | Project Source | File Source
def _ndtr(x):
  """Implements ndtr core logic."""
  half_sqrt_2 = constant_op.constant(
      0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  w = x * half_sqrt_2
  z = math_ops.abs(w)
  y = math_ops.select(math_ops.less(z, half_sqrt_2),
                      1. + math_ops.erf(w),
                      math_ops.select(math_ops.greater(w, 0.),
                                      2. - math_ops.erfc(z),
                                      math_ops.erfc(z)))
  return 0.5 * y
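
_ndtr is the standard normal CDF; the select branches are a numerically stable arrangement of the identity ndtr(x) = (1 + erf(x / sqrt(2))) / 2, which can be checked in its plain form:

import math

def ndtr_ref(x):
    # plain (non-stabilized) form of the same identity
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

assert abs(ndtr_ref(0.0) - 0.5) < 1e-12
assert abs(ndtr_ref(1.96) - 0.975) < 1e-3
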
Project: lsdc    Author: febert    | Project Source | File Source
def absolute_difference(predictions, targets, weight=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then the
  loss is simply scaled by the given value. If `weight` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weight` vector. If the shape of
  `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets` or
      if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = math_ops.abs(math_ops.sub(predictions, targets))
    return compute_weighted_loss(losses, weight)
Project: lsdc    Author: febert    | Project Source | File Source
def _l1_loss(self):
    """Computes the (un-normalized) l1 loss of the model."""
    with name_scope('sdca/l1_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          with ops.device(weights.device):
            sums.append(
                math_ops.reduce_sum(
                    math_ops.abs(math_ops.cast(weights, dtypes.float64))))
      sum = math_ops.add_n(sums)
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * sum
Project: lsdc    Author: febert    | Project Source | File Source
def _log_prob(self, x):
    return (-math.log(2.) - math_ops.log(self.scale) -
            math_ops.abs(x - self.loc) / self.scale)
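
This is the Laplace log-density, log p(x) = -log(2 * scale) - |x - loc| / scale; a SciPy cross-check with hypothetical parameters:

import numpy as np
from scipy.stats import laplace

loc, scale, x = 1.0, 2.0, 3.0
mine = -np.log(2.0) - np.log(scale) - np.abs(x - loc) / scale
assert np.isclose(mine, laplace.logpdf(x, loc=loc, scale=scale))
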
Project: lsdc    Author: febert    | Project Source | File Source
def _prob(self, x):
    return 0.5 / self.scale * math_ops.exp(
        -math_ops.abs(x - self.loc) / self.scale)
Project: lsdc    Author: febert    | Project Source | File Source
def _cdf(self, x):
    y = x - self.loc
    return (0.5 + 0.5 * math_ops.sign(y) *
            (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
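
The sign/exp expression is the piecewise Laplace CDF written in branch-free form; a NumPy/SciPy check:

import numpy as np
from scipy.stats import laplace

loc, scale = 1.0, 2.0
for x in (-1.0, 1.0, 4.0):
    y = x - loc
    mine = 0.5 + 0.5 * np.sign(y) * (1.0 - np.exp(-np.abs(y) / scale))
    assert np.isclose(mine, laplace.cdf(x, loc=loc, scale=scale))
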
Project: lsdc    Author: febert    | Project Source | File Source
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
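
The tolerance used here is the dtype's decimal resolution, for example:

import numpy as np

print(np.finfo(np.float32).resolution)  # 1e-06
print(np.finfo(np.float64).resolution)  # 1e-15
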
Project: lsdc    Author: febert    | Project Source | File Source
def _shard_indices(self, keys):
    key_shape = keys.get_shape()
    if key_shape.ndims > 1:
      # If keys are a matrix (i.e. a single key is a vector), we use the first
      # element of each key vector to determine the shard.
      keys = array_ops.slice(keys, [0, 0], [key_shape[0].value, 1])
      keys = array_ops.reshape(keys, [-1])
    indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
    return math_ops.cast(indices, dtypes.int32)
Project: lsdc    Author: febert    | Project Source | File Source
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], self.batch_shape()))
    # Sample uniformly at random from the open interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
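
The return expression is inverse-transform sampling for the Laplace distribution; a NumPy sketch with hypothetical parameters:

import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 1.0, 2.0, 100000
u = rng.uniform(np.nextafter(-1.0, 0.0), 1.0, size=n)  # open interval (-1, 1)
samples = loc - scale * np.sign(u) * np.log(1.0 - np.abs(u))
assert abs(samples.mean() - loc) < 0.05  # Laplace(loc, scale) has mean loc
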
Project: lsdc    Author: febert    | Project Source | File Source
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if data is None:
    data = [
        message,
        "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
        y.name, y
    ]

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).eps
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
Project: lsdc    Author: febert    | Project Source | File Source
def _inverse_log_det_jacobian(self, y):  # pylint: disable=unused-argument
    abs_diag = math_ops.abs(array_ops.matrix_diag_part(self.scale))
    return -math_ops.reduce_sum(math_ops.log(abs_diag), reduction_indices=[-1])
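
For a triangular scale matrix the determinant is the product of the diagonal, so log|det| reduces to a sum of log|diag| terms; a NumPy check:

import numpy as np

scale = np.array([[2.0, 0.0],
                  [1.5, -3.0]])  # lower-triangular scale
lhs = np.log(np.abs(np.linalg.det(scale)))
rhs = np.sum(np.log(np.abs(np.diag(scale))))
assert np.isclose(lhs, rhs)
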
Project: GORU-tensorflow    Author: jingli9111    | Project Source | File Source
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) + math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z/math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)

    return math_ops.multiply(step3, step2)
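
In the complex branch, modReLU rescales the magnitude of z by ReLU(|z| + b) while preserving its phase; a NumPy reference under the same small-epsilon convention:

import numpy as np

def modrelu_ref(z, b):
    z_norm = np.abs(z) + 0.00001
    return (z / z_norm) * np.maximum(z_norm + b, 0.0)

print(modrelu_ref(3 + 4j, -2.0))  # magnitude 5 shrinks to ~3, phase unchanged
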
Project: polyaxon    Author: polyaxon    | Project Source | File Source
def absolute_difference(weights=1.0, name='AbsoluteDifference', scope=None, collect=True):
    """Adds an Absolute Difference loss to the training procedure.

    `weights` acts as a coefficient for the loss. If a scalar is provided, then
    the loss is simply scaled by the given value. If `weights` is a `Tensor` of
    shape `[batch_size]`, then the total loss for each sample of the batch is
    rescaled by the corresponding element in the `weights` vector. If the shape of
    `weights` matches the shape of `predictions`, then the loss of each
    measurable element of `predictions` is scaled by the corresponding value of
    `weights`.

    Args:
        weights: Optional `Tensor` whose rank is either 0, or the same rank as
            `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
            be either `1`, or the same as the corresponding `losses` dimension).
        name: operation name.
        scope: operation scope.
        collect: whether to collect this metric under the metric collection.

    Returns:
        A scalar `Tensor` representing the loss value.
    """
    def inner_loss(y_true, y_pred):
        losses = math_ops.abs(math_ops.subtract(y_pred, y_true))
        return losses

    return built_loss(inner_loss, weights, name, scope, collect)
Project: polyaxon    Author: polyaxon    | Project Source | File Source
def huber_loss(weights=1.0, clip=0.0, name='HuberLoss', scope=None, collect=True):
    """Computes Huber Loss for DQN.

    [Wikipedia link](https://en.wikipedia.org/wiki/Huber_loss)
    [DeepMind link](https://sites.google.com/a/deepmind.com/dqn/)

    Args:
        weights: Coefficients for the loss, a `scalar`.
        clip: Value of `delta` above which the loss becomes linear; `0.0` disables clipping.
        scope: scope to add the op to.
        name: name of the op.

    Returns:
        A scalar `Tensor` representing the loss value.

    Raises:
        ValueError: If `predictions` shape doesn't match `labels` shape, or `weights` is `None`.
    """

    def inner_loss(y_true, y_pred):
        delta = math_ops.abs(math_ops.subtract(y_pred, y_true))
        losses = math_ops.square(delta)
        if clip > 0.0:
            losses = tf.where(delta < clip, 0.5 * losses, delta - 0.5)

        return losses
    return built_loss(inner_loss, weights, name, scope, collect)
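
A NumPy sketch of the clipped branch with hypothetical values; note that the linear part `delta - 0.5` agrees with the standard Huber form only when clip == 1.0, the DQN setting:

import numpy as np

delta = np.array([0.3, 2.0])  # |y_pred - y_true|
clip = 1.0
losses = np.where(delta < clip, 0.5 * np.square(delta), delta - 0.5)
print(losses)  # [0.045 1.5  ]: quadratic inside the clip, linear outside
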
Project: Optimization    Author: tdozat    | Project Source | File Source
def _prepare(self, grads_and_vars):
    """"""

    if self._lr is None:
      sTy = 0
      sTs = 0
      yTy = 0
      for g_t, x_tm1 in grads_and_vars:
        if g_t is None:
          continue
        with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
          if isinstance(g_t, ops.Tensor):
            g_tm1 = self.get_slot(x_tm1, 'g')
            s_tm1 = self.get_slot(x_tm1, 's')
            y_t = (g_t-g_tm1)
            sTy += math_ops.reduce_sum(s_tm1*y_t)
            sTs += math_ops.reduce_sum(s_tm1**2)
            yTy += math_ops.reduce_sum(y_t**2)
          else:
            idxs, idxs_ = array_ops.unique(g_t.indices)
            g_t_ = math_ops.unsorted_segment_sum(g_t.values, idxs_, array_ops.size(idxs))
            g_tm1 = self.get_slot(x_tm1, 'g')
            g_tm1_ = array_ops.gather(g_tm1, idxs)
            s_tm1 = self.get_slot(x_tm1, 's')
            s_tm1_ = array_ops.gather(s_tm1, idxs)
            y_t_ = (g_t_-g_tm1_)
            sTy += math_ops.reduce_sum(s_tm1_*y_t_)
            sTs += math_ops.reduce_sum(s_tm1_**2)
            yTy += math_ops.reduce_sum(y_t_**2)
      sTy = math_ops.abs(sTy)
      self._lr = sTs / (sTy + self._eps)

  #=============================================================
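
The accumulators above appear to implement a Barzilai-Borwein step size, lr = s's / |s'y|, computed across all variables; in scalar NumPy form:

import numpy as np

s = np.array([0.1, -0.2])    # previous parameter step s_{t-1}
y = np.array([0.05, -0.12])  # change in gradient y_t
eps = 1e-8                   # stands in for self._eps
lr = s.dot(s) / (abs(s.dot(y)) + eps)  # 0.05 / 0.029 ~= 1.72
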
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _logexpm1(x):
  """Stably calculate log(exp(x)-1)."""
  with ops.name_scope("logsumexp1"):
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    # Choose a small offset that makes gradient calculations stable for
    # float16, float32, and float64.
    safe_log = lambda y: math_ops.log(y + eps / 1e8)  # For gradient stability
    return array_ops.where(
        math_ops.abs(x) < eps,
        safe_log(x) + x/2 + x*x/24,  # small x approximation to log(expm1(x))
        safe_log(math_ops.exp(x) - 1))
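
For moderate x this reduces to log(exp(x) - 1); near zero it switches to the series log(x) + x/2 + x^2/24, which agrees with the direct computation where both are stable:

import numpy as np

x = 1e-6
series = np.log(x) + x / 2 + x * x / 24
assert np.isclose(series, np.log(np.expm1(x)))
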
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = math_ops.abs(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope=scope)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _l1_loss(self):
    """Computes the (un-normalized) l1 loss of the model."""
    with name_scope('sdca/l1_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          with ops.device(weights.device):
            sums.append(
                math_ops.reduce_sum(
                    math_ops.abs(math_ops.cast(weights, dtypes.float64))))
      sum = math_ops.add_n(sums)
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * sum
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def testInvalidGlobalStep(self):
    with ops.Graph().as_default() as g, self.test_session(graph=g):
      x = array_ops.placeholder(dtypes.float32, [])
      var = variable_scope.get_variable(
          "test", [], initializer=init_ops.constant_initializer(10))
      loss = math_ops.abs(var * x)
      with self.assertRaises(AttributeError):
        optimizers_lib.optimize_loss(
            loss,
            global_step=constant_op.constant(
                43, dtype=dtypes.int64),
            learning_rate=0.1,
            optimizer="SGD")
      with self.assertRaises(TypeError):
        optimizers_lib.optimize_loss(
            loss,
            global_step=variable_scope.get_variable(
                "global_step", [],
                trainable=False,
                dtype=dtypes.float64,
                initializer=init_ops.constant_initializer(
                    0.0, dtype=dtypes.float64)),
            learning_rate=0.1,
            optimizer="SGD")
      with self.assertRaises(ValueError):
        optimizers_lib.optimize_loss(
            loss,
            global_step=variable_scope.get_variable(
                "global_step", [1],
                trainable=False,
                dtype=dtypes.int64,
                initializer=init_ops.constant_initializer(
                    [0], dtype=dtypes.int64)),
            learning_rate=0.1,
            optimizer="SGD")
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _log_abs_determinant(self):
    if self._is_spd:
      diag = array_ops.matrix_diag_part(self._chol)
      return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
    abs_det = math_ops.abs(self.determinant())
    return math_ops.log(abs_det)
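
The SPD branch uses det(A) = det(L)^2 = (prod diag(L))^2 for the Cholesky factor L, hence the 2 * reduce_sum(log(diag)); a NumPy check:

import numpy as np

a = np.array([[4.0, 1.0],
              [1.0, 3.0]])  # SPD matrix
chol = np.linalg.cholesky(a)
lhs = np.log(np.linalg.det(a))
rhs = 2.0 * np.sum(np.log(np.diag(chol)))
assert np.isclose(lhs, rhs)
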
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _assert_non_singular(self):
    return check_ops.assert_positive(
        math_ops.abs(self.multiplier),
        message="LinearOperator was singular")
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], self.batch_shape()), 0)
    # Sample uniformly at random from the open interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _cdf(self, x):
    z = self._z(x)
    return (0.5 + 0.5 * math_ops.sign(z) *
            (1. - math_ops.exp(-math_ops.abs(z))))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _batch_log_det(self):
    rank = array_ops.size(self._shape_arg)
    last_dim = math_ops.cast(
        array_ops.gather(self._shape_arg, rank - 1), dtype=self.dtype)
    log_det = (last_dim * math_ops.log(math_ops.abs(self._scale)) *
               array_ops.ones(self.batch_shape(), dtype=self.dtype))
    log_det.set_shape(self.get_batch_shape())
    return log_det
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _batch_sqrt_log_abs_det(self):
    rank = array_ops.size(self._shape_arg)
    last_dim = math_ops.cast(
        array_ops.gather(self._shape_arg, rank - 1), dtype=self.dtype)
    sqrt_log_abs_det = 0.5 * last_dim * math_ops.log(
        math_ops.abs(self._scale)) * array_ops.ones(
            self.batch_shape(), dtype=self.dtype)
    sqrt_log_abs_det.set_shape(self.get_batch_shape())
    return sqrt_log_abs_det
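
For a scaled identity c * I_k, log|det| is k * log|c|, and the square-root factor contributes half of it, matching the 0.5 above; a NumPy check with hypothetical values:

import numpy as np

scale, k = -2.0, 3
m = scale * np.eye(k)
lhs = np.log(np.abs(np.linalg.det(m)))
rhs = k * np.log(np.abs(scale))
assert np.isclose(lhs, rhs)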