Python tensorflow.python.ops.math_ops module: pow() usage examples

The following 11 code examples, extracted from open-source Python projects, show how tensorflow.python.ops.math_ops.pow() is used in practice.
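
As a quick orientation before the per-project examples, here is a minimal sketch (assuming a TensorFlow 1.x environment, where this private import path is stable): math_ops.pow computes element-wise x**y.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([2.0, 3.0, 4.0])
y = math_ops.pow(x, 2.0)  # element-wise square -> [4.0, 9.0, 16.0]

with tf.Session() as sess:
    print(sess.run(y))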

Project: lsdc    Author: febert
def _sample_n(self, n, seed=None):
    # We use 2 uniform random floats to generate polar random variates.
    # http://dl.acm.org/citation.cfm?id=179631
    # Theorem 2. Let G, H be iid variates, uniformly distributed on [0,1].
    # Let theta = 2*pi*H, let R = sqrt(df*(G^(-2/df) - 1)) for df > 0.
    # Let X = R*cos(theta), and let Y = R*sin(theta).
    # Then X ~ t_df and Y ~ t_df.
    # The variates X and Y are not independent.
    shape = array_ops.concat(0, ([2, n], self.batch_shape()))
    uniform = random_ops.random_uniform(shape=shape,
                                        dtype=self.dtype,
                                        seed=seed)
    samples_g, samples_h = array_ops.unpack(uniform, num=2)
    theta = (2. * math.pi) * samples_h
    r = math_ops.sqrt(self.df *
                      (math_ops.pow(samples_g, -2 / self.df) - 1))
    samples = r * math_ops.cos(theta)
    return samples * self.sigma + self.mu
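
The comment block above describes the polar construction; the following NumPy sketch (an illustration, not part of the lsdc source) reproduces the same math and can be sanity-checked against the known t-distribution variance.

import numpy as np

def sample_t_polar(df, n, seed=0):
    rng = np.random.default_rng(seed)
    g, h = rng.uniform(size=(2, n))  # G, H iid Uniform(0, 1)
    theta = 2.0 * np.pi * h          # theta = 2*pi*H
    r = np.sqrt(df * (g ** (-2.0 / df) - 1.0))
    return r * np.cos(theta)         # X = R*cos(theta) ~ t_df

samples = sample_t_polar(df=5.0, n=100000)
print(samples.std())  # ~ sqrt(df / (df - 2)) = sqrt(5/3), about 1.29 for df=5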
Project: Clairvoyante    Author: aquaskyline
def dropout_selu(x, rate, alpha= -1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1-binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x))
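
A hypothetical usage sketch (the placeholder shape and rate are illustrative, and the module-level imports of the original file are assumed to be in place). `training` selects the smart_cond branch: the dropout path during training, identity otherwise.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128])
train_out = dropout_selu(x, rate=0.1, training=True)   # scaled alpha-dropout
infer_out = dropout_selu(x, rate=0.1, training=False)  # identity pass-through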
Project: LIE    Author: EmbraceLife
def pow(x, a):
      """Element-wise exponentiation.

      Arguments:
          x: Tensor or variable.
          a: Python integer.

      Returns:
          A tensor.
      """
      return math_ops.pow(x, a)
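
A minimal sketch of calling this backend wrapper (x is assumed to be any float tensor; note that the wrapper shadows Python's builtin pow in its module):

import tensorflow as tf

x = tf.constant([1.0, 4.0, 9.0])
squared = pow(x, 2)      # element-wise x**2
inv_root = pow(x, -0.5)  # element-wise 1/sqrt(x)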
Project: LIE    Author: EmbraceLife
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
          lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
          m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
          v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
          p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

          self.updates.append(K.update(m, m_t))
          self.updates.append(K.update(v, v_t))

          new_p = p_t
          # apply constraints
          if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
          self.updates.append(K.update(p, new_p))
        return self.updates
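
The two K.pow calls above implement Adam's bias correction, lr_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t); a plain-Python sketch of the schedule with the usual defaults:

beta_1, beta_2, lr = 0.9, 0.999, 0.001
for t in (1, 10, 100, 1000):
    lr_t = lr * (1.0 - beta_2 ** t) ** 0.5 / (1.0 - beta_1 ** t)
    print(t, lr_t)  # the correction factor approaches 1, so lr_t -> lr as t grows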
Project: LIE    Author: EmbraceLife
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
          lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.int_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):

          m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
          u_t = K.maximum(self.beta_2 * u, K.abs(g))
          p_t = p - lr_t * m_t / (u_t + self.epsilon)

          self.updates.append(K.update(m, m_t))
          self.updates.append(K.update(u, u_t))

          new_p = p_t
          # apply constraints
          if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
          self.updates.append(K.update(p, new_p))
        return self.updates
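
This variant is Adamax: the second moment is replaced by an exponentially weighted infinity norm, u_t = max(beta_2 * u, |g|), so only the first moment needs bias correction (lr_t = lr / (1 - beta_1^t)). A one-step NumPy sketch with illustrative values:

import numpy as np

beta_1, beta_2, lr, eps = 0.9, 0.999, 0.002, 1e-8
g = np.array([0.1, -0.3])              # a toy gradient
m, u, t = np.zeros_like(g), np.zeros_like(g), 1

m = beta_1 * m + (1.0 - beta_1) * g
u = np.maximum(beta_2 * u, np.abs(g))  # infinity-norm accumulator
step = (lr / (1.0 - beta_1 ** t)) * m / (u + eps)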
Project: lsdc    Author: febert
def _prob(self, x):
    y = (x - self.mu) / self.sigma
    half_df = 0.5 * self.df
    return (math_ops.exp(math_ops.lgamma(0.5 + half_df) -
                         math_ops.lgamma(half_df)) /
            (math_ops.sqrt(self.df) * math.sqrt(math.pi) * self.sigma) *
            math_ops.pow(1. + math_ops.square(y) / self.df, -(0.5 + half_df)))
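
For mu=0 and sigma=1 this is the standard Student-t density; a quick plain-Python check against scipy (assuming scipy is installed):

import math
from scipy.stats import t as student_t

def t_pdf(x, df):
    half_df = 0.5 * df
    return (math.exp(math.lgamma(0.5 + half_df) - math.lgamma(half_df))
            / (math.sqrt(df) * math.sqrt(math.pi))
            * (1.0 + x * x / df) ** -(0.5 + half_df))

print(t_pdf(0.7, df=5.0), student_t.pdf(0.7, df=5.0))  # should agree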
Project: paints_tf    Author: latte0
def dropout_selu(x, rate, alpha= -1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1-binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = array_ops.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = array_ops.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # element-wise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, math_ops.add, core.add),
        ('sub', operator.sub, math_ops.subtract, core.sub),
        ('mul', operator.mul, math_ops.multiply, core.mul),
        ('div', operator.truediv, math_ops.div, core.div),
        ('mod', operator.mod, math_ops.mod, core.mod),
        ('pow', operator.pow, math_ops.pow, core.pow_function),
        ('equal', None, math_ops.equal, core.equal),
        ('less', operator.lt, math_ops.less, core.less),
        ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
        ('not_equal', None, math_ops.not_equal, core.not_equal),
        ('greater', operator.gt, math_ops.greater, core.greater),
        ('greater_equal', operator.ge, math_ops.greater_equal,
         core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
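
The 'pow' row above pairs three equivalent spellings of the same op; a minimal sketch of the equivalence under test (assuming a TF 1.x session):

import operator
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([2.0, 3.0])
y = tf.constant([3.0, 2.0])
a = operator.pow(x, y)  # dispatches to the overloaded Tensor.__pow__
b = math_ops.pow(x, y)  # the underlying op; both evaluate to [8.0, 9.0]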
Project: LIE    Author: EmbraceLife
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1

          # Apply a warming momentum schedule, following the recommendations in [2]
        momentum_cache_t = self.beta_1 * (1. - 0.5 *
                                          (K.pow(0.96, t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (1. - 0.5 *
                                            (K.pow(0.96,
                                                   (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
          # the following equations given in [1]
          g_prime = g / (1. - m_schedule_new)
          m_t = self.beta_1 * m + (1. - self.beta_1) * g
          m_t_prime = m_t / (1. - m_schedule_next)
          v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
          v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
          m_t_bar = (
              1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

          self.updates.append(K.update(m, m_t))
          self.updates.append(K.update(v, v_t))

          p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
          new_p = p_t

          # apply constraints
          if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
          self.updates.append(K.update(p, new_p))
        return self.updates
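
A plain-Python sketch of the warming momentum schedule the comments refer to (beta_1 and schedule_decay set to the usual Nadam defaults):

beta_1, schedule_decay = 0.9, 0.004
m_schedule = 1.0
for t in range(1, 6):
    momentum_cache_t = beta_1 * (1.0 - 0.5 * 0.96 ** (t * schedule_decay))
    m_schedule *= momentum_cache_t  # running product of caches, used to de-bias m_t
    print(t, momentum_cache_t, m_schedule)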
Project: tefla    Author: openAGI
def dropout_selu(x, is_training, drop_p=0.2, alpha=-1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name='dropout_selu', outputs_collections=None, **unused):
    """
    Dropout layer for self-normalizing networks
    Args:
        x: a `Tensor`.
        is_training: a bool, training or validation
        drop_p: probability of dropping a unit
        fixedPointMean: float, the mean used to calculate the selu parameters
        fixedPointVar: float, the variance used to calculate the selu parameters
        alpha: float, product of the two selu parameters
        name: an optional scope/name of the layer
        outputs_collections: The collections to which the outputs are added.

    Returns:
        A `Tensor` representing the results of the dropout operation.
    """
    _check_unused(unused, name)

    def dropout_selu_impl(x, drop_p, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - drop_p
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = tf.convert_to_tensor(
            keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_has_rank(0)

        alpha = tf.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_has_rank(0)

        if tf.contrib.util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else x.get_shape().as_list()
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape,
                                           seed=seed, dtype=x.dtype)
        binary_tensor = tf.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = tf.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) *
                                                  math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * \
            (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with tf.name_scope(name, values=[x]):
        if is_training:
            output = dropout_selu_impl(
                x, drop_p, alpha, noise_shape, seed, name)
        else:
            output = x
        return _collect_named_outputs(outputs_collections, name, output)
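
Unlike the smart_cond variants above, is_training here is a plain Python bool, so the branch is fixed when the graph is built; a hypothetical call (net stands in for any float tensor from the surrounding model):

train_out = dropout_selu(net, is_training=True, drop_p=0.1)
infer_out = dropout_selu(net, is_training=False, drop_p=0.1)  # identity path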
Project: lsdc    Author: febert
def get_mean_baseline(ema_decay=0.99, name=None):
  """ExponentialMovingAverage baseline.

  EMA initializes to 0, which introduces a bias. This baseline implements the
  bias correction term from Adam (section 3 of
  https://arxiv.org/pdf/1412.6980v8.pdf), dividing by `1 - ema_decay^t`, where
  `t` is the step count.

  Args:
    ema_decay: decay rate for the ExponentialMovingAverage.
    name: name for variable scope of the ExponentialMovingAverage.

  Returns:
    Callable baseline function that takes the `DistributionTensor` (unused) and
    the downstream `loss`, and returns an EMA of the loss.
  """

  def mean_baseline(_, loss):
    with vs.variable_scope(name, default_name="MeanBaseline"):
      reduced_loss = math_ops.reduce_mean(loss)

      ema = training.ExponentialMovingAverage(decay=ema_decay)
      update_op = ema.apply([reduced_loss])

      # The bias correction term requires keeping track of how many times the
      # EMA has been updated. Creating a variable here to do so. The global step
      # is not used because it may or may not track exactly the number of times
      # the EMA is updated.
      ema_var = ema.average(reduced_loss)
      assert ema_var is not None
      with ops.colocate_with(ema_var):
        num_updates = vs.get_variable(
            "local_ema_step", initializer=0, trainable=False)
      num_updates = num_updates.assign_add(1)
      bias_correction = 1. - math_ops.pow(ema_decay, math_ops.cast(
          num_updates, reduced_loss.dtype))

      with ops.control_dependencies([update_op]):
        baseline = ema.average(reduced_loss) / bias_correction

      return baseline

  return mean_baseline
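
The division by 1 - ema_decay^t is the same correction Adam uses; a plain-Python sketch of why the raw EMA is biased toward its zero initialization:

decay = 0.99
ema, t = 0.0, 0
for loss in (1.0, 1.0, 1.0):  # a constant loss stream
    t += 1
    ema = decay * ema + (1.0 - decay) * loss
    corrected = ema / (1.0 - decay ** t)
    print(t, ema, corrected)  # raw EMA starts near 0; corrected value is 1.0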