Python tensorflow.python.ops.math_ops module: floor() code examples

We extracted the following 29 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.math_ops.floor().
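
All of the snippets below target the TensorFlow 1.x internal API. For reference, a typical import block shared by most of them would look like the following (a sketch assembled here for convenience; the exact set varies from file to file):

import numbers

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops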

Project: lsdc | Author: febert
def __init__(self,
               df,
               mu,
               sigma,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusSigma"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df, sigma]) as ns:
      super(StudentTWithAbsDfSoftplusSigma, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          mu=mu,
          sigma=nn.softplus(sigma),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
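
The df=math_ops.floor(math_ops.abs(df)) pattern above maps any real-valued degrees-of-freedom input onto a valid non-negative integer. A minimal NumPy sketch of the same transformation (illustrative, not part of the project):

import numpy as np

df = np.array([-1.3, -3.2, 5.0])
print(np.floor(np.abs(df)))  # [1. 3. 5.]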
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusScale"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df, scale]) as ns:
      super(StudentTWithAbsDfSoftplusScale, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
Project: Clairvoyante | Author: aquaskyline
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x))
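
A hypothetical TF 1.x usage sketch for the function above (placeholder names and shapes are illustrative; assumes utils is tensorflow.python.layers.utils):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128], name="x")
training = tf.placeholder_with_default(False, [], name="training")
y = dropout_selu(x, rate=0.1, training=training)  # defined above

with tf.Session() as sess:
    out = sess.run(y, feed_dict={x: np.ones((4, 128), np.float32),
                                 training: True})
    print(out.shape)  # (4, 128)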
Project: dnn-quant | Author: euclidjda
def _input_dropout(self, inputs):
    # This implementation of dropout drops an entire feature along the time dim.
    random_tensor = self._keep_prob
    random_tensor += random_ops.random_uniform([self._batch_size, self._num_inputs],
                                               dtype=inputs.dtype)
    random_tensor = tf.tile(random_tensor, [1, self._num_unrollings])
    binary_tensor = math_ops.floor(random_tensor)

    ret = math_ops.div(inputs, self._keep_prob) * binary_tensor
    ret.set_shape(inputs.get_shape())
    return ret
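
The mask construction relies on a standard trick: keep_prob + Uniform[0, 1) lies in [keep_prob, 1 + keep_prob), so floor() yields 1 with probability keep_prob and 0 otherwise. A NumPy illustration (not from the project):

import numpy as np

keep_prob = 0.8
u = np.random.uniform(size=100000)
mask = np.floor(keep_prob + u)  # Bernoulli(keep_prob) draws in {0., 1.}
print(mask.mean())              # ~0.8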
Project: lsdc | Author: febert
def __init__(self, df, validate_args=False, allow_nan_stats=True,
               name="Chi2WithAbsDf"):
    with ops.name_scope(name, values=[df]) as ns:
      super(Chi2WithAbsDf, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
Project: lsdc | Author: febert
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.base_distribution.log_cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc | Author: febert
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
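
The same clipping logic can be checked in NumPy with a standard normal standing in for self.base_distribution (the cutoff values are assumptions chosen for illustration):

import numpy as np
from scipy.stats import norm

lower_cutoff, upper_cutoff = -2.0, 2.0
y = np.array([-3.0, -0.5, 0.7, 2.5])
j = np.floor(y)

cdf = norm.cdf(j)                            # P[X <= j]
cdf = np.where(j < lower_cutoff, 0.0, cdf)   # below the support: 0
cdf = np.where(j >= upper_cutoff, 1.0, cdf)  # at/above the cutoff: 1
print(cdf)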
Project: lsdc | Author: febert
def _mode(self):
    return math_ops.floor((self._n + 1) * self._p)
Project: lsdc | Author: febert
def _cdf(self, x):
    x = self._assert_valid_sample(x, check_integer=False)
    return math_ops.igammac(math_ops.floor(x + 1), self.lam)
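
Here igammac(floor(x + 1), lam) is the regularized upper incomplete gamma function Q(floor(x) + 1, lam), which is exactly the Poisson CDF evaluated at floor(x). A SciPy cross-check (illustrative):

import numpy as np
from scipy.special import gammaincc
from scipy.stats import poisson

lam = 3.5
x = np.array([0.0, 2.7, 5.0])
print(gammaincc(np.floor(x + 1), lam))  # Q(floor(x)+1, lam)
print(poisson.cdf(np.floor(x), lam))    # identical values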
Project: lsdc | Author: febert
def __init__(self,
               df,
               mu,
               sigma,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusSigma"):
    with ops.name_scope(name, values=[df, mu, sigma]) as ns:
      super(StudentTWithAbsDfSoftplusSigma, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          mu=mu,
          sigma=nn.softplus(sigma),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
Project: lsdc | Author: febert
def __init__(self,
               df,
               validate_args=False,
               allow_nan_stats=True,
               name="Chi2WithAbsDf"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df]) as ns:
      super(Chi2WithAbsDf, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
Project: lsdc | Author: febert
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc | Author: febert
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc | Author: febert
def _mode(self):
    return math_ops.floor((self._n + 1) * self._p)
Project: lsdc | Author: febert
def _mode(self):
    return math_ops.floor(self.lam)
Project: gradual-learning-rnn | Author: zivaharoni
def __init__(self, cell, batch_size, hidden_size, output_keep_prob=1.0, state_keep_prob=1.0):
        self._cell = cell
        self._new_output_keep_prob = tf.placeholder(tf.float32, shape=[], name="output_keep_prob")
        self._new_state_keep_prob = tf.placeholder(tf.float32, shape=[], name="state_keep_prob")
        self._output_keep_prob = tf.Variable(output_keep_prob, trainable=False)
        self._state_keep_prob = tf.Variable(state_keep_prob, trainable=False)

        self._output_keep_prob_update = tf.assign(self._output_keep_prob, self._new_output_keep_prob)
        self._state_keep_prob_update = tf.assign(self._state_keep_prob, self._new_state_keep_prob)

        self._batch_size = batch_size
        with tf.name_scope("variational_masks"):
            self._output_mask = tf.Variable(tf.ones(shape=[self._batch_size, hidden_size]), trainable=False)
            self._state_mask = tf.Variable(tf.ones(shape=[self._batch_size, hidden_size]), trainable=False)
            self._mem_mask = tf.Variable(tf.ones(shape=[self._batch_size, hidden_size]), trainable=False)

            with tf.name_scope("out_mask"):

                random_tensor = ops.convert_to_tensor(self._output_keep_prob)
                random_tensor += random_ops.random_uniform([self._batch_size, self._cell.output_size])
                output_mask = math_ops.floor(random_tensor)
                self._assign_output_mask = tf.assign(self._output_mask, output_mask)

            with tf.name_scope("rec_mask"):
                random_tensor = ops.convert_to_tensor(self._state_keep_prob)
                random_tensor += random_ops.random_uniform([self._batch_size, self._cell.output_size])
                state_mask = math_ops.floor(random_tensor)
                self._assign_state_mask = tf.assign(self._state_mask, state_mask)
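
The assign ops exist so that a fresh mask can be drawn once per batch and then reused at every time step, which is what makes the dropout "variational". A self-contained TF 1.x sketch of the same persistent-mask idea (shapes and keep_prob are illustrative):

import tensorflow as tf

keep_prob = 0.75
mask_var = tf.Variable(tf.ones([32, 256]), trainable=False)    # one mask per batch
new_mask = tf.floor(keep_prob + tf.random_uniform([32, 256]))  # Bernoulli(keep_prob)
resample = tf.assign(mask_var, new_mask)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(resample)      # draw a fresh mask at the start of a batch
    m = sess.run(mask_var)  # the same mask is reused across all time steps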
Project: paints_tf | Author: latte0
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x))
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, math_ops.abs, core.abs_function),
        ('neg', operator.neg, math_ops.negative, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, math_ops.sign, core.sign),
        ('reciprocal', None, math_ops.reciprocal, core.reciprocal),
        ('square', None, math_ops.square, core.square),
        ('round', None, math_ops.round, core.round_function),
        ('sqrt', None, math_ops.sqrt, core.sqrt),
        ('rsqrt', None, math_ops.rsqrt, core.rsqrt),
        ('log', None, math_ops.log, core.log),
        ('exp', None, math_ops.exp, core.exp),
        ('ceil', None, math_ops.ceil, core.ceil),
        ('floor', None, math_ops.floor, core.floor),
        ('cos', None, math_ops.cos, core.cos),
        ('sin', None, math_ops.sin, core.sin),
        ('tan', None, math_ops.tan, core.tan),
        ('acos', None, math_ops.acos, core.acos),
        ('asin', None, math_ops.asin, core.asin),
        ('atan', None, math_ops.atan, core.atan),
        ('lgamma', None, math_ops.lgamma, core.lgamma),
        ('digamma', None, math_ops.digamma, core.digamma),
        ('erf', None, math_ops.erf, core.erf),
        ('erfc', None, math_ops.erfc, core.erfc),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        math_ops.cast(self.original_lt, dtypes.float32) / total_size,
        self.original_lt.axes)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def __init__(self,
               df,
               validate_args=False,
               allow_nan_stats=True,
               name="Chi2WithAbsDf"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df]) as ns:
      super(Chi2WithAbsDf, self).__init__(
          df=math_ops.floor(math_ops.abs(df, name="abs_df"),
                            name="floor_abs_df"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = array_ops.where(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = array_ops.where(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.cdf(j)

    # Broadcast j against result_so_far, since this may be a single
    # distribution evaluated on a batch of samples.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = array_ops.where(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = array_ops.where(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _mode(self):
    return math_ops.floor((1. + self.total_count) * self.probs)
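
floor((1 + total_count) * probs) is the textbook mode of a Binomial(total_count, probs) distribution. A quick SciPy sanity check (illustrative; ties at integer (n + 1) * p are ignored):

import numpy as np
from scipy.stats import binom

n, p = 10, 0.3
pmf = binom.pmf(np.arange(n + 1), n, p)
print(np.argmax(pmf), np.floor((1 + n) * p))  # both 3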
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _mode(self):
    return math_ops.floor(self.lam)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testChi2WithAbsDf(self):
    with self.test_session():
      df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
      chi2 = chi2_lib.Chi2WithAbsDf(df=df_v)
      self.assertAllClose(
          math_ops.floor(math_ops.abs(df_v)).eval(), chi2.df.eval())
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testStudentTWithAbsDfSoftplusScale(self):
    with self.test_session():
      df = constant_op.constant([-3.2, -4.6])
      mu = constant_op.constant([-4.2, 3.4])
      sigma = constant_op.constant([-6.4, -8.8])
      student = ds.StudentTWithAbsDfSoftplusScale(df=df, loc=mu, scale=sigma)
      self.assertAllClose(
          math_ops.floor(math_ops.abs(df)).eval(), student.df.eval())
      self.assertAllClose(mu.eval(), student.loc.eval())
      self.assertAllClose(nn_ops.softplus(sigma).eval(), student.scale.eval())
Project: dnnQuery | Author: richardxiong
def _variational_recurrent_dropout_value(
      self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise

    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
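
Dividing the kept values by keep_prob rescales them so the expected activation is unchanged (inverted dropout). A NumPy check (illustrative):

import numpy as np

keep_prob, value = 0.9, 2.0
mask = np.floor(keep_prob + np.random.uniform(size=1000000))
print((value / keep_prob * mask).mean())  # ~2.0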
Project: dnnQuery | Author: richardxiong
def _variational_recurrent_dropout_value(
      self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise

    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
Project: dnnQuery | Author: richardxiong
def _variational_recurrent_dropout_value(
      self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise

    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret
Project: KBOPrediction | Author: riceluxs1t
def dropout_selu(
        self,
        x,
        rate,
        alpha=DROP_ALPHA,
        fixedPointMean=0.0,
        fixedPointVar=1.0,
        noise_shape=None,
        seed=None,
        name=None,
        training=False):
        """Dropout to a value with rescaling."""

        def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
            keep_prob = 1.0 - rate
            x = ops.convert_to_tensor(x, name="x")
            if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
                raise ValueError("keep_prob must be a scalar tensor or a float in the "
                                 "range (0, 1], got %g" % keep_prob)
            keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
            keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

            alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
            alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

            if tensor_util.constant_value(keep_prob) == 1:
                return x

            noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
            random_tensor = keep_prob
            random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
            binary_tensor = math_ops.floor(random_tensor)
            ret = x * binary_tensor + alpha * (1 - binary_tensor)

            a = tf.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * tf.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

            b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
            ret = a * ret + b
            ret.set_shape(x.get_shape())
            return ret

        with ops.name_scope(name, "dropout", [x]) as name:
            return utils.smart_cond(training,
                                    lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                    lambda: array_ops.identity(x))