Python tensorflow.python.ops.math_ops module: logical_or() code examples

The following 13 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.logical_or().
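
Before the project examples, here is a minimal, hedged usage sketch (not taken from any of the projects below; values are illustrative). math_ops.logical_or computes the elementwise OR of two boolean tensors and follows the usual broadcasting rules; in application code the public alias tf.logical_or or the overloaded | operator is normally used instead of the internal module.

import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([True, False, True, False])
b = tf.constant([True, True, False, False])
print(math_ops.logical_or(a, b))  # [ True  True  True False]
print(a | b)                      # same result via the overloaded `|` operator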

Project: sequencing    Author: SwordYork
def next_inputs(self, time, sample_ids=None, prev_finished=None):
        if sample_ids is None or self.teacher_rate > 0.:
            finished = tf.greater_equal(time + 1, self.sequence_length)
        else:
            finished = math_ops.logical_or(
                tf.greater_equal(time + 1, self.max_step),
                tf.equal(self.eos_id, sample_ids))

        if self.teacher_rate == 1. or (sample_ids is None):
            next_input_ids = self._input_tas.read(time)
            return finished, self.lookup(next_input_ids)

        if self.teacher_rate > 0.:
            # scheduled
            teacher_rates = tf.less_equal(
                tf.random_uniform(tf.shape(sample_ids), minval=0., maxval=1.),
                self.teacher_rate)
            teacher_rates = tf.to_int32(teacher_rates)

            next_input_ids = (teacher_rates * self._input_tas.read(time)
                              + (1 - teacher_rates) * sample_ids)
        else:
            next_input_ids = sample_ids

        return finished, self.lookup(next_input_ids)
Project: lsdc    Author: febert
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
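A hedged usage sketch for the helper above (not part of the lsdc source; tensor values are illustrative): sparse_boolean_mask keeps only the rows of a SparseTensor whose mask entry is True, and sparse_retain leaves the dense_shape unchanged.

import tensorflow as tf

st = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
                     values=[10, 20, 30],
                     dense_shape=[3, 2])
mask = tf.constant([True, False, True])
# Assuming sparse_boolean_mask above (and its internal TF imports) is in scope:
# masked = sparse_boolean_mask(st, mask)
# masked.indices -> [[0, 0], [2, 0]]
# masked.values  -> [10, 30]   (row 1 is dropped; dense_shape stays [3, 2])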
Project: lsdc    Author: febert
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: lsdc    Author: febert
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
Project: lsdc    Author: febert
def __or__(self, other):
    return logical_or(self, other)
Project: lsdc    Author: febert
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: sequencing    Author: SwordYork
def next_inputs(self, time, sample_ids, prev_finished):
        finished = math_ops.logical_or(
            tf.greater_equal(time + 1, tf.maximum(self.max_step,
                                                  self.max_sequence_length)),
            tf.equal(self.eos_id, sample_ids))
        next_finished = math_ops.logical_or(finished, prev_finished)
        return next_finished, self.lookup(sample_ids)
Project: sequencing    Author: SwordYork
def next_inputs(self, time, sample_ids):
        finished = math_ops.logical_or(
            tf.greater_equal(time + 1, self.max_step),
            tf.equal(self.eos_id, sample_ids))
        return finished, self.lookup(sample_ids)
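A hedged, self-contained sketch of the same idea (illustrative values; uses the public tf.logical_or alias rather than the project's math_ops import): a sequence is finished either when the step budget is exhausted or when it has emitted the end-of-sequence token.

import tensorflow as tf

time = tf.constant(5)
max_step = tf.constant(8)
eos_id = tf.constant(3)
sample_ids = tf.constant([3, 5, 3, 9])     # last sampled token of each sequence
finished = tf.logical_or(
    tf.greater_equal(time + 1, max_step),  # step budget exhausted (False here)
    tf.equal(eos_id, sample_ids))          # EOS emitted, checked per sequence
print(finished)                            # [ True False  True False]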
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def setUp(self):
    super(LogicalBinaryOpsTest, self).setUp()

    self.ops = [
        ('logical_and', operator.and_, math_ops.logical_and, core.logical_and),
        ('logical_or', operator.or_, math_ops.logical_or, core.logical_or),
        ('logical_xor', operator.xor, math_ops.logical_xor, core.logical_xor),
    ]
    self.test_lt_1 = self.original_lt < 10
    self.test_lt_2 = self.original_lt < 5
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def __or__(self, other):
    return logical_or(self, other)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return array_ops.where(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        array_ops.where(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def softplus_inverse(x, name=None):
  """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

  Mathematically this op is equivalent to:

  ```none
  softplus_inverse = log(exp(x) - 1.)
  ```

  Args:
    x: `Tensor`. Non-negative (not enforced), floating-point.
    name: A name for the operation (optional).

  Returns:
    `Tensor`. Has the same type/shape as input `x`.
  """
  with ops.name_scope(name, "softplus_inverse", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # We begin by deriving a more numerically stable softplus_inverse:
    # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
    # ==> exp{x} = 1 + exp{y}                                (1)
    # ==> y = Log[exp{x} - 1]                                (2)
    #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
    #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
    #       = Log[1 - exp{-x}] + x                           (3)
    # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
    # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
    # be zero.  To fix this, we use 1 - exp{-x} approx x for small x > 0.
    #
    # In addition to the numerically stable derivation above, we clamp
    # small/large values to be congruent with the logic in:
    # tensorflow/core/kernels/softplus_op.h
    #
    # Finally, we set the input to one whenever the input is too large or too
    # small. This ensures that no unchosen codepath is +/- inf. This is
    # necessary to ensure the gradient doesn't get NaNs. Recall that the
    # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
    # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
    # to overwrite `x` with ones only when we will never actually use this
    # value.  Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
    threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
    is_too_small = math_ops.less(x, np.exp(threshold))
    is_too_large = math_ops.greater(x, -threshold)
    too_small_value = math_ops.log(x)
    too_large_value = x
    # This `where` will ultimately be a NOP because we won't select this
    # codepath whenever we used the surrogate `ones_like`.
    x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
                        array_ops.ones_like(x), x)
    y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
    return array_ops.where(is_too_small, too_small_value,
                           array_ops.where(is_too_large, too_large_value, y))
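
A hedged, self-contained sketch (public TF ops, illustrative values) of why the derivation in the comments above matters: the "obvious" inverse, formula (2), overflows or underflows in float32, while formula (3), the core step of the helper above, stays finite and accurate.

import tensorflow as tf

x = tf.constant([1e-8, 90.0])
naive = tf.math.log(tf.math.exp(x) - 1.0)     # formula (2): yields [-inf, inf] in float32
stable = x + tf.math.log(-tf.math.expm1(-x))  # formula (3): approximately [-18.4, 90.0]
print(naive, stable)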