Python tensorflow.python.ops.array_ops module: ones_like() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.ones_like().

Project: antgo    Author: jianzfb    | Project Source | File Source
def _broadcast_weights(weights, values):
    """Broadcast `weights` to the same shape as `values`.
    This returns a version of `weights` following the same broadcast rules as
    `mul(weights, values)`. When computing a weighted average, use this function
    to broadcast `weights` before summing them; e.g.,
    `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
    Args:
      weights: `Tensor` whose shape is broadcastable to `values`.
      values: `Tensor` of any shape.
    Returns:
      `weights` broadcast to `values` shape.
    """
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
        return weights
    return math_ops.mul(
        weights, array_ops.ones_like(values), name='broadcast_weights')
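
A minimal usage sketch (not from the project) of the weighted-average pattern the docstring describes, written against the public `tf.*` API; the tensor values are purely illustrative:

```python
import tensorflow as tf

values = tf.constant([[1., 2.], [3., 4.]])   # shape [2, 2]
weights = tf.constant([[0.5], [1.0]])        # shape [2, 1], broadcastable to values
w = weights * tf.ones_like(values)           # what _broadcast_weights returns
weighted_avg = tf.reduce_sum(w * values) / tf.reduce_sum(w)  # -> 8.5 / 3 = 2.833...
```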


# =========================================================================== #
# TF Extended metrics: TP and FP arrays.
# =========================================================================== #
Project: deep-learning    Author: lbkchen    | Project Source | File Source
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
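
The closing `cond` is a common guard pattern: both branches are built into the graph, but only the selected one runs. A hedged standalone sketch of the same idea, assuming a float32 `values` tensor:

```python
import tensorflow as tf

def mean_or_sentinel(values):
  """Return the mean of `values`, or a large sentinel loss when it is empty."""
  mean = lambda: tf.reduce_mean(values)
  sentinel = lambda: tf.constant(1e7)  # big number so the loss still "decreases"
  return tf.cond(tf.greater(tf.shape(values)[0], 0), mean, sentinel)
```
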
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable.
           None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2,3)))
      >>> kvar_ones = K.ones_like(kvar)
      >>> K.eval(kvar_ones)
      array([[ 1.,  1.,  1.],
             [ 1.,  1.,  1.]], dtype=float32)
  ```
  """
  return array_ops.ones_like(x, dtype=dtype, name=name)

Project: SSD_tensorflow_VOC    Author: LevinJ    | Project Source | File Source
def _broadcast_weights(weights, values):
    """Broadcast `weights` to the same shape as `values`.
    This returns a version of `weights` following the same broadcast rules as
    `mul(weights, values)`. When computing a weighted average, use this function
    to broadcast `weights` before summing them; e.g.,
    `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
    Args:
      weights: `Tensor` whose shape is broadcastable to `values`.
      values: `Tensor` of any shape.
    Returns:
      `weights` broadcast to `values` shape.
    """
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
        return weights
    return math_ops.mul(
        weights, array_ops.ones_like(values), name='broadcast_weights')


# =========================================================================== #
# TF Extended metrics: TP and FP arrays.
# =========================================================================== #
Project: lsdc    Author: febert    | Project Source | File Source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, math_ops.select(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
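
A numeric sketch of the same logic using the public API (`tf.where` is the modern equivalent of the `math_ops.select` used here); the values are illustrative:

```python
import tensorflow as tf

totals = tf.constant([6., 0., 9.])
counts = tf.constant([2., 0., 3.])
# 0 where the denominator is 0, ordinary division everywhere else
safe_mean = tf.where(
    tf.greater(counts, 0),
    tf.divide(totals, tf.where(tf.equal(counts, 0),
                               tf.ones_like(counts), counts)),
    tf.zeros_like(totals))   # -> [3., 0., 3.]
```
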
Project: lsdc    Author: febert    | Project Source | File Source
def hinge_loss(logits, target, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    target: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `target` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, target]) as scope:
    logits.get_shape().assert_is_compatible_with(target.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    target = math_ops.to_float(target)
    all_ones = array_ops.ones_like(target)
    labels = math_ops.sub(2 * target, all_ones)
    losses = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
    return losses
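
A worked sketch of the label mapping: binary targets are shifted to {-1, +1} and the loss is max(0, 1 - y * logit). The values below are illustrative:

```python
import tensorflow as tf

target = tf.constant([0., 1.])
logits = tf.constant([0.5, 0.3])
all_ones = tf.ones_like(target)
labels = 2 * target - all_ones                   # {0, 1} -> {-1, +1}
losses = tf.nn.relu(all_ones - labels * logits)  # -> [1.5, 0.7]
```
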
Project: lsdc    Author: febert    | Project Source | File Source
def _broadcast_weights(weights, values):
  """Broadcast `weights` to the same shape as `values`.

  This returns a version of `weights` following the same broadcast rules as
  `mul(weights, values)`. When computing a weighted average, use this function
  to broadcast `weights` before summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.

  Args:
    weights: `Tensor` whose shape is broadcastable to `values`.
    values: `Tensor` of any shape.

  Returns:
    `weights` broadcast to `values` shape.
  """
  weights_shape = weights.get_shape()
  values_shape = values.get_shape()
  if (weights_shape.is_fully_defined() and
      values_shape.is_fully_defined() and
      weights_shape.is_compatible_with(values_shape)):
    return weights
  return math_ops.mul(
      weights, array_ops.ones_like(values), name='broadcast_weights')
Project: lsdc    Author: febert    | Project Source | File Source
def _sample_n(self, n, seed=None):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.base_distribution.sample_n(n=n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if lower_cutoff is not None:
        result_so_far = math_ops.select(result_so_far < lower_cutoff,
                                        lower_cutoff * ones, result_so_far)

      if upper_cutoff is not None:
        result_so_far = math_ops.select(result_so_far > upper_cutoff,
                                        upper_cutoff * ones, result_so_far)

      return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent  behavior for logits = inf/-inf.
    event = ops.convert_to_tensor(event, name="event")
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.
    # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
    # dynamic shapes are the same.
    if (not event.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        event.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(event) * logits
      event = array_ops.ones_like(logits) * event
    return -nn.sigmoid_cross_entropy_with_logits(logits, event)
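
The multiply-by-`ones_like` idiom in the last branch is a generic way to broadcast two tensors to a common shape; a standalone sketch with assumed shapes:

```python
import tensorflow as tf

event = tf.zeros([2, 3])                   # shape [2, 3]
logits = tf.constant([1., 2., 3.])         # shape [3]
logits_b = tf.ones_like(event) * logits    # broadcast to [2, 3]
event_b = tf.ones_like(logits_b) * event   # broadcast to [2, 3]
```
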
Project: lsdc    Author: febert    | Project Source | File Source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, math_ops.select(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Project: lsdc    Author: febert    | Project Source | File Source
def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.sub(2 * labels, all_ones)
    return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
Project: lsdc    Author: febert    | Project Source | File Source
def _broadcast_weights(weights, values):
  """Broadcast `weights` to the same shape as `values`.

  This returns a version of `weights` following the same broadcast rules as
  `mul(weights, values)`. When computing a weighted average, use this function
  to broadcast `weights` before summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.

  Args:
    weights: `Tensor` whose shape is broadcastable to `values`.
    values: `Tensor` of any shape.

  Returns:
    `weights` broadcast to `values` shape.
  """
  weights_shape = weights.get_shape()
  values_shape = values.get_shape()
  if (weights_shape.is_fully_defined() and
      values_shape.is_fully_defined() and
      weights_shape.is_compatible_with(values_shape)):
    return weights
  return math_ops.mul(
      weights, array_ops.ones_like(values), name='broadcast_weights')
Project: lsdc    Author: febert    | Project Source | File Source
def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent  behavior for logits = inf/-inf.
    event = ops.convert_to_tensor(event, name="event")
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.
    # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
    # dynamic shapes are the same.
    if (not event.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        event.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(event) * logits
      event = array_ops.ones_like(logits) * event
    return -nn.sigmoid_cross_entropy_with_logits(logits, event)
Project: Deep-Fashion    Author: TomPyonsuke    | Project Source | File Source
def _broadcast_weights(weights, values):
    """Broadcast `weights` to the same shape as `values`.
    This returns a version of `weights` following the same broadcast rules as
    `mul(weights, values)`. When computing a weighted average, use this function
    to broadcast `weights` before summing them; e.g.,
    `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
    Args:
      weights: `Tensor` whose shape is broadcastable to `values`.
      values: `Tensor` of any shape.
    Returns:
      `weights` broadcast to `values` shape.
    """
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
        return weights
    return math_ops.mul(
        weights, array_ops.ones_like(values), name='broadcast_weights')


# =========================================================================== #
# TF Extended metrics: TP and FP arrays.
# =========================================================================== #
Project: polyaxon    Author: polyaxon    | Project Source | File Source
def hinge_loss(weights=1.0, name='HingeLoss', scope=None, collect=True):
    """Hinge Loss.

    Args:
        weights: Coefficients for the loss a `scalar`.
        name: name of the op.
        scope: The scope for the operations performed in computing the loss.
        collect: add to losses collection.

    Returns:
        A scalar `Tensor` representing the loss value.

    Raises:
        ValueError: If `predictions` shape doesn't match `labels` shape, or `weights` is `None`.
    """

    def inner_loss(y_true, y_pred):
        all_ones = array_ops.ones_like(y_true)
        y_true = math_ops.subtract(2 * y_true, all_ones)
        losses = tf.nn.relu(math_ops.subtract(all_ones, math_ops.multiply(y_true, y_pred)))
        return losses

    return built_loss(inner_loss, weights, name, scope, collect)
Project: DAVIS-2016-Chanllege-Solution    Author: tangyuhao    | Project Source | File Source
def _broadcast_weights(weights, values):
    """Broadcast `weights` to the same shape as `values`.
    This returns a version of `weights` following the same broadcast rules as
    `mul(weights, values)`. When computing a weighted average, use this function
    to broadcast `weights` before summing them; e.g.,
    `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
    Args:
      weights: `Tensor` whose shape is broadcastable to `values`.
      values: `Tensor` of any shape.
    Returns:
      `weights` broadcast to `values` shape.
    """
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
        return weights
    return math_ops.mul(
        weights, array_ops.ones_like(values), name='broadcast_weights')


# =========================================================================== #
# TF Extended metrics: TP and FP arrays.
# =========================================================================== #
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _sample_n(self, n, seed=None):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.distribution.sample(n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if lower_cutoff is not None:
        result_so_far = array_ops.where(result_so_far < lower_cutoff,
                                        lower_cutoff * ones, result_so_far)

      if upper_cutoff is not None:
        result_so_far = array_ops.where(result_so_far > upper_cutoff,
                                        upper_cutoff * ones, result_so_far)

      return result_so_far
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent  behavior for logits = inf/-inf.
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.

    broadcast = lambda logits, event: (
        array_ops.ones_like(event) * logits,
        array_ops.ones_like(logits) * event)

    # First check static shape.
    if (event.get_shape().is_fully_defined() and
        logits.get_shape().is_fully_defined()):
      if event.get_shape() != logits.get_shape():
        logits, event = broadcast(logits, event)
    else:
      logits, event = control_flow_ops.cond(
          distribution_util.same_dynamic_shape(logits, event),
          lambda: (logits, event),
          lambda: broadcast(logits, event))
    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _log_prob(self, x):
    x = self._assert_valid_sample(x)
    # broadcast logits or x if need be.
    logits = self.logits
    if (not x.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        x.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
      x = array_ops.ones_like(logits, dtype=x.dtype) * x

    logits_shape = array_ops.shape(logits)
    if logits.get_shape().ndims == 2:
      logits_2d = logits
      x_2d = x
    else:
      logits_2d = array_ops.reshape(logits, [-1, self.event_size])
      x_2d = array_ops.reshape(x, [-1, self.event_size])
    ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
                                                    logits=logits_2d)
    ret = array_ops.reshape(ret, logits_shape)
    return ret
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def testEntropy(self):
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              array_ops.zeros_like(shift),
              array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project Source | File Source
def _testPDFShapes(self, mvn_dist, mu, sigma):
    with self.test_session() as sess:
      mvn = mvn_dist(mu, sigma)
      x = 2 * array_ops.ones_like(mu)

      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)

      mu_value = np.ones([3, 3, 2])
      sigma_value = np.zeros([3, 3, 2, 2])
      sigma_value[:] = np.identity(2)
      x_value = 2. * np.ones([3, 3, 2])
      feed_dict = {mu: mu_value, sigma: sigma_value}

      scipy_mvn = stats.multivariate_normal(
          mean=mu_value[(0, 0)], cov=sigma_value[(0, 0)])
      expected_log_pdf = scipy_mvn.logpdf(x_value[(0, 0)])
      expected_pdf = scipy_mvn.pdf(x_value[(0, 0)])

      log_pdf_evaled, pdf_evaled = sess.run([log_pdf, pdf], feed_dict=feed_dict)
      self.assertAllEqual([3, 3], log_pdf_evaled.shape)
      self.assertAllEqual([3, 3], pdf_evaled.shape)
      self.assertAllClose(expected_log_pdf, log_pdf_evaled[0, 0])
      self.assertAllClose(expected_pdf, pdf_evaled[0, 0])
Project: Question-Answering    Author: MurtyShikhar    | Project Source | File Source
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
  if memory_sequence_length is None:
    return score
  message = "All values in memory_sequence_length must be greater than zero."
  with ops.control_dependencies(
      [check_ops.assert_positive(memory_sequence_length, message=message)]):
    score_mask = array_ops.sequence_mask(
        memory_sequence_length, maxlen=array_ops.shape(score)[1])
    score_mask_values = score_mask_value * array_ops.ones_like(score)
    return array_ops.where(score_mask, score, score_mask_values)
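
A hedged sketch of the masking pattern: attention scores past each sequence's true length are overwritten with `score_mask_value` (typically -inf, so a later softmax assigns them roughly zero weight). The shapes and values below are illustrative:

```python
import tensorflow as tf

score = tf.constant([[.1, .2, .3, .4, .5],
                     [.5, .4, .3, .2, .1]])  # [batch=2, max_time=5]
lengths = tf.constant([3, 5])
mask = tf.sequence_mask(lengths, maxlen=5)   # True at valid positions
masked = tf.where(mask, score, float("-inf") * tf.ones_like(score))
```
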
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def compute_mask(self, inputs, mask=None):
  if mask is None:
    return None
  if not isinstance(mask, list):
    raise ValueError('`mask` should be a list.')
  if not isinstance(inputs, list):
    raise ValueError('`inputs` should be a list.')
  if len(mask) != len(inputs):
    raise ValueError('The lists `inputs` and `mask` '
                     'should have the same length.')
  if all([m is None for m in mask]):
    return None
  # Make a list of masks while making sure
  # the dimensionality of each mask
  # is the same as the corresponding input.
  masks = []
  for input_i, mask_i in zip(inputs, mask):
    if mask_i is None:
      # Input is unmasked. Append all 1s to masks,
      # but cast it to bool first
      masks.append(K.cast(K.ones_like(input_i), 'bool'))
    elif K.ndim(mask_i) < K.ndim(input_i):
      # Mask is smaller than the input, expand it
      masks.append(K.expand_dims(mask_i))
    else:
      masks.append(mask_i)
  concatenated = K.concatenate(masks, axis=self.axis)
  return K.all(concatenated, axis=-1, keepdims=False)
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def get_constants(self, inputs, training=None):
  constants = []
  if self.implementation != 0 and 0 < self.dropout < 1:
    input_shape = K.int_shape(inputs)
    input_dim = input_shape[-1]
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, int(input_dim)))

    def dropped_inputs():
      return K.dropout(ones, self.dropout)

    dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
    constants.append(dp_mask)
  else:
    constants.append(K.cast_to_floatx(1.))

  if 0 < self.recurrent_dropout < 1:
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, self.units))

    def dropped_inputs():  # pylint: disable=function-redefined
      return K.dropout(ones, self.recurrent_dropout)

    rec_dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
    constants.append(rec_dp_mask)
  else:
    constants.append(K.cast_to_floatx(1.))
  return constants
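
The `ones_like`-then-`tile` idiom builds a `[batch, dim]` tensor of ones whose batch dimension follows the input at run time; dropping it once and reusing the result keeps the dropout mask identical across timesteps. A sketch assuming a 3-D `[batch, time, features]` input and 32 units (both made up for illustration):

```python
from keras import backend as K

inputs = K.placeholder(shape=(None, 10, 8))               # [batch, time, features]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))   # [batch, 1]
mask = K.tile(ones, (1, 32))                              # [batch, units]
dropped = K.dropout(mask, 0.5)                            # one mask for all timesteps
```
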
Project: LIE    Author: EmbraceLife    | Project Source | File Source
def get_constants(self, inputs, training=None):
  constants = []
  if self.implementation != 0 and 0 < self.dropout < 1:
    input_shape = K.int_shape(inputs)
    input_dim = input_shape[-1]
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, int(input_dim)))

    def dropped_inputs():
      return K.dropout(ones, self.dropout)

    dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(3)
    ]
    constants.append(dp_mask)
  else:
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])

  if 0 < self.recurrent_dropout < 1:
    ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    ones = K.tile(ones, (1, self.units))

    def dropped_inputs():  # pylint: disable=function-redefined
      return K.dropout(ones, self.recurrent_dropout)

    rec_dp_mask = [
        K.in_train_phase(dropped_inputs, ones, training=training)
        for _ in range(3)
    ]
    constants.append(rec_dp_mask)
  else:
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
  return constants
Project: lsdc    Author: febert    | Project Source | File Source
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     targets, columns_to_variables):
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  if any(col.name is bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones_like(targets,
                                                        dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
Project: lsdc    Author: febert    | Project Source | File Source
def _mask_weights(mask=None, weights=None):
  """Mask a given set of weights.

  Elements are included when the corresponding `mask` element is `False`, and
  excluded otherwise.

  Args:
    mask: An optional, `bool` `Tensor`.
    weights: An optional `Tensor` whose shape matches `mask` if `mask` is not
      `None`.

  Returns:
    Masked weights if `mask` and `weights` are not `None`, weights equivalent to
    `mask` if `weights` is `None`, and otherwise `weights`.

  Raises:
    ValueError: If `weights` and `mask` are not `None` and have mismatched
      shapes.
  """
  if mask is not None:
    check_ops.assert_type(mask, dtypes.bool)
    if weights is None:
      weights = array_ops.ones_like(mask, dtype=dtypes.float32)
    weights = math_ops.cast(math_ops.logical_not(mask), weights.dtype) * weights

  return weights
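
A quick sketch of the semantics: `mask=True` means "exclude", so with no explicit weights the result is simply the negated mask cast to float:

```python
import tensorflow as tf

mask = tf.constant([True, False, False])
weights = tf.ones_like(mask, dtype=tf.float32)                    # default weights
weights = tf.cast(tf.logical_not(mask), weights.dtype) * weights  # -> [0., 1., 1.]
```
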
Project: lsdc    Author: febert    | Project Source | File Source
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumsum of a 1D tensor with defined shape by padding and convolving."""
  # Assumes tensor shape is fully defined.
  with ops.name_scope('strict_1d_cumsum', values=[tensor]):
    if len_tensor == 0:
      return constant_op.constant([])
    len_pad = len_tensor - 1
    x = array_ops.pad(tensor, [[len_pad, 0]])
    h = array_ops.ones_like(x)
    return _strict_conv1d(x, h)[:len_tensor]


# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See:  https://github.com/tensorflow/tensorflow/issues/813
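
Why pad-and-convolve yields a cumulative sum: sliding an all-ones window over the front-padded tensor sums exactly the first k elements at output position k. A numpy sketch of the equivalence (`np.convolve` in "full" mode plays the role of the padded convolution above):

```python
import numpy as np

tensor = np.array([1., 2., 3.])
h = np.ones_like(tensor)
cumsum = np.convolve(tensor, h, mode="full")[:len(tensor)]  # -> [1., 3., 6.]
```
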
Project: lsdc    Author: febert    | Project Source | File Source
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _log_survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.log_survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast sigma.
    sigma = self.sigma * array_ops.ones_like(self.mu)
    return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(sigma)
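
This is the closed-form differential entropy of a Gaussian, H(N(μ, σ²)) = ½·log(2πe) + log σ; multiplying `sigma` by `ones_like(self.mu)` merely broadcasts that scalar formula to the full batch shape.
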
Project: lsdc    Author: febert    | Project Source | File Source
def _std(self):
    return self.sigma * array_ops.ones_like(self.mu)
Project: lsdc    Author: febert    | Project Source | File Source
def _variance(self):
    alpha_sum = array_ops.expand_dims(self.alpha_sum, -1)
    normalized_alpha = self.alpha / alpha_sum
    variance = -math_ops.batch_matmul(
        array_ops.expand_dims(normalized_alpha, -1),
        array_ops.expand_dims(normalized_alpha, -2))
    variance = array_ops.matrix_set_diag(variance, normalized_alpha *
                                         (1. - normalized_alpha))
    shared_factor = (self.n * (alpha_sum + self.n) /
                     (alpha_sum + 1) * array_ops.ones_like(self.alpha))
    variance *= array_ops.expand_dims(shared_factor, -1)
    return variance
Project: lsdc    Author: febert    | Project Source | File Source
def _sample_n(self, n, seed=None):
    a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
    b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
    gamma1_sample = random_ops.random_gamma(
        [n,], a, dtype=self.dtype, seed=seed)
    gamma2_sample = random_ops.random_gamma(
        [n,], b, dtype=self.dtype, seed=seed)
    beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
    return beta_sample
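
The construction relies on the standard identity that if G1 ~ Gamma(a, 1) and G2 ~ Gamma(b, 1) are independent, then G1 / (G1 + G2) ~ Beta(a, b). A quick numpy check of the mean, with made-up parameters:

```python
import numpy as np

a, b, n = 2.0, 5.0, 200000
g1 = np.random.gamma(a, size=n)
g2 = np.random.gamma(b, size=n)
samples = g1 / (g1 + g2)
print(samples.mean())  # ~ a / (a + b) = 0.2857...
```
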
Project: lsdc    Author: febert    | Project Source | File Source
def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    logits = self.logits * array_ops.ones_like(
        array_ops.expand_dims(k, -1),
        dtype=self.logits.dtype)
    shape = array_ops.slice(array_ops.shape(logits), [0],
                            [array_ops.rank(logits) - 1])
    k *= array_ops.ones(shape, dtype=k.dtype)
    k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)
Project: lsdc    Author: febert    | Project Source | File Source
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: lsdc    Author: febert    | Project Source | File Source
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     labels, columns_to_variables):
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  if any(col.name is bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones_like(labels,
                                                        dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
Project: lsdc    Author: febert    | Project Source | File Source
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumsum of a 1D tensor with defined shape by padding and convolving."""
  # Assumes tensor shape is fully defined.
  with ops.name_scope('strict_1d_cumsum', values=[tensor]):
    if len_tensor == 0:
      return constant_op.constant([])
    len_pad = len_tensor - 1
    x = array_ops.pad(tensor, [[len_pad, 0]])
    h = array_ops.ones_like(x)
    return _strict_conv1d(x, h)[:len_tensor]


# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See:  https://github.com/tensorflow/tensorflow/issues/813
Project: lsdc    Author: febert    | Project Source | File Source
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
                 matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
            bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
                if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.mul(is_correct, weights)
      num_values = math_ops.mul(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
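
A worked sketch of the weighted branch (here `num_values` reduces to the weights themselves, since `ones_like(is_correct)` is all ones); the values are illustrative:

```python
import tensorflow as tf

predictions = tf.constant([1, 0, 1])
labels = tf.constant([1, 1, 1])
weights = tf.constant([1., 1., 2.])
is_correct = tf.cast(tf.equal(predictions, labels), tf.float32)          # [1., 0., 1.]
accuracy = tf.reduce_sum(is_correct * weights) / tf.reduce_sum(weights)  # 3/4 = 0.75
```
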
Project: lsdc    Author: febert    | Project Source | File Source
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to one.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(op, labeled_tensor.axes)
Project: lsdc    Author: febert    | Project Source | File Source
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _log_survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.log_survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project Source | File Source
def _mean(self):
    return self.mu * array_ops.ones_like(self.sigma)
Project: lsdc    Author: febert    | Project Source | File Source
def _std(self):
    return self.sigma * array_ops.ones_like(self.mu)
Project: lsdc    Author: febert    | Project Source | File Source
def _variance(self):
    alpha_sum = array_ops.expand_dims(self.alpha_sum, -1)
    normalized_alpha = self.alpha / alpha_sum
    variance = -math_ops.matmul(
        array_ops.expand_dims(normalized_alpha, -1),
        array_ops.expand_dims(normalized_alpha, -2))
    variance = array_ops.matrix_set_diag(variance, normalized_alpha *
                                         (1. - normalized_alpha))
    shared_factor = (self.n * (alpha_sum + self.n) /
                     (alpha_sum + 1) * array_ops.ones_like(self.alpha))
    variance *= array_ops.expand_dims(shared_factor, -1)
    return variance