Python tensorflow.python.ops.array_ops module, zeros_like() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.array_ops.zeros_like().
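Before the examples, a minimal sketch of what the op does (my own illustration, assuming TF 1.x graph mode): `zeros_like` builds a tensor with the same shape as its input, filled with zeros, optionally with a different dtype.

```python
import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
z = array_ops.zeros_like(x)                    # shape (2, 2), dtype float32
zi = array_ops.zeros_like(x, dtype=tf.int32)   # same shape, int32 zeros

with tf.Session() as sess:
    print(sess.run(z))    # [[0. 0.] [0. 0.]]
    print(sess.run(zi))   # [[0 0] [0 0]]
```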

Project: LIE    Author: EmbraceLife    | Project source | File source
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or Keras tensor.
      dtype: String, dtype of returned Keras variable.
           None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with zeros.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2,3)))
      >>> kvar_zeros = K.zeros_like(kvar)
      >>> K.eval(kvar_zeros)
      array([[ 0.,  0.,  0.],
             [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  return array_ops.zeros_like(x, dtype=dtype, name=name)

Project: lsdc    Author: febert    | Project source | File source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, math_ops.select(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
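The snippet below is my own check of `_safe_div`'s semantics (TF 1.x assumed; `math_ops.select` was later renamed `tf.where`): where the denominator is positive the quotient is returned, elsewhere 0, and the inner select swaps zeros for ones so the division itself never produces NaN/Inf that could leak into the gradient.

```python
import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])

safe = tf.where(den > 0,
                num / tf.where(tf.equal(den, 0), tf.ones_like(den), den),
                tf.zeros_like(num))

with tf.Session() as sess:
    print(sess.run(safe))  # [0.5  0.   0.75]
```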
Project: lsdc    Author: febert    | Project source | File source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return math_ops.select(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, math_ops.select(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Project: lsdc    Author: febert    | Project source | File source
def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    predictions = {prediction_key.PredictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
          logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.PROBABILITIES] = nn.softmax(
        logits)
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
        logits, 1)

    return predictions
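The `concat` with a zeros column is the standard trick for turning a single binary logit into two-class logits: the softmax of `[0, z]` gives exactly `sigmoid(z)` in the positive-class column, since e^z / (e^0 + e^z) = 1 / (1 + e^-z). A quick NumPy check (my own sketch, not from the project):

```python
import numpy as np

z = np.array([[-1.2], [0.0], [2.5]], dtype=np.float32)      # one logit per row
two_class = np.concatenate([np.zeros_like(z), z], axis=1)   # [0, z]

softmax = np.exp(two_class) / np.exp(two_class).sum(axis=1, keepdims=True)
sigmoid = 1.0 / (1.0 + np.exp(-z))

assert np.allclose(softmax[:, 1:], sigmoid)                 # positive-class column
```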
Project: tensorflow-kr    Author: tensorflowkorea    | Project source | File source
def _zero_out_grad(op, grad):
  """The gradients for `zero_out`.

  Args:
    op: The `zero_out` `Operation` that we are differentiating, which we can use
      to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out` op.

  Returns:
    Gradients with respect to the input of `zero_out`.
  """
  to_zero = op.inputs[0]
  shape = array_ops.shape(to_zero)
  index = array_ops.zeros_like(shape)
  first_grad = array_ops.reshape(grad, [-1])[0]
  to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
  return [to_zero_grad]  # List of one Tensor, since we have one input
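In the custom-op tutorial this gradient function is attached to its op with `ops.RegisterGradient`; a sketch of the registration (the decorator is real TF API, "ZeroOut" is the tutorial's op name):

```python
from tensorflow.python.framework import ops

@ops.RegisterGradient("ZeroOut")    # ties _zero_out_grad to the ZeroOut op
def _zero_out_grad(op, grad):
    ...  # body as above
```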
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testEntropy(self):
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              array_ops.zeros_like(shift),
              array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
Project: seq2seq    Author: google    | Project source | File source
def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)
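The `_zero_inputs` line is worth isolating: after transposing to time-major, `inputs[0, :]` is the first step's (batch, depth) slice, and `zeros_like` of it yields the all-zero input the helper emits once a sequence is finished. A minimal sketch (TF 1.x assumed):

```python
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, None, 8])  # (time, batch, depth), time-major
zero_step = tf.zeros_like(inputs[0, :])               # (batch, 8): all-zero padding input
```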
Project: cancer    Author: yancz1989    | Project source | File source
def _hungarian_grad(op, *args):
  return map(array_ops.zeros_like, op.inputs)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)
Project: GPflow    Author: GPflow    | Project source | File source
def _compute_gradients(tensor, var_list):
  grads = gradients.gradients(tensor, var_list)
  # tf.gradients sometimes returns `None` when it should return 0.
  return [
      grad if grad is not None else array_ops.zeros_like(var)
      for var, grad in zip(var_list, grads)
  ]
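A hedged demonstration of why the substitution is needed (TF 1.x): `tf.gradients` returns `None` for variables the target does not depend on, and the comprehension above replaces each `None` with a matching zeros tensor.

```python
import tensorflow as tf

v1 = tf.Variable(3.0)
v2 = tf.Variable(5.0)
loss = v1 * v1                              # does not touch v2

grads = tf.gradients(loss, [v1, v2])        # [<Tensor>, None]
grads = [g if g is not None else tf.zeros_like(v)
         for v, g in zip([v1, v2], grads)]  # None -> zeros, shapes preserved
```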
Project: LIE    Author: EmbraceLife    | Project source | File source
def get_initial_state(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = K.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = K.sum(initial_state, axis=1)
    shape = list(self.kernel_shape)
    shape[-1] = self.filters
    initial_state = self.input_conv(
        initial_state, K.zeros(tuple(shape)), padding=self.padding)

    initial_states = [initial_state for _ in range(2)]
    return initial_states
Project: LIE    Author: EmbraceLife    | Project source | File source
def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones += 1

      def dropped_inputs():
        return K.dropout(ones, self.dropout)

      dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
      shape = list(self.kernel_shape)
      shape[-1] = self.filters
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
      ones += 1.

      def dropped_inputs():  # pylint: disable=function-redefined
        return K.dropout(ones, self.recurrent_dropout)

      rec_dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(rec_dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Project: LIE    Author: EmbraceLife    | Project source | File source
def get_initial_state(self, inputs):
    # build an all-zero tensor of shape (samples, output_dim)
    initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
    initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
    initial_state = K.expand_dims(initial_state)  # (samples, 1)
    initial_state = K.tile(initial_state, [1, self.units])  # (samples, output_dim)
    initial_state = [initial_state for _ in range(len(self.states))]
    return initial_state
Project: SSD_tensorflow_VOC    Author: LevinJ    | Project source | File source
def decode_bboxes_layer(self, feat_localizations, anchors):
        """Convert SSD boxes from offsets relative to anchors into coordinates
        relative to the input width/height, for one single feature layer.

        Return:
          numpy array BatchesxHxWx4: ymin, xmin, ymax, xmax
        """

        l_shape = feat_localizations.shape
        # if feat_localizations.shape != anchors.shape:
        #     raise ValueError("feat_localizations and anchors should have "
        #                      "identical shapes and correspond to each other")

        # Reshape for easier broadcasting.
        feat_localizations = feat_localizations[np.newaxis,:]
        anchors = anchors[np.newaxis,:]

        xref = anchors[...,0]
        yref = anchors[...,1]
        wref = anchors[...,2]
        href = anchors[...,3]


        # Compute center, height and width
        cy = feat_localizations[..., 1] * href * self.prior_scaling[0] + yref
        cx = feat_localizations[..., 0] * wref * self.prior_scaling[1] + xref
        h = href * np.exp(feat_localizations[..., 3] * self.prior_scaling[2])
        w = wref * np.exp(feat_localizations[..., 2] * self.prior_scaling[3])

        # bboxes: ymin, xmin, ymax, xmax.
        bboxes = np.zeros_like(feat_localizations)
        bboxes[..., 0] = cy - h / 2.
        bboxes[..., 1] = cx - w / 2.
        bboxes[..., 2] = cy + h / 2.
        bboxes[..., 3] = cx + w / 2.
        bboxes = np.reshape(bboxes, l_shape)
        return bboxes
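A NumPy sanity check of the decoding formulas (my own sketch; the prior_scaling values are assumed SSD defaults): decoding all-zero offsets must return the anchor itself, converted from center/size to corner form.

```python
import numpy as np

prior_scaling = [0.1, 0.1, 0.2, 0.2]          # assumed defaults
xref, yref, wref, href = 0.5, 0.5, 0.2, 0.4   # one anchor: center + size
t = np.zeros(4)                               # zero offsets

cy = t[1] * href * prior_scaling[0] + yref    # -> yref
cx = t[0] * wref * prior_scaling[1] + xref    # -> xref
h = href * np.exp(t[3] * prior_scaling[2])    # -> href
w = wref * np.exp(t[2] * prior_scaling[3])    # -> wref

bbox = [cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2]  # ymin, xmin, ymax, xmax
assert np.allclose(bbox, [0.3, 0.4, 0.7, 0.6])
```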
Project: lsdc    Author: febert    | Project source | File source
def _create_slots(self):
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = collections.defaultdict(list)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        with ops.device(var.device):
          # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
          # fixed
          self._slots['unshrinked_' + name].append(var_ops.Variable(
              array_ops.zeros_like(var.initialized_value(), dtypes.float32),
              name=var.op.name + '_unshrinked/SDCAOptimizer'))
Project: lsdc    Author: febert    | Project source | File source
def _predictions(logits, n_classes):
  """Returns predictions for the given logits and n_classes."""
  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = array_ops.reshape(
      math_ops.argmax(logits, 1), shape=(-1, 1))
  return predictions
Project: lsdc    Author: febert    | Project source | File source
def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
Project: lsdc    Author: febert    | Project source | File source
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.base_distribution.log_cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _log_survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.base_distribution.log_survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale + array_ops.zeros_like(self.loc)
    return math.log(2.) + 1. + math_ops.log(scale)
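The returned expression is the differential entropy of a Laplace distribution with scale b; the `zeros_like` term only broadcasts `scale` to the full batch shape. For reference:

```latex
H\big(\mathrm{Laplace}(\mu, b)\big) = \log(2be) = \log 2 + 1 + \log b
```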
Project: lsdc    Author: febert    | Project source | File source
def _mean(self):
    return self.loc + array_ops.zeros_like(self.scale)
Project: lsdc    Author: febert    | Project source | File source
def _std(self):
    return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
Project: lsdc    Author: febert    | Project source | File source
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: lsdc    Author: febert    | Project source | File source
def _cdf(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    zeros = array_ops.zeros_like(x + self.a + self.b, dtype=self.dtype)
    ones = array_ops.ones_like(x + self.a + self.b, dtype=self.dtype)
    result_if_not_big = math_ops.select(
        x < self.a, zeros, (broadcasted_x - self.a) / self.range())
    return math_ops.select(x >= self.b, ones, result_if_not_big)
Project: lsdc    Author: febert    | Project source | File source
def _create_slots(self):
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = collections.defaultdict(list)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        with ops.device(var.device):
          # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
          # fixed
          self._slots['unshrinked_' + name].append(var_ops.Variable(
              array_ops.zeros_like(var.initialized_value(), dtypes.float32),
              name=var.op.name + '_unshrinked/SDCAOptimizer'))
Project: lsdc    Author: febert    | Project source | File source
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    predictions = {}
    predictions[prediction_key.PredictionKey.LOGITS] = logits
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
        logits, 1)

    return predictions
Project: lsdc    Author: febert    | Project source | File source
def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Project: lsdc    Author: febert    | Project source | File source
def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
Project: lsdc    Author: febert    | Project source | File source
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to zero.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(op, labeled_tensor.axes)
Project: lsdc    Author: febert    | Project source | File source
def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _log_survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.log_survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j >= upper_cutoff, neg_inf, result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Project: lsdc    Author: febert    | Project source | File source
def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale + array_ops.zeros_like(self.loc)
    return math.log(2.) + 1. + math_ops.log(scale)
Project: lsdc    Author: febert    | Project source | File source
def _mean(self):
    return self.loc + array_ops.zeros_like(self.scale)
Project: lsdc    Author: febert    | Project source | File source
def _std(self):
    return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
Project: lsdc    Author: febert    | Project source | File source
def _prob(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    return math_ops.select(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        math_ops.select(
            math_ops.logical_or(broadcasted_x < self.a,
                                broadcasted_x > self.b),
            array_ops.zeros_like(broadcasted_x),
            (1. / self.range()) * array_ops.ones_like(broadcasted_x)))
Project: lsdc    Author: febert    | Project source | File source
def _cdf(self, x):
    broadcasted_x = x * array_ops.ones(self.batch_shape())
    zeros = array_ops.zeros_like(x + self.a + self.b, dtype=self.dtype)
    ones = array_ops.ones_like(x + self.a + self.b, dtype=self.dtype)
    result_if_not_big = math_ops.select(
        x < self.a, zeros, (broadcasted_x - self.a) / self.range())
    return math_ops.select(x >= self.b, ones, result_if_not_big)
Project: lsdc    Author: febert    | Project source | File source
def _compute_gradients(tensor, var_list):
  grads = gradients.gradients(tensor, var_list)
  # tf.gradients sometimes returns `None` when it should return 0.
  return [grad if grad is not None else array_ops.zeros_like(var)
          for var, grad in zip(var_list, grads)]
Project: GORU-tensorflow    Author: jingli9111    | Project source | File source
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) + math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z/math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)

    return math_ops.multiply(step3, step2)
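The complex branch implements modReLU (Arjovsky et al.): the magnitude is shifted by a learned bias and passed through ReLU while the phase z/|z| is preserved. A NumPy sketch of the same computation (my own check, not from the project):

```python
import numpy as np

def modrelu_np(z, b, eps=1e-5):
    z_norm = np.abs(z) + eps                           # |z|, stabilized
    return np.maximum(z_norm + b, 0.0) * (z / z_norm)  # relu(|z|+b) * phase

z = np.array([1 + 1j, -2 + 0j, 0.1 - 0.1j])
print(modrelu_np(z, b=-0.5))   # small-magnitude entries collapse to 0
```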
Project: automatic-summarization    Author: mozilla    | Project source | File source
def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)
Project: TensorBoxPy3    Author: SMH17    | Project source | File source
def _hungarian_grad(op, *args):
    return map(array_ops.zeros_like, op.inputs)
Project: TensorBox    Author: Russell91    | Project source | File source
def _hungarian_grad(op, *args):       
    return map(array_ops.zeros_like, op.inputs)
Project: Optimization    Author: tdozat    | Project source | File source
def _zeros_slot(self, x_tm1, slot_name, op_name):
    """"""

    named_slots = self._slot_dict(slot_name)
    if x_tm1 not in named_slots:
      val = array_ops.zeros_like(x_tm1.initialized_value())
      named_slots[x_tm1] = Optimizer.create_slot(x_tm1, val, op_name+'/'+slot_name)
    return named_slots[x_tm1]

  #=============================================================
Project: Optimization    Author: tdozat    | Project source | File source
def _zeros_slot(self, x_tm1, slot_name, op_name):
    """"""

    named_slots = self._slot_dict(slot_name)
    if x_tm1 not in named_slots:
      val = array_ops.zeros_like(x_tm1.initialized_value())
      named_slots[x_tm1] = Optimizer.create_slot(x_tm1, val, op_name+'/'+slot_name)
    return named_slots[x_tm1]

  #=============================================================
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _create_slots(self):
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = collections.defaultdict(list)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        with ops.device(var.device):
          # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
          # fixed
          self._slots['unshrinked_' + name].append(
              var_ops.Variable(
                  array_ops.zeros_like(var.initialized_value(), dtypes.float32),
                  name=var.op.name + '_unshrinked/SDCAOptimizer'))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def get_binary_predictions_for_hinge(predictions):
  return math_ops.cast(
      math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
      dtype=dtypes.int32)


# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _one_class_to_two_class_logits(logits):
  return array_ops.concat((array_ops.zeros_like(logits), logits), 1)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)