Python tensorflow.python.ops.math_ops module: multiply() example source code

The following 23 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.multiply().

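As a quick orientation before the project snippets, here is a minimal sketch (not from any of the projects below; assumes a TensorFlow 1.x environment) showing that math_ops.multiply is the element-wise product behind the public tf.multiply and the * operator:

# Minimal sketch: element-wise multiplication (assumes TensorFlow 1.x).
import tensorflow as tf
from tensorflow.python.ops import math_ops

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([4.0, 5.0, 6.0])
prod = math_ops.multiply(a, b)  # same op as tf.multiply / the * operator

with tf.Session() as sess:
    print(sess.run(prod))  # [ 4. 10. 18.]
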
Project: polyaxon    Author: polyaxon
def hinge_loss(weights=1.0, name='HingeLoss', scope=None, collect=True):
    """Hinge Loss.

    Args:
        weights: Coefficient for the loss, a `scalar`.
        name: Name of the op.
        scope: The scope for the operations performed in computing the loss.
        collect: Whether to add the loss to the losses collection.

    Returns:
        A scalar `Tensor` representing the loss value.

    Raises:
        ValueError: If `predictions` shape doesn't match `labels` shape, or `weights` is `None`.
    """

    def inner_loss(y_true, y_pred):
        all_ones = array_ops.ones_like(y_true)
        y_true = math_ops.subtract(2 * y_true, all_ones)
        losses = tf.nn.relu(math_ops.subtract(all_ones, math_ops.multiply(y_true, y_pred)))
        return losses

    return built_loss(inner_loss, weights, name, scope, collect)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _scale_losses(losses, weights):
  """Computes the scaled loss.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
      The `losses` are reduced (via `tf.reduce_sum`) until their rank matches
      that of `weights`, at which point the reduced `losses` are element-wise
      multiplied by `weights` and a final `reduce_sum` is computed on the result.
      Conceptually, this operation is equivalent to broadcasting (tiling)
      `weights` to be the same size as `losses`, performing an element-wise
      multiplication, and summing the result.

  Returns:
    A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
      `losses`.
  """
  # First, compute the sum of the losses over all elements:
  start_index = max(0, weights.get_shape().ndims)
  reduction_indices = list(range(start_index, losses.get_shape().ndims))
  reduced_losses = math_ops.reduce_sum(losses,
                                       reduction_indices=reduction_indices)
  reduced_losses = math_ops.multiply(reduced_losses, weights)
  return math_ops.reduce_sum(reduced_losses)
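
To make the reduction behavior concrete, here is a hand-rolled sanity check of the same idea using only public ops (a sketch, not part of the project; assumes TF 1.x):

import tensorflow as tf

losses = tf.ones([2, 3, 4])        # [batch_size, d1, d2]
weights = tf.constant([2.0, 0.5])  # [batch_size]

# Reduce losses over d1, d2 so its rank matches weights, then weight and sum.
reduced = tf.reduce_sum(losses, axis=[1, 2])  # shape [2], values [12., 12.]
scaled = tf.reduce_sum(reduced * weights)     # 2*12 + 0.5*12 = 30

with tf.Session() as sess:
    print(sess.run(scaled))  # 30.0
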
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
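
A quick numeric check of the 0/1 to -1/1 conversion and the hinge formula max(0, 1 - y * logit), using only public ops (a sketch; assumes TF 1.x):

import tensorflow as tf

logits = tf.constant([0.5, -2.0, 3.0])
labels = tf.constant([1.0, 0.0, 1.0])    # binary 0/1 labels

signed = 2 * labels - 1                  # -> [1., -1., 1.]
hinge = tf.nn.relu(1 - signed * logits)  # -> [0.5, 0., 0.]

with tf.Session() as sess:
    print(sess.run(hinge))
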
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.multiply(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i],
                                  array_ops.expand_dims(dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
Project: LIE    Author: EmbraceLife
def multiply(inputs, **kwargs):
  """Functional interface to the `Multiply` layer.

  Arguments:
      inputs: A list of input tensors (at least 2).
      **kwargs: Standard layer keyword arguments.

  Returns:
      A tensor, the element-wise product of the inputs.
  """
  return Multiply(**kwargs)(inputs)
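
Usage mirrors the public Keras merge layers; a sketch assuming the `Multiply` layer above behaves like `tf.keras.layers.Multiply`:

import tensorflow as tf

x1 = tf.keras.layers.Input(shape=(4,))
x2 = tf.keras.layers.Input(shape=(4,))
out = tf.keras.layers.multiply([x1, x2])  # element-wise product of the inputs
model = tf.keras.models.Model(inputs=[x1, x2], outputs=out)
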
Project: GORU-tensorflow    Author: jingli9111
def modrelu(z, b, comp):
    # modReLU nonlinearity: output = phase(z) * relu(|z| + b), where the
    # phase is sign(z) for real inputs and z/|z| for complex inputs.
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) + math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z/math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)

    return math_ops.multiply(step3, step2)
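
A numeric sketch of the real-valued branch (comp=False), written with public ops (assumes TF 1.x):

import tensorflow as tf

z = tf.constant([-2.0, 0.5, 3.0])
b = tf.constant([-1.0, -1.0, -1.0])

z_norm = tf.abs(z) + 0.00001
step2 = tf.nn.relu(z_norm + b)  # [1.00001, 0., 2.00001]
out = tf.sign(z) * step2        # [-1.00001, 0., 2.00001]

with tf.Session() as sess:
    print(sess.run(out))
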
Project: GORU-tensorflow    Author: jingli9111
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "goru_cell"):

            U_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            b_init = init_ops.constant_initializer(2.)
            mod_b_init = init_ops.constant_initializer(0.01)

            U = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size * 3],
                                dtype=tf.float32, initializer=U_init)
            Ux = math_ops.matmul(inputs, U)
            U_cx, U_rx, U_gx = array_ops.split(Ux, 3, axis=1)

            W_r = vs.get_variable("W_r", [self._hidden_size, self._hidden_size],
                                  dtype=tf.float32, initializer=U_init)
            W_g = vs.get_variable("W_g", [self._hidden_size, self._hidden_size],
                                  dtype=tf.float32, initializer=U_init)
            W_rh = math_ops.matmul(state, W_r)
            W_gh = math_ops.matmul(state, W_g)

            bias_r = vs.get_variable("bias_r", [self._hidden_size], dtype=tf.float32,
                                     initializer=b_init)
            bias_g = vs.get_variable("bias_g", [self._hidden_size], dtype=tf.float32)
            bias_c = vs.get_variable("bias_c", [self._hidden_size], dtype=tf.float32,
                                     initializer=mod_b_init)

            r_tmp = U_rx + W_rh + bias_r
            g_tmp = U_gx + W_gh + bias_g
            r = math_ops.sigmoid(r_tmp)
            g = math_ops.sigmoid(g_tmp)

            Unitaryh = _eunn_loop(state, self._capacity, self.diag_vec,
                                  self.off_vec, self.diag, self._fft)
            c = modrelu(math_ops.multiply(r, Unitaryh) + U_cx, bias_c, False)
            new_state = math_ops.multiply(g, state) + math_ops.multiply(1 - g, c)

        return new_state, new_state
Project: self-supervision    Author: gustavla
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance of based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing a the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
      shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with tf.variable_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
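
A numeric check of the shifted-moments algebra using only public ops (a sketch; assumes TF 1.x). For data [1, 2, 3, 4] and shift 2.5 it recovers mean 2.5 and population variance 1.25:

import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
shift = tf.constant(2.5)

counts = tf.to_float(tf.size(data))                   # 4
mean_ss = tf.reduce_sum(data - shift)                 # shifted sum
variance_ss = tf.reduce_sum(tf.square(data - shift))  # shifted squared sum

divisor = tf.reciprocal(counts)
shifted_mean = mean_ss * divisor
mean = shifted_mean + shift                                 # 2.5
variance = variance_ss * divisor - tf.square(shifted_mean)  # 1.25

with tf.Session() as sess:
    print(sess.run([mean, variance]))
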
Project: polyaxon    Author: polyaxon
def log_loss(weights=1.0, epsilon=1e-7, name='LogLoss', scope=None, collect=True):
    """Binary cross-entropy (log) loss; `epsilon` guards against log(0)."""

    def inner_loss(y_true, y_pred):
        losses = -math_ops.multiply(
            y_true,
            math_ops.log(y_pred + epsilon)) - math_ops.multiply(
            (1 - y_true), math_ops.log(1 - y_pred + epsilon))
        return losses

    return built_loss(inner_loss, weights, name, scope, collect)
Project: polyaxon    Author: polyaxon
def cosine_distance(dim, weights=1.0, name='CosineDistance', scope=None, collect=True):
    """Adds a cosine-distance loss to the training procedure.

    Note that the function assumes that `predictions` and `labels` are already unit-normalized.

    WARNING: `weights` also supports dimensions of 1, but the broadcasting does
    not work as advertised; you'll wind up with a weighted sum instead of a
    weighted mean for any but the last dimension. This will be cleaned up soon,
    so please do not rely on the current behavior for anything but the shapes
    documented for `weights` below.

    Args:
        dim: The dimension along which the cosine distance is computed.
        weights: Coefficient for the loss, a `scalar`.
        name: Name of the op.
        scope: The scope for the operations performed in computing the loss.
        collect: Whether to add the loss to the losses collection.

    Returns:
        A scalar `Tensor` representing the loss value.

    Raises:
        ValueError: If `predictions` shape doesn't match `labels` shape, or `weights` is `None`.
    """

    def inner_loss(y_true, y_pred):
        radial_diffs = math_ops.multiply(y_pred, y_true)
        losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(dim,), keep_dims=True)
        return losses

    return built_loss(inner_loss, weights, name, scope, collect)
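
For intuition, the inner loss on already unit-normalized vectors is 1 - cos(angle); a numeric sketch (assumes TF 1.x):

import tensorflow as tf

y_true = tf.constant([[1.0, 0.0]])  # unit-normalized
y_pred = tf.constant([[0.6, 0.8]])  # unit-normalized

radial_diffs = y_true * y_pred
loss = 1 - tf.reduce_sum(radial_diffs, axis=1, keep_dims=True)  # 1 - 0.6

with tf.Session() as sess:
    print(sess.run(loss))  # [[0.4]]
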
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testOpsCopy(self):

    with graph1.as_default():
      # Initialize a basic expression y = ax + b
      # (graph1 and graph2 are module-level graphs in the original test file.)
      x = array_ops.placeholder("float")
      a = variables.Variable(3.0)
      b = constant_op.constant(4.0)
      ax = math_ops.multiply(x, a)
      y = math_ops.add(ax, b)
      # Initialize session
      sess1 = session_lib.Session()
      # Initialize the Variable
      variables.global_variables_initializer().run(session=sess1)

    # First, initialize a as a Variable in graph2
    a1 = copy_elements.copy_variable_to_graph(a, graph2)

    # Initialize a1 in graph2
    with graph2.as_default():
      # Initialize session
      sess2 = session_lib.Session()
      # Initialize the Variable
      variables.global_variables_initializer().run(session=sess2)

    # Initialize a copy of y in graph2
    y1 = copy_elements.copy_op_to_graph(y, graph2, [a1])

    # Now that y has been copied, x must be copied too.
    # Get that instance
    x1 = copy_elements.get_copied_op(x, graph2)

    # Compare values of y & y1 for a sample input and check that they match
    v1 = y.eval({x: 5}, session=sess1)
    v2 = y1.eval({x1: 5}, session=sess2)

    assert v1 == v2
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope=scope)
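
A numeric check of the per-element log-loss formula with public ops (a sketch; assumes TF 1.x):

import tensorflow as tf

predictions = tf.constant([0.9, 0.2])
labels = tf.constant([1.0, 0.0])
epsilon = 1e-7

losses = -labels * tf.log(predictions + epsilon) \
         - (1 - labels) * tf.log(1 - predictions + epsilon)

with tf.Session() as sess:
    print(sess.run(losses))  # ~[0.105, 0.223]
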
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.multiply(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
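
A usage sketch with public ops and made-up shapes (the variable names here are illustrative, not from the original; assumes TF 1.x):

import tensorflow as tf

feature_size, n_classes = 16, 3
x = tf.placeholder(tf.float32, [None, feature_size])
y = tf.placeholder(tf.float32, [None, n_classes])  # one-hot labels
w = tf.Variable(tf.zeros([feature_size, n_classes]))
b = tf.Variable(tf.zeros([n_classes]))

logits = tf.nn.xw_plus_b(x, w, b)
probs = tf.nn.softmax(logits)
loss = tf.losses.softmax_cross_entropy(y, logits)
preds = tf.argmax(probs, axis=1)  # class predictions, per the docstring
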
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _multi_head(heads, loss_weights=None):
  """Creates a MultiHead stemming from same logits/hidden layer.

  Args:
    heads: list of _Head objects.
    loss_weights: optional list of weights to be used to combine losses from
        each head. All losses are weighted equally if not provided.

  Returns:
    A _Head instance that combines multiple heads.

  Raises:
    ValueError: if heads and loss_weights have different size.
  """
  if loss_weights:
    if len(loss_weights) != len(heads):
      raise ValueError("heads and loss_weights must have same size")

  def _weighted_loss_combiner(losses):
    if loss_weights:
      if len(losses) != len(loss_weights):
        raise ValueError("losses and loss_weights must have same size")
      weighted_losses = []
      for loss, weight in zip(losses, loss_weights):
        weighted_losses.append(math_ops.multiply(loss, weight))
      return math_ops.add_n(weighted_losses)
    else:
      return math_ops.add_n(losses)

  return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)


# TODO(zakaria): Make the classes public once we are ready for users to subclass
#   them.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _weighted_loss(loss, weight):
  """Returns cumulative weighted loss as 1d `Tensor`."""
  with ops.name_scope(None, "weighted_loss", (loss, weight)) as name:
    return math_ops.multiply(
        array_ops.reshape(loss, shape=(-1,)),
        array_ops.reshape(weight, shape=(-1,)),
        name=name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _weighted_loss(self, loss, weight_tensor):
    """Returns cumulative weighted loss."""
    unweighted_loss = array_ops.reshape(loss, shape=(-1,))
    weighted_loss = math_ops.multiply(unweighted_loss,
                                      array_ops.reshape(
                                          weight_tensor, shape=(-1,)))
    return weighted_loss
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _broadcast_weights(weights, values):
  """Broadcast `weights` to the same shape as `values`.

  This returns a version of `weights` following the same broadcast rules as
  `mul(weights, values)`. When computing a weighted average, use this function
  to broadcast `weights` before summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.

  Args:
    weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
      must be broadcastable to `values` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `values` dimension).
    values: `Tensor` of any shape.

  Returns:
    `weights` broadcast to `values` shape.
  """
  with ops.name_scope(None, 'broadcast_weights', (values, weights)) as scope:
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
      return weights
    with ops.control_dependencies((_assert_weights_rank(weights, values),)):
      return math_ops.multiply(
          weights, array_ops.ones_like(values), name=scope)
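
A sketch of the broadcast and the weighted-average recipe from the docstring (assumes TF 1.x):

import tensorflow as tf

values = tf.ones([2, 3])
weights = tf.constant([[2.0], [0.5]])  # same rank, broadcastable dims

# Multiplying by ones_like(values) materializes the tiled weights.
broadcast = weights * tf.ones_like(values)  # shape [2, 3]

weighted_avg = tf.reduce_sum(weights * values) / tf.reduce_sum(broadcast)

with tf.Session() as sess:
    print(sess.run(broadcast))     # [[2. 2. 2.] [0.5 0.5 0.5]]
    print(sess.run(weighted_avg))  # 1.0, since every value is 1
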
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
                 matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
            bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
                if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.multiply(is_correct, weights)
      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
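
A numeric sketch of the weighted branch (assumes TF 1.x); a weight of 0 drops an example from both numerator and denominator:

import tensorflow as tf

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 0, 0, 1])
weights = tf.constant([1.0, 1.0, 2.0, 0.0])

is_correct = tf.cast(tf.equal(predictions, labels), tf.float32)  # [1,1,0,1]
weighted = is_correct * weights                                  # [1,1,0,0]
acc = tf.reduce_sum(weighted) / tf.reduce_sum(weights)           # 2/4 = 0.5

with tf.Session() as sess:
    print(sess.run(acc))  # 0.5
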
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = array_ops.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = array_ops.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, math_ops.add, core.add),
        ('sub', operator.sub, math_ops.subtract, core.sub),
        ('mul', operator.mul, math_ops.multiply, core.mul),
        ('div', operator.truediv, math_ops.div, core.div),
        ('mod', operator.mod, math_ops.mod, core.mod),
        ('pow', operator.pow, math_ops.pow, core.pow_function),
        ('equal', None, math_ops.equal, core.equal),
        ('less', operator.lt, math_ops.less, core.less),
        ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
        ('not_equal', None, math_ops.not_equal, core.not_equal),
        ('greater', operator.gt, math_ops.greater, core.greater),
        ('greater_equal', operator.ge, math_ops.greater_equal,
         core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
  effect, tiled to match the size of `losses`. Following this effective tile,
  the total number of present elements is the number of non-zero weights.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is True, the value is returned as a tensor of size
      [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # If weights is a scalar, it's easy to compute:
  if weights.get_shape().ndims == 0:
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                    0.0, num_per_batch)
    num_per_batch = math_ops.multiply(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

  # First, count the number of nonzero weights:
  if weights.get_shape().ndims >= 1:
    reduction_indices = list(range(1, weights.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weights, 0)),
        reduction_indices=reduction_indices)

  # Next, determine the number of elements that weights would broadcast to:
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weights.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

  num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
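
Working through the docstring's own example shapes with public ops (a sketch; assumes TF 1.x): losses of shape [4, 5, 6, 3] and weights of shape [4, 5]:

import tensorflow as tf

losses = tf.ones([4, 5, 6, 3])
weights = tf.constant([[1.0, 0.0, 2.0, 0.0, 3.0]] * 4)  # shape [4, 5]

# 3 of the 5 weights per batch element are nonzero.
num_nonzero = tf.reduce_sum(tf.to_float(tf.not_equal(weights, 0)), axis=1)

# Each weight broadcasts over the remaining dims of losses: 6 * 3 = 18.
num_per_batch = num_nonzero * 18.0  # [54., 54., 54., 54.]

with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(num_per_batch)))  # 216.0
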
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified([
        'example_labels', 'example_weights', 'sparse_features', 'dense_features'
    ], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = math_ops.cast(
          self._linear_predictions(examples), dtypes.float64)
      labels = math_ops.cast(
          internal_convert_to_tensor(examples['example_labels']),
          dtypes.float64)
      weights = math_ops.cast(
          internal_convert_to_tensor(examples['example_weights']),
          dtypes.float64)

      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.multiply(
            sigmoid_cross_entropy_with_logits(labels=labels,
                                              logits=predictions),
            weights)) / math_ops.reduce_sum(weights)

      if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = nn_ops.relu(
            math_ops.subtract(all_ones,
                              math_ops.multiply(adjusted_labels, predictions)))
        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
      err = math_ops.subtract(labels, predictions)

      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, the default value of
      `dense_tensor`'s dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Special case: TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t, math_ops.cast(ignore_value, dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.stack(
          _multiplier_helper(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_multipliers),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)
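
The same conversion can be sketched with public ops (assumes TF 1.x); tf.where on the "not ignored" mask yields the indices directly:

import tensorflow as tf

dense = tf.constant([[1, 0, 2],
                     [0, 0, 3]])

indices = tf.where(tf.not_equal(dense, 0))  # [[0 0] [0 2] [1 2]]
values = tf.gather_nd(dense, indices)       # [1 2 3]
sparse = tf.SparseTensor(indices, values,
                         tf.shape(dense, out_type=tf.int64))

with tf.Session() as sess:
    print(sess.run(sparse))
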
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    with ops.control_dependencies((_assert_weights_rank(weights, values),)):
      values = math_ops.multiply(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op