Python tensorflow.python.ops.math_ops module: argmax() code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.math_ops.argmax().
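Before the examples, a minimal usage sketch (assuming TensorFlow 1.x with a `Session`; the values are illustrative) of what `math_ops.argmax` computes:

import tensorflow as tf
from tensorflow.python.ops import math_ops

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.2]])
# argmax along axis 1 returns the column index of each row's largest entry,
# as an int64 tensor.
indices = math_ops.argmax(logits, 1)
with tf.Session() as sess:
    print(sess.run(indices))  # [1 0]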

Project: tensorflow_seq2seq_chatbot    Author: higepon    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.
  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.
  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function
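For context, a hedged sketch of how a loop_function like this is typically wired into a decoder; this assumes the TF 1.x `tf.contrib.legacy_seq2seq` API, and `embedding`, `decoder_inputs`, `initial_state`, and `cell` are illustrative placeholders:

loop_fn = _extract_argmax_and_embed(embedding, output_projection=None,
                                    update_embedding=False)
# At inference time the decoder feeds each step's embedded argmax back in,
# ignoring the ground-truth decoder_inputs after the first symbol.
outputs, state = tf.contrib.legacy_seq2seq.rnn_decoder(
    decoder_inputs, initial_state, cell, loop_function=loop_fn)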
Project: Question-Answering    Author: MurtyShikhar    | project source | file source
def hardmax(logits, name=None):
  """Returns batched one-hot vectors.

  The depth index containing the `1` is that of the maximum logit value.

  Args:
    logits: A batch tensor of logit values.
    name: Name to use when creating ops.
  Returns:
    A batched one-hot tensor.
  """
  with ops.name_scope(name, "Hardmax", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    if logits.get_shape()[-1].value is not None:
      depth = logits.get_shape()[-1].value
    else:
      depth = array_ops.shape(logits)[-1]
    return array_ops.one_hot(
        math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
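A small worked example of what `hardmax` produces, with assumed values:

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.2]])
one_hot = hardmax(logits)
# one_hot evaluates to [[0., 1., 0.],
#                       [1., 0., 0.]]  (a 1 at each row's argmax, dtype of logits)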
Project: opinatt    Author: epochx    | project source | file source
def _extract_argmax_and_one_hot(one_hot_size,
                                output_projection=None):
  """Get a loop_function that extracts the previous symbol and build a one-hot vector for it.

  Args:
    one_hot_size: total size of one-hot vector.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and added B.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = tf.one_hot(prev_symbol, one_hot_size)
    return emb_prev

  return loop_function
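The one-hot variant replaces the embedding lookup with `tf.one_hot`; a sketch of that step alone, with assumed values:

prev_symbol = tf.constant([1, 0], dtype=tf.int64)  # e.g. the argmax ids
emb_prev = tf.one_hot(prev_symbol, 3)
# emb_prev evaluates to [[0., 1., 0.],
#                        [1., 0., 0.]]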
Project: LIE    Author: EmbraceLife    | project source | file source
def predict_classes(self, x, batch_size=32, verbose=1):
        """Generate class predictions for the input samples.

        The input samples are processed batch by batch.

        Arguments:
            x: input data, as a Numpy array or list of Numpy arrays
                (if the model has multiple inputs).
            batch_size: integer.
            verbose: verbosity mode, 0 or 1.

        Returns:
            A numpy array of class predictions.
        """
        proba = self.predict(x, batch_size=batch_size, verbose=verbose)
        if proba.shape[-1] > 1:
          return proba.argmax(axis=-1)
        else:
          return (proba > 0.5).astype('int32')
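To illustrate the two branches above with NumPy (values assumed):

import numpy as np

proba = np.array([[0.1, 0.7, 0.2]])   # multi-class probabilities
proba.argmax(axis=-1)                 # -> array([1])

proba = np.array([[0.8]])             # single sigmoid output
(proba > 0.5).astype('int32')         # -> array([[1]], dtype=int32)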
Project: lsdc    Author: febert    | project source | file source
def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    predictions = {prediction_key.PredictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
          logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.PROBABILITIES] = nn.softmax(
        logits)
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
        logits, 1)

    return predictions
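The `logits_dimension == 1` branch pads the single binary logit with a zero column so that softmax and argmax behave as in the two-class case; a sketch with assumed values (note this snippet uses the old TF `concat(axis, values)` argument order):

logits = tf.constant([[2.0]])  # one logit per example
two_class = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
# two_class -> [[0.0, 2.0]]; nn.softmax ~ [[0.12, 0.88]]; argmax -> [1]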
Project: seqGan_chatbot    Author: zpppy    | project source | file source
def _argmax_or_mcsearch(embedding, output_projection=None, update_embedding=True, mc_search=False):
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])


        if isinstance(mc_search, bool):
            # tf.multinomial samples a symbol id from the distribution in prev;
            # reshape to [-1] flattens it to a vector. Otherwise take the greedy argmax.
            prev_symbol = tf.reshape(tf.multinomial(prev, 1), [-1]) if mc_search else math_ops.argmax(prev, 1)
        else:
            prev_symbol = tf.cond(mc_search, lambda: tf.reshape(tf.multinomial(prev, 1), [-1]), lambda: tf.argmax(prev, 1))


        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        # Optionally stop the gradient from flowing back through the embedding.
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
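To make the greedy and Monte-Carlo branches concrete, a minimal sketch (TF 1.x; `tf.multinomial` draws sample indices from unnormalized log-probabilities):

logits = tf.constant([[0.1, 2.0, 0.3]])
greedy = math_ops.argmax(logits, 1)                    # deterministic: [1]
sampled = tf.reshape(tf.multinomial(logits, 1), [-1])  # stochastic draw, usually [1]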
Project: seqGan_chatbot    Author: zpppy    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None, update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)  # greedily pick the highest-scoring symbol
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)  # embed the predicted symbol
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function
Project: Deep-Reinforcement-Learning-for-Dialogue-Generation-in-tensorflow    Author: liuyuemaicha    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None, update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function
Project: emoatt    Author: epochx    | project source | file source
def _extract_argmax_and_one_hot(one_hot_size,
                                output_projection=None):
  """Get a loop_function that extracts the previous symbol and build a one-hot vector for it.

  Args:
    one_hot_size: total size of one-hot vector.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and added B.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = tf.one_hot(prev_symbol, one_hot_size)
    return emb_prev

  return loop_function
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.PROBABILITIES:
              nn.softmax(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              math_ops.argmax(
                  logits, 1, name=prediction_key.PredictionKey.CLASSES)
      }
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testIrisAll(self):
    iris = base.load_iris()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=logistic_model_no_mode_fn))
    est.fit(iris.data, iris.target, steps=100)
    scores = est.score(
        x=iris.data,
        y=iris.target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = est.predict(x=iris.data)
    predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
    self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
    self.assertAllClose(predictions['class'], predictions_class)
    self.assertAllClose(
        predictions['class'], np.argmax(
            predictions['prob'], axis=1))
    other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
    self.assertAllClose(scores['accuracy'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testIrisAllDictionaryInput(self):
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    iris_data = {'input': iris.data}
    iris_target = {'labels': iris.target}
    est.fit(iris_data, iris_target, steps=100)
    scores = est.evaluate(
        x=iris_data,
        y=iris_target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = list(est.predict(x=iris_data))
    predictions_class = list(est.predict(x=iris_data, outputs=['class']))
    self.assertEqual(len(predictions), iris.target.shape[0])
    classes_batch = np.array([p['class'] for p in predictions])
    self.assertAllClose(classes_batch,
                        np.array([p['class'] for p in predictions_class]))
    self.assertAllClose(
        classes_batch,
        np.argmax(
            np.array([p['prob'] for p in predictions]), axis=1))
    other_score = _sklearn.accuracy_score(iris.target, classes_batch)
    self.assertAllClose(other_score, scores['accuracy'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)
Project: seq2seq    Author: google    | project source | file source
def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids
Project: seq2seq    Author: google    | project source | file source
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
Project: seq2seq    Author: google    | project source | file source
def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D `int32` tensor or `end_token`
        is not a scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))

    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    self._start_inputs = self._embedding_fn(self._start_tokens)
Project: seq2seq    Author: google    | project source | file source
def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    # Outputs are logits, use argmax to get the most probable id
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    sample_ids = math_ops.cast(
        math_ops.argmax(outputs, axis=-1), dtypes.int32)
    return sample_ids
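Note the cast: `argmax` returns int64, while this helper is expected to emit int32 sample ids for the subsequent embedding lookup. A sketch with assumed values:

outputs = tf.constant([[0.2, 1.7, 0.1]])  # logits for one batch element
sample_ids = math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)
# sample_ids -> [1]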
Project: Biseq2Seq_NLG    Author: MaZhiyuanBUAA    | project source | file source
def _extract_argmax_and_embed(embedding,
                              output_projection=None,
                              update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """

  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev

  return loop_function
Project: opinatt    Author: epochx    | project source | file source
def _extract_argmax_and_embed(embedding,
                              output_projection=None,
                              update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev

  return loop_function
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D `int32` tensor or `end_token`
        is not a scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))

    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    self._start_inputs = self._embedding_fn(self._start_tokens)
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    # Outputs are logits, use argmax to get the most probable id
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    sample_ids = math_ops.cast(
        math_ops.argmax(outputs, axis=-1), dtypes.int32)
    return sample_ids
Project: PTTChatBot_DL2017    Author: thisray    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None, update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.

    Args:
        embedding: embedding tensor for symbols.
        output_projection: None or a pair (W, B). If provided, each fed previous
            output will first be multiplied by W and have B added.
        update_embedding: Boolean; if False, the gradients will not propagate
            through the embeddings.

    Returns:
        A loop function.
    """

    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev

    return loop_function
Project: joint-slu-lm    Author: HadoopIt    | project source | file source
def _extract_argmax_and_embed(embedding, DNN_at_output, output_projection, forward_only=False, update_embedding=True):
  def loop_function(prev, _):
    if DNN_at_output is True:
      prev = multilayer_perceptron_with_initialized_W(prev, output_projection, forward_only=forward_only)
    else:
      prev = linear_transformation_with_initialized_W(prev, output_projection, forward_only=forward_only)
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    return prev, emb_prev
  return loop_function
Project: deep-text-corrector    Author: atpaino    | project source | file source
def apply_input_bias_and_extract_argmax_fn_factory(input_bias):
    """

    :param encoder_inputs: list of length equal to the input bucket
    length of 1-D tensors (of length equal to the batch size) whose
    elements consist of the token index of each sample in the batch
    at a given index in the input.
    :return:
    """

    def fn_factory(embedding, output_projection=None, update_embedding=True):
        """Get a loop_function that extracts the previous symbol and embeds it.

        Args:
          embedding: embedding tensor for symbols.
          output_projection: None or a pair (W, B). If provided, each fed previous
            output will first be multiplied by W and have B added.
          update_embedding: Boolean; if False, the gradients will not propagate
            through the embeddings.

        Returns:
          A loop function.
        """
        def loop_function(prev, _):
            prev = project_and_apply_input_bias(prev, output_projection,
                                                input_bias)

            prev_symbol = math_ops.argmax(prev, 1)
            # Note that gradients will not propagate through the second
            # parameter of embedding_lookup.
            emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
            if not update_embedding:
                emb_prev = array_ops.stop_gradient(emb_prev)
            return emb_prev, prev_symbol
        return loop_function

    return fn_factory
Project: deep-text-corrector    Author: atpaino    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.

    Args:
      embedding: embedding tensor for symbols.
      output_projection: None or a pair (W, B). If provided, each fed previous
        output will first be multiplied by W and have B added.
      update_embedding: Boolean; if False, the gradients will not propagate
        through the embeddings.

    Returns:
      A loop function.
    """
    def loop_function(prev, _):
        # prev is the decoder output from the previous time step.
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(
                prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev, prev_symbol
    return loop_function
Project: LIE    Author: EmbraceLife    | project source | file source
def argmax(x, axis=-1):
      """Returns the index of the maximum value along an axis.

      Arguments:
          x: Tensor or variable.
          axis: axis along which to perform the reduction.

      Returns:
          A tensor.
      """
      axis = _normalize_axis(axis, ndim(x))
      return math_ops.argmax(x, axis)
Project: LIE    Author: EmbraceLife    | project source | file source
def categorical_accuracy(y_true, y_pred):
      return K.cast(
          K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
Project: LIE    Author: EmbraceLife    | project source | file source
def sparse_categorical_accuracy(y_true, y_pred):
      return K.cast(
          K.equal(
              K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1),
                                             K.floatx())), K.floatx())
Project: LIE    Author: EmbraceLife    | project source | file source
def top_k_categorical_accuracy(y_true, y_pred, k=5):
      return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)


    # Aliases
Project: neural-chat    Author: henriblancke    | project source | file source
def _extract_argmax_and_embed(embedding,
                              output_projection=None,
                              update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.
    Args:
      embedding: embedding tensor for symbols.
      output_projection: None or a pair (W, B). If provided, each fed previous
        output will first be multiplied by W and have B added.
      update_embedding: Boolean; if False, the gradients will not propagate
        through the embeddings.
    Returns:
      A loop function.
    """

    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(
                prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev

    return loop_function
Project: lsdc    Author: febert    | project source | file source
def _predictions(logits, n_classes):
  """Returns predictions for the given logits and n_classes."""
  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = array_ops.reshape(
      math_ops.argmax(logits, 1), shape=(-1, 1))
  return predictions
Project: lsdc    Author: febert    | project source | file source
def testPredictAsIterable(self):
    """Tests predict() and predict_proba() call with as_iterable set to True."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[.9], [.1], [.1]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]

    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=feature_columns,
        hidden_units=[4, 4])

    classifier.fit(input_fn=_input_fn, steps=1)

    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    # Test the output of predict() and predict_proba() with as_iterable=True
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    predictions_proba = list(
        classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
    self.assertTrue(np.array_equal(predictions,
                                   np.argmax(predictions_proba, 1)))
Project: lsdc    Author: febert    | project source | file source
def testCustomMetrics(self):
    """Tests the use of custom metric."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target

    def _my_metric_op(predictions, targets):
      """Simply multiplies predictions and targets to return [1, 0 , 0]."""
      prediction_classes = math_ops.argmax(predictions, 1)
      return tf.mul(prediction_classes, tf.reshape(targets, [-1]))

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]

    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))

    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    metrics = {('my_metric', 'probabilities'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    self.assertListEqual([1, 0, 0], list(evaluate_output['my_metric']))
Project: lsdc    Author: febert    | project source | file source
def _convert_to_estimator_model_result(self, logits_fn_result):
    logits, loss, train_op = logits_fn_result
    return {
        Classifier.CLASS_OUTPUT:
            math_ops.argmax(logits, len(logits.get_shape()) - 1),
        Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
    }, loss, train_op
Project: lsdc    Author: febert    | project source | file source
def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Project: lsdc    Author: febert    | project source | file source
def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
Project: lsdc    Author: febert    | project source | file source
def _accuracy(probabilities, targets):
  predictions = math_ops.argmax(probabilities, 1)
  # undo one-hot
  labels = math_ops.argmax(targets, 1)
  return metric_ops.streaming_accuracy(predictions, labels)
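A small sketch of the "undo one-hot" step, with assumed values:

targets = tf.constant([[0., 1., 0.],
                       [1., 0., 0.]])  # one-hot labels
labels = math_ops.argmax(targets, 1)   # -> [1, 0], the original class ids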
Project: lsdc    Author: febert    | project source | file source
def _predictions(probabilities, unused_targets):
  return math_ops.argmax(probabilities, 1)
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
    ret = math_ops.cast(ret, self.dtype)
    ret.set_shape(self.get_batch_shape())
    return ret
Project: lsdc    Author: febert    | project source | file source
def _convert_to_estimator_model_result(self, logits_fn_result):
    logits, loss, train_op = logits_fn_result
    return {
        Classifier.CLASS_OUTPUT:
            math_ops.argmax(logits, len(logits.get_shape()) - 1),
        Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
    }, loss, train_op
Project: lsdc    Author: febert    | project source | file source
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    predictions = {}
    predictions[prediction_key.PredictionKey.LOGITS] = logits
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
        logits, 1)

    return predictions
Project: lsdc    Author: febert    | project source | file source
def _single_value_predictions(
    activations, sequence_length, target_column, predict_probabilities):
  """Maps `activations` from the RNN to predictions for single value models.

  If `predict_probabilities` is `False`, this function returns a `dict`
  containing single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
  is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
  value of this entry is a `Tensor` of probabilities with shape
  `[batch_size, num_classes]`.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, calculate predictions.
    predict_probabilities: A Python boolean, indicating whether probabilities
      should be returned. Should only be set to `True` for
      classification/logistic regression problems.
  Returns:
    A `dict` mapping strings to `Tensors`.
  """
  with ops.name_scope('SingleValuePrediction'):
    last_activations = select_last_activations(activations, sequence_length)
    if predict_probabilities:
      probabilities = target_column.logits_to_predictions(
          last_activations, proba=True)
      prediction_dict = {
          RNNKeys.PROBABILITIES_KEY: probabilities,
          RNNKeys.PREDICTIONS_KEY: math_ops.argmax(probabilities, 1)}
    else:
      predictions = target_column.logits_to_predictions(
          last_activations, proba=False)
      prediction_dict = {RNNKeys.PREDICTIONS_KEY: predictions}
    return prediction_dict
Project: lsdc    Author: febert    | project source | file source
def predict(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns predictions for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      axis: Axis on which to argmax (for classification).
            Last axis is used by default.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes or regression values (or an iterable of
      predictions if as_iterable is True).
    """
    results = self._estimator.predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)

    predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
                    else eval_metrics.INFERENCE_PRED_NAME)
    if as_iterable:
      return (x[predict_name] for x in results)
    else:
      return results[predict_name]
Project: lsdc    Author: febert    | project source | file source
def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Project: lsdc    Author: febert    | project source | file source
def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
Project: lsdc    Author: febert    | project source | file source
def _mode(self):
    ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
    ret = math_ops.cast(ret, self.dtype)
    ret.set_shape(self.get_batch_shape())
    return ret
Project: attention-ocr    Author: emedvedev    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.

    Args:
        embedding: embedding tensor for symbols.
        output_projection: None or a pair (W, B). If provided, each fed previous
            output will first be multiplied by W and have B added.
        update_embedding: Boolean; if False, the gradients will not propagate
            through the embeddings.

    Returns:
        A loop function.
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(
                    prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
Project: seq2seq_parser    Author: trangham283    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function
Project: seq2seq_parser    Author: trangham283    | project source | file source
def _extract_argmax_and_embed(embedding, output_projection=None,
                              update_embedding=True):
  """Get a loop_function that extracts the previous symbol and embeds it.

  Args:
    embedding: embedding tensor for symbols.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and have B added.
    update_embedding: Boolean; if False, the gradients will not propagate
      through the embeddings.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
  return loop_function