Python tensorflow.python.ops.math_ops module: to_int64() example source code

From open-source Python projects, we have extracted the following 37 code examples that illustrate how to use tensorflow.python.ops.math_ops.to_int64().

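Before the project examples, a minimal sketch of what to_int64() does (my own illustration, not taken from any project below): it casts a numeric tensor to tf.int64, and later TensorFlow releases deprecated it in favor of an explicit tf.cast.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.0, 2.0, 3.0])
y = math_ops.to_int64(x)   # dtype becomes tf.int64
z = tf.cast(x, tf.int64)   # the equivalent, non-deprecated spelling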
Project: opinatt    Author: epochx
def get_classification_loss(logits, targets, softmax_loss_function=None):
  bucket_outputs = logits
  if softmax_loss_function is None:
    assert len(bucket_outputs) == len(targets) == 1
    # We need to make target an int64-tensor and set its shape.
    bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
    crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(logits=bucket_outputs[0],
                                                               labels=bucket_target)
  else:
    assert len(bucket_outputs) == len(targets) == 1
    crossent = softmax_loss_function(bucket_outputs[0], targets[0])

  batch_size = array_ops.shape(targets[0])[0]
  loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)

  return loss
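
A hedged usage sketch for get_classification_loss above (shapes and values are my own assumptions; logits and targets must each be one-element lists, as the asserts require):

import tensorflow as tf

logits = [tf.zeros([8, 10])]                       # batch of 8, 10 classes
targets = [tf.constant([0, 1, 2, 3, 4, 5, 6, 7])]
loss = get_classification_loss(logits, targets)    # scalar mean cross-entropy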
Project: deep-learning    Author: lbkchen
def _get_eval_ops(self, features, targets, metrics):
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result
Project: lsdc    Author: febert
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
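
A quick check of _lengths_to_masks with assumed example values (TF 1.x session API):

import tensorflow as tf

masks = _lengths_to_masks(tf.constant([2, 4]), 5)
with tf.Session() as sess:
    print(sess.run(masks))
    # [[1. 1. 0. 0. 0.]
    #  [1. 1. 1. 1. 0.]]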
Project: lsdc    Author: febert
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and the first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name)
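
A usage sketch for assert_integer_form (my own values): the returned op passes when run on integer-valued floats and raises InvalidArgumentError otherwise.

import tensorflow as tf

ok = assert_integer_form(tf.constant([1.0, 2.0]))    # passes when run
bad = assert_integer_form(tf.constant([1.0, 2.5]))   # raises when run
with tf.Session() as sess:
    sess.run(ok)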
Project: lsdc    Author: febert
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
Project: lsdc    Author: febert
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and the first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name)
Project: LSTM-CRF-For-Named-Entity-Recognition    Author: zpppy
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """

  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))

  return masks
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      the error message and the first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name)
Project: qrn    Author: uwnlp
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.
  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.
  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
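
A hedged usage sketch for _reverse_seq (assumes a pre-1.0 TensorFlow where array_ops.pack/unpack still exist; values are my own):

import tensorflow as tf

seq = [tf.constant([[1., 1.], [2., 2.]]),   # t=0, batch of 2, depth 2
       tf.constant([[3., 3.], [4., 4.]])]   # t=1
rev = _reverse_seq(seq, tf.constant([1, 2]))
# Batch element 0 (length 1) is unchanged; batch element 1 (length 2)
# has its two time steps swapped.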
Project: opinatt    Author: epochx
def get_sequence_loss(logits, targets, weights, softmax_loss_function=None, per_example_loss=False):

  if per_example_loss:
    assert len(logits) == len(targets)
    # We need to make target an int64-tensor and set its shape.
    bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
    crossent = sequence_loss_by_example(logits, bucket_target, weights,
                                              softmax_loss_function=softmax_loss_function)
  else:
    assert len(logits) == len(targets)
    bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
    crossent = sequence_loss_by_batch(logits, bucket_target, weights,
                                      softmax_loss_function=softmax_loss_function)

  return crossent
Project: joint-slu-lm    Author: HadoopIt
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Project: lsdc    Author: febert
def _get_eval_ops(self, features, targets, metrics):
    features, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    _assert_float32(features)
    _assert_float32(labels)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result
Project: lsdc    Author: febert
def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape")
Project: lsdc    Author: febert
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`, except for the last dimension,
    which might be smaller. This contains only the entries equal to
    `selected_id`.
  """
  if isinstance(ids, (ops.SparseTensor, ops.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  return set_ops.set_intersection(filled_selected_id, ids)
Project: lsdc    Author: febert
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    predictions = {prediction_key.PredictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
          logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.PROBABILITIES] = math_ops.sigmoid(
        logits)
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.to_int64(
        math_ops.greater(logits, 0))
    return predictions
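
The PredictionKey.CLASSES entry reduces to thresholding the logits at zero; a standalone sketch of just that step (assumed values):

import tensorflow as tf
from tensorflow.python.ops import math_ops

logits = tf.constant([-1.3, 0.2, 2.5])
classes = math_ops.to_int64(math_ops.greater(logits, 0))   # -> [0, 1, 1]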
Project: lsdc    Author: febert
def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape")
Project: lsdc    Author: febert
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, shape=ids_shape)
Project: Sing_Par    Author: wanghm92
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Project: Parser-v1    Author: tdozat
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Project: ROLO    Author: Guanghan
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
               or nested tuples of tensors.
    lengths:   A `Tensor` of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(sequence)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
Project: emoatt    Author: epochx
def get_classification_loss(logits, targets, softmax_loss_function=None):
  bucket_outputs = logits
  if softmax_loss_function is None:
    assert len(bucket_outputs) == len(targets) == 1
    # We need to make target an int64-tensor and set its shape.
    bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
    crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
        logits=bucket_outputs[0], labels=bucket_target)
  else:
    assert len(bucket_outputs) == len(targets) == 1
    crossent = softmax_loss_function(bucket_outputs[0], targets[0])

  batch_size = array_ops.shape(targets[0])[0]
  loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)

  return loss
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.PROBABILITIES:
              math_ops.sigmoid(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              math_ops.to_int64(
                  math_ops.greater(logits, 0),
                  name=prediction_key.PredictionKey.CLASSES)
      }
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _build_estimator_for_export_tests(tmpdir):

  def _input_fn():
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(
            iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)

  feature_columns = [
      feature_column_lib.real_valued_column(
          'feature', dimension=4)
  ]

  est = linear.LinearRegressor(feature_columns)
  est.fit(input_fn=_input_fn, steps=20)

  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)

  # Hack in an op that uses an asset, in order to test asset export.
  # This is not actually valid, of course.
  def serving_input_fn_with_asset():
    features, labels, inputs = serving_input_fn()

    vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
    vocab_file = gfile.GFile(vocab_file_name, mode='w')
    vocab_file.write(VOCAB_FILE_CONTENT)
    vocab_file.close()
    hashtable = lookup.HashTable(
        lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
    features['bogus_lookup'] = hashtable.lookup(
        math_ops.to_int64(features['feature']))

    return input_fn_utils.InputFnOps(features, labels, inputs)

  return est, serving_input_fn_with_asset
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape")
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="rehsape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)

    return sparse_id_values
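
A worked example of the dimension > 1 index construction (my own values: batch_size=2, dimension=2, self.length == 3 buckets):

import tensorflow as tf

input_tensor = tf.constant([[0, 2], [1, 1]])
batch_size, dimension, length = 2, 2, 3
i1 = tf.reshape(tf.tile(tf.expand_dims(tf.range(0, batch_size), 1),
                        [1, dimension]), [-1])                    # [0, 0, 1, 1]
i2 = tf.tile(tf.range(0, dimension), [batch_size])                # [0, 1, 0, 1]
bucket_indices = tf.reshape(input_tensor, [-1]) + length * i2     # [0, 5, 1, 4]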
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = array_ops.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = array_ops.diag(self._covs[c, :])
        inverse = linalg_ops.matrix_inverse(cov + self._min_var)
        inv_cov = array_ops.tile(
            array_ops.expand_dims(inverse, 0),
            array_ops.stack([self._num_examples, 1, 1]))
        diff = array_ops.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = math_ops.matmul(diff, inv_cov)
        all_scores.append(
            math_ops.sqrt(
                math_ops.matmul(
                    m_left, array_ops.transpose(
                        diff, perm=[0, 2, 1]))))
      self._all_scores.append(
          array_ops.reshape(
              array_ops.concat(all_scores, 1),
              array_ops.stack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = array_ops.concat(self._all_scores, 0)
    assignments = array_ops.concat(self.assignments(), 0)
    rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
    indices = array_ops.concat(
        [array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
        1)
    self._scores = array_ops.gather_nd(self._all_scores, indices)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs
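
A usage sketch (assumed shapes): a sequence of 20 steps, batch of 4, 8 input features, mapped to 16 LSTM units.

import tensorflow as tf

inputs = tf.zeros([20, 4, 8])
outputs = ndlstm_base_dynamic(inputs, 16)   # shape [20, 4, 16]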
Project: DeepNovo    Author: nh2tran
def sequence_loss_per_sample(logits,
                             targets,
                             weights):
  """TODO(nh2tran): docstring.
  Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """

  #~ with tf.name_scope(name="sequence_loss_by_example",
                     #~ values=logits + targets + weights):
  with ops.op_scope(logits + targets + weights,
                    None,
                    "sequence_loss_by_example"):

    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      target = array_ops.reshape(math_ops.to_int64(target), [-1])
      crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(logits=logit,
                                                                 labels=target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)

    # average_across_timesteps:
    total_size = math_ops.add_n(weights)
    total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
    log_perps /= total_size

  return log_perps
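
A usage sketch for sequence_loss_per_sample (assumed shapes: 3 time steps, batch of 2, 5 decoder symbols):

import tensorflow as tf

logits = [tf.zeros([2, 5]) for _ in range(3)]
targets = [tf.constant([1, 2]) for _ in range(3)]
weights = [tf.ones([2]) for _ in range(3)]
log_perps = sequence_loss_per_sample(logits, targets, weights)   # shape [2]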
Project: joint-slu-lm    Author: HadoopIt
def generate_task_output(encoder_outputs, additional_inputs, encoder_state,
                         targets, sequence_length, num_decoder_symbols, weights,
                         buckets, softmax_loss_function=None,
                         per_example_loss=False, name=None, use_attention=False,
                         scope=None, DNN_at_output=False,
                         intent_results=None,
                         tagging_results=None,
                         train_with_true_label=True,
                         use_local_context=False,
                         forward_only=False):
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))

  all_inputs = encoder_outputs + targets + weights
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    if scope == 'intent':
        logits, regularizers, sampled_intents = intent_results
        sampled_tags = list()
    elif scope == 'tagging':
        logits, regularizers, sampled_tags = tagging_results
        sampled_intents = list()
    elif scope == 'lm':
      with variable_scope.variable_scope(scope + "_generate_sequence_output", reuse=None):
        task_inputs = []
        if use_local_context:
          print ('lm task: use sampled_tag_intent_emb as local context')
          task_inputs = [array_ops.concat(1, [additional_input, encoder_output]) for additional_input, encoder_output in zip(additional_inputs, encoder_outputs)]
        else:
          task_inputs = encoder_outputs        

        logits, _, regularizers = generate_sequence_output(task_inputs, 
                                                            encoder_state,
                                                            num_decoder_symbols,
                                                            sequence_length,
                                                            use_attention=use_attention,
                                                            DNN_at_output=DNN_at_output,
                                                            forward_only=forward_only)

        sampled_tags = list()
        sampled_intents = list()             

    if per_example_loss:
      assert len(logits) == len(targets)
      # We need to make target an int64-tensor and set its shape.
      bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
      crossent = sequence_loss_by_example(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
    else:
      assert len(logits) == len(targets)
      bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
      crossent = sequence_loss(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
    # Computed outside the branch so the value returned below is always defined.
    crossent_with_regularizers = crossent + 1e-4 * regularizers

  return logits, sampled_tags, sampled_intents, crossent_with_regularizers, crossent
Project: LIE    Author: EmbraceLife
def ctc_label_dense_to_sparse(labels, label_lengths):
      """Converts CTC labels from dense to sparse.

      Arguments:
          labels: dense CTC labels.
          label_lengths: length of the labels.

      Returns:
          A sparse tensor representation of the labels.
      """
      label_shape = array_ops.shape(labels)
      num_batches_tns = array_ops.stack([label_shape[0]])
      max_num_labels_tns = array_ops.stack([label_shape[1]])

      def range_less_than(_, current_input):
        return array_ops.expand_dims(
            math_ops.range(label_shape[1]), 0) < array_ops.fill(
                max_num_labels_tns, current_input)

      init = math_ops.cast(
          array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
      dense_mask = functional_ops.scan(
          range_less_than, label_lengths, initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[:, 0, :]

      label_array = array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
          label_shape)
      label_ind = array_ops.boolean_mask(label_array, dense_mask)

      batch_array = array_ops.transpose(
          array_ops.reshape(
              array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
              reverse(label_shape, 0)))
      batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
      indices = array_ops.transpose(
          array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

      vals_sparse = array_ops.gather_nd(labels, indices)

      return sparse_tensor.SparseTensor(
          math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
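
A usage sketch (my own values): two label sequences padded to length 4.

import tensorflow as tf

labels = tf.constant([[1, 2, 0, 0],
                      [3, 4, 5, 0]])
sparse = ctc_label_dense_to_sparse(labels, tf.constant([2, 3]))
# The result keeps the first 2 entries of row 0 and the first 3 of row 1.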
Project: SSD_tensorflow_VOC    Author: LevinJ
def __compute_AP(self, c_scores, c_tp, c_fp, c_num_gbboxes):
        aps_voc07 = {}
        aps_voc12 = {}
        for c in c_scores.keys():
            num_gbboxes = c_num_gbboxes[c]
            tp = c_tp[c]
            fp = c_fp[c]
            scores = c_scores[c]

            # Reshape data.
            num_gbboxes = math_ops.to_int64(num_gbboxes)
            scores = math_ops.to_float(scores)
            stype = tf.bool
            tp = tf.cast(tp, stype)
            fp = tf.cast(fp, stype)
            # Reshape TP and FP tensors and clean away 0 class values (difficult bboxes).
            scores = tf.reshape(scores, [-1])
            tp = tf.reshape(tp, [-1])
            fp = tf.reshape(fp, [-1])

            # Remove TP and FP both false.
            mask = tf.logical_or(tp, fp)

            rm_threshold = 1e-4
            mask = tf.logical_and(mask, tf.greater(scores, rm_threshold))
            scores = tf.boolean_mask(scores, mask)
            tp = tf.boolean_mask(tp, mask)
            fp = tf.boolean_mask(fp, mask)

            num_gbboxes = tf.reduce_sum(num_gbboxes)
            num_detections = tf.size(scores, out_type=tf.int32)

            # Precison and recall values.
            prec, rec = tfe.precision_recall(num_gbboxes, num_detections, tp, fp, scores)

            v = tfe.average_precision_voc07(prec, rec)
            aps_voc07[c] = v

            # Average precision VOC12.
            v = tfe.average_precision_voc12(prec, rec)

            aps_voc12[c] = v
        return aps_voc07, aps_voc12
Project: lsdc    Author: febert
def testMultiLabelTopKWithCustomMetrics(self):
    """Tests the cases where n_labels>1 top_k>1 and custom metrics on top_k."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[0, 1], [0, 1], [0, 1]], dtype=tf.int64)
      return features, target

    def _my_metric_op(predictions, targets):
      """Simply adds the predictions and targets."""
      return tf.add(math_ops.to_int64(predictions), targets)

    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]

    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=2,
        top_k=2,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))

    classifier.fit(input_fn=_input_fn, steps=50)
    # evaluate() without custom metrics.
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.4)
    self.assertGreater(evaluate_output['recall_at_1'], 0.4)
    self.assertGreater(evaluate_output['precision_at_2'], 0.4)
    self.assertGreater(evaluate_output['recall_at_2'], 0.4)

    # evaluate() with custom metrics.
    metrics = {('my_metric', 'top_k'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    # This test's output is flaky so just testing that 'my_metric' is indeed
    # part of the evaluate_output.
    self.assertTrue('my_metric' in evaluate_output)

    # predict() with top_k.
    predict_output = classifier.predict(input_fn=_input_fn, get_top_k=True)
    self.assertListEqual([3, 2], list(predict_output.shape))
    # TODO(dnivara): Setup this test such that it is not flaky and predict() and
    # evaluate() outputs can be tested.
Project: lsdc    Author: febert
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
                         name=None):
  """Crosses a list of Tensor or SparseTensor objects.

  See sparse_feature_cross_kernel.cc for more details.

  Args:
    inputs: List of `SparseTensor` or `Tensor` to be crossed.
    hashed_output: If true, returns the hash of the cross instead of the string.
      This allows us to avoid string manipulations.
    num_buckets: It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` with the crossed features.
    Return type is string if hashed_output=False, int64 otherwise.

  Raises:
    TypeError: If the inputs aren't either SparseTensor or Tensor.
  """
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(i, ops.SparseTensor) or
             isinstance(i, ops.Tensor) for i in inputs):
    raise TypeError("All inputs must be Tensor or SparseTensor objects")

  sparse_inputs = [i for i in inputs if isinstance(i, ops.SparseTensor)]
  dense_inputs = [i for i in inputs if not isinstance(i, ops.SparseTensor)]

  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string

  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64

  indices_out, values_out, shape_out = (
      _sparse_feature_cross_op.sparse_feature_cross(indices,
                                                    values,
                                                    shapes,
                                                    dense_inputs,
                                                    hashed_output,
                                                    num_buckets,
                                                    out_type=out_type,
                                                    internal_type=internal_type,
                                                    name=name))
  return ops.SparseTensor(indices_out, values_out, shape_out)
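
A usage sketch (assumed values, pre-1.0 contrib API): crossing two dense string tensors yields a SparseTensor whose string values join the inputs, e.g. with an "_X_" separator.

import tensorflow as tf

a = tf.constant([["a1"]])
b = tf.constant([["b1"]])
crossed = sparse_feature_cross([a, b])   # values like "a1_X_b1"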
Project: lsdc    Author: febert
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
                 sequence_length):
    """Run this LSTM on inputs, starting from the given state.

    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)` or None.

    Returns:
      A pair containing:

      - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
                         output_size]`
      - Output (h): A `3-D` tensor of shape `[time_len, batch_size,
                    output_size]`
    """

    inputs_shape = inputs.get_shape().with_rank(3)
    time_len = inputs_shape[0].value
    if time_len is None:
      time_len = array_ops.shape(inputs)[0]
    input_size = inputs_shape[2].value
    w = vs.get_variable(
        "W_0", [input_size + self._num_units, self._num_units * 4], dtype=dtype)
    b = vs.get_variable(
        "B", [w.get_shape().with_rank(2)[1]],
        initializer=init_ops.constant_initializer(0.0),
        dtype=dtype)
    if self._use_peephole:
      wci = vs.get_variable("W_I_diag", [self._num_units], dtype=dtype)
      wco = vs.get_variable("W_O_diag", [self._num_units], dtype=dtype)
      wcf = vs.get_variable("W_F_diag", [self._num_units], dtype=dtype)
    else:
      wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)

    if sequence_length is None:
      max_seq_len = time_len
    else:
      max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))

    _, cs, _, _, _, _, h = _lstm_ops_so.block_lstm(
        seq_len_max=max_seq_len,
        x=inputs,
        cs_prev=initial_cell_state,
        h_prev=initial_output,
        w=w,
        wci=wci,
        wco=wco,
        wcf=wcf,
        b=b,
        forget_bias=self._forget_bias,
        cell_clip=self._cell_clip,
        use_peephole=self._use_peephole)
    return cs, h
Project: deepsleepnet    Author: akaraspt
def _reverse_seq(input_seq, lengths):
    """Reverse a list of Tensors up to specified lengths.
    Args:
        input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
                   or nested tuples of tensors.
        lengths:   A `Tensor` of dimension batch_size, containing lengths for each
                   sequence in the batch. If "None" is specified, simply reverses
                   the list.
    Returns:
        time-reversed sequence
    """
    if lengths is None:
        return list(reversed(input_seq))

    flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

    flat_results = [[] for _ in range(len(input_seq))]
    for sequence in zip(*flat_input_seq):
        input_shape = tensor_shape.unknown_shape(
                ndims=sequence[0].get_shape().ndims)
        for input_ in sequence:
            input_shape.merge_with(input_.get_shape())
            input_.set_shape(input_shape)

        # Join into (time, batch_size, depth)
        s_joined = array_ops.pack(sequence)

        # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
        if lengths is not None:
            lengths = math_ops.to_int64(lengths)

        # Reverse along dimension 0
        s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
        # Split again into list
        result = array_ops.unpack(s_reversed)
        for r, flat_result in zip(result, flat_results):
            r.set_shape(input_shape)
            flat_result.append(r)

    results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
               for input_, flat_result in zip(input_seq, flat_results)]
    return results
Project: diversity_based_attention    Author: PrekshaNema25
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
               or nested tuples of tensors.
    lengths:   A `Tensor` of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(sequence)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results