Python tensorflow module: to_int64() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.to_int64().
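
A note on the API itself: tf.to_int64(x) is shorthand for tf.cast(x, tf.int64). It was deprecated in later TensorFlow 1.x releases and is not part of the TensorFlow 2.x namespace (only tf.compat.v1.to_int64 remains), so tf.cast is the forward-compatible spelling. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.constant([1.7, 2.3, 3.0])      # float32 tensor
a = tf.to_int64(x)                    # legacy helper: truncates toward zero -> [1, 2, 3]
b = tf.cast(x, tf.int64)              # equivalent, forward-compatible form

with tf.Session() as sess:
    print(sess.run([a, b]))           # [array([1, 2, 3]), array([1, 2, 3])]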

Project: DmsMsgRcg    Author: bshao001
def _build_training_graph(self, logits, labels, learning_rate):
        """
        Build the training graph.
        Args:
            logits: Logits tensor, float - [batch_size, class_count].
            labels: Labels tensor, int32 - [batch_size], with values in the range
                [0, class_count).
            learning_rate: The learning rate for the optimization.
        Returns:
            train_op: The Op for training.
            loss: The Op for calculating the loss.
            accuracy: The Op for calculating prediction accuracy.
        """
        # Create an operation that calculates loss.
        labels = tf.to_int64(labels)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy')
        loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

        correct_predict = tf.nn.in_top_k(logits, labels, 1)
        accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

        return train_op, loss, accuracy
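
The cast above exists because tf.nn.sparse_softmax_cross_entropy_with_logits wants integer class indices (int32 or int64), while label pipelines often deliver some other dtype; an int64 label tensor also compares cleanly against the int64 output of ops like tf.argmax. A minimal, self-contained sketch of the same cast-then-loss pattern with made-up values, assuming TensorFlow 1.x:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 0.1, 3.0]])          # [batch_size, class_count]
labels = tf.constant([0, 2], dtype=tf.int32)     # raw labels, e.g. int32

labels = tf.to_int64(labels)                     # int32 would also be accepted
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy)
accuracy = tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits, labels, 1)))

with tf.Session() as sess:
    print(sess.run([loss, accuracy]))            # small loss, accuracy 1.0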
Project: lsdc    Author: febert
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
Project: lsdc    Author: febert
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
Project: fathom    Author: rdadolf
def ctc_label_dense_to_sparse( self, labels, label_lengths ):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
      label_shape = tf.shape( labels )
      num_batches_tns = tf.stack( [label_shape[0]] )
      max_num_labels_tns = tf.stack( [label_shape[1]] )

      def range_less_than(previous_state, current_input):
        return tf.expand_dims( tf.range( label_shape[1] ), 0 ) < current_input

      init = tf.cast( tf.fill( max_num_labels_tns, 0 ), tf.bool )
      init = tf.expand_dims( init, 0 )
      dense_mask = functional_ops.scan(range_less_than, label_lengths , initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[ :, 0, : ]

      label_array = tf.reshape( tf.tile( tf.range( 0, label_shape[1] ), num_batches_tns ), label_shape )
      label_ind = tf.boolean_mask( label_array, dense_mask )

      batch_array = tf.transpose( tf.reshape( tf.tile( tf.range( 0,  label_shape[0] ), max_num_labels_tns ), tf.reverse( label_shape,[0]) ) )
      batch_ind = tf.boolean_mask( batch_array, dense_mask )

      indices = tf.transpose( tf.reshape( tf.concat( axis=0, values=[batch_ind, label_ind] ), [2,-1] ) )
      vals_sparse = tf.gather_nd( labels, indices )
      return tf.SparseTensor( tf.to_int64(indices), vals_sparse, tf.to_int64( label_shape ) )
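
The helper above turns a padded dense label matrix of shape [batch, max_label_len] into the tf.SparseTensor that tf.nn.ctc_loss consumes; the tf.to_int64 calls are there because tf.SparseTensor requires int64 indices and dense_shape. A shorter, roughly equivalent sketch of the same dense-to-sparse conversion using tf.sequence_mask and tf.where (assuming TF >= 1.0; the labels and lengths are made up):

import tensorflow as tf

labels = tf.constant([[3, 7, 2, 0],        # padded dense labels, shape [batch, max_len]
                      [5, 1, 0, 0]])
label_lengths = tf.constant([3, 2])

mask = tf.sequence_mask(label_lengths, maxlen=tf.shape(labels)[1])  # True where a real label sits
indices = tf.where(mask)                                            # already int64, shape [nnz, 2]
values = tf.gather_nd(labels, indices)
sparse_labels = tf.SparseTensor(indices=indices,
                                values=values,
                                dense_shape=tf.to_int64(tf.shape(labels)))

with tf.Session() as sess:
    print(sess.run(sparse_labels))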
Project: TF-Net    Author: Jorba123
def loss(logit_tensor, targets_pl, one_hot_labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
    logits: Logits from inference().
    labels: Targets Placeholder. 1-D tensor of shape [batch_size]

    Returns:
    Loss tensor of type float.
    """
    targets = tf.to_int64(targets_pl)

    # calculate the average cross entropy loss across the batch.
    if one_hot_labels:
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_tensor, labels=targets, name='cross_entropy_per_example')
    else:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit_tensor, labels=targets, name='cross_entropy_per_example_sparse')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)
    tf.summary.scalar('loss', cross_entropy_mean)
    return cross_entropy_mean
Project: neuroimage-tensorflow    Author: corticometrics
def loss(logits, labels):

    # input: logits: Logits tensor, float - [batch_size, 256, 256, NUM_CLASSES].
    # input: labels: Labels tensor, int32 - [batch_size, 256, 256].
    # output: loss: Loss tensor of type float.

    labels = tf.to_int64(labels)
    print_tensor_shape( logits, 'logits shape before')
    print_tensor_shape( labels, 'labels shape before')

# reshape to match args required for the cross entropy function
    logits_re = tf.reshape( logits, [-1, 2] )
    labels_re = tf.reshape( labels, [-1] )
    print_tensor_shape( logits_re, 'logits shape after')
    print_tensor_shape( labels_re, 'labels shape after')

# call cross entropy with logits
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
         logits=logits_re, labels=labels_re, name='cross_entropy')

    loss = tf.reduce_mean(cross_entropy, name='1cnn_cross_entropy_mean')
    return loss
Project: neuroimage-tensorflow    Author: corticometrics
def loss_fn(logits, labels):        
    # input:  logits: Logits tensor, float - [batch_size, 256, 256, 256, 2].
    # input: labels: Labels tensor, int8 - [batch_size, 256, 256, 256].
    # output: loss: Loss tensor of type float.

    labels = tf.to_int64(labels)
    print_tensor_shape( logits, 'logits shape ')
    print_tensor_shape( labels, 'labels shape ')

    # reshape to match args required for the cross entropy function
    logits_re = tf.reshape( logits, [-1, 2] )
    labels_re = tf.reshape( labels, [-1] )
    #print_tensor_shape( logits_re, 'logits shape after')
    #print_tensor_shape( labels_re, 'labels shape after')

    # call cross entropy with logits
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross_entropy')
    print_tensor_shape( cross_entropy, 'cross_entropy shape ')

    loss = tf.reduce_mean(cross_entropy, name='1cnn_cross_entropy_mean')
    print_tensor_shape( loss, 'loss shape ')

    return loss
Project: sat-seg    Author: mshiv
def loss(logits, labels, num_classes):

    logits = tf.reshape(logits, [-1, num_classes])
    #epsilon = tf.constant(value=1e-4)
    #logits = logits + epsilon

    #CHANGE LABELS TYPE to INT, for sparse_softmax_Cross_...
    # to FLOAT, for softmax_Cross_entropy...
    #labels = tf.to_float(tf.reshape(labels, [-1]))
    labels = tf.to_int64(tf.reshape(labels, [-1]))
    #print (np.unique(labels))
    print ('shape of logits: %s' % str(logits.get_shape()))
    print ('shape of labels: %s' % str(labels.get_shape()))

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='Cross_Entropy')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)

    loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    #loss = tf.add_n(cross_entropy)
    return loss
Project: Defect-Prediction    Author: Jorba123
def loss(logit_tensor, targets_pl):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
    logits: Logits from inference().
    labels: Targets Placeholder. 1-D tensor of shape [batch_size]

    Returns:
    Loss tensor of type float.
    """
    targets = tf.to_int64(targets_pl)

    # calculate the average cross entropy loss across the batch.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit_tensor, labels=targets, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)
    tf.summary.scalar('loss', cross_entropy_mean)
    return cross_entropy_mean
Project: DeepDeepParser    Author: janmbuys
def init_thin_stack(batch_size, max_num_concepts):
  """Initializes the thin stack.
  Returns:
    thin_stack: Tensor with the stack content.
    thin_stack_head_next: Index pointers to element after stack head.
  """
  # Stack initialized to -1, points to initial state.
  thin_stack = -tf.ones(tf.pack([batch_size, max_num_concepts]),
      dtype=tf.int32)
  # Reshape to ensure dimension 1 is known.
  thin_stack = tf.reshape(thin_stack, [-1, max_num_concepts])
  # Set to 0 at position 0.
  inds = tf.transpose(tf.to_int64(tf.pack(
   [tf.range(batch_size), tf.zeros(tf.pack([batch_size]), dtype=tf.int32)])))
  delta = tf.SparseTensor(inds, tf.ones(tf.pack([batch_size]), dtype=tf.int32),
      tf.pack([tf.to_int64(batch_size), max_num_concepts]))
  new_thin_stack = thin_stack + tf.sparse_tensor_to_dense(delta)
  # Position 0 is for empty stack; position after head always >= 1.
  thin_stack_head_next = tf.ones(tf.pack([batch_size]),
      dtype=tf.int32)
  return new_thin_stack, thin_stack_head_next
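
The int64 casts here are forced by tf.SparseTensor, whose indices and dense_shape must be int64. A minimal sketch of the same add-one-at-column-zero trick in isolation, with tf.stack standing in for the pre-1.0 tf.pack (batch size and width are toy values):

import tensorflow as tf

batch_size = 3
num_cols = 5
base = -tf.ones([batch_size, num_cols], dtype=tf.int32)

# One (row, 0) index per batch entry; SparseTensor indices must be int64.
inds = tf.to_int64(tf.stack(
    [tf.range(batch_size), tf.zeros([batch_size], dtype=tf.int32)], axis=1))
delta = tf.SparseTensor(indices=inds,
                        values=tf.ones([batch_size], dtype=tf.int32),
                        dense_shape=tf.to_int64(tf.stack([batch_size, num_cols])))
result = base + tf.sparse_tensor_to_dense(delta)   # column 0 becomes 0, the rest stays -1

with tf.Session() as sess:
    print(sess.run(result))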
Project: DeepDeepParser    Author: janmbuys
def mask_decoder_reduce(logit, thin_stack_head_next, logit_size, batch_size):
  """Ensures that we can only reduce when the stack has at least 1 item.

  For each batch entry k:
    If thin_stack_head_next is 0 or 1 (i.e., the stack has no items),
      set logit[k][reduce_index] = -np.inf;
    otherwise leave the logit unchanged.
  """
  # Allow reduce only if at least 1 item on stack, i.e., pointer >= 2.
  update_vals = tf.pack([-np.inf, -np.inf, 0.0])
  update_val = tf.gather(update_vals, 
      tf.minimum(thin_stack_head_next,
      2*tf.ones(tf.pack([batch_size]), dtype=tf.int32)))

  re_filled = tf.fill(tf.pack([batch_size]),
      tf.to_int64(data_utils.REDUCE_ID))
  re_inds = tf.transpose(tf.pack(
      [tf.to_int64(tf.range(batch_size)), re_filled]))
  re_delta = tf.SparseTensor(re_inds, update_val, tf.to_int64(
      tf.pack([batch_size, logit_size])))
  new_logit = logit + tf.sparse_tensor_to_dense(re_delta)
  return new_logit
Project: tensor2tensor    Author: tensorflow
def imagenet_preprocess_example(example, mode, resize_size=None):
  """Preprocessing used for Imagenet and similar problems."""
  if resize_size is None:
    resize_size = [299, 299]

  def preprocess(img):
    img = tf.image.resize_images(img, [360, 360])
    img = common_layers.image_augmentation(
        tf.to_float(img) / 255., crop_size=resize_size)
    return tf.to_int64(img * 255.)

  def resize(img):
    return tf.to_int64(tf.image.resize_images(img, resize_size))

  inputs = tf.cast(example["inputs"], tf.int64)
  if mode == tf.estimator.ModeKeys.TRAIN:
    example["inputs"] = tf.cond(  # Preprocess 90% of the time.
        tf.less(tf.random_uniform([]), 0.9),
        lambda img=inputs: preprocess(img),
        lambda img=inputs: resize(img))
  else:
    example["inputs"] = resize(inputs)
  return example
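
In both branches the image comes back as int64 pixel values: augmentation and resizing happen in float (tf.image.resize_images returns a float tensor), and tf.to_int64 truncates back to the integer encoding the rest of the pipeline expects. A minimal sketch of that float-augment-then-recast round trip on a dummy image (assuming TensorFlow 1.x; the sizes are arbitrary):

import tensorflow as tf

img = tf.random_uniform([8, 8, 3], maxval=256, dtype=tf.int32)   # stand-in for example["inputs"]
img = tf.cast(img, tf.int64)

as_float = tf.to_float(img) / 255.                  # work in [0, 1] for float-only image ops
resized = tf.image.resize_images(as_float, [4, 4])  # resize returns float32
back_to_int = tf.to_int64(resized * 255.)           # restore integer pixel values

with tf.Session() as sess:
    out = sess.run(back_to_int)
    print(out.dtype, out.shape)                     # int64 (4, 4, 3)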
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
Project: seq2seq    Author: google
def _create_predictions(self, decoder_output, features, labels, losses=None):
    """Creates the dictionary of predictions that is returned by the model.
    """
    predictions = {}

    # Add features and, if available, labels to predictions
    predictions.update(_flatten_dict({"features": features}))
    if labels is not None:
      predictions.update(_flatten_dict({"labels": labels}))

    if losses is not None:
      predictions["losses"] = _transpose_batch_time(losses)

    # Decoders return output in time-major form [T, B, ...]
    # Here we transpose everything back to batch-major for the user
    output_dict = collections.OrderedDict(
        zip(decoder_output._fields, decoder_output))
    decoder_output_flat = _flatten_dict(output_dict)
    decoder_output_flat = {
        k: _transpose_batch_time(v)
        for k, v in decoder_output_flat.items()
    }
    predictions.update(decoder_output_flat)

    # If we predict the ids also map them back into the vocab and process them
    if "predicted_ids" in predictions.keys():
      vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
      target_id_to_vocab = vocab_tables["target_id_to_vocab"]
      predicted_tokens = target_id_to_vocab.lookup(
          tf.to_int64(predictions["predicted_ids"]))
      # Raw predicted tokens
      predictions["predicted_tokens"] = predicted_tokens

    return predictions
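
The lookup here needs an int64 key tensor: vocabulary tables created with tf.contrib.lookup (for example index_to_string_table_from_tensor) are keyed by int64 ids, while decoder outputs such as predicted_ids are typically int32. A minimal sketch of that id-to-token mapping on its own, with a made-up vocabulary and table construction (the real seq2seq code builds its vocab tables elsewhere), assuming TensorFlow 1.x:

import tensorflow as tf

vocab = tf.constant(["<unk>", "hello", "world"])
target_id_to_vocab = tf.contrib.lookup.index_to_string_table_from_tensor(
    vocab, default_value="<unk>")

predicted_ids = tf.constant([[1, 2, 0]], dtype=tf.int32)                  # e.g. beam/decoder output
predicted_tokens = target_id_to_vocab.lookup(tf.to_int64(predicted_ids))  # table keys are int64

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(predicted_tokens))   # [[b'hello' b'world' b'<unk>']]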
Project: adversarial-deep-structural-networks    Author: wentaozhu
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, q_test
Project: adversarial-deep-structural-networks    Author: wentaozhu
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
Project: adversarial-deep-structural-networks    Author: wentaozhu
def model(X, Y, k1, k2, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras, fusion=False)
    #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10, wunary=paras['wunary'])
  else: 
    hconv4clip = buildmodel(X, paras)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10)
  #hconv4log = -tf.log(hconv4clip)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  #q_train = -tf.log(hconv4clip)
  q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
  trainenergy = tf.reduce_sum(-tf.log(q_trainclip)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])

  #q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
Project: rlflow    Author: tpbarron
def cast_int(x):
    """
    Cast the output of x to an int64
    """
    return tf.to_int64(x)


# Q learning DQN style
Project: cloudml-samples    Author: GoogleCloudPlatform
def generate_top_k_scores_and_ids(logits, top_k):
  """This function computes top K ids and scores from logits tensor.

  Args:
    logits: logit tensor computed in the serving graph.
    top_k: number of top K elements to rank.

  Returns:
    predictions: dict with the scores and string ids of the top K items.
    output_alternatives: classification output alternative wrapping the
      predictions dict, for serving.
  """

  probabilities = tf.nn.softmax(
      logits, name=tf.contrib.learn.PredictionKey.PROBABILITIES)
  top_k_scores, top_k_ids = tf.nn.top_k(
      input=probabilities, k=top_k)
  top_k_ids = tf.contrib.lookup.index_to_string(
      tf.to_int64(top_k_ids),
      mapping=tf.constant([str(i) for i in xrange(MOVIE_VOCAB_SIZE)]))
  predictions = {
      # served as "scores" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.PROBABILITIES:
          top_k_scores,
      # served as "classes" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.CLASSES:
          top_k_ids
  }
  output_alternatives = {DEFAULT_OUTPUT_ALTERNATIVE: (
      tf.contrib.learn.ProblemType.CLASSIFICATION,
      predictions)}
  return predictions, output_alternatives
Project: cloudml-samples    Author: GoogleCloudPlatform
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: cloudml-samples    Author: GoogleCloudPlatform
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: cloudml-samples    Author: GoogleCloudPlatform
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: keras    Author: GeekLiB
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = tf.pack([label_shape[0]])
    max_num_labels_tns = tf.pack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), tf.reverse(label_shape, [True])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
Project: tf-midi-id    Author: cghawthorne
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
Project: deep_learning_study    Author: jowettcz
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: dlbench    Author: hclhkbu
def loss(logits, labels):
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    return loss
Project: dlbench    Author: hclhkbu
def loss(logits, labels):
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    return loss
Project: conv_seq2seq    Author: tobyyouup
def _create_predictions(self, decoder_output, features, labels, losses=None):
    """Creates the dictionary of predictions that is returned by the model.
    """
    predictions = {}

    # Add features and, if available, labels to predictions
    predictions.update(_flatten_dict({"features": features}))
    if labels is not None:
      predictions.update(_flatten_dict({"labels": labels}))

    if losses is not None:
      predictions["losses"] = _transpose_batch_time(losses)

    # Decoders return output in time-major form [T, B, ...]
    # Here we transpose everything back to batch-major for the user
    output_dict = collections.OrderedDict(
        zip(decoder_output._fields, decoder_output))
    decoder_output_flat = _flatten_dict(output_dict)

    decoder_output_flat = {
        k: _transpose_batch_time(v)
        for k, v in decoder_output_flat.items()
    }
    predictions.update(decoder_output_flat)

    # If we predict the ids also map them back into the vocab and process them
    if "predicted_ids" in predictions.keys():
      vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
      target_id_to_vocab = vocab_tables["target_id_to_vocab"]
      predicted_tokens = target_id_to_vocab.lookup(
          tf.to_int64(predictions["predicted_ids"]))
      # Raw predicted tokens
      predictions["predicted_tokens"] = predicted_tokens

    return predictions
Project: DataMining    Author: lidalei
def loss(logits, labels, l2_penalty):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].
      l2_penalty
    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name = 'xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean') + l2_penalty
    return loss
Project: DataMining    Author: lidalei
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name = 'xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean')
    return loss
Project: DataMining    Author: lidalei
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name = 'xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean')
    return loss
Project: DataMining    Author: lidalei
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name = 'xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean')
    return loss
Project: DataMining    Author: lidalei
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name = 'xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean')
    return loss
Project: rllabplusplus    Author: shaneshixiang
def init_policy(self):
        output_vec = L.get_output(self._output_vec_layer, deterministic=True)
        action = tf.to_int64(tf.argmax(output_vec, 1))
        action_vec = tf.one_hot(action, self._n)
        max_qval = tf.reduce_max(output_vec, 1)

        self._f_actions = tensor_utils.compile_function([self._obs_layer.input_var], action)
        self._f_actions_vec = tensor_utils.compile_function([self._obs_layer.input_var], action_vec)
        self._f_max_qvals = tensor_utils.compile_function([self._obs_layer.input_var], max_qval)
Project: rllabplusplus    Author: shaneshixiang
def get_action_sym(self, obs_var):
        output_vec = L.get_output(self._output_vec_layer, obs_var, deterministic=True)
        action = tf.to_int64(tf.argmax(output_vec, 1))
        action_vec = tf.one_hot(action, self._n)
        return action_vec
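
Both methods implement greedy action selection: argmax over the network's output scores gives the action index, which is then one-hot encoded. tf.argmax already returns int64 by default, so the tf.to_int64 wrapper is mostly an explicit, defensive cast. A standalone sketch with toy Q-values, assuming TensorFlow 1.x:

import tensorflow as tf

n_actions = 4
q_values = tf.constant([[0.1, 2.0, -1.0, 0.3],
                        [1.5, 0.2,  0.0, 0.9]])

action = tf.to_int64(tf.argmax(q_values, 1))   # greedy action index per row -> [1, 0]
action_vec = tf.one_hot(action, n_actions)     # one-hot encoding of the chosen action
max_qval = tf.reduce_max(q_values, 1)

with tf.Session() as sess:
    print(sess.run([action, action_vec, max_qval]))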
Project: hourglasstensorlfow    Author: wbenbihi
def _create_joint_tensor(self, tensor, name = 'joint_tensor',debug = False):
        """ TensorFlow Computation of Joint Position
        Args:
            tensor      : Prediction Tensor Shape [nbStack x 64 x 64 x outDim] or [64 x 64 x outDim]
            name        : name of the tensor
        Returns:
            out         : Tensor of joints position

        Comment:
            I genuinely agree this tensor is UGLY. If you don't trust me, look at
            the 'prediction' node in TensorBoard.
            In my defence, I implemented it to compare computation times with numpy.
        """
        with tf.name_scope(name):
            shape = tensor.get_shape().as_list()
            if debug:
                print(shape)
            if len(shape) == 3:
                resh = tf.reshape(tensor[:,:,0], [-1])
            elif len(shape) == 4:
                resh = tf.reshape(tensor[-1,:,:,0], [-1])
            if debug:
                print(resh)
            arg = tf.arg_max(resh,0)
            if debug:
                print(arg, arg.get_shape(), arg.get_shape().as_list())
            joints = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]), arg % tf.to_int64(shape[1])], axis = -1), axis = 0)
            for i in range(1, shape[-1]):
                if len(shape) == 3:
                    resh = tf.reshape(tensor[:,:,i], [-1])
                elif len(shape) == 4:
                    resh = tf.reshape(tensor[-1,:,:,i], [-1])
                arg = tf.arg_max(resh,0)
                j = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]), arg % tf.to_int64(shape[1])], axis = -1), axis = 0)
                joints = tf.concat([joints, j], axis = 0)
            return tf.identity(joints, name = 'joints')
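
Each joint is recovered by flattening one heatmap, taking the argmax over the flat vector, and converting the flat index back to (row, col) with integer division and modulo by the heatmap width; tf.to_int64(shape[1]) matches the int64 dtype that tf.arg_max (the old alias of tf.argmax) produces. A minimal sketch of that flat-index-to-coordinates step on a toy 4x4 heatmap, assuming TensorFlow 1.x:

import tensorflow as tf

heatmap = tf.constant([[0., 0., 0., 0.],
                       [0., 0., 9., 0.],
                       [0., 0., 0., 0.],
                       [0., 0., 0., 0.]])
width = tf.to_int64(tf.shape(heatmap)[1])

flat = tf.reshape(heatmap, [-1])
flat_arg = tf.argmax(flat, 0)                              # int64 flat index of the peak -> 6
joint = tf.stack([flat_arg // width, flat_arg % width])    # (row, col) -> (1, 2)

with tf.Session() as sess:
    print(sess.run(joint))   # [1 2]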
Project: tf_datasets    Author: tmattio
def main(args):
    # load the dataset
    mnist = tfd.get_dataset('mnist', FLAGS.data_dir)
    dataset = mnist.load('validation')

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.iteritems():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
Project: deep-learning-keras-projects    Author: jasmeetsb
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = stack([label_shape[0]])
    max_num_labels_tns = stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), reverse(label_shape, 0)))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
Project: tensorflow_mnist_cloudml    Author: mainyaa
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: tensorflow_mnist_cloudml    Author: mainyaa
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: text2text    Author: google
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = tf.stack(input_seq)

  if lengths is not None:
    lengths = tf.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = tf.unstack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Project: tf-slim-mnist    Author: mnuke
def main(args):
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.iteritems():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
Project: TensorflowFramework    Author: vahidk
def parse(mode, image, label):
    """Parse input record to features and labels."""
    image = tf.to_float(image)
    label = tf.to_int64(label)
    image = tf.image.per_image_standardization(image)
    return {"image": image}, {"label": label}
Project: notmnist    Author: aidiary
def loss(logits, labels):
    """Calculates the loss from the logits and the labels."""
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
Project: tensorflow_kaggle_mnist    Author: Cuongvn08
def get_loss(logit, label):
    label = tf.to_int64(label)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                                        logits=logit,
                                                        labels=label,
                                                        name='cross_entropy')

    loss = tf.reduce_mean(cross_entropy)

    return loss

## get optimizer
# @param learning_rate:
# @param optimizer: optimizer method
Project: DeepLearningAndTensorflow    Author: azheng333
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    return tf.reduce_mean(cross_entropy, name='xentropy_mean')
Project: lsdc    Author: febert
def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = tf.nn.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = tf.reduce_min(new_scores)
      new_length = tf.reduce_sum(
          tf.to_int32(tf.greater(new_scores, tf.float32.min)))
      u1 = self.sl_ids.assign(
          tf.to_int64(tf.concat(0, [[new_length], new_ids])))
      u2 = self.sl_scores.assign(
          tf.concat(0, [[smallest_new_score], new_scores]))
      self.last_ops = [u1, u2]
      return tf.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with tf.control_dependencies(self.last_ops):
      cond_op = tf.cond(n > self.sl_ids[0], refresh_shortlist, tf.no_op)
      with tf.control_dependencies([cond_op]):
        topk_values, topk_indices = tf.nn.top_k(
            self.sl_scores, tf.minimum(n, tf.to_int32(self.sl_ids[0])))
        # topk_indices are the indices into the shortlist, we want to return
        # the indices into id_to_score
        gathered_indices = tf.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values
Project: lsdc    Author: febert
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.pack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.batch_matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.batch_matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(1, all_scores),
          tf.pack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
Project: lsdc    Author: febert
def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = tf.nn.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = tf.reduce_min(new_scores)
      new_length = tf.reduce_sum(
          tf.to_int32(tf.greater(new_scores, tf.float32.min)))
      u1 = self.sl_ids.assign(
          tf.to_int64(tf.concat(0, [[new_length], new_ids])))
      u2 = self.sl_scores.assign(
          tf.concat(0, [[smallest_new_score], new_scores]))
      self.last_ops = [u1, u2]
      return tf.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with tf.control_dependencies(self.last_ops):
      cond_op = tf.cond(n > self.sl_ids[0], refresh_shortlist, tf.no_op)
      with tf.control_dependencies([cond_op]):
        topk_values, topk_indices = tf.nn.top_k(
            self.sl_scores, tf.minimum(n, tf.to_int32(self.sl_ids[0])))
        # topk_indices are the indices into the shortlist, we want to return
        # the indices into id_to_score
        gathered_indices = tf.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values