Python tensorflow module: gather() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.gather().
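As a quick orientation before the project samples, here is a minimal standalone sketch (not taken from any of the projects below; TF 1.x session API assumed): tf.gather selects slices of a tensor along an axis by index.

import tensorflow as tf

params = tf.constant([[1, 2], [3, 4], [5, 6]])
indices = tf.constant([2, 0])
gathered = tf.gather(params, indices)  # picks rows 2 and 0

with tf.Session() as sess:
    print(sess.run(gathered))  # [[5 6] [1 2]]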

Project: Master-R-CNN    Author: Mark110    | Project source | File source
def _filter_negative_samples(labels, tensors):
    """keeps only samples with none-negative labels 
    Params:
    -----
    labels: of shape (N,)
    tensors: a list of tensors, each of shape (N, .., ..) the first axis is sample number

    Returns:
    -----
    tensors: filtered tensors
    """
    # return tensors
    keeps = tf.where(tf.greater_equal(labels, 0))
    keeps = tf.reshape(keeps, [-1])

    filtered = []
    for t in tensors:
        assert_op = tf.assert_equal(tf.shape(t)[0], tf.shape(labels)[0])
        # attach the assert as a control dependency so the shape check actually runs
        with tf.control_dependencies([assert_op]):
            f = tf.gather(t, keeps)
        filtered.append(f)

    return filtered
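A hypothetical usage sketch for the helper above (the labels and boxes are made-up values): with labels [-1, 0, 2], only the entries at positions 1 and 2 are kept.

labels = tf.constant([-1, 0, 2])
boxes = tf.constant([[0., 0., 1., 1.],
                     [0., 0., 2., 2.],
                     [1., 1., 3., 3.]])
kept_boxes, = _filter_negative_samples(labels, [boxes])
# kept_boxes -> [[0., 0., 2., 2.], [1., 1., 3., 3.]]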
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def value_transition(self, curr_state, next_symbols, batch_size):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        with tf.name_scope('grammar_transition'):
            adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens, next_symbols + (first_value_token - self.num_control_tokens), next_symbols)

            assert1 = tf.Assert(tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)), [curr_state, next_symbols])
            with tf.control_dependencies([assert1]):
                transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
            assert transitions.get_shape()[1:] == (self.output_size,)

            indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
            next_state = tf.gather_nd(transitions, indices)

            assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [curr_state, adjusted_next_symbols, next_state])
            with tf.control_dependencies([assert2]):
                return tf.identity(next_state)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def bag_of_tokens(config, labels, label_lengths):
    if config.train_output_embeddings:
        with tf.variable_scope('embed', reuse=True):
            output_embeddings = tf.get_variable('output_embedding')
    else:
        output_embeddings = tf.constant(config.output_embedding_matrix)

    #everything_label_placeholder = tf.placeholder(shape=(None, config.max_length,), dtype=tf.int32)
    #everything_label_length_placeholder = tf.placeholder(shape=(None,), dtype=tf.int32)

    labels = tf.constant(np.array(labels))
    embedded_output = tf.gather(output_embeddings, labels)
    print('embedded_output before', embedded_output)
    #mask = tf.sequence_mask(label_lengths, maxlen=config.max_length, dtype=tf.float32)
    # note: this multiplication will broadcast the mask along all elements of the depth dimension
    # (which is why we run the expand_dims to choose how to broadcast)
    #embedded_output = embedded_output * tf.expand_dims(mask, axis=2)
    #print('embedded_output after', embedded_output)

    return tf.reduce_sum(embedded_output, axis=1)
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def repeat(tensor: tf.Tensor, repeats: int, axis: int) -> tf.Tensor:
    """
    Repeat elements of the input tensor in the specified axis ``repeats``-times.

    .. note::
        Chaining of this op may produce TF warnings although the performance seems to be unaffected.

    :param tensor: TF tensor to be repeated
    :param repeats: number of repeats
    :param axis: axis to repeat
    :return: tensor with repeated elements
    """
    shape = tensor.get_shape().as_list()

    dims = np.arange(len(tensor.shape))
    prepare_perm = np.hstack(([axis], np.delete(dims, axis)))
    restore_perm = np.hstack((dims[1:axis+1], [0], dims[axis+1:]))

    indices = tf.cast(tf.floor(tf.range(0, shape[axis]*repeats)/tf.constant(repeats)), 'int32')

    shuffled = tf.transpose(tensor, prepare_perm)
    repeated = tf.gather(shuffled, indices)
    return tf.transpose(repeated, restore_perm)
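For illustration, a usage sketch with assumed values: repeating a (2, 3) tensor twice along axis 0 duplicates each row in place, like np.repeat.

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
y = repeat(x, repeats=2, axis=0)
# y -> [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]], shape (4, 3)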
Project: text_classification    Author: brightmart    | Project source | File source
def extract_argmax_and_embed(embedding, output_projection=None):
    """
    Get a loop_function that extracts the previous symbol and embeds it. Used by decoder.
    :param embedding: embedding tensor for symbol
    :param output_projection: None or a pair (W, B). If provided, each fed previous output will
    first be multiplied by W and added B.
    :return: A loop function
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1)  # index of the most probable previous symbol
        emb_prev = tf.gather(embedding, prev_symbol)  # look up the embedding for that index
        return emb_prev
    return loop_function

# Decoder part of the RNN.
# During training the ground-truth inputs are fed; at test time, the output at step t is used as the input at step t+1.
Project: text_classification    Author: brightmart    | Project source | File source
def extract_argmax_and_embed(embedding, output_projection=None):
    """
    Get a loop_function that extracts the previous symbol and embeds it. Used by decoder.
    :param embedding: embedding tensor for symbol
    :param output_projection: None or a pair (W, B). If provided, each fed previous output will
    first be multiplied by W and added B.
    :return: A loop function
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1)  # index of the most probable previous symbol
        emb_prev = tf.gather(embedding, prev_symbol)  # look up the embedding for that index
        return emb_prev
    return loop_function

# Decoder part of the RNN.
# During training the ground-truth inputs are fed; at test time, the output at step t is used as the input at step t+1.
Project: DmsMsgRcg    Author: bshao001    | Project source | File source
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
    """Batch Spatial Transformer Layer

    Parameters
    ----------
    U : float
        tensor of inputs [num_batch,height,width,num_channels]
    thetas : float
        a set of transformations for each input [num_batch,num_transforms,6]
    out_size : int
        the size of the output [out_height,out_width]

    Returns: float
        Tensor of size [num_batch*num_transforms,out_height,out_width,num_channels]
    """
    with tf.variable_scope(name):
        num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2])
        indices = [[i]*num_transforms for i in range(num_batch)]
        input_repeated = tf.gather(U, tf.reshape(indices, [-1]))
        return transformer(input_repeated, thetas, out_size)
Project: paraphrase-id-tensorflow    Author: nelson-liu    | Project source | File source
def max_sentence_similarity(sentence_input, similarity_matrix):
    """
    Parameters
    ----------
    sentence_input: Tensor
        Tensor of shape (batch_size, num_sentence_words, rnn_hidden_dim).

    similarity_matrix: Tensor
        Tensor of shape (batch_size, num_sentence_words, num_sentence_words).
    """
    # Shape: (batch_size, passage_len)
    def single_instance(inputs):
        single_sentence = inputs[0]
        argmax_index = inputs[1]
        # Shape: (num_sentence_words, rnn_hidden_dim)
        return tf.gather(single_sentence, argmax_index)

    question_index = tf.arg_max(similarity_matrix, 2)
    elems = (sentence_input, question_index)
    # Shape: (batch_size, num_sentence_words, rnn_hidden_dim)
    return tf.map_fn(single_instance, elems, dtype="float")
Project: DNC    Author: bgavran    | Project source | File source
def calculate_allocation_weighting(self, usage_vector):
        """

        :param usage_vector: tensor of shape [batch_size, memory_size]
        :return: allocation tensor of shape [batch_size, memory_size]
        """
        usage_vector = Memory.epsilon + (1 - Memory.epsilon) * usage_vector

        # We're sorting the "-self.usage_vector" because top_k returns highest values and we need the lowest
        highest_usage, inverse_indices = tf.nn.top_k(-usage_vector, k=self.memory_size)
        lowest_usage = -highest_usage

        allocation_scrambled = (1 - lowest_usage) * tf.cumprod(lowest_usage, axis=1, exclusive=True)

        # allocation is not in the correct order; allocation[i] contains the sorted[i] value
        # reversing the already inverted indices for each batch
        indices = tf.stack([tf.invert_permutation(batch_indices) for batch_indices in tf.unstack(inverse_indices)])
        allocation = tf.stack([tf.gather(mem, ind)
                               for mem, ind in
                               zip(tf.unstack(allocation_scrambled), tf.unstack(indices))])

        return allocation
Project: fold    Author: tensorflow    | Project source | File source
def build_feed_dict(self):
    if FLAGS.serialize_and_merge:
      return self.build_feed_dict_with_serialize_and_merge()

    _logger.info("Traversing trees.")
    # The weaver is an object that can invoke LoomOps, and create a feed_dict.
    weaver = self._loom.make_weaver()
    # Recurse over each tree in the batch
    for _ in six.moves.xrange(0, self.batch_size):
      root = self.traverse_tree(self.get_input_tree(), weaver)
      weaver.add_output(root)

    # Now build the feed_dict, which contains both indices into the embedding
    # tables, and indices for the gather nodes inserted by Loom.
    if FLAGS.direct_feed_dict:
      _logger.info("Calling build_feed_dict in direct mode.")
    else:
      _logger.info("Calling build_feed_dict with serialization.")
    return weaver.build_feed_dict()
Project: jack    Author: uclmr    | Project source | File source
def bilinear_answer_layer(size, encoded_question, question_length, encoded_support, support_length,
                          support2question, answer2support, is_eval, beam_size=1,
                          max_span_size=10000):
    """Answer layer for multiple paragraph QA."""
    # computing single time attention over question
    size = encoded_support.get_shape()[-1].value
    question_state = compute_question_state(encoded_question, question_length)

    # compute logits
    hidden = tf.gather(tf.layers.dense(question_state, 2 * size, name="hidden"), support2question)
    hidden_start, hidden_end = tf.split(hidden, 2, 1)

    support_mask = misc.mask_for_lengths(support_length)

    start_scores = tf.einsum('ik,ijk->ij', hidden_start, encoded_support)
    start_scores = start_scores + support_mask

    end_scores = tf.einsum('ik,ijk->ij', hidden_end, encoded_support)
    end_scores = end_scores + support_mask

    return compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
                         beam_size, max_span_size)
Project: jack    Author: uclmr    | Project source | File source
def _get_top_k(scores1, scores2, k, max_span_size, support2question):
    max_support_length = tf.shape(scores1)[1]
    doc_idx, pointer1, topk_scores1 = segment_top_k(scores1, support2question, k)

    # [num_questions * beam_size]
    doc_idx_flat = tf.reshape(doc_idx, [-1])
    pointer_flat1 = tf.reshape(pointer1, [-1])

    # [num_questions * beam_size, support_length]
    scores_gathered2 = tf.gather(scores2, doc_idx_flat)
    if max_span_size < 0:
        pointer_flat1, max_span_size = pointer_flat1 + max_span_size + 1, -max_span_size
    left_mask = misc.mask_for_lengths(tf.cast(pointer_flat1, tf.int32),
                                      max_support_length, mask_right=False)
    right_mask = misc.mask_for_lengths(tf.cast(pointer_flat1 + max_span_size, tf.int32),
                                       max_support_length)
    scores_gathered2 = scores_gathered2 + left_mask + right_mask

    pointer2 = tf.argmax(scores_gathered2, axis=1, output_type=tf.int32)

    topk_score2 = tf.gather_nd(scores2, tf.stack([doc_idx_flat, pointer2], 1))

    return doc_idx, pointer1, tf.reshape(pointer2, [-1, k]), topk_scores1 + tf.reshape(topk_score2, [-1, k])
Project: jack    Author: uclmr    | Project source | File source
def interaction_layer(seq1, seq1_length, seq2, seq2_length, seq1_to_seq2,
                      module='attention_matching', attn_type='bilinear_diagonal', scaled=True, with_sentinel=False,
                      name='interaction_layer', reuse=False, num_layers=1, encoder=None, concat=True, **kwargs):
    with tf.variable_scope(name, reuse=reuse):
        if seq1_to_seq2 is not None:
            seq2 = tf.gather(seq2, seq1_to_seq2)
            seq2_length = tf.gather(seq2_length, seq1_to_seq2)
        if module == 'attention_matching':
            out = attention_matching_layer(seq1, seq1_length, seq2, seq2_length,
                                           attn_type, scaled, with_sentinel)
        elif module == 'bidaf':
            out = bidaf_layer(seq1, seq1_length, seq2, seq2_length)
        elif module == 'coattention':
            out = coattention_layer(
                seq1, seq1_length, seq2, seq2_length, attn_type, scaled, with_sentinel, num_layers, encoder)
        else:
            raise ValueError("Unknown interaction type: %s" % module)

    if concat:
        out = tf.concat([seq1, out], 2)
    return out
Project: seq2seq    Author: eske    | Project source | File source
def batch_gather(tensor, indices):
    """Gather in batch from a tensor of arbitrary size.

    In pseudocode this module will produce the following:
    output[i] = tf.gather(tensor[i], indices[i])

    Args:
      tensor: Tensor of arbitrary size.
      indices: Vector of indices.
    Returns:
      output: A tensor of gathered values.
    """
    shape = get_shape(tensor)
    flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
    indices = tf.convert_to_tensor(indices)
    offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)
    offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)
    output = tf.gather(flat_first, indices + offset)
    return output
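A usage sketch with assumed values (and assuming the project's get_shape helper returns the static shape as a Python list): one index per batch row yields one gathered element per row.

t = tf.constant([[10, 20, 30],
                 [40, 50, 60]])
idx = tf.constant([2, 0])
out = batch_gather(t, idx)
# out[i] == t[i, idx[i]]  ->  [30, 40]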
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_sgld_sparse(self):
        tf.reset_default_graph()

        z     = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
        idx   = tf.placeholder(tf.int32)
        zi    = tf.gather(z, idx)
        zloss = tf.square(zi - [10.0, 5.0])

        sgld = SGLD(learning_rate=0.4)
        train_op_sgld = sgld.minimize(zloss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        self.assertTrue(np.alltrue(sess.run(z) == 0.0))

        sess.run(train_op_sgld, feed_dict={idx: 3})
        zh = sess.run(z)
        self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
        self.assertTrue(zh[3, 0] > 0)
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_psgld_sparse(self):
        tf.reset_default_graph()

        z     = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
        idx   = tf.placeholder(tf.int32)
        zi    = tf.gather(z, idx)
        zloss = tf.square(zi - [10.0, 5.0])

        psgld = pSGLD(learning_rate=0.4)
        train_op_psgld = psgld.minimize(zloss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        self.assertTrue(np.alltrue(sess.run(z) == 0.0))

        sess.run(train_op_psgld, feed_dict={idx: 3})
        zh = sess.run(z)
        self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
        self.assertTrue(zh[3, 0] > 0)
Project: py-noisemaker    Author: aayars    | Project source | File source
def density_map(tensor, shape):
    """
    """

    height, width, channels = shape

    bins = max(height, width)

    # values = value_map(tensor, shape, keep_dims=True)
    # values = tf.minimum(tf.maximum(tensor, 0.0), 1.0)  # TODO: Get this to work with HDR data
    values = tensor

    # https://stackoverflow.com/a/34143927
    binned_values = tf.cast(tf.reshape(values * (bins - 1), [-1]), tf.int32)
    ones = tf.ones_like(binned_values, dtype=tf.int32)
    counts = tf.unsorted_segment_sum(ones, binned_values, bins)

    out = tf.gather(counts, tf.cast(values[:, :] * (bins - 1), tf.int32))

    return tf.ones(shape) * normalize(tf.cast(out, tf.float32))
Project: icml17_knn    Author: taolei87    | Project source | File source
def linearND(input_, output_size, scope, init_bias=0.0):
    shape = input_.get_shape().as_list()
    ndim = len(shape)
    stddev = min(1.0 / math.sqrt(shape[-1]), 0.1)
    with tf.variable_scope(scope):
        W = tf.get_variable("Matrix", [shape[-1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev))
    X_shape = tf.gather(tf.shape(input_), range(ndim-1))
    target_shape = tf.concat(0, [X_shape, [output_size]])
    exp_input = tf.reshape(input_, [-1, shape[-1]])
    if init_bias is None:
        res = tf.matmul(exp_input, W)
    else:
        with tf.variable_scope(scope):
            b = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(init_bias))
        res = tf.matmul(exp_input, W) + b
    res = tf.reshape(res, target_shape)
    res.set_shape(shape[:-1] + [output_size])
    return res
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | Project source | File source
def rnn(self, sequence, sequence_length, max_length, dropout, batch_size, training,
            num_hidden=TC_MODEL_HIDDEN, num_layers=TC_MODEL_LAYERS):
        # Recurrent network.
        cells = []
        for _ in range(num_layers):
            cell = tf.nn.rnn_cell.GRUCell(num_hidden)
            if training:
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=dropout)
            cells.append(cell)
        network = tf.nn.rnn_cell.MultiRNNCell(cells)
        type = sequence.dtype

        sequence_output, _ = tf.nn.dynamic_rnn(network, sequence, dtype=tf.float32,
                                               sequence_length=sequence_length,
                                               initial_state=network.zero_state(batch_size, type))
        # get last output of the dynamic_rnn
        sequence_output = tf.reshape(sequence_output, [batch_size * max_length, num_hidden])
        indexes = tf.range(batch_size) * max_length + (sequence_length - 1)
        output = tf.gather(sequence_output, indexes)
        return output
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | Project source | File source
def rnn(self, sequence, sequence_length, max_length, dropout, batch_size, training,
            num_hidden=TC_MODEL_HIDDEN, num_layers=TC_MODEL_LAYERS):

        # Recurrent network.
        cell_fw = tf.nn.rnn_cell.GRUCell(num_hidden)
        cell_bw = tf.nn.rnn_cell.GRUCell(num_hidden)
        type = sequence.dtype
        (fw_outputs, bw_outputs), _ = \
            tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
                                            cell_bw=cell_bw,
                                            initial_state_fw=cell_fw.zero_state(batch_size, type),
                                            initial_state_bw=cell_bw.zero_state(batch_size, type),
                                            inputs=sequence,
                                            dtype=tf.float32,
                                            swap_memory=True,
                                            sequence_length=sequence_length)
        sequence_output = tf.concat((fw_outputs, bw_outputs), 2)
        # get last output of the dynamic_rnn
        sequence_output = tf.reshape(sequence_output, [batch_size * max_length, num_hidden * 2])
        indexes = tf.range(batch_size) * max_length + (sequence_length - 1)
        output = tf.gather(sequence_output, indexes)
        return output
Project: DocumentSegmentation    Author: SeguinBe    | Project source | File source
def multilabel_image_to_class(label_image: tf.Tensor, classes_file: str) -> tf.Tensor:
    classes_color_values, colors_labels = get_classes_color_from_file_multilabel(classes_file)
    # Convert label_image [H,W,3] to the classes [H,W,C],int32 according to the classes [C,3]
    with tf.name_scope('LabelAssign'):
        if len(label_image.get_shape()) == 3:
            diff = tf.cast(label_image[:, :, None, :], tf.float32) - tf.constant(classes_color_values[None, None, :, :])  # [H,W,C,3]
        elif len(label_image.get_shape()) == 4:
            diff = tf.cast(label_image[:, :, :, None, :], tf.float32) - tf.constant(
                classes_color_values[None, None, None, :, :])  # [B,H,W,C,3]
        else:
            raise NotImplementedError('Length is : {}'.format(len(label_image.get_shape())))

        pixel_class_diff = tf.reduce_sum(tf.square(diff), axis=-1)  # [H,W,C] or [B,H,W,C]
        class_label = tf.argmin(pixel_class_diff, axis=-1)  # [H,W] or [B,H,W]

        return tf.gather(colors_labels, class_label) > 0
Project: aboleth    Author: data61    | Project source | File source
def _build(self, X):
        """Build the graph of this layer."""
        n_samples, input_dim = self._get_X_dims(X)
        W_shape, _ = self._weight_shapes(self.n_categories)
        n_batch = tf.shape(X)[1]

        # Layer weights
        self.pW = _make_prior(self.std, self.pW, W_shape)
        self.qW = _make_posterior(self.std, self.qW, W_shape, self.full)

        # Index into the relevant weights rather than using sparse matmul
        Wsamples = _sample_W(self.qW, n_samples)
        features = tf.map_fn(lambda wx: tf.gather(*wx, axis=0), (Wsamples, X),
                             dtype=Wsamples.dtype)

        # Now concatenate the resulting features on the last axis
        f_dims = int(np.prod(features.shape[2:]))  # need this for placeholders
        Net = tf.reshape(features, [n_samples, n_batch, f_dims])

        # Regularizers
        KL = kl_sum(self.qW, self.pW)

        return Net, KL
Project: aboleth    Author: data61    | Project source | File source
def _build(self, X):
        """Build the graph of this layer."""
        n_samples, input_dim = self._get_X_dims(X)
        Wdim = (self.n_categories, self.output_dim)
        n_batch = tf.shape(X)[1]

        W = tf.Variable(tf.random_normal(shape=Wdim, seed=next(seedgen)),
                        name="W_map")

        # Index into the relevant weights rather than using sparse matmul
        features = tf.gather(W, X, axis=0)
        f_dims = int(np.prod(features.shape[2:]))  # need this for placeholders
        Net = tf.reshape(features, [n_samples, n_batch, f_dims])

        # Regularizers
        penalty = self.l2 * tf.nn.l2_loss(W) + self.l1 * _l1_loss(W)

        return Net, penalty


#
# Private module stuff
#
Project: aboleth    Author: data61    | Project source | File source
def _impute2D(self, X_2D):
        r"""Mean impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Sum the real values in each column
        col_tot = tf.reduce_sum(data_zeroed_missing_tf, 0)

        # Divide column totals by the number of non-nan values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)

        # Make a vector of the imputed values for each missing point
        imputed_vals = tf.gather(col_nan_means, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
Project: aboleth    Author: data61    | Project source | File source
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Draw a random value for each column from its fitted normal distribution
        col_draws = [n.sample(seed=next(seedgen)) for n in self.normal_array]
        # Make a vector of the imputed values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
Project: SSPP-DAN    Author: csehong    | Project source | File source
def construct(self):
        net_opts = VGG_Face.OPTS()
        net_opts.network_name = 'vgg_face_net'
        net_opts.weight_path = 'pretrained/vgg-face.mat'
        net_opts.apply_dropout = True

        self.vgg_net = VGG_Face(net_opts)
        x_normalized = self.vgg_net.normalize_input(self.x)
        self.vgg_net.network(x_normalized)
        self.keep_prob = self.vgg_net.keep_prob
        self.embedded = self.vgg_net.fc6       #Fine tuning from FC6 of VGG-Face
        self.embedded_with_class = tf.gather(self.embedded, self.with_class_idx, name='embedded_with_class')
        self.dom_network(self.embedded)
        self.class_network(self.embedded_with_class)
        self.loss = self.dom_loss + self.class_loss

    # Construct Domain Discriminator Network
Project: transform    Author: tensorflow    | Project source | File source
def segment_indices(segment_ids, name=None):
  """Returns a `Tensor` of indices within each segment.

  segment_ids should be a sequence of non-decreasing non-negative integers that
  define a set of segments, e.g. [0, 0, 1, 2, 2, 2] defines 3 segments of lengths
  2, 1 and 3.  The return value is a `Tensor` containing the indices within each
  segment.

  Example input: [0, 0, 1, 2, 2, 2]
  Example output: [0, 1, 0, 0, 1, 2]

  Args:
    segment_ids: A 1-d `Tensor` containing a non-decreasing sequence of
        non-negative integers with type `tf.int32` or `tf.int64`.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the indices within each segment.
  """
  with tf.name_scope(name, 'segment_indices'):
    segment_lengths = tf.segment_sum(tf.ones_like(segment_ids), segment_ids)
    segment_starts = tf.gather(tf.concat([[0], tf.cumsum(segment_lengths)], 0),
                               segment_ids)
    return (tf.range(tf.size(segment_ids, out_type=segment_ids.dtype)) -
            segment_starts)
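A sketch reproducing the docstring's example (TF 1.x session API assumed):

seg_ids = tf.constant([0, 0, 1, 2, 2, 2], dtype=tf.int64)
within = segment_indices(seg_ids)
with tf.Session() as sess:
    print(sess.run(within))  # [0 1 0 0 1 2]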
Project: TensorFlow-Machine-Learning-Cookbook    Author: PacktPublishing    | Project source | File source
def rnn_model(x_data_ph, max_sequence_length, vocab_size, embedding_size,
              rnn_size, dropout_keep_prob):
    # Create embedding
    embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
    embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data_ph)

    # Define the RNN cell
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units = rnn_size)
    output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
    output = tf.nn.dropout(output, dropout_keep_prob)

    # Get output of RNN sequence
    output = tf.transpose(output, [1, 0, 2])
    last = tf.gather(output, int(output.get_shape()[0]) - 1)


    weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
    bias = tf.Variable(tf.constant(0.1, shape=[2]))
    logits_out = tf.nn.softmax(tf.matmul(last, weight) + bias)

    return(logits_out)


# Define accuracy function
Project: arc-swift    Author: qipeng    | Project source | File source
def traditional_transition_loss_pred(self, i, j, combined_head, combined_dep):
        rel_trans_feat_ids = self.trans_feat_ids[i*self.args.beam_size+j] if not self.train else self.trans_feat_ids[i, j]
        rel_head = tf.reshape(tf.gather(combined_head, rel_trans_feat_ids[:4]), [4, self.args.rel_emb_dim])
        rel_dep  = tf.reshape(tf.gather(combined_dep,  rel_trans_feat_ids[:4]), [4, self.args.rel_emb_dim])

        mask = tf.cast(tf.reshape(tf.greater_equal(rel_trans_feat_ids[:4], 0), [4,1]), tf.float32)
        rel_head = tf.multiply(mask, rel_head)
        rel_dep = tf.multiply(mask, rel_dep)

        rel_hid = self.rel_merge(rel_head, rel_dep)
        rel_logit = self.rel_dense(tf.reshape(rel_hid, [1, -1]))
        rel_logit = tf.reshape(rel_logit, [-1])

        log_partition = tf.reduce_logsumexp(rel_logit)
        if self.train:
            res = log_partition - rel_logit[self.trans_labels[i, j]]

            return res
        else:
            arc_pred = log_partition - rel_logit

            return arc_pred
Project: arc-swift    Author: qipeng    | Project source | File source
def pos_loss_pred(self, i, pos_embeddings, pos_logit, NUM_POS, gold_pos, pos_trainables):
        if self.args.no_pos:
            pos_emb = tf.nn.embedding_lookup(pos_embeddings, gold_pos[i])
            if self.train:
                return 0, pos_emb
            else:
                return tf.gather(gold_pos[i], tf.range(1, self.sent_length)), pos_emb
        else:
            pos_logit = pos_logit[1:]

            log_partition = tf.reduce_logsumexp(pos_logit, [1])

            pos_pred = tf.exp(pos_logit - tf.reshape(log_partition, (-1, 1)))
            pos_emb = tf.concat([tf.reshape(tf.nn.embedding_lookup(pos_embeddings, NUM_POS), (1, -1)),
                tf.matmul(pos_pred, pos_trainables)], 0)

            if self.train:
                loss = tf.reduce_sum(tf.gather(log_partition, tf.range(self.sent_lengths[i]-1))
                    - tf.gather(tf.reshape(pos_logit, [-1]),
                        tf.range(self.sent_lengths[i]-1) * NUM_POS
                        + tf.gather(gold_pos[i], tf.range(1, self.sent_lengths[i]))))

                return loss, pos_emb
            else:
                return tf.cast(tf.argmax(pos_pred, 1), tf.int32), pos_emb
Project: tensorflow-forward-ad    Author: renmengye    | Project source | File source
def _max_pool_grad_grad(dy, x, y, ksize, strides, padding, argmax=None):
  """Gradients of MaxPoolGrad."""
  if argmax is None:
    _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
  grad = dy
  grad_flat = tf.reshape(grad, [-1])
  argmax_flat = tf.reshape(argmax, [-1])

  x_shape = tf.cast(tf.shape(x), argmax.dtype)
  batch_dim = tf.reshape(
      tf.range(
          x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
  nelem = tf.reduce_prod(x_shape[1:])
  batch_dim *= nelem

  y_zero = tf.zeros_like(y, dtype=argmax.dtype)
  batch_dim += y_zero
  batch_dim = tf.reshape(batch_dim, [-1])

  argmax_flat += batch_dim
  grad_input = tf.gather(grad_flat, argmax_flat)
  grad_input = tf.reshape(grad_input, tf.shape(y))
  return grad_input
Project: terngrad    Author: wenwei202    | Project source | File source
def ternary_decoder(encoded_data, scaler, shape):
  """Decoding the signs to float format """
  a = tf.cast(encoded_data, tf.int32)
  a_split1 = tf.mod(a,4)
  a_split2 = tf.to_int32(tf.mod(a/4,4))
  a_split3 = tf.to_int32(tf.mod(a/16,4))
  a_split4 = tf.to_int32(tf.mod(a/64,4))
  a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
  real_size = tf.reduce_prod(shape)
  a = tf.to_float(a)
  a = tf.gather(a, tf.range(0,real_size))

  a = tf.reshape(a, shape)
  a = tf.subtract(a,1)
  decoded = a*scaler
  return decoded
Project: antgo    Author: jianzfb    | Project source | File source
def bboxes_nms(scores, bboxes, nms_threshold=0.5, keep_top_k=200, scope=None):
    """Apply non-maximum selection to bounding boxes. In comparison to TF
    implementation, use classes information for matching.
    Should only be used on single-entries. Use batch version otherwise.

    Args:
      scores: N Tensor containing float scores.
      bboxes: N x 4 Tensor containing boxes coordinates.
      nms_threshold: Matching threshold in NMS algorithm;
      keep_top_k: Number of total object to keep after NMS.
    Return:
      classes, scores, bboxes Tensors, sorted by score.
        Padded with zero if necessary.
    """
    with tf.name_scope(scope, 'bboxes_nms_single', [scores, bboxes]):
        # Apply NMS algorithm.
        idxes = tf.image.non_max_suppression(bboxes, scores,
                                             keep_top_k, nms_threshold)
        scores = tf.gather(scores, idxes)
        bboxes = tf.gather(bboxes, idxes)
        # Pad results.
        scores = tfe_tensors.pad_axis(scores, 0, keep_top_k, axis=0)
        bboxes = tfe_tensors.pad_axis(bboxes, 0, keep_top_k, axis=0)
        return scores, bboxes
Project: antgo    Author: jianzfb    | Project source | File source
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
Project: DaNet-Tensorflow    Author: khaotik    | Project source | File source
def combinations(s_data, subset_size, total_size=None, name=None):
    assert isinstance(subset_size, int)
    assert subset_size > 0
    if total_size is None:
        total_size = s_data.get_shape().as_list()[0]

    if total_size is None:
        raise ValueError(
            "tensor size on axis 0 is unknown,"
            " please supply 'total_size'")
    else:
        assert isinstance(total_size, int)
        assert subset_size <= total_size

    c_combs = tf.constant(
        list(itertools.combinations(range(total_size), subset_size)),
        dtype=hparams.INTX,
        name=('combs' if name is None else name))

    return tf.gather(s_data, c_combs)
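A usage sketch with assumed values (and assuming hparams.INTX is an integer dtype such as tf.int32): all 2-element subsets of a 3-row tensor.

s = tf.constant([[1.], [2.], [3.]])
pairs = combinations(s, subset_size=2)
# pairs has shape (3, 2, 1): the row pairs (0,1), (0,2), (1,2)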
Project: rl-server    Author: parilo    | Project source | File source
def calc (self, xs):
        xs = tf.transpose(xs, [1, 0, 2])
        print "xs: " + str(xs)
        mlp_out = []
        for i in range(self.lstm_steps_count):
            v = self.mlp (tf.gather(xs, i))
            mlp_out.append (v)
        mlp_out = tf.transpose(tf.pack (mlp_out), [1, 0, 2])
        val, state = tf.nn.dynamic_rnn(tf.nn.rnn_cell.MultiRNNCell(self.layers, state_is_tuple=True), mlp_out, dtype=tf.float32)

        val = tf.transpose(val, [1, 0, 2])
        results = []
        for i in range(self.lstm_steps_count):
            v = self.out_mlp (tf.gather(val, i))
            results.append (v)
        return tf.transpose(tf.pack (results), [1, 0, 2])
Project: factorix    Author: gbouchar    | Project source | File source
def sparse_hermitian_product(emb, tuples):
    """
    Compute the Hermitian inner product between selected complex embeddings
    This corresponds to the usual dot product applied on the conjugate of the first vector: <conj(x), y>
    where conj is the complex conjugate (obtained by inverting the imaginary part)
    We consider that the embedding dimension is twice the rank, where the first part is in embeddings[:,:rk] and
    the imaginary part is in embeddings[:,rk:].
    It computes
     S[i] = <conj(E[I[i,1]]), E[I[i,2]]>
    Usage:
    S = sparse_hermitian_product(E, I):
    :param emb: embedding matrix of size [n_emb, 2 * r] containing float numbers where r is the complex rank
    :param tuples: tuple matrix of size [n_t, 2] containing integers that correspond to the indices of the embeddings
    :return: a pair containing the real and imaginary parts of the Hermitian dot products
    """
    rk = emb.get_shape()[1].value // 2
    emb_re = emb[:, :rk]
    emb_im = emb[:, rk:]
    emb_sel_a_re = tf.gather(emb_re, tuples[:, 0])
    emb_sel_a_im = tf.gather(emb_im, tuples[:, 0])
    emb_sel_b_re = tf.gather(emb_re, tuples[:, 1])
    emb_sel_b_im = tf.gather(emb_im, tuples[:, 1])
    pred_re = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_re) + tf.mul(emb_sel_a_im, emb_sel_b_im), 1)
    pred_im = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_im) - tf.mul(emb_sel_a_im, emb_sel_b_re), 1)
    return pred_re, pred_im
Project: factorix    Author: gbouchar    | Project source | File source
def test_matrix_factorization(verbose=False):
    np.random.seed(1)
    n, m, rank = 7, 6, 3
    mat = np.random.randn(n, rank).dot(np.random.randn(rank, m))
    tuples = [([i, n + j], mat[i, j]) for i in range(n) for j in range(m)]
    tuple_iterable = data_to_batches(tuples, minibatch_size=n * m)
    sampler, (x, y) = feed_dict_sampler(tuple_iterable, types=[np.int64, np.float32])
    emb_var = tf.Variable(tf.cast(np.random.randn(n + m, rank), 'float32'))
    offset = tf.Variable(tf.cast(1.0, 'float32'))
    loss_op = tf.reduce_mean(tf.square(tf.reduce_sum(tf.reduce_prod(tf.gather(emb_var, x), 1), 1) + offset - y))
    emb, offset_val = learn(loss_op, sampler, max_epochs=200, variables=[emb_var, offset])
    mat_est = emb[:n, :].dot(emb[n:, :].T)
    if verbose:
        print(np.linalg.norm(mat_est - mat) ** 2)  # we should have recovered the low-rank matrix
    else:
        assert (np.linalg.norm(mat_est - mat) < 1e-3)
Project: factorix    Author: gbouchar    | Project source | File source
def reader(self, context=None, emb0=None):
        if emb0 is None:  # by default, can use the initial embedding
            emb0 = self.emb0
        if context is None:  # empty contexts are not read
            return emb0
        context_inputs, context_ouputs = context
        context_embs = tf.gather(emb0, context_inputs[:, :, 1])
        preds = tf.reshape(tf.matmul(
                tf.reshape(context_embs, (self.n_data * self.n_features, self.rank)),
                tf.reshape(emb0[0, :], [self.rank, 1])),
                (self.n_data, self.n_features))
        update_strength = tf.tile(tf.reshape(loss_quadratic_grad(preds, context_ouputs),
                                             (self.n_data, self.n_features, 1)), (1, 1, self.rank))
        embs_after_reading = tf.tile(tf.reshape(emb0[0, :], (1, self.rank)), (self.n_data, 1)) \
                             - tf.reduce_sum(context_embs * update_strength, 1) * self.step_size
        return embs_after_reading  # size of the output: (n_data, rank)
Project: rltools    Author: sisl    | Project source | File source
def lookup_last_idx(a, inds, name=None):
    """
    Looks up indices in a. e.g. a[[1, 2, 3]] = [a[1], a[2], a[3]]
    a is a d1 x d2 ... dn tensor
    inds is a d1 x d2 ... d(n-1) tensor of integers
    returns the tensor
    out[i_1,...,i_{n-1}] = a[i_1,...,i_{n-1}, inds[i_1,...,i_{n-1}]]
    """
    with tf.op_scope([a, inds], name, 'lookup_last_idx') as scope:
        a = tf.convert_to_tensor(a, name='a')
        inds = tf.convert_to_tensor(inds, name='inds')

        # Flatten the arrays
        ashape, indsshape = tf.shape(a), tf.shape(inds)
        aflat, indsflat = tf.reshape(a, [-1]), tf.reshape(inds, [-1])

        # Compute the indices corresponding to inds in the flattened array
        # TODO Causes UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape.
        delta = tf.gather(ashape, tf.size(ashape) - 1)  # i.e. delta = ashape[-1],
        aflatinds = tf.range(0, limit=tf.size(a), delta=delta) + indsflat

        # Look up the desired elements in the flattened array, and reshape
        # to the original shape
        return tf.reshape(tf.gather(aflat, aflatinds), indsshape, name=scope)
Project: TensorflowFramework    Author: vahidk    | Project source | File source
def batch_gather(tensor, indices):
  """Gather in batch from a tensor of arbitrary size.

  In pseudocode this module will produce the following:
  output[i] = tf.gather(tensor[i], indices[i])

  Args:
    tensor: Tensor of arbitrary size.
    indices: Vector of indices.
  Returns:
    output: A tensor of gathered values.
  """
  shape = get_shape(tensor)
  flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
  indices = tf.convert_to_tensor(indices)
  offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)
  offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)
  output = tf.gather(flat_first, indices + offset)
  return output
Project: Master-R-CNN    Author: Mark110    | Project source | File source
def assign_boxes(gt_boxes, tensors, layers, scope='AssignGTBoxes'):

    with tf.name_scope(scope) as sc:
        min_k = layers[0]
        max_k = layers[-1]
        assigned_layers = \
            tf.py_func(assign.assign_boxes, 
                     [ gt_boxes, min_k, max_k ],
                     tf.int32)
        assigned_layers = tf.reshape(assigned_layers, [-1])

        assigned_tensors = []
        for t in tensors:
            split_tensors = []
            for l in layers:
                l = tf.cast(l, tf.int32)  # the cast result was discarded in the original
                inds = tf.where(tf.equal(assigned_layers, l))
                inds = tf.reshape(inds, [-1])
                split_tensors.append(tf.gather(t, inds))
            assigned_tensors.append(split_tensors)

        return assigned_tensors + [assigned_layers]
Project: trpo    Author: jjkke88    | Project source | File source
def slice_2d(x, inds0, inds1):
    # e.g., for a path of 1000 vectors, ncols is the action dimension; inds0 indexes the rows and inds1 the columns
    inds0 = tf.cast(inds0, tf.int64)
    inds1 = tf.cast(inds1, tf.int64)
    shape = tf.cast(tf.shape(x), tf.int64)
    ncols = shape[1]
    x_flat = tf.reshape(x, [-1])
    return tf.gather(x_flat, inds0 * ncols + inds1)
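A usage sketch with assumed values, e.g. picking the probability of the action actually taken in each row:

x = tf.constant([[0.1, 0.9],
                 [0.7, 0.3]])
picked = slice_2d(x, tf.constant([0, 1]), tf.constant([1, 0]))
# picked -> [0.9, 0.7]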


# def linesearch(f, x, fullstep, expected_improve_rate):
#     accept_ratio = .1
#     max_backtracks = 10
#     fval, old_kl, entropy = f(x)
#     for (_n_backtracks, stepfrac) in enumerate(.5**np.arange(max_backtracks)):
#         xnew = x + stepfrac * fullstep
#         newfval, new_kl, new_ent= f(xnew)
#         # actual_improve = newfval - fval # minimize target object
#         # expected_improve = expected_improve_rate * stepfrac
#         # ratio = actual_improve / expected_improve
#         # if ratio > accept_ratio and actual_improve > 0:
#         #     return xnew
#         if newfval<fval and new_kl<=pms.max_kl:
#             return xnew
#     return x
Project: wide-deep-cnn    Author: DaniUPC    | Project source | File source
def _sample(self, *params, **kwargs):
        """ Returns the mean of the most probable Gaussian distribution """
        # Get identifier of the most probable component
        mixings, sigma, mean = params
        batch_size = mixings.get_shape()[0]
        id_mix = tf.cast(tf.argmax(mixings, axis=1), tf.int32)

        # Extracted from https://github.com/tensorflow/tensorflow/issues/418
        # Get mean of corresponding component
        sample = tf.gather(
            params=tf.reshape(mean, [-1]),
            indices=tf.range(batch_size) * tf.shape(mean)[1] + id_mix
        )

        # Small workaround
        if sample.get_shape().ndims < 2:
            sample = tf.expand_dims(sample, axis=1)

        return sample
Project: wtfrnn    Author: juliakreutzer    | Project source | File source
def build_eval_graph(self):
    """Build the eval graph."""
    # Eval graph

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._emb, 1)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, self._options.vocab_size))
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx
Project: GPflow    Author: GPflow    | Project source | File source
def _slice(self, X, X2):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims`.
        :param X: Input 1 (NxD).
        :param X2: Input 2 (MxD), may be None.
        :return: Sliced X, X2, (N x self.input_dim).
        """
        if isinstance(self.active_dims, slice):
            X = X[:, self.active_dims]
            if X2 is not None:
                X2 = X2[:, self.active_dims]
        else:
            X = tf.transpose(tf.gather(tf.transpose(X), self.active_dims))
            if X2 is not None:
                X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
        input_dim_shape = tf.shape(X)[1]
        input_dim = tf.convert_to_tensor(self.input_dim, dtype=settings.tf_int)
        with tf.control_dependencies([tf.assert_equal(input_dim_shape, input_dim)]):
            X = tf.identity(X)

        return X, X2
Project: GPflow    Author: GPflow    | Project source | File source
def _slice_cov(self, cov):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims` for covariance matrices. This requires slicing the
        rows *and* columns. This will also turn flattened diagonal
        matrices into a tensor of full diagonal matrices.
        :param cov: Tensor of covariance matrices (NxDxD or NxD).
        :return: N x self.input_dim x self.input_dim.
        """
        cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)

        if isinstance(self.active_dims, slice):
            cov = cov[..., self.active_dims, self.active_dims]
        else:
            cov_shape = tf.shape(cov)
            covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
            gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
            gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
            cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
                             tf.concat([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0))
        return cov
Project: SSD_tensorflow_VOC    Author: LevinJ    | Project source | File source
def bboxes_nms(scores, bboxes, nms_threshold=0.5, keep_top_k=200, scope=None):
    """Apply non-maximum selection to bounding boxes. In comparison to TF
    implementation, use classes information for matching.
    Should only be used on single-entries. Use batch version otherwise.

    Args:
      scores: N Tensor containing float scores.
      bboxes: N x 4 Tensor containing boxes coordinates.
      nms_threshold: Matching threshold in NMS algorithm;
      keep_top_k: Number of total object to keep after NMS.
    Return:
      classes, scores, bboxes Tensors, sorted by score.
        Padded with zero if necessary.
    """
    with tf.name_scope(scope, 'bboxes_nms_single', [scores, bboxes]):
        # Apply NMS algorithm.
        idxes = tf.image.non_max_suppression(bboxes, scores,
                                             keep_top_k, nms_threshold)
        scores = tf.gather(scores, idxes)
        bboxes = tf.gather(bboxes, idxes)
        # Pad results.
        scores = tfe_tensors.pad_axis(scores, 0, keep_top_k, axis=0)
        bboxes = tfe_tensors.pad_axis(bboxes, 0, keep_top_k, axis=0)
        return scores, bboxes
Project: SSD_tensorflow_VOC    Author: LevinJ    | Project source | File source
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
Project: visual_mpc    Author: febert    | Project source | File source
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data_files points.

    Args:
      ground_truth_x: tensor of ground-truth data_files points.
      generated_x: tensor of generated data_files points.
      batch_size: batch size
      num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps])
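A usage sketch with assumed shapes: mixing two ground-truth rows and two generated rows in a batch of four.

gt = tf.ones([4, 3])
gen = tf.zeros([4, 3])
mixed = scheduled_sample(gt, gen, batch_size=4, num_ground_truth=2)
# `mixed` holds exactly two rows from `gt` and two from `gen`,
# scattered back to their shuffled batch positions.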