Python tensorflow module: random_shuffle() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.random_shuffle().

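Before the project examples, here is a minimal standalone sketch (assuming TensorFlow 1.x, where the op is named tf.random_shuffle; in TF 2.x it lives at tf.random.shuffle):

import tensorflow as tf

# tf.random_shuffle permutes a tensor along its first dimension only;
# each row keeps its contents, just not its position.
x = tf.constant([[1, 2], [3, 4], [5, 6]])
shuffled = tf.random_shuffle(x, seed=0)  # seed makes the result reproducible

with tf.Session() as sess:
    print(sess.run(shuffled))  # e.g. [[5 6] [1 2] [3 4]]
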
Project: visual_mpc    Author: febert
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data_files points.

    Args:
      ground_truth_x: tensor of ground-truth data_files points.
      generated_x: tensor of generated data_files points.
      batch_size: batch size
      num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps])
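A hypothetical call, to show the shapes involved (gt_batch and gen_batch are illustrative names, not part of the project):

# Mix 2 ground-truth rows with 2 generated rows in a batch of 4.
gt_batch = tf.ones([4, 8])    # stands in for real ground-truth examples
gen_batch = tf.zeros([4, 8])  # stands in for model-generated examples
mixed = scheduled_sample(gt_batch, gen_batch, batch_size=4, num_ground_truth=2)
# mixed has shape [4, 8]; two randomly chosen positions carry ground-truth rows.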
Project: lsdc    Author: febert
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """Sample batch with specified mix of ground truth and generated data_files points.

    Args:
      ground_truth_x: tensor of ground-truth data_files points.
      generated_x: tensor of generated data_files points.
      batch_size: batch size
      num_ground_truth: number of ground-truth examples to include in batch.
    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    generated_x = tf.squeeze(generated_x)

    idx = tf.random_shuffle(tf.range(int(batch_size)))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)
    return tf.dynamic_stitch([ground_truth_idx, generated_idx],
                             [ground_truth_examps, generated_examps])
Project: dcn.tf    Author: beopst
def identify_saliency(grads):
    """Identify top k saliency scores.

       Args.
            grads: gradient of the entropy wrt features
       Trick.
            use tf.nn.top_k ops to extract position indices
    """

    # M: per-location saliency magnitude (L2 norm over the channel axis,
    # with a small epsilon for numerical stability); N_PATCHES and
    # ops.flatten are module-level definitions in this project
    M = tf.sqrt(tf.reduce_sum(tf.square(grads), 3) + 1e-8)
    top_k_values, top_k_idxs = tf.nn.top_k(ops.flatten(M), N_PATCHES, sorted=False)

    # shuffle patch indices for batch normalization
    top_k_idxs = tf.random_shuffle(tf.transpose(top_k_idxs))
    top_k_idxs = tf.transpose(top_k_idxs)

    return top_k_values, top_k_idxs, M
Project: dcn.tf    Author: beopst
def extract_patches(inputs, size, offsets):

    batch_size = inputs.get_shape()[0]

    # NOTE: tf.unpack / tf.pack / tf.sub are the pre-1.0 names of
    # tf.unstack / tf.stack / tf.subtract.
    padded = tf.pad(inputs, [[0,0],[2,2],[2,2],[0,0]])
    unpacked = tf.unpack(tf.squeeze(padded))

    extra_margins = tf.constant([1,1,2,2])

    sliced_list = []
    for i in xrange(batch_size.value):

        margins = tf.random_shuffle(extra_margins)
        margins = margins[:2]
        start_pts = tf.sub(offsets[i,:],margins)
        sliced = tf.slice(unpacked[i],start_pts,size)
        sliced_list.append(sliced)

    patches = tf.pack(sliced_list)
    patches = tf.expand_dims(patches,3)

    return patches
Project: triplet-reid    Author: VisualComputingInstitute
def sample_k_fids_for_pid(pid, all_fids, all_pids, batch_k):
    """ Given a PID, select K FIDs of that specific PID. """
    possible_fids = tf.boolean_mask(all_fids, tf.equal(all_pids, pid))

    # The following simply uses a subset of K of the possible FIDs
    # if more than, or exactly K are available. Otherwise, we first
    # create a padded list of indices which contain a multiple of the
    # original FID count such that all of them will be sampled equally likely.
    count = tf.shape(possible_fids)[0]
    padded_count = tf.cast(tf.ceil(batch_k / count), tf.int32) * count
    full_range = tf.mod(tf.range(padded_count), count)

    # Sampling is always performed by shuffling and taking the first k.
    shuffled = tf.random_shuffle(full_range)
    selected_fids = tf.gather(possible_fids, shuffled[:batch_k])

    return selected_fids, tf.fill([batch_k], pid)
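A hypothetical usage sketch, with made-up FIDs and PIDs (batch_k larger than the available images, to exercise the padding branch):

all_fids = tf.constant(["a/1.jpg", "a/2.jpg", "b/1.jpg"])
all_pids = tf.constant(["a", "a", "b"])
# PID "a" has only 2 images, so the padded index range repeats them
# until 4 can be drawn with equal probability.
fids, pids = sample_k_fids_for_pid(tf.constant("a"), all_fids, all_pids, batch_k=4)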
Project: keras-fractalnet    Author: snf
def tensorflow_categorical(count, seed):
    """Return a random array [x0, x1, ..., xn] where one element is 1.
    and the others are 0. Ex: [0, 0, 1, 0]."""
    assert count > 0
    arr = [1.] + [0. for _ in range(count - 1)]
    return tf.random_shuffle(arr, seed=seed)
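A quick hypothetical sanity check of the one-hot shuffle (TF 1.x session style):

one_hot = tensorflow_categorical(count=4, seed=123)
with tf.Session() as sess:
    print(sess.run(one_hot))  # e.g. [0. 0. 1. 0.]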
Project: holographic_memory    Author: jramapuram
def create_permutation_matrix(input_size, seed=None):
    # return tf.random_shuffle(tf.eye(input_size), seed=seed)
    ind = np.arange(0, input_size)
    ind_shuffled = np.copy(ind)
    np.random.seed(seed)
    np.random.shuffle(ind_shuffled)
    # pair each row index with a shuffled column index: a permutation matrix
    indices = np.asarray([[x, y] for x, y in zip(ind, ind_shuffled)], dtype=np.int32)
    values = np.ones([len(indices)], dtype=np.float32)
    indices = indices[indices[:, 0].argsort()]
    # note: this kwarg is `shape` in TF 0.x; in TF >= 1.0 it is `dense_shape`
    return tf.SparseTensor(indices, values, shape=[input_size, input_size])
Project: dataset    Author: analysiscenter
def create_bbox_batch(cls, inputs, batch_size=64):
        """ Create batch indices for bboxes. """
        batch = []
        for indices in inputs:
            indices = tf.random_shuffle(indices)
            start = [0] * 2
            size = [tf.minimum(batch_size, tf.shape(indices)[0]), -1]
            sample = tf.slice(indices, start, size)
            sample.set_shape([None, 1])
            batch.append(sample)
        batch = tf.tuple(batch)
        return batch
Project: adventures-in-ml-code    Author: adventuresinML
def cifar_filename_queue(filename_list):
    # convert the list to a tensor
    string_tensor = tf.convert_to_tensor(filename_list, dtype=tf.string)
    # randomize the tensor (the result must be assigned: tf.random_shuffle
    # returns a new shuffled tensor rather than shuffling in place)
    string_tensor = tf.random_shuffle(string_tensor)
    # create the queue
    fq = tf.FIFOQueue(capacity=10, dtypes=tf.string)
    # create our enqueue_op for this q
    fq_enqueue_op = fq.enqueue_many([string_tensor])
    # create a QueueRunner and add to queue runner list
    # we only need one thread for this simple queue
    tf.train.add_queue_runner(tf.train.QueueRunner(fq, [fq_enqueue_op] * 1))
    return fq
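A hypothetical way to drain the queue (the .bin filenames are placeholders):

fq = cifar_filename_queue(["data_batch_1.bin", "data_batch_2.bin"])
next_file = fq.dequeue()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(next_file))  # one randomly ordered filename
    coord.request_stop()
    coord.join(threads)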
Project: auDeep    Author: auDeep
def fit(self,
            features: np.ndarray,
            labels: np.ndarray,
            quiet=False):
        # generic parameter checks
        super().fit(features, labels)

        self._num_labels = len(np.unique(labels))

        graph = tf.Graph()

        with graph.as_default():
            tf_inputs = tf.Variable(initial_value=features, trainable=False, dtype=tf.float32)
            tf_labels = tf.Variable(initial_value=labels, trainable=False, dtype=tf.int32)

            if self._shuffle_training:
                # the same op-level seed yields the same permutation for both
                # shuffles, keeping inputs and labels aligned
                tf_inputs = tf.random_shuffle(tf_inputs, seed=42)
                tf_labels = tf.random_shuffle(tf_labels, seed=42)

            with tf.variable_scope("mlp"):
                tf_logits = self._model.inference(tf_inputs, self._keep_prob, self._num_labels)
                tf_loss = self._model.loss(tf_logits, tf_labels)
                tf_train_op = self._model.optimize(tf_loss, self._learning_rate)

            tf_init_op = tf.global_variables_initializer()
            tf_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="mlp"))

        session = tf.Session(graph=graph)
        session.run(tf_init_op)

        for epoch in range(self._num_epochs):
            session.run(tf_train_op)

        # timestamped model file
        self._latest_checkpoint = self._checkpoint_dir / "model_{:%Y%m%d%H%M%S%f}".format(datetime.datetime.now())
        tf_saver.save(session, str(self._latest_checkpoint), write_meta_graph=False)

        session.close()
Project: image-caption-baseline    Author: raingo
def _parse_example_proto(example_serialized):
  # parse record
  # decode jpeg
  # random select one caption, convert it into integers
  # compute the length of the caption
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/coco-id': tf.FixedLenFeature([], dtype=tf.int64),
      'caption': tf.VarLenFeature(dtype=tf.string),
      # 'image/path': tf.FixedLenFeature([], dtype=tf.string),
  }

  features = tf.parse_single_example(example_serialized, feature_map)

  cocoid = features['image/coco-id']
  image = tf.image.decode_jpeg(
      features['image/encoded'],
      channels=3,
      try_recover_truncated=True)
  # the image COCO_train2014_000000167126.jpg was corrupted and has been
  # replaced in my train2014/ directory, but rather than re-encode every
  # record, try_recover_truncated decodes the recoverable part of the image

  # uint8 [0, 255] --> float [0, 1)
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)

  #image_path = features['image/path']

  caption = tf.sparse_tensor_to_dense(features['caption'], default_value=".")
  caption = tf.random_shuffle(caption)[0]
  record_defaults = [[PAD]] * MAX_SEQ_LEN
  caption_tids = tf.decode_csv(caption, record_defaults)
  caption_tids = tf.pack(caption_tids)

  return image, caption_tids, cocoid #, image_path
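The caption-selection trick from above in isolation, with made-up captions:

captions = tf.constant(["a cat on a mat", "a cat sleeping", "a cat"])
one_caption = tf.random_shuffle(captions)[0]  # uniform pick of one row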
Project: FractalNet    Author: tensorpro
def random_column(columns):
  """Zeros out all except one of `columns`.

  Used for rounds with global drop path.

  Args:
    columns: the columns of a fractal block to be selected from.
  """
  num_columns = tensor_shape(columns)[0]
  # exactly one True among num_columns entries; the shuffle picks which
  # column survives
  mask = tf.random_shuffle([True] + [False] * (num_columns - 1))
  return apply_mask(mask, columns) * num_columns
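The mask construction in isolation (num_columns fixed to 4 for illustration):

# one True among four entries; shuffling selects the surviving column
mask = tf.random_shuffle([True] + [False] * 3)
# multiplying the masked output by num_columns rescales the one kept
# column, preserving the expected magnitude of the block's output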
Project: keras-rcnn    Author: broadinstitute
def shuffle(x):
    """
    Modify a sequence by shuffling its contents. This function only shuffles
    the array along the first axis of a multi-dimensional array. The order of
    sub-arrays is changed but their contents remains the same.
    """
    return tensorflow.random_shuffle(x)
Project: tfdeploy    Author: riga
def test_RandomShuffle(self):
        t = tf.random_shuffle(self.random(10, 4))
        # compare only sum of first axis
        def comp(rtf, rtd):
            self.assertTrue(np.allclose(np.sum(rtf, axis=0), np.sum(rtd, axis=0)))
        self.check(t, comp=comp)
Project: RFCN-tensorflow    Author: xdever
def randomSelectIndex(fromCount, n):
    with tf.name_scope("randomSelectIndex"):
        n = tf.minimum(fromCount, n)
        i = tf.random_shuffle(tf.range(fromCount, dtype=tf.int32))[0:n]
        return tf.expand_dims(i,-1)
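A hypothetical call, choosing 3 distinct indices out of 10:

idx = randomSelectIndex(tf.constant(10, dtype=tf.int32), 3)
# idx has shape [3, 1]: three distinct values drawn from [0, 10)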
Project: LifelongVAE    Author: jramapuram
def shuffle_jointly(*args):
    '''
    Accepts n args, concatenates them along the feature axis, shuffles the
    result along the batch dimension, and returns the pieces split back to
    their original widths (so corresponding rows stay aligned).
    '''
    shps = [a.get_shape().as_list()[-1] for a in args]
    concated = tf.random_shuffle(tf.concat(values=args, axis=1))
    splits = []
    current_max = 0
    for begin in shps:
        splits.append(concated[:, current_max:current_max + begin])
        current_max += begin

    return splits
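A hypothetical usage sketch: two aligned tensors shuffled by one permutation:

a = tf.ones([5, 3])
b = tf.zeros([5, 2])
a_shuf, b_shuf = shuffle_jointly(a, b)
# rows of a_shuf and b_shuf remain paired, because the concatenated
# [5, 5] tensor was shuffled once along the batch dimension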
Project: tensorflow    Author: luyishisi
def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the function
    assigns all but `num_samples` of these previously `True` elements to
    `False`. If `num_samples` is greater than M, the original indicator vector
    is returned.

    Args:
      indicator: a 1-dimensional boolean tensor indicating which elements
        are allowed to be sampled and which are not.
      num_samples: int32 scalar tensor

    Returns:
      a boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random_shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)
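A hypothetical usage sketch (note that indices_to_dense_vector is a helper from this project's ops module, so the snippet assumes that import):

indicator = tf.constant([True, False, True, True, False])
# keep at most 2 of the 3 True entries, chosen uniformly at random
sampled = subsample_indicator(indicator, tf.constant(2, dtype=tf.int32))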
Project: nonlinearIB    Author: artemyk
def K_n_choose_k(n, k, seed=None):
        import tensorflow as tf
        if seed is None:
            seed = np.random.randint(10e6)
        x = tf.range(0, limit=n, dtype='int32')
        x = tf.random_shuffle(x, seed=seed)
        x = x[0:k]
        return x
Project: ActionVLAD    Author: rohitgirdhar
def preprocess_for_train(image,
                         output_height,
                         output_width,
                         mean_vals,
                         out_dim_scale=1.0):
  """Preprocesses the given image for training.

  Note that the crop scale ratio is sampled independently for height and
  width from `_SCALE_RATIOS`.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    mean_vals: Per-channel mean values subtracted from the image.
    out_dim_scale: Scale factor applied to the output height and width.

  Returns:
    A preprocessed image.
  """
  num_channels = image.get_shape().as_list()[-1]
  image = tf.image.resize_images(image, [_RESIZE_HT, _RESIZE_WD])
  # compute the crop size
  base_size = float(min(_RESIZE_HT, _RESIZE_WD))
  scale_ratio_h = tf.random_shuffle(tf.constant(_SCALE_RATIOS))[0]
  scale_ratio_w = tf.random_shuffle(tf.constant(_SCALE_RATIOS))[0]
  image = _random_crop([image],
      tf.cast(output_height * scale_ratio_h, tf.int32),
      tf.cast(output_width * scale_ratio_w, tf.int32))[0]
  image = tf.image.resize_images(
    image, [int(output_height * out_dim_scale),
            int(output_width * out_dim_scale)])
  image = tf.to_float(image)
  image = tf.image.random_flip_left_right(image)
  image.set_shape([int(output_height * out_dim_scale),
                   int(output_width * out_dim_scale), num_channels])
  image = _mean_image_subtraction(image, mean_vals)
  image = tf.expand_dims(image, 0) # 1x... image, to be consistent with eval
  # Gets logged multiple times with NetVLAD, so gives an error.
  # I'm anyway logging from the train code, so removing it here.
  # tf.image_summary('final_distorted_image',
  #     tf.expand_dims(image / 128.0, 0))
  return image
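The scale-ratio trick from above in isolation (the ratio values here are illustrative, not the project's _SCALE_RATIOS):

ratios = tf.constant([0.75, 0.875, 1.0])
ratio = tf.random_shuffle(ratios)[0]  # one ratio, uniformly at random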