Python random module: shuffle() example source code

The following 46 code examples, extracted from open-source Python projects, illustrate how to use random.shuffle().

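random.shuffle(x) rearranges the mutable sequence x in place and returns None, so the result must not be assigned. A minimal sketch (not taken from any of the projects below):

import random

items = list(range(10))
random.shuffle(items)      # shuffles in place; returns None
print(items)

rng = random.Random(42)    # a private, seeded generator
rng.shuffle(items)         # reproducible across runs with the same seed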
Project: robot-arena    Author: kenganong
def perform_priority_moves(state, interactive):
  to_move = [pos for cell, pos in state.board.traverse() if cell.content and cell.content[TYPE] == ROBOT
                and cell.content['move'] in PRIORITY_MOVES]
  random.shuffle(to_move)
  while len(to_move) > 0:
    pos = to_move.pop()
    robot = state.board.get_item(pos).content
    if robot['move'] == FORWARD_TWO:
      direction = robot[FACING]
    elif robot['move'] == SIDESTEP_LEFT:
      direction = turn_direction(robot[FACING], False)
    elif robot['move'] == SIDESTEP_RIGHT:
      direction = turn_direction(robot[FACING], True)
    pos = perform_move_in_direction(state, pos, direction, to_move, interactive)
    if pos is not None and robot['move'] == FORWARD_TWO:
      pos = perform_move_in_direction(state, pos, direction, to_move, interactive)
    if pos is not None:
      if robot[CHARGES] > 0:
        robot[CHARGES] -= 1
      else:
        robot[LIFE] -= 1
        if robot[LIFE] == 0:
          record_death(robot, 'malfunction', interactive)
Project: robot-arena    Author: kenganong
def perform_moves(state, interactive):
  to_move = [cell.content for cell, pos in state.board.traverse() if cell.content and cell.content[TYPE] == ROBOT
                and cell.content['move'] in [TURN_LEFT, TURN_RIGHT, U_TURN]]
  for robot in to_move:
    perform_turn(robot, robot['move'])
  to_move = [pos for cell, pos in state.board.traverse() if cell.content and cell.content[TYPE] == ROBOT
                and cell.content['move'] in [FORWARD, REVERSE]]
  random.shuffle(to_move)
  while len(to_move) > 0:
    pos = to_move.pop()
    robot = state.board.get_item(pos).content
    if robot['move'] == FORWARD:
      direction = robot[FACING]
    elif robot['move'] == REVERSE:
      direction = opposite_direction(robot[FACING])
    perform_move_in_direction(state, pos, direction, to_move, interactive)
Project: onto-lstm    Author: pdasigi
def process_train_data(self, input_file, onto_aware):
        print >>sys.stderr, "Reading training data"
        label_ind = []
        tagged_sentences = []
        for line in open(input_file):
            lnstrp = line.strip()
            label, tagged_sentence = lnstrp.split("\t")
            if label not in self.label_map:
                self.label_map[label] = len(self.label_map)
            label_ind.append(self.label_map[label])
            tagged_sentences.append(tagged_sentence)
        # Shuffling so that when Keras does validation split, it is not always at the end.
        sentences_and_labels = zip(tagged_sentences, label_ind)
        random.shuffle(sentences_and_labels)
        tagged_sentences, label_ind = zip(*sentences_and_labels)
        print >>sys.stderr, "Indexing training data"
        train_inputs = self.data_processor.prepare_paired_input(tagged_sentences, onto_aware=onto_aware,
                                                                for_test=False, remove_singletons=True)
        train_labels = self.data_processor.make_one_hot(label_ind)
        return train_inputs, train_labels
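Note that this snippet is Python 2 code (print >> syntax); under Python 3, zip() returns an iterator and random.shuffle(sentences_and_labels) would raise a TypeError. A sketch of the Python 3 form of the shuffle-in-unison step:

pairs = list(zip(tagged_sentences, label_ind))  # materialize before shuffling
random.shuffle(pairs)
tagged_sentences, label_ind = zip(*pairs)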
Project: facerecognition    Author: guoxiaolu
def split_dataset(dataset, split_ratio, mode):
    if mode=='SPLIT_CLASSES':
        nrof_classes = len(dataset)
        class_indices = np.arange(nrof_classes)
        np.random.shuffle(class_indices)
        split = int(round(nrof_classes*split_ratio))
        train_set = [dataset[i] for i in class_indices[0:split]]
        test_set = [dataset[i] for i in class_indices[split:]]  # use the entire remainder as the test set
    elif mode=='SPLIT_IMAGES':
        train_set = []
        test_set = []
        min_nrof_images = 2
        for cls in dataset:
            paths = cls.image_paths
            np.random.shuffle(paths)
            split = int(round(len(paths)*split_ratio))
            if split<min_nrof_images:
                continue  # Not enough images for test set. Skip class...
            train_set.append(ImageClass(cls.name, paths[0:split]))
            test_set.append(ImageClass(cls.name, paths[split:]))  # keep every remaining image
    else:
        raise ValueError('Invalid train/test split mode "%s"' % mode)
    return train_set, test_set
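For index shuffles like the SPLIT_CLASSES branch, newer NumPy code usually prefers a local, seeded Generator over the global np.random state; a sketch of that variant:

import numpy as np

rng = np.random.default_rng(seed=42)           # reproducible, isolated RNG
class_indices = rng.permutation(nrof_classes)  # shuffled indices 0..nrof_classes-1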
Project: variational-text-tensorflow    Author: carpedm20
def get_neighbors(words, word, window_size=2):
  if type(word) == str:
    idx = words.index(word)
  elif type(word) == int:
    idx = word
  else:
    raise Exception(" [!] Invalid type for word: %s" % type(word))

  if idx < window_size:
    ans = words[-(window_size - idx):] + words[:idx + window_size + 1]
  elif idx >= len(words) - window_size:
    ans = words[idx-window_size:] + words[:window_size + idx - len(words) + 1]
  else:
    ans = words[idx-window_size:idx+window_size+1]

  for _ in xrange(15):
    if random.random() < 0.1:
      ans.append(random.choice(ans))

  random.shuffle(ans)
  return ans
Project: tensorbuilder    Author: cgarciae
def split(self, *splits):
        """docstring for Batcher"""

        data_length = len(self.x)

        indexes = range(data_length)
        random.shuffle(indexes)

        splits = [0] + list(splits)
        splits_total = sum(splits)

        return (
            query(splits)
            .scan()
            .select(lambda n: int(data_length * n / splits_total))
            .then(_window, n=2)
            .select(lambda (start, end): np.array(indexes[start:end]))
            .select(lambda split: Data(**{k: source[split,:] for (k, source) in self.sources.iteritems()}))
            .to_list()
        )
Project: wiki-album-genre    Author: aliostad
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size  # ceiling division; avoids a trailing empty batch
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
Project: nlp_learn    Author: Li-Shang
def train(self, sentences):

        random.shuffle(sentences)
        for s, a_sentence in enumerate(sentences):
            words_and_tags = a_sentence.split('\n')
            words = [wt.split(' ')[0] for wt in words_and_tags]
            tags = [wt.split(' ')[1] for wt in words_and_tags]

            # convert segment-final I- tags to E- tags, then map each tag to its integer id (0-9)
            for i in range(len(tags)):
                if tags[i][0] == 'I':
                    if i == len(tags)-1 or tags[i+1] != tags[i]:
                        tags[i] = 'E-' + tags[i][-3:]
                tags[i] = int(self.tags_dict[tags[i]])

            self.model.train(words, tags)
            if s % 5000 == 0:
                print('       -----> ' + str(s // 5000) + '/5')
Project: dogs-vs-cats    Author: yaricom
def make_list(args):
    image_list = list_image(args.root, args.recursive, args.exts)
    image_list = list(image_list)
    if args.shuffle is True:
        random.seed(100)
        random.shuffle(image_list)
    N = len(image_list)
    chunk_size = (N + args.chunks - 1) // args.chunks  # integer ceiling division
    for i in xrange(args.chunks):
        chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
        if args.chunks > 1:
            str_chunk = '_%d' % i
        else:
            str_chunk = ''
        sep = int(chunk_size * args.train_ratio)
        sep_test = int(chunk_size * args.test_ratio)
        if args.train_ratio == 1.0:
            write_list(args.prefix + str_chunk + '.lst', chunk)
        else:
            if args.test_ratio:
                write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
            if args.train_ratio + args.test_ratio < 1.0:
                write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
            write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep])
Project: simple_rl    Author: david-abel
def _compute_max_qval_action_pair(self, state):
        '''
        Args:
            state (State)

        Returns:
            (tuple) --> (float, str): where the float is the Qval, str is the action.
        '''
        # Grab random initial action in case all equal
        best_action = random.choice(self.actions)
        max_q_val = float("-inf")
        shuffled_action_list = self.actions[:]
        random.shuffle(shuffled_action_list)

        # Find best action (action w/ current max predicted Q value)
        for action in shuffled_action_list:
            q_s_a = self.get_q_value(state, action)
            if q_s_a > max_q_val:
                max_q_val = q_s_a
                best_action = action

        return max_q_val, best_action
Project: simple_rl    Author: david-abel
def _compute_max_qval_action_pair(self, state, q_func_id=None):
        '''
        Args:
            state (State)
            q_func_id (str): either "A", "B", or None. If None, computes avg of A and B.

        Returns:
            (tuple) --> (float, str): where the float is the Qval, str is the action.
        '''
        # Grab random initial action in case all equal
        best_action = random.choice(self.actions)
        max_q_val = float("-inf")
        shuffled_action_list = self.actions[:]
        random.shuffle(shuffled_action_list)

        # Find best action (action w/ current max predicted Q value)
        for action in shuffled_action_list:
            q_s_a = self.get_q_value(state, action, q_func_id)
            if q_s_a > max_q_val:
                max_q_val = q_s_a
                best_action = action

        return max_q_val, best_action
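The shuffle-then-scan loop in both methods breaks ties between equally valued actions uniformly at random. A more compact equivalent (a sketch, assuming the same self.actions and get_q_value as above) keys max() on the Q-value with a random tiebreaker:

best_action = max(self.actions,
                  key=lambda a: (self.get_q_value(state, a), random.random()))
max_q_val = self.get_q_value(state, best_action)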
Project: hdrnet_legacy    Author: mgharbi
def _batch_samples(self, sample):
    """Batch several samples together."""

    # Batch and shuffle
    if self.shuffle:
      samples = tf.train.shuffle_batch(
          sample,
          batch_size=self.batch_size,
          num_threads=self.nthreads,
          capacity=self.capacity,
          min_after_dequeue=self.min_after_dequeue)
    else:
      samples = tf.train.batch(
          sample,
          batch_size=self.batch_size,
          num_threads=self.nthreads,
          capacity=self.capacity)
    return samples
Project: PyPPSPP    Author: justas-
def handle_other_peers(self, swarm, data):
        """Handle other_peers message when not using ALTO"""

        # Shuffle the received members list
        mem_copy = data['details']
        random.shuffle(mem_copy)

        for member in mem_copy:
            if swarm._args.tcp:
                self.add_tcp_member(swarm, member[0], member[1])
            else:
                # This is UDP
                m = swarm.AddMember(member[0], member[1])
                if isinstance(m, str):
                    pass
                else:
                    m.SendHandshake()
Project: ProtScan    Author: gianlucacorrado
def split_keys(profiles, bin_sites, random_state=1234):
    """Balanced split over binding/non-binding sequences."""
    random.seed(random_state)
    pos_keys = list(bin_sites.keys())
    neg_keys = list(set(profiles.keys()) - set(pos_keys))
    random.shuffle(pos_keys)
    random.shuffle(neg_keys)

    len_pos = len(pos_keys)
    pos_keys1 = pos_keys[:len_pos // 2]
    pos_keys2 = pos_keys[len_pos // 2:]

    len_neg = len(neg_keys)
    neg_keys1 = neg_keys[:len_neg // 2]
    neg_keys2 = neg_keys[len_neg // 2:]

    return [pos_keys1, pos_keys2, neg_keys1, neg_keys2]
Project: anonymine    Author: oskar-skog
def runmoore(x=78, y=18, m=225):
    field = anonymine_fields.generic_field([x, y])
    field.set_callback('input', output, None)
    print(field)
    mines = field.all_cells()
    random.shuffle(mines)
    field.fill(mines[:m])

    for mine in mines[m:]:
        for neighbour in field.get_neighbours(mine):
            if neighbour in mines[:m]:
                break
        else:
            field.reveal(mine)
            break

    solver = anonymine_solver.solver()
    solver.field = field

    print(solver.solve())
Project: anonymine    Author: oskar-skog
def runneumann(x=78, y=18, m=225):
    field = anonymine_fields.generic_field([x, y], False)
    field.set_callback('input', output, None)

    mines = field.all_cells()
    random.shuffle(mines)
    field.fill(mines[:m])

    for mine in mines[m:]:
        for neighbour in field.get_neighbours(mine):
            if neighbour in mines[:m]:
                break
        else:
            field.reveal(mine)
            break

    solver = anonymine_solver.solver()
    solver.field = field
    print(solver.solve())
Project: anonymine    Author: oskar-skog
def runhex(x=39, y=18, m=112):
    field = anonymine_fields.hexagonal_field(x, y)
    field.set_callback('input', output, None)

    mines = field.all_cells()
    random.shuffle(mines)
    field.fill(mines[:m])

    for mine in mines[m:]:
        for neighbour in field.get_neighbours(mine):
            if neighbour in mines[:m]:
                break
        else:
            field.reveal(mine)
            break

    solver = anonymine_solver.solver()
    solver.field = field
    print(solver.solve())
Project: sharedbuffers    Author: jampp
def gen_values(self, n, reversed = False, shuffled = False, gen_dupes = False):
        if reversed:
            keys = xrange(n-1,-1,-1)
        else:
            keys = xrange(n)
        if shuffled:
            keys = list(keys)
            r = random.Random(1234827)
            r.shuffle(keys)
        if gen_dupes:
            return itertools.chain(
                itertools.izip(keys, xrange(0, 2*n, 2)),
                itertools.islice(itertools.izip(keys, xrange(0, 2*n, 2)), 10, None),
            )
        else:
            return itertools.izip(keys, xrange(0, 2*n, 2))
Project: sharedbuffers    Author: jampp
def testBsearch(self, dtype=dtype):
            testarray = range(1,101)
            random.shuffle(testarray)
            a = numpy.array(testarray[:50], dtype)
            b = numpy.array([0] + testarray[50:] + range(101,103), dtype)
            a = numpy.sort(a)
            self.assertEqual(mapped_struct.bsearch(a, 0), 0)
            self.assertEqual(mapped_struct.bsearch(a, 101), len(a))
            self.assertEqual(mapped_struct.bsearch(a, 102), len(a))
            for x in a:
                ix = mapped_struct.bsearch(a, x)
                self.assertLess(ix, len(a))
                self.assertEqual(a[ix], x)
                self.assertTrue(mapped_struct.sorted_contains(a, x))
            for x in b:
                ix = mapped_struct.bsearch(a, x)
                self.assertTrue(ix >= len(a) or a[ix] != x)
                self.assertFalse(mapped_struct.sorted_contains(a, x))
Project: storjspec    Author: StorjRND
def test_send_receive(self):
        random.shuffle(self.swarm)
        senders = self.swarm[:len(self.swarm) // 2]
        receivers = self.swarm[len(self.swarm) // 2:]
        for sender, receiver in zip(senders, receivers):
            message = binascii.hexlify(os.urandom(64))

            # check queue previously empty
            self.assertFalse(bool(receiver.message_list()))

            # send message
            self.assertTrue(sender.message_send(receiver.dht_id(), message))

            # check received
            received = receiver.message_list()
            self.assertTrue(sender.dht_id() in received)
            messages = received[sender.dht_id()]
            self.assertTrue(len(messages) == 1)
            self.assertEqual(messages[0], message)

            # check queue empty after call to message_list
            self.assertFalse(bool(receiver.message_list()))
Project: storjspec    Author: StorjRND
def test_ordering(self):
        random.shuffle(self.swarm)
        sender = self.swarm[0]
        receiver = self.swarm[-1]

        # send messages
        message_alpha = binascii.hexlify(os.urandom(64))
        message_beta = binascii.hexlify(os.urandom(64))
        message_gamma = binascii.hexlify(os.urandom(64))
        self.assertTrue(sender.message_send(receiver.dht_id(), message_alpha))
        self.assertTrue(sender.message_send(receiver.dht_id(), message_beta))
        self.assertTrue(sender.message_send(receiver.dht_id(), message_gamma))

        # check received in order
        received = receiver.message_list()
        self.assertTrue(sender.dht_id() in received)
        messages = received[sender.dht_id()]
        self.assertEqual(messages[0], message_alpha)
        self.assertEqual(messages[1], message_beta)
        self.assertEqual(messages[2], message_gamma)
Project: storjspec    Author: StorjRND
def test_json(self):
        random.shuffle(self.swarm)
        sender = self.swarm[0]
        receiver = self.swarm[-1]
        message = {
            "test_object": {"foo": "bar"},
            "test_array": [0, 1, 2, 3, 4, 5],
            "test_integer": 42,
            "test_float": 3.14,
            "test_bool": True,
            "test_null": None,
        }

        # send message
        self.assertTrue(sender.message_send(receiver.dht_id(), message))

        # check received
        received = receiver.message_list()
        self.assertTrue(sender.dht_id() in received)
        messages = received[sender.dht_id()]
        self.assertTrue(len(messages) == 1)
        self.assertEqual(messages[0], message)
Project: storjspec    Author: StorjRND
def test_multihop(self):
        random.shuffle(self.swarm)
        senders = self.swarm[:len(self.swarm) // 2]
        receivers = self.swarm[len(self.swarm) // 2:]
        for sender, receiver in zip(senders, receivers):

            # receiver subscribes to topic
            topic = "test_miltihop_{0}".format(binascii.hexlify(os.urandom(32)))
            receiver.pubsub_subscribe(topic)

            # wait until subscriptions propagate
            time.sleep(SLEEP_TIME)

            # send event
            event = binascii.hexlify(os.urandom(32))
            sender.pubsub_publish(topic, event)

            # wait until event propagates
            time.sleep(SLEEP_TIME)

            # check that the receiver got the event
            events = receiver.pubsub_events(topic)
            self.assertEqual(events, [event])
Project: neural-abstract-anaphora    Author: amarasovic
def get_batches(data, batch_size, vocabulary, pos_vocabulary):
    '''
    Get batches without any restrictions on number of antecedents and negative candidates.
    '''
    random.seed(24)
    random.shuffle(data)

    data_size = len(data)
    num_batches = (data_size + batch_size - 1) // batch_size  # ceiling division

    batches = []
    for batch_num in range(num_batches):
        start_index = batch_num * batch_size
        end_index = min((batch_num + 1) * batch_size, data_size)

        batch = pad_batch(data[start_index:end_index], vocabulary, pos_vocabulary)
        batches.append(batch)

    logging.info('Data size: %s' % len(data))
    logging.info('Number of batches: %s' % len(batches))

    return batches
Project: kaggle-review    Author: daxiongshu
def split(flags):
    if os.path.exists(flags.split_path):
        return np.load(flags.split_path).item()
    folds = flags.folds
    path = flags.input_path
    random.seed(6)
    img_list = ["%s/%s"%(path,img) for img in os.listdir(path)]
    random.shuffle(img_list)
    dic = {}
    n = len(img_list)
    num = (n+folds-1)//folds
    for i in range(folds):
        s,e = i*num,min(i*num+num,n)
        dic[i] = img_list[s:e]
    np.save(flags.split_path,dic)
    return dic
Project: mimic3-benchmarks    Author: YerevaNN
def __init__(self, reader, partition, discretizer, normalizer,
                 batch_size, steps, shuffle):
        self.reader = reader
        self.partition = partition
        self.discretizer = discretizer
        self.normalizer = normalizer
        self.batch_size = batch_size

        if steps is None:
            self.n_examples = reader.get_number_of_examples()
            self.steps = (self.n_examples + batch_size - 1) // batch_size
        else:
            self.n_examples = steps * batch_size
            self.steps = steps

        self.shuffle = shuffle
        self.chunk_size = min(1024, self.steps) * batch_size  # self.steps is always an int here; the steps argument may be None
        self.lock = threading.Lock()
        self.generator = self._generator()
Project: mimic3-benchmarks    Author: YerevaNN
def __init__(self, reader, discretizer, normalizer,
                 batch_size, steps, shuffle):
        self.reader = reader
        self.discretizer = discretizer
        self.normalizer = normalizer
        self.batch_size = batch_size

        if steps is None:
            self.n_examples = reader.get_number_of_examples()
            self.steps = (self.n_examples + batch_size - 1) // batch_size
        else:
            self.n_examples = steps * batch_size
            self.steps = steps

        self.shuffle = shuffle
        self.chunk_size = min(1024, self.steps) * batch_size  # self.steps is always an int here; the steps argument may be None
        self.lock = threading.Lock()
        self.generator = self._generator()
Project: mimic3-benchmarks    Author: YerevaNN
def _generator(self):
        B = self.batch_size
        while True:
            if self.shuffle:
                self.reader.random_shuffle()
            remaining = self.n_examples
            while remaining > 0:
                current_size = min(self.chunk_size, remaining)
                remaining -= current_size
                (data, ts, labels, header) = read_chunk(self.reader, current_size)
                data = preprocess_chunk(data, ts, self.discretizer, self.normalizer)
                data = (data, labels)
                data = common_utils.sort_and_shuffle(data, B)

                for i in range(0, current_size, B):
                    yield (nn_utils.pad_zeros(data[0][i:i + B]),
                           np.array(data[1][i:i + B]))
Project: code    Author: ActiveState
def justify_line(line, width):
    """Stretch a line to width by filling in spaces at word gaps.

    The gaps are picked randomly one-after-another, before it starts
    over again.

    """
    i = []
    while 1:
        # line not long enough already?
        if len(' '.join(line)) < width:
            if not i:
                # index list is exhausted
                # get list if indices excluding last word
                i = list(range(max(1, len(line)-1)))
                # and shuffle it
                random.shuffle(i)
            # append space to a random word and remove its index
            line[i.pop(0)] += ' '
        else:
            # line has reached specified width or wider
            return ' '.join(line)
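A quick usage example for this recipe (hypothetical input; note the function mutates the word list it is given):

import random

words = 'stretch me to thirty columns'.split()
print(justify_line(words, 30))  # words re-joined with extra spaces at random gaps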
Project: CorpBot.py    Author: corpnewt
def botPick(self, ctx, bot, game):
        # Has the bot pick their card
        blackNum  = game['BlackCard']['Pick']
        if blackNum == 1:
            cardSpeak = 'card'
        else:
            cardSpeak = 'cards'
        i = 0
        cards = []
        while i < blackNum:
            randCard = random.randint(0, len(bot['Hand'])-1)
            cards.append(bot['Hand'].pop(randCard)['Text'])
            i += 1

        await self.typing(game)

        # Make sure we haven't laid any cards
        if bot['Laid'] == False and game['Judging'] == False:
            newSubmission = { 'By': bot, 'Cards': cards }
            game['Submitted'].append(newSubmission)
            # Shuffle cards
            shuffle(game['Submitted'])
            bot['Laid'] = True
            game['Time'] = currentTime = int(time.time())
            await self.checkSubmissions(ctx, game, bot)
Project: CorpBot.py    Author: corpnewt
def drawCard(self, game):
        # Draws a random unused card and shuffles the deck if needed
        totalDiscard = len(game['Discard'])
        for member in game['Members']:
            totalDiscard += len(member['Hand'])
        if totalDiscard >= len(self.deck['whiteCards']):
            # Tell everyone the cards were shuffled
            for member in game['Members']:
                if member['IsBot']:
                    continue
                user = member['User']
                await self.bot.send_message(user, 'Shuffling white cards...')
            # Shuffle the cards
            self.shuffle(game)
        while True:
            # Random grab a unique card
            index = random.randint(0, len(self.deck['whiteCards'])-1)
            if not index in game['Discard']:
                game['Discard'].append(index)
                text = self.deck['whiteCards'][index]
                text = self.cleanJson(text)
                card = { 'Index': index, 'Text': text }
                return card
Project: privcount    Author: privcount
def get_random_load_nonentry():
    '''
    Return a random item that probably isn't in match_func_result['load'].
    '''
    match_type = sys.argv[1]
    if match_type == 'ipasn':
        # Yes, we could do IPv6 here. But the type of the list doesn't matter:
        # a random IPv4 might not be in an IPv4 list, and it won't be in an
        # IPv6 list
        random_32_bit = random.randint(0, 2**32 - 1)
        ip = ipaddress.ip_address(random_32_bit)
        return ip
    else:
        char_list = list(get_random_load_entry())
        random.shuffle(char_list)
        return "".join(char_list)

# try to make sure that other processes don't warp the results too much
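As an aside, random.sample with k equal to the sequence length returns a full random permutation, so the list/shuffle/join dance above can be collapsed into one expression (a sketch, using the same get_random_load_entry() as above):

entry = get_random_load_entry()
shuffled = "".join(random.sample(entry, len(entry)))  # shuffled copy of the string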
Project: studsup    Author: ebmscruff
def schedule_matches(self):
        mySchedule = []
        # for each home team
        for home in range(len(self.clubs)):
            for away in range(len(self.clubs)):
                if home == away:
                    continue
                # schedule will be in order at first 
                match = Match(self.clubs[home], self.clubs[away])
                mySchedule.append(match)
                # TODO: This will need majorly improved.
                # Issue created on github
                away += 1
        # shuffle that schedule 
        random.shuffle(mySchedule)
        # set current schedule to the new one
        self.schedule = mySchedule
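The nested loops enumerate every ordered home/away pair; itertools.permutations expresses that directly, as in this sketch (assuming the same Match class and clubs list):

import itertools
import random

mySchedule = [Match(home, away)
              for home, away in itertools.permutations(self.clubs, 2)]
random.shuffle(mySchedule)
self.schedule = mySchedule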
Project: machine-learning    Author: zzw0929
def hill_climbing_first_choice(status):
    '''First-choice hill climbing: examine candidate moves in random order
    and jump to the first neighbouring state with fewer conflicts; return
    the input state unchanged if no move improves it.
    '''
    global chess_status_count
    pos = [(x, y) for x in range(8) for y in range(8)]
    random.shuffle(pos)
    for col, row in pos:
        if status[col] == row:
            continue
        chess_status_count += 1
        status_copy = list(status)
        status_copy[col] = row
        if get_num_of_conglict(status_copy) < get_num_of_conglict(status):
            status[col] = row
            return status
    return status
Project: code-uai16    Author: thanhan
def reproduce(n = None, dataset = 'RCT', rand_shuffle = None, num_it = 3, split = None):
    """
    read save_ss files
    reproduce evaluation
    """

    filename = 'save_ss_' + dataset + ' ' + str(n) + '_' + str(rand_shuffle)
    f = open(filename, 'r')
    (tc_dic, mv_dic, vs_diag_dic, vs_full_dic) = pickle.load(f)

    start.main(dataset)
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc.get_true_ss()
    random.shuffle(start.turk_data_id, lambda : rand_shuffle)
    random.shuffle(start.rel, lambda : rand_shuffle)
    test_data = (start.turk_data_id[split:], start.rel[split:])

    print n    
    print "tc ", eval_cm(tc_dic, gold_dic, True, test_data)
    print "mv ", eval_cm(mv_dic, gold_dic, True, test_data)
    print "vs Full_Cov = False ", eval_cm(vs_diag_dic, gold_dic, True, test_data)
    print "vs Full_Cov = True " , eval_cm(vs_full_dic, gold_dic, True, test_data)
    f.close()
Project: code-uai16    Author: thanhan
def setup(dataset = 'proton-beam', n = 1000, ngold = 0, rand_shuffle = None):
    start.main(dataset)

    if rand_shuffle != None:
        random.shuffle(start.turk_data_id, lambda : rand_shuffle)
        random.shuffle(start.rel, lambda : rand_shuffle)

    lc_gold = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc_gold.get_true_ss()

    lc1 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    tc = crowd_model.tc_model(lc1)

    lc2 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    mv = crowd_model.mv_model(lc2)

    lc3 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    vs_full = crowd_model.vss_model(lc3, full_cov = True)

    lc4 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    vs_diag = crowd_model.vss_model(lc3, full_cov = False)

    return (gold_dic, mv, tc, vs_full, vs_diag)
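Both code-uai16 snippets pass a second argument to random.shuffle so that two parallel lists receive the same permutation; that random parameter was deprecated in Python 3.9 and removed in 3.11. A version-safe equivalent reseeds a fresh Random instance before each shuffle (a sketch):

import random

def parallel_shuffle(a, b, seed):
    """Shuffle equal-length lists a and b in place with the same permutation."""
    random.Random(seed).shuffle(a)
    random.Random(seed).shuffle(b)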
Project: code-uai16    Author: thanhan
def get_balance_d():
    n = len(rel)
    a = np.arange(n)
    np.random.shuffle(a)

    n0 = 0; n1 = 0; indices = []
    for i in a:
        x = rel[i]
        if n0 < n1 and x == 1: continue
        if n1 < n0 and x == 0: continue
        indices.append(i)
        if x == 0: n0 += 1
        if x == 1: n1 += 1

    global bal_mat, bal_rel, bal_turk_data, bal_turk_data_uncer, bal_turk_data_id
    bal_mat = mat[indices]
    bal_rel = [rel[i] for i in indices]
    #bal_turk_data = [turk_data[i] for i in indices]
    #bal_turk_data_uncer = [turk_data_uncer[i] for i in indices]
    bal_turk_data_id = [turk_data_id[i] for i in indices]
Project: SoCFoundationFlow    Author: mattaw
def read_wafdir():
    try:
        os.listdir('waflib')
    except:
        raise ImportError('please provide a waflib directory in the current folder')

    d = 'waflib'
    lst = [d + os.sep + x for x in os.listdir(d) if x.endswith('.py')]
    e = d + os.sep + 'Tools'
    lst.extend([e + os.sep + x for x in os.listdir(e) if x.endswith('.py')])
    f = d + os.sep + 'extras'
    lst.extend([f + os.sep + x for x in os.listdir(f) if x.endswith('.py')])

    random.shuffle(lst)
    #lst.sort()
    return lst
Project: deep_srl    Author: luheng
def get_training_data(self, include_last_batch=False):
    """ Get shuffled training samples. Called at the beginning of each epoch.
    """
    # TODO: Speed up: Use variable size batches (different max length).  
    train_ids = range(len(self.train_sents))
    random.shuffle(train_ids)

    if not include_last_batch:
      num_batches = len(train_ids) // self.batch_size
      train_ids = train_ids[:num_batches * self.batch_size]

    num_samples = len(self.train_sents)
    tensors = [self.train_tensors[t] for t in train_ids]
    batched_tensors = [tensors[i: min(i+self.batch_size, num_samples)]
               for i in xrange(0, num_samples, self.batch_size)]
    results = [zip(*t) for t in batched_tensors]

    print("Extracted {} samples and {} batches.".format(num_samples, len(batched_tensors)))
    return results
Project: PokeMath    Author: rdp1070
def makeQ(self):
        nums = [10, 50]  # creating array of percentges
        num1 = random.choice(nums)  # choosing random percentage
        nums2 = [10, 20, 40, 100]
        print
        num2 = random.choice(nums2)
        q1 = ("What is {0} percent of {1}?").format(num1, num2)  # question string
        i = 0
        options = []
        while (i<4):
            options.append(random.randint(0,100))
            i+=1


        a1 = int((num1 / 100.0) * num2)  # num1 is the percentage, which should mutltiply by num2
        options.append(a1)
        random.shuffle(options)
        print("Choose the correct answer: {0}").format(options)
        return q1, a1, options
Project: PokeMath    Author: rdp1070
def makeQ(self):
        nums = [10, 20, 40, 80]  # creating array of percentges
        num1 = random.choice(nums)  # choosing random percentage
        nums2 = [10, 20, 40, 100]
        print
        num2 = random.choice(nums2)
        q1 = ("What is {0} percent of {1} ").format(num1, num2)  # question string

        i = 0
        options = []
        while (i<4):
                options.append(random.randint(0,100))
                i+=1

        a1 = int((num1 / 100.0) * num2)  # num1 is the percentage, which should mutltiply by num2

        options.append(a1)
        random.shuffle(options)
        print("Choose the correct answer: {0}").format(options)
        return q1, a1, options

#Subclass of Monster class for geometry-related monsters
Project: PokeMath    Author: rdp1070
def makeQ(self):
        nums1 = [1,2,5,10] #creating array of numbers to multiply
        num1 = random.choice(nums1) #choosing random number to multiply
        nums2 = [1,2,3,4,5,6,7,8,9,10]
        num2  = random.choice(nums2)
        q1 = ("What is {0} multiplied by {1}? ").format(num1, num2) #question string
        a1 = int( num1 * num2 ) #What is num1 times num2

        i = 0
        options = []
        while (i<4):
            options.append(random.randint(1, 100))
            i+=1
        options.append(a1)
        random.shuffle(options)
        print("Choose the correct answer: {0}").format(options)
        return q1, a1, options
Project: PokeMath    Author: rdp1070
def makeQ(self):
        nums1 = [2,3,4,5,6,7,8,9,10] #creating array of numbers to multiply
        num1 = random.choice(nums1) #choosing random number to multiply
        nums2 = [2,3,4,5,6,7,8,9,10]
        num2  = random.choice(nums2)
        q1 = ("What is {0} multiplied by {1}? ").format(num1, num2) #question string
        a1 = int( num1 * num2 ) #What is num1 times num2

        i = 0
        options = []
        while (i<4):
            options.append(random.randint(1,100))
            i+=1
        options.append(a1)
        random.shuffle(options)
        print("Choose the correct answer: {0}").format(options)
        return q1, a1, options
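A caveat shared by these makeQ variants: random.randint can generate a distractor equal to the correct answer. If distinct options are wanted, random.sample over candidates excluding a1 avoids the collision (a sketch):

options = random.sample([x for x in range(1, 101) if x != a1], 4)
options.append(a1)
random.shuffle(options)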
Project: word-classification    Author: vinsis
def createData():
    spwords = [unidecode(a.lower()) for a in set(nltk.corpus.cess_esp.words()) if len(a)>3]
    enwords = [a.lower() for a in set(nltk.corpus.brown.words()) if len(a)>3]
    jpwords = [unidecode(a) for a in jeita.words() if (len(unidecode(a)) and unidecode(a)[0].islower())]
    jpwords = [a for a in set(jpwords) if len(a)>3]
    # minLen = min(len(enwords), len(spwords), len(jpwords))

    featuresets = \
        [(createTupleDict(w,numChars),'English') for w in enwords] + \
        [(createTupleDict(w,numChars),'Spanish') for w in spwords] + \
        [(createTupleDict(w,numChars),'Japanese') for w in jpwords]

    random.shuffle(featuresets)

    l=int(len(featuresets)*0.8)

    training_set = featuresets[:l]
    testing_set = featuresets[l:]
    return (training_set, testing_set)
Project: deep-summarization    Author: harpribot
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, for both forward and backward, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X_fwd = self.X_fwd[sample_id]
        self.X_bwd = self.X_bwd[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # Forward review
        self.X_trn_fwd = self.X_fwd[0:self.train_size]
        self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
        # Backward review
        self.X_trn_bwd = self.X_bwd[0:self.train_size]
        self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
Project: deep-summarization    Author: harpribot
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X = self.X[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # review
        self.X_trn = self.X[0:self.train_size]
        self.X_tst = self.X[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
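Since X and Y in these two snippets are NumPy arrays indexed by sample_id, the range/shuffle pair can be replaced by a single NumPy permutation used for fancy indexing (a sketch, assuming numpy is imported as np):

sample_id = np.random.permutation(num_samples)  # shuffled indices 0..num_samples-1
self.X = self.X[sample_id]
self.Y = self.Y[sample_id]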