Python numpy.random module: shuffle() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.random.shuffle().
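
Before the examples, a quick note on the API itself: numpy.random.shuffle() permutes its argument in place along the first axis and returns None; for a multi-dimensional array, only the rows are reordered. A minimal sketch (the array names are illustrative):

import numpy as np

a = np.arange(10)
np.random.shuffle(a)          # shuffles in place; the return value is None
m = np.arange(12).reshape(4, 3)
np.random.shuffle(m)          # 2-D input: only the rows (first axis) move

# Newer code may prefer the Generator API (NumPy >= 1.17):
rng = np.random.default_rng(0)
rng.shuffle(a)                # Generator.shuffle also accepts an axis argument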

Project: latplan    Author: guicho271828    | Project source | File source
def prepare_oae_PU4(known_transisitons):
    print("Learn from pre + action label",
          "*** INCOMPATIBLE MODEL! ***",
          sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:,N:])) > 0.5)[0]

    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons[:,:N], np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y[:,:N], np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))
Project: latplan    Author: guicho271828    | Project source | File source
def prepare_oae_PU5(known_transisitons):
    print("Learn from pre + suc + action label",
          "*** INCOMPATIBLE MODEL! ***",
          sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:,N:])) > 0.5)[0]

    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons, np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y, np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))
Project: latplan    Author: guicho271828    | Project source | File source
def puzzle_plot(p):
    p.setup()
    def name(template):
        return template.format(p.__name__)
    from itertools import islice
    configs = list(islice(p.generate_configs(9), 1000)) # careful: islice consumes the underlying generator!
    import numpy.random as random
    random.shuffle(configs)
    configs = configs[:10]
    puzzles = p.generate(configs, 3, 3)
    print(puzzles.shape, "mean", puzzles.mean(), "stdev", np.std(puzzles))
    plot_image(puzzles[-1], name("{}.png"))
    plot_image(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1),name("{}+noise.png"))
    plot_image(np.round(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1)),name("{}+noise+round.png"))
    plot_grid(puzzles, name("{}s.png"))
    _transitions = p.transitions(3,3,configs=configs)
    print(_transitions.shape)
    transitions_for_show = \
        np.einsum('ba...->ab...',_transitions) \
          .reshape((-1,)+_transitions.shape[2:])
    print(transitions_for_show.shape)
    plot_grid(transitions_for_show, name("{}_transitions.png"))
Project: latplan    Author: guicho271828    | Project source | File source
def prepare(data):
    num = len(data)
    dim = data.shape[1]//2
    print("in prepare: ",data.shape,num,dim)
    pre, suc = data[:,:dim], data[:,dim:]

    suc_invalid = np.copy(suc)
    random.shuffle(suc_invalid)

    diff_valid   = suc         - pre
    diff_invalid = suc_invalid - pre

    inputs = np.concatenate((diff_valid,diff_invalid),axis=0)
    outputs = np.concatenate((np.ones((num,1)),np.zeros((num,1))),axis=0)
    print("in prepare: ",inputs.shape,outputs.shape)
    io = np.concatenate((inputs,outputs),axis=1)
    random.shuffle(io)

    train_n = int(2*num*0.9)
    train, test = io[:train_n], io[train_n:]
    train_in, train_out = train[:,:dim], train[:,dim:]
    test_in, test_out = test[:,:dim], test[:,dim:]
    print("in prepare: ",train_in.shape, train_out.shape, test_in.shape, test_out.shape)

    return train_in, train_out, test_in, test_out
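
In prepare() above, inputs and labels stay aligned because they are concatenated into one array before shuffling. An equivalent pattern (a sketch, not taken from latplan) draws one permutation of row indices and applies it to both arrays, avoiding the concatenate/split round trip:

idx = np.random.permutation(len(inputs))     # one shared permutation
inputs, outputs = inputs[idx], outputs[idx]  # rows stay paired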
Project: latplan    Author: guicho271828    | Project source | File source
def prepare2(data):
    "valid data diff only"
    num = len(data)
    dim = data.shape[1]//2
    print("in prepare: ",data.shape,num,dim)
    pre, suc = data[:,:dim], data[:,dim:]

    diff_valid   = suc         - pre

    inputs = diff_valid
    outputs = np.ones((num,1))
    print("in prepare: ",inputs.shape,outputs.shape)
    io = np.concatenate((inputs,outputs),axis=1)
    random.shuffle(io)

    train_n = int(num*0.9)
    train, test = io[:train_n], io[train_n:]
    train_in, train_out = train[:,:dim], train[:,dim:]
    test_in, test_out = test[:,:dim], test[:,dim:]
    print("in prepare: ",train_in.shape, train_out.shape, test_in.shape, test_out.shape)

    return train_in, train_out, test_in, test_out
Project: latplan    Author: guicho271828    | Project source | File source
def prepare3(data):
    "valid data only"
    num = len(data)
    dim = data.shape[1]//2
    print("in prepare: ",data.shape,num,dim)

    inputs = data
    outputs = np.ones((num,1))
    print("in prepare: ",inputs.shape,outputs.shape)
    io = np.concatenate((inputs,outputs),axis=1)
    random.shuffle(io)

    train_n = int(num*0.9)
    train, test = io[:train_n], io[train_n:]
    train_in, train_out = train[:,:2*dim], train[:,2*dim:]
    test_in, test_out = test[:,:2*dim], test[:,2*dim:]
    print("in prepare: ",train_in.shape, train_out.shape, test_in.shape, test_out.shape)

    return train_in, train_out, test_in, test_out
Project: latplan    Author: guicho271828    | Project source | File source
def grid_search(task, default_parameters, parameters,
                report=None, report_best=None,
                limit=float('inf')):
    best = {'eval'    :None, 'params'  :None, 'artifact':None}
    results       = []
    import itertools
    names  = [ k for k, _ in parameters.items()]
    values = [ v for _, v in parameters.items()]
    all_params = list(itertools.product(*values))
    random.shuffle(all_params)
    [ print(r) for r in all_params]
    try:
        for i,params in enumerate(all_params):
            if i > limit:
                break
            local_parameters = { k:v for k,v in zip(names,params) }
            print("{}/{} {}".format(i, len(all_params), local_parameters))
            artifact, eval = task(merge_hash(default_parameters,local_parameters))
            _update_best(artifact, eval, local_parameters, results, best, report, report_best)
    finally:
        from colors import bold
        print(bold("*** Best parameter: ***\n{}\neval: {}".format(best['params'],best['eval'])))
        print(results)
    return best['artifact'],best['params'],best['eval']
Project: scikit-kge    Author: mnick    | Project source | File source
def _optim(self, xys):
        idx = np.arange(len(xys))
        self.batch_size = int(np.ceil(len(xys) / self.nbatches))  # cast to int so np.split below receives integer indices
        batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)

        for self.epoch in range(1, self.max_epochs + 1):
            # shuffle training examples
            self._pre_epoch()
            shuffle(idx)

            # store epoch for callback
            self.epoch_start = timeit.default_timer()

            # process mini-batches
            for batch in np.split(idx, batch_idx):
                # select indices for current batch
                bxys = [xys[z] for z in batch]
                self._process_batch(bxys)

            # check callback function, if false return
            for f in self.post_epoch:
                if not f(self):
                    break
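
The epoch loop above shows a standard pattern: reshuffle an index array once per epoch and split it into mini-batches, so the training data itself never moves. The same idea as a self-contained sketch (names are illustrative, not from scikit-kge):

import numpy as np

examples = list(range(10))           # stand-in for the training examples
idx = np.arange(len(examples))
batch_size = 4
np.random.shuffle(idx)               # reorder indices, not the data
for batch in np.split(idx, np.arange(batch_size, len(idx), batch_size)):
    minibatch = [examples[i] for i in batch]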
Project: deep-learning-for-genomics    Author: chgroenbech    | Project source | File source
def createSampleData(m = 100, n = 20, scale = 2, p = 0.5):

    print("Creating sample data.")

    data = zeros((m, n))

    row = scale * random.rand(n)
    k = 0
    for i in range(m):
        u = random.rand()
        if u > p:
            row = scale * random.rand(n)
            k += 1
        data[i] = row

    random.shuffle(data)

    for i in range(m):
        for j in range(n):
            data[i, j] = random.poisson(data[i, j])

    print("Sample data created with {} different cell types.".format(k))

    return data
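
Note that random.shuffle(data) above permutes whole rows, which is exactly what is wanted here (one row per sample): for a 2-D array, numpy.random.shuffle() only reorders the first axis. To permute every element independently you would flatten first (an illustrative sketch, not from this project):

flat = data.ravel()
random.shuffle(flat)                 # permutes all m*n values
data = flat.reshape(data.shape)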
Project: Generative-Adversarial-Network    Author: K-Du    | Project source | File source
def prepare_dirs(delete_train_dir=False):
    # Create checkpoint dir (do not delete anything)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)

    # Cleanup train dir
    # ----
    # This was removed to ensure Windows compatibility
    # ----
    #if delete_train_dir:
    #    if tf.gfile.Exists(FLAGS.train_dir):
    #        tf.gfile.Remove(FLAGS.train_dir)
    #    tf.gfile.MakeDirs(FLAGS.train_dir)

    # Return names of training files
    if not tf.gfile.Exists(FLAGS.dataset) or \
       not tf.gfile.IsDirectory(FLAGS.dataset):
        raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))

    filenames = tf.gfile.ListDirectory(FLAGS.dataset)
    random.shuffle(filenames)
    filenames = [os.path.join(FLAGS.dataset, f) for f in filenames]

    return filenames
Project: alib    Author: vnep-approx    | Project source | File source
def _add_cactus_edges(self, req):
        sub_trees = [(req.graph["root"], list(req.nodes))]
        cycles = 0
        edges_on_cycle = set()
        while sub_trees and (cycles < self._raw_parameters["max_cycles"]):
            cycles += 1
            root_node, sub_tree = sub_trees.pop()
            i = random.choice(sub_tree)
            j = random.choice(sub_tree)
            while i == j or (i in req.get_out_neighbors(j)) or (j in req.get_out_neighbors(i)):
                i = random.choice(sub_tree)
                j = random.choice(sub_tree)
            if req.node[i]["layer"] > req.node[j]["layer"]:
                i, j = j, i  # make edges always point down the tree
            if random.random() < self._raw_parameters["probability"]:
                req.add_edge(i, j, self._edge_demand)
                edges_on_cycle.add((i, j))

                path_i = CactusRequestGenerator._path_to_root(req, i, root_node)
                path_j = CactusRequestGenerator._path_to_root(req, j, root_node)
                new_cycle = path_i.symmetric_difference(path_j)  # only edges on the path to the first common ancestor lie on cycle
                edges_on_cycle = edges_on_cycle.union(new_cycle)

                sub_trees = CactusRequestGenerator._list_nontrivial_allowed_subtrees(req, edges_on_cycle)
                random.shuffle(sub_trees)
Project: srez    Author: david-gpu    | Project source | File source
def prepare_dirs(delete_train_dir=False):
    # Create checkpoint dir (do not delete anything)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)

    # Cleanup train dir
    if delete_train_dir:
        if tf.gfile.Exists(FLAGS.train_dir):
            tf.gfile.DeleteRecursively(FLAGS.train_dir)
        tf.gfile.MakeDirs(FLAGS.train_dir)

    # Return names of training files
    if not tf.gfile.Exists(FLAGS.dataset) or \
       not tf.gfile.IsDirectory(FLAGS.dataset):
        raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))

    filenames = tf.gfile.ListDirectory(FLAGS.dataset)
    filenames = sorted(filenames)
    random.shuffle(filenames)
    filenames = [os.path.join(FLAGS.dataset, f) for f in filenames]

    return filenames
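
Unlike the earlier prepare_dirs() variant on this page, the filenames here are sorted before shuffling, so with a fixed seed the resulting order is reproducible regardless of the order the filesystem returns directory entries in. The same idea in isolation (a sketch; note that numpy.random.shuffle also works on plain Python lists):

import numpy as np

np.random.seed(42)                   # fixed seed gives a reproducible order
names = sorted(['b.png', 'c.png', 'a.png'])
np.random.shuffle(names)             # shuffles the list in place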
Project: openanalysis    Author: OpenWeavers    | Project source | File source
def visualize(self, num=100, save=False):
        """
        Visualizes the given sorting algorithm

        :param num: Number of points to be chosen for visualization
        :param save: Boolean indicating whether to save the animation in the 'output' directory
        """
        plt.title(self.sorter.name + " Visualization")
        plt.xlabel("Array Index")
        plt.ylabel("Element")
        data = np.arange(num)
        ran.shuffle(data)
        self.sorter.sort(data, visualization=True)
        self.hist_arr = self.sorter.hist_array
        self.scatter = plt.scatter(np.arange(self.hist_arr.shape[1]), self.hist_arr[0])  # plt.scatter(x-array,y-array)
        self.animation = animation.FuncAnimation(self.fig, self.__update, frames=self.hist_arr.shape[0], repeat=False,
                                                 blit=False, interval=1)
        if save:
            import os
            import errno
            path = "output"
            try:
                os.makedirs(path)
            except OSError as exc:
                if exc.errno == errno.EEXIST and os.path.isdir(path):
                    pass
                else:
                    raise
            path = os.path.join('output', self.sorter.name + ".mp4")
            p1 = Process(
                target=lambda: self.animation.save(path, writer=animation.FFMpegWriter(fps=100)))
            p1.start()
        plt.show()
Project: openanalysis    Author: OpenWeavers    | Project source | File source
def compare(algorithms, pts=2000, maxrun=5, progress=True):
        """
        Compares the given list of sorting algorithms and plots a bar chart of the results

        :param algorithms: List of sorting algorithms
        :param pts: Number of elements in the testing array
        :param maxrun: Number of iterations to average over
        :param progress: Whether to show a progress bar or not
        """
        base_arr = np.arange(pts)
        np.random.shuffle(base_arr)
        algorithms = [x() for x in algorithms]  # Instantiate
        operations = {x.name: 0 for x in algorithms}
        print('Please wait while comparing Sorting Algorithms')
        if progress:
            import progressbar
            count = 0
            max_count = maxrun * len(algorithms)
            bar = progressbar.ProgressBar(max_value=max_count)
        for _ in range(maxrun):
            for algorithm in algorithms:
                if progress:
                    count += 1
                    bar.update(count)
                algorithm.sort(base_arr)
                operations[algorithm.name] += algorithm.count
                np.random.shuffle(base_arr)
        operations = [(k, v / maxrun) for k, v in operations.items()]
        plt.suptitle('Sorting Algorithm Comparison\nAveraged over {} loops'.format(maxrun))
        rects = plt.bar(np.arange(len(operations)), [y for (x, y) in operations])  # positional args work across matplotlib versions
        plt.xticks(np.arange(len(operations)), [x for (x, y) in operations])
        ax = plt.axes()
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                    '%d' % int(height),
                    ha='center', va='bottom')
        plt.ylabel('Average number of basic operations')
        plt.show()
Project: geocoder-ie    Author: devgateway    | Project source | File source
def read_rows():
        geography_records = get_geography_rows()
        # get same amount of none records
        none_records = get_none_rows(limit=len(geography_records))
        all_records = geography_records + none_records
        random.shuffle(all_records)
        return all_records
Project: latplan    Author: guicho271828    | Project source | File source
def permute_suc(data):
    dim = data.shape[1]//2
    pre, suc = data[:,:dim], data[:,dim:]
    suc_invalid = np.copy(suc)
    random.shuffle(suc_invalid)
    data_invalid = np.concatenate((pre,suc_invalid),axis=1)
    data_invalid = set_difference(data_invalid, data)
    return data_invalid
Project: latplan    Author: guicho271828    | Project source | File source
def generate_oae_action(known_transisitons):
    print("listing actions")
    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    histogram = np.squeeze(actions.sum(axis=0,dtype=int))
    available_actions = np.zeros((np.count_nonzero(histogram), actions.shape[1], actions.shape[2]), dtype=int)
    for i, pos in enumerate(np.where(histogram > 0)[0]):
        available_actions[i][0][pos] = 1

    N = known_transisitons.shape[1] // 2
    states = known_transisitons.reshape(-1, N)
    print("start generating transitions")
    y = oae.decode([
        # s1,s2,s3,s1,s2,s3,....
        repeat_over(states, len(available_actions), axis=0),
        # a1,a1,a1,a2,a2,a2,....
        np.repeat(available_actions, len(states), axis=0),], batch_size=1000) \
           .round().astype(np.int8)

    print("remove known transitions")
    y = set_difference(y, known_transisitons)
    print("shuffling")
    random.shuffle(y)
    return y

################################################################
# data preparation
Project: latplan    Author: guicho271828    | Project source | File source
def test():
    valid = np.loadtxt(sae.local("all_actions.csv"),dtype=np.int8)
    random.shuffle(valid)
    N = valid.shape[1] // 2
    print("valid",len(valid))

    prediction = np.clip(discriminator.discriminate(valid,batch_size=1000).round(), 0,1)
    print("type1 error: ",100 * np.mean(1-prediction), "%")

    mixed = generate_oae_action(valid[:1000]) # x2x128 max
    p = latplan.util.puzzle_module(sae.local(""))
    pre_images = sae.decode_binary(mixed[:,:N],batch_size=1000)
    suc_images = sae.decode_binary(mixed[:,N:],batch_size=1000)
    answers = np.array(p.validate_transitions([pre_images, suc_images],batch_size=1000))
    invalid = mixed[np.logical_not(answers)]

    print("mixed",len(mixed), "invalid", len(invalid))

    prediction = np.clip(discriminator.discriminate(invalid,batch_size=1000).round(), 0,1)
    print("type2 error: ",100 * np.mean(prediction), "%")

    ind = np.where(np.squeeze(combined(invalid[:,N:])) > 0.5)[0]
    print("type2 error (w/o invalid states by sd3): ",100 * np.mean(prediction[ind]), "%")

    ind = p.validate_states(sae.decode_binary(invalid[:,N:],batch_size=1000),verbose=False,batch_size=1000)
    print("type2 error (w/o invalid states by validator): ",100 * np.mean(prediction[ind]), "%")
Project: latplan    Author: guicho271828    | Project source | File source
def mnist_puzzle():
    strips.parameters = [[4000],[0.4],[49]]
    strips.epoch = 1000
    strips.batch_size = 2000
    print(strips.parameters,strips.epoch,strips.batch_size)

    import puzzles.mnist_puzzle as p
    def convert(panels):
        return np.array([
            [i for i,x in enumerate(panels) if x == p]
            for p in range(9)]).reshape(-1)
    ig_c = [convert([8,0,6,5,4,7,2,3,1]),
            convert([0,1,2,3,4,5,6,7,8])]
    ig = p.states(3,3,ig_c)
    train_c = np.array([ random_walk(ig_c[0],300, lambda config: p.successors(config,3,3))
                         for i in range(40) ])
    train_c = train_c.reshape((-1,9))
    train = p.states(3,3,train_c)

    configs = p.generate_configs(9)
    configs = np.array([ c for c in configs ])
    random.shuffle(configs)

    test_c = configs[:1000]
    test = p.states(3,3,test_c)

    ae = run(learn_flag,"samples/mnist_puzzle33p_rw_40restarts_{}/".format(strips.encoder), train, test)
    dump(ae, train, test, p.transitions(3,3,train_c,True))
    configs = p.generate_configs(9)
    configs = np.array([ c for c in configs ])
    dump_all_actions(ae,configs,lambda configs: p.transitions(3,3,configs))
    latent_plan(*ig, ae, option)
Project: latplan    Author: guicho271828    | Project source | File source
def puzzle(type='mnist',width=3,height=3,N=36,num_examples=6500):
    parameters = {
        'layer'      :[1000],# [400,4000],
        'clayer'     :[16],# [400,4000],
        'dropout'    :[0.4], #[0.1,0.4],
        'noise'      :[0.4],
        'N'          :[N],  #[25,49],
        'dropout_z'  :[False],
        'activation' :['tanh'],
        'full_epoch' :[150],
        'epoch'      :[150],
        'batch_size' :[4000],
        'lr'         :[0.001],
    }
    import importlib
    p = importlib.import_module('latplan.puzzles.puzzle_{}'.format(type))
    p.setup()
    configs = p.generate_configs(width*height)
    configs = np.array([ c for c in configs ])
    assert len(configs) >= num_examples
    print(len(configs))
    random.shuffle(configs)
    transitions = p.transitions(width,height,configs[:num_examples],one_per_state=True)
    states = np.concatenate((transitions[0], transitions[1]), axis=0)
    print(states.shape)
    train = states[:int(len(states)*0.9)]
    test  = states[int(len(states)*0.9):]
    ae = run("_".join(map(str,("samples/puzzle",type,width,height,N,num_examples,encoder))), train, test, parameters)
    show_summary(ae, train, test)
    dump_autoencoding_image_if_necessary(ae,test[:1000],train[:1000])
    dump_actions(ae,transitions)
    dump_states (ae,states)
    dump_all_actions(ae,configs,        lambda configs: p.transitions(width,height,configs),)
    dump_all_states(ae,configs,        lambda configs: p.states(width,height,configs),)
Project: latplan    Author: guicho271828    | Project source | File source
def hanoi(disks=7,towers=4,N=36,num_examples=6500):
    parameters = {
        'layer'      :[1000],# [400,4000],
        'clayer'     :[12],# [400,4000],
        'dropout'    :[0.6], #[0.1,0.4],
        'noise'      :[0.4],
        'N'          :[N],  #[25,49],
        'dropout_z'  :[False],
        'activation' : ['tanh'],
        'full_epoch' :[1000],
        'epoch'      :[1000],
        'lr_epoch'   :[0.5],
        'batch_size' :[500],
        'optimizer'  :['adam'],
        'lr'         :[0.001],
    }
    print("this setting is tuned for conv")
    import latplan.puzzles.hanoi as p
    configs = p.generate_configs(disks,towers)
    configs = np.array([ c for c in configs ])
    assert len(configs) >= num_examples
    print(len(configs))
    random.shuffle(configs)
    transitions = p.transitions(disks,towers,configs[:num_examples],one_per_state=True)
    states = np.concatenate((transitions[0], transitions[1]), axis=0)
    print(states.shape)
    train = states[:int(len(states)*0.9)]
    test  = states[int(len(states)*0.9):]
    ae = run("_".join(map(str,("samples/hanoi",disks,towers,N,num_examples,encoder))), train, test, parameters)
    print("*** NOTE *** if l_rec is above 0.01, it is most likely not learning the correct model")
    show_summary(ae, train, test)
    dump_autoencoding_image_if_necessary(ae,test[:1000],train[:1000])
    dump_actions(ae,transitions,repeat=100)
    dump_states (ae,states,repeat=100)
    dump_all_actions(ae,configs,        lambda configs: p.transitions(disks,towers,configs),repeat=100)
    dump_all_states(ae,configs,        lambda configs: p.states(disks,towers,configs),repeat=100)
Project: tensorflow-srgan    Author: olgaliak    | Project source | File source
def prepare_dirs(delete_train_dir=False, isTest=False):
    # Create checkpoint dir (do not delete anything)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)

    # Cleanup train dir
    if delete_train_dir:
        if tf.gfile.Exists(FLAGS.train_dir):
            tf.gfile.DeleteRecursively(FLAGS.train_dir)
        tf.gfile.MakeDirs(FLAGS.train_dir)

    # Return names of training files
    if not tf.gfile.Exists(FLAGS.dataset) or \
       not tf.gfile.IsDirectory(FLAGS.dataset):
        raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))

    if isTest:
        dataDir = FLAGS.dataset_test
    else:
        dataDir = FLAGS.dataset

    filenames = tf.gfile.ListDirectory(dataDir)
    filenames = sorted(filenames)
    random.shuffle(filenames)
    filenames = [os.path.join(dataDir, f) for f in filenames]

    return filenames
Project: MTransE    Author: muhaochen    | Project source | File source
def __init__(self, dim = 100, save_dir = 'model_MtransE.bin'):
        self.dim = dim
        self.languages = []
        self.rate = 0.01 #learning rate
        self.trained_epochs = 0
        self.save_dir = save_dir
        #single-language models of each language
        self.models = {}
        self.triples = {}
        # cross-lingual linear transfer
        self.transfer = {}
        #intersect graph
        self.intersect_triples = np.array([0])
        #shuffle index for intersect triples
        self.intersect_index = np.array([0])
Project: MTransE    Author: muhaochen    | Project source | File source
def train_intersect_1epoch(self, shuffle=True, const_decay=1.0, sampling=False, L1=False):
        num_lan = len(self.languages)
        sum = 0.0
        count = 0
        index = None
        if shuffle == True:
            RD.shuffle(self.intersect_index)
            index = self.intersect_index
        else:
            index = range(len(self.intersect_index))
        for x in index:
            line = self.intersect_triples[x]
            count += 1
            if count % 50000 == 0:
                print "Scanned ",count," on intersect graph"

            transfer_index = ''
            for i in range(num_lan):
                for j in range(num_lan):
                    if i == j:
                        continue
                    l_left = self.languages[i]
                    l_right = self.languages[j]
                    transfer_index = l_left + l_right
                    this_transfer = self.transfer[transfer_index]
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_e[line[i][0]], self.models[l_right].vec_e[line[j][0]], const_decay, L1)
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_e[line[i][2]], self.models[l_right].vec_e[line[j][2]], const_decay, L1)
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_r[line[i][1]], self.models[l_right].vec_r[line[j][1]], const_decay, L1)
        return sum
Project: radar    Author: amoose136    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
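
The test re-seeds the global generator before every shuffle, so each input list receives the identical permutation. The same determinism in isolation (illustrative):

import numpy as np
from numpy import random

np.random.seed(12345)
a = list(range(4))
random.shuffle(a)
np.random.seed(12345)                # same seed, same permutation
b = list(range(4))
random.shuffle(b)
assert a == b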
Project: scikit-kge    Author: mnick    | Project source | File source
def _pre_epoch(self):
        self.nviolations = 0
        if self.samplef is None:
            shuffle(self.pxs)
            shuffle(self.nxs)
Project: krpcScripts    Author: jwvanderbeck    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: epsilon_free_inference    Author: gpapamak    | Project source | File source
def gen(self, n_samples=1):
        """Generates independent samples from mog."""

        ii = helper.discrete_sample(self.a, n_samples)
        ns = [np.sum((ii == i).astype(int)) for i in range(self.n_components)]
        samples = [x.gen(n) for x, n in zip(self.xs, ns)]
        samples = np.concatenate(samples, axis=0)
        rng.shuffle(samples)

        return samples
Project: vqa-sva    Author: shtechair    | Project source | File source
def reset(self):
        if self.is_train:
            logging.info("Shuffling data...")
            random.shuffle(self.qa_list)
Project: TicTacTio    Author: DevelopForLizardz    | Project source | File source
def randomize(self):
        """
        Moves the neural networks into random positions
        """

        random.shuffle(self.nets)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: aws-lambda-numpy    Author: vitolimandibhrata    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: lambda-numba    Author: rlhotovy    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: lambda-numba    Author: rlhotovy    | Project source | File source
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: lambda-numba    Author: rlhotovy    | Project source | File source
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: War-cards-game-simulation    Author: ZeeGabByte    | Project source | File source
def distribute():
    """distribue les cartes"""
    cards = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    random.shuffle(cards)
    return cards[:26], cards[26:]
Project: War-cards-game-simulation    Author: ZeeGabByte    | Project source | File source
def distribute_13():
    """distribue les cartes"""
    cards = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    random.shuffle(cards)
    return cards[:26], cards[26:]
Project: War-cards-game-simulation    Author: ZeeGabByte    | Project source | File source
def distribute_n(n):
    """distribue les cartes avec n"""
    cards = list(range(0, n)) * 4
    random.shuffle(cards)
    return cards[:2 * n], cards[2 * n:]
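
All three dealers above shuffle the deck in place before splitting it. Where mutating the source list is undesirable, numpy.random.permutation returns a shuffled copy instead (an illustrative alternative, not from this project):

import numpy as np

cards = list(range(13)) * 4
deck = np.random.permutation(cards)  # shuffled copy; `cards` is untouched
hand_a, hand_b = deck[:26], deck[26:]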
Project: deliver    Author: orchestor    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: deliver    Author: orchestor    | Project source | File source
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: deliver    Author: orchestor    | Project source | File source
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: ademxapp    Author: itijyou    | Project source | File source
def __init__(self, perm_len, num_c, x2c):
        assert perm_len > 0
        self._perm_len = perm_len
        self._num_c = num_c
        self._x2c = np.array(x2c, np.int32)

        self._c2x = []
        for i in range(self._num_c):
            self._c2x.append(np.where(self._x2c == i)[0])
        self._cur_c = -1
        self._cls = npr.permutation(self._num_c).tolist()
        self._cur_x = [-1] * self._num_c
        for i in range(self._num_c):
            npr.shuffle(self._c2x[i])
Project: ademxapp    Author: itijyou    | Project source | File source
def _next_c(self):
        self._cur_c += 1
        if self._cur_c == self._num_c:
            npr.shuffle(self._cls)
            self._cur_c = 0
        return self._cls[self._cur_c]
Project: ademxapp    Author: itijyou    | Project source | File source
def _next_x(self, ind_c):
        self._cur_x[ind_c] += 1
        if self._cur_x[ind_c] == len(self._c2x[ind_c]):
            npr.shuffle(self._c2x[ind_c])
            self._cur_x[ind_c] = 0
        return self._c2x[ind_c][self._cur_x[ind_c]]
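
Taken together, the three methods above implement class-balanced sampling: classes are visited in a shuffled round-robin order (_next_c), and the examples within a class are reshuffled whenever they are exhausted (_next_x). A hypothetical usage sketch, assuming the enclosing class is named Sampler:

# 6 examples over 3 classes; one full cycle visits every class equally often
sampler = Sampler(perm_len=6, num_c=3, x2c=[0, 0, 1, 1, 2, 2])
order = [sampler._next_x(sampler._next_c()) for _ in range(6)]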
Project: Alfred    Author: jkachhadia    | Project source | File source
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
Project: Alfred    Author: jkachhadia    | Project source | File source
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: Alfred    Author: jkachhadia    | Project source | File source
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()
Project: DAMR    Author: V2AI    | Project source | File source
def create_step10(maindir,mbconnect=None,maxsongs=500,nfilesbuffer=0,verbose=0):
    """
    Most likely the first step in the database creation.
    Get artists from the EchoNest based on familiarity
    INPUT
       maindir       - MillionSongDataset main directory
       mbconnect     - open musicbrainz pg connection
       maxsongs      - max number of songs per artist
       nfilesbuffer  - number of files to leave when we reach the M songs,
                       e.g. we stop adding new ones if there are more
                            than 1M-nfilesbuffer already 
    RETURN
       number of songs actually created
    """
    # get all artists ids
    artists = get_most_familiar_artists(nresults=100)
    # shuffle them
    npr.shuffle(artists)
    # for each of them create all songs
    cnt_created = 0
    for artist in artists:
        # CLOSED CREATION?
        if CREATION_CLOSED:
            break
        if verbose > 0: print('doing artist:', artist); sys.stdout.flush()
        cnt_created += create_track_files_from_artist(maindir,artist,
                                                      mbconnect=mbconnect,
                                                      maxsongs=maxsongs)
        t1 = time.time()
        nh5 = count_h5_files(maindir)
        t2 = time.time()
        print('found', nh5, 'h5 song files in', maindir, 'in', int(t2 - t1), 'seconds (pid=' + str(os.getpid()) + ')'); sys.stdout.flush()
        # sanity stop
        if nh5 > TOTALNFILES - nfilesbuffer:
            return cnt_created
    # done
    return cnt_created
Project: openanalysis    Author: OpenWeavers    | Project source | File source
def analyze(self, maxpts=1000, progress=True):
        """
        Plots the running time of the sorting algorithm
        Checks 3 cases: a pre-sorted array, a reverse-sorted array, and a shuffled array
        Analysis is done by inputting randomly shuffled integer arrays with sizes starting
        from 100 and varying up to maxpts in steps of 100, and counting the number of
        basic operations


        :param maxpts: Upper bound on elements chosen for analysing efficiency
        :param progress: Boolean indicating whether to show progress bar or not
        """
        # x is the list of input sizes
        # y[0] running time for a sorted array
        # y[1] running time for a reverse-sorted array
        # y[2] running time for a shuffled array
        x, y = np.array([0]), [np.array([0]), np.array([0]), np.array([0])]
        labels = ['Sorted Array', 'Reverse Sorted Array', 'Shuffled Array']  # order matches input_data below
        print('Please wait while analyzing {} Algorithm'.format(self.sorter.name))
        if progress:
            import progressbar
            count = 0
            max_count = (maxpts - 100) // 100
            bar = progressbar.ProgressBar(max_value=max_count)
        for n in range(100, maxpts, 100):
            # Vary n from 100 to max in steps of 100
            if progress:
                count += 1
                bar.update(count)
            data = np.arange(n)
            input_data = [np.array(data), np.array(data[::-1])]  # copy the reversed view; data is shuffled in place below
            np.random.shuffle(data)
            input_data.append(data)
            for i in range(3):
                self.sorter.sort(input_data[i], False)
                y[i] = np.vstack((y[i], [self.sorter.count]))
            x = np.vstack((x, [n]))
        plt.suptitle(self.sorter.name + " Analysis")
        for i in range(3):
            plt.subplot(2, 2, i + 1)
            plt.title(labels[i])
            plt.xlabel("No. of Elements")
            plt.ylabel("No. of Basic Operations")
            plt.scatter(x, y[i])
        plt.tight_layout(pad=2)
        plt.show()
Project: latplan    Author: guicho271828    | Project source | File source
def grid_search(path, train_in, train_out, test_in, test_out):
    # perform random trials on possible combinations
    network = Discriminator
    best_error = float('inf')
    best_params = None
    best_ae     = None
    results = []
    print("Network: {}".format(network))
    try:
        import itertools
        names  = [ k for k, _ in parameters.items()]
        values = [ v for _, v in parameters.items()]
        all_params = list(itertools.product(*values))
        random.shuffle(all_params)
        [ print(r) for r in all_params]
        for i,params in enumerate(all_params):
            config.reload_session()
            params_dict = { k:v for k,v in zip(names,params) }
            print("{}/{} Testing model with parameters=\n{}".format(i, len(all_params), params_dict))
            ae = learn_model(path, train_in,train_out,test_in,test_out,
                             network=curry(network, parameters=params_dict),
                             params_dict=params_dict)
            error = ae.net.evaluate(test_in,test_out,batch_size=100,verbose=0)
            results.append({'error':error, **params_dict})
            print("Evaluation result for:\n{}\nerror = {}".format(params_dict,error))
            print("Current results:")
            results.sort(key=lambda result: result['error'])
            [ print(r) for r in results]
            if error < best_error:
                print("Found a better parameter:\n{}\nerror:{} old-best:{}".format(
                    params_dict,error,best_error))
                del best_ae
                best_params = params_dict
                best_error = error
                best_ae = ae
            else:
                del ae
        print("Best parameter:\n{}\nerror: {}".format(best_params,best_error))
    finally:
        print(results)
    best_ae.save()
    with open(best_ae.local("grid_search.log"), 'a') as f:
        import json
        f.write("\n")
        json.dump(results, f)
    return best_ae,best_params,best_error