Python numpy module: save() code examples

We extracted the following 50 code examples from open source Python projects to illustrate how to use numpy.save().
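
As a quick reference before the project snippets, here is a minimal sketch of the basic np.save / np.load round trip (the filename is illustrative):

import numpy as np

arr = np.arange(12).reshape(3, 4)

# np.save writes a single array to a binary .npy file; the ".npy" suffix is appended if missing.
np.save('example.npy', arr)

# np.load restores the array, including its shape and dtype, from the file header.
restored = np.load('example.npy')
assert (restored == arr).all()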

Project: cloud-volume    Author: seung-lab
def encode_jpeg(arr):
    assert arr.dtype == np.uint8

    # simulate multi-channel array for single channel arrays
    if len(arr.shape) == 3:
        arr = np.expand_dims(arr, 3) # add channels to end of x,y,z

    arr = arr.transpose((3,2,1,0)) # channels, z, y, x
    reshaped = arr.reshape(arr.shape[3] * arr.shape[2], arr.shape[1] * arr.shape[0])
    if arr.shape[0] == 1:
        img = Image.fromarray(reshaped, mode='L')
    elif arr.shape[0] == 3:
        img = Image.fromarray(reshaped, mode='RGB')
    else:
        raise ValueError("Number of image channels should be 1 or 3. Got: {}".format(arr.shape[3]))

    f = io.BytesIO()
    img.save(f, "JPEG")
    return f.getvalue()
Project: Deep360Pilot-optical-flow    Author: yenchenlin
def gen_pruned_features(name):
    print name
    feature_dir = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'
    n_clips = len(glob.glob(feature_dir + BOX_FEATURE + '*.npy'))
    for clip in xrange(1, n_clips+1):
        pruned_boxes = np.load(feature_dir + BOX_FEATURE + '{:04d}.npy'.format(clip)) # (50, args.n_boxes, 4)
        roisavg = np.load(feature_dir + 'roisavg{:04d}.npy'.format(clip)) # (50, args.n_boxes, 512)

        pruned_roisavg = np.zeros((50, args.n_boxes, 512))
        for frame in xrange(50):
            for box_id in xrange(args.n_boxes):
                if not np.array_equal(pruned_boxes[frame][box_id], np.zeros((4))):
                    pruned_roisavg[frame][box_id] = roisavg[frame][box_id]

        np.save('{}pruned_roisavg{:04d}'.format(feature_dir, clip), pruned_roisavg)
Project: a-nice-mc    Author: ermongroup
def visualize(self, zv, path):
        self.ax1.clear()
        self.ax2.clear()
        z, v = zv
        if path:
            np.save(path + '/trajectory.npy', z)

        z = np.reshape(z, [-1, 2])
        self.ax1.hist2d(z[:, 0], z[:, 1], bins=400)
        self.ax1.set(xlim=self.xlim(), ylim=self.ylim())

        v = np.reshape(v, [-1, 2])
        self.ax2.hist2d(v[:, 0], v[:, 1], bins=400)
        self.ax2.set(xlim=self.xlim(), ylim=self.ylim())

        if self.display:
            import matplotlib.pyplot as plt
            plt.show()
            plt.pause(0.1)
        elif path:
            self.fig.savefig(path + '/visualize.png')
Project: tfutils    Author: neuroailab
def load_rec(self):
        # first try and see if anything with the save data exists, since obviously
        # we dont' want to keep loading from the original load location if some work has
        # already been done
        load = self.load_from_db({'exp_id': self.exp_id},
                                 cache_filters=True)
        # if not, try loading from the loading location
        if not load and not self.sameloc:
            load = self.load_from_db(self.load_query,
                                     cache_filters=True,
                                     collfs=self.load_collfs,
                                     collfs_recent=self.load_collfs_recent)
            if load is None:
                raise Exception('You specified load parameters but no '
                                'record was found with the given spec.')
        self.load_data = load
Project: video_labelling_using_youtube8m    Author: LittleWat
def get_feature_mat_from_video(video_filename, output_dir='output'):
    yt_vid, extension = video_filename.split('/')[-1].split('.')

    assert extension in ['webm', 'mp4', '3gp']

    mkdir_if_not_exist(output_dir, False)

    output_filename = output_dir + '/' + yt_vid + '.npy'

    vid_reader = imageio.get_reader(video_filename, 'ffmpeg')

    img_list = get_img_list_from_vid_reader(vid_reader, extension)

    base_model = InceptionV3(include_top=True, weights='imagenet')
    model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)

    feature_mat = get_feature_mat(model, img_list)

    np.save(output_filename, feature_mat)

    return feature_mat
Project: topically-driven-language-model    Author: jhlau
def compute_dt_dist(docs, labels, tags, model, max_len, batch_size, pad_id, idxvocab, output_file):
    #generate batches
    num_batches = int(math.ceil(float(len(docs)) / batch_size))
    dt_dist = []
    t = []
    combined = []
    docid = 0
    for i in xrange(num_batches):
        x, _, _, t, s = get_batch_doc(docs, labels, tags, i, max_len, cf.tag_len, batch_size, pad_id)
        attention, mean_topic = sess.run([model.attention, model.mean_topic], {model.doc: x, model.tag: t})
        dt_dist.extend(attention[:s])

        if debug:
            for si in xrange(s):
                d = x[si]
                print "\n\nDoc", docid, "=", " ".join([idxvocab[item] for item in d if (item != pad_id)])
                sorted_dist = matutils.argsort(attention[si], reverse=True)
                for ti in sorted_dist:
                    print "Topic", ti, "=", attention[si][ti]
                docid += 1

    np.save(open(output_file, "w"), dt_dist)
Project: kaggle-review    Author: daxiongshu
def predictPL(self):
        B = self.flags.batch_size
        W,H,C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32,shape=[None,H,W,C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs,imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit,feed_dict={inputs:imgs})
                np.save("%s/%d.npy"%(self.flags.pred_path,counter),{"pred":pred,"name":imgnames})
                counter+=len(imgs)
                if counter/B%10 ==0:
                    print_mem_time("%d images predicted"%counter)

    # train with placeholders
Project: kaggle-review    Author: daxiongshu
def show_embedding(self,name,save_model="model.ckpt",meta_path='metadata.tsv'):
        self._build()
        self._write_meta()
        from tensorflow.contrib.tensorboard.plugins import projector
        # Use the same LOG_DIR where you stored your checkpoint.
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
            saver = tf.train.Saver()
            saver.save(sess, os.path.join(self.flags.log_path, save_model), 0)
        # Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
        config = projector.ProjectorConfig()
        # You can add multiple embeddings. Here we add only one.
        embedding = config.embeddings.add()
        embedding.tensor_name = name
        # Link this tensor to its metadata file (e.g. labels).
        embedding.metadata_path = os.path.join(self.flags.log_path, meta_path)
        # Saves a configuration file that TensorBoard will read during startup.
        projector.visualize_embeddings(summary_writer, config)
Project: kaggle-review    Author: daxiongshu
def split(flags):
    if os.path.exists(flags.split_path):
        return np.load(flags.split_path).item()
    folds = flags.folds
    path = flags.input_path
    random.seed(6)
    img_list = ["%s/%s"%(path,img) for img in os.listdir(path)]
    random.shuffle(img_list)
    dic = {}
    n = len(img_list)
    num = (n+folds-1)//folds
    for i in range(folds):
        s,e = i*num,min(i*num+num,n)
        dic[i] = img_list[s:e]
    np.save(flags.split_path,dic)
    return dic
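
A note on the pattern above: np.save pickles non-array objects such as this dict (it is stored as a 0-d object array), and NumPy 1.16.3+ refuses to unpickle on load by default, so newer versions need allow_pickle=True. A minimal sketch with illustrative paths:

import numpy as np

dic = {0: ['img_0001.jpg'], 1: ['img_0002.jpg']}
np.save('split.npy', dic)  # stored as a pickled 0-d object array

# On NumPy >= 1.16.3 the pickled payload must be opted into explicitly.
loaded = np.load('split.npy', allow_pickle=True).item()  # .item() unwraps the dict
assert loaded == dic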
Project: autolab_core    Author: BerkeleyAutomation
def save(self, filename):
        """Saves the collection to a file.

        Parameters
        ----------
        filename : :obj:`str`
            The file to save the collection to.

        Raises
        ------
        ValueError
            If the file extension is not .npy or .npz.
        """
        file_root, file_ext = os.path.splitext(filename)
        if file_ext == '.npy':
            np.save(filename, self._data)
        elif file_ext == '.npz':
            np.savez_compressed(filename, self._data)
        else:
            raise ValueError('Extension %s not supported for point saves.' %(file_ext))
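
For reference, a short sketch of reading both formats back (filenames are illustrative; unnamed arrays in an .npz archive are keyed 'arr_0', 'arr_1', ...):

import numpy as np

data = np.random.rand(100, 3)
np.save('points.npy', data)              # single array -> .npy
np.savez_compressed('points.npz', data)  # compressed archive -> .npz

points = np.load('points.npy')
with np.load('points.npz') as archive:   # NpzFile behaves like a read-only dict
    points_from_npz = archive['arr_0']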
Project: BiMPM_keras    Author: ijinmao
def get_word_seq(train_ori1, train_ori2, test_ori1, test_ori2):
    # fit tokenizer
    tk = Tokenizer(num_words=TrainConfig.MAX_NB_WORDS)
    tk.fit_on_texts(train_ori1 + train_ori2 + test_ori1 + test_ori2)
    word_index = tk.word_index

    # q1, q2 training text sequence
    # (sentence_len, MAX_SEQUENCE_LENGTH)
    train_x1 = tk.texts_to_sequences(train_ori1)
    train_x1 = pad_sequences(train_x1, maxlen=TrainConfig.MAX_SEQUENCE_LENGTH)
    train_x2 = tk.texts_to_sequences(train_ori2)
    train_x2 = pad_sequences(train_x2, maxlen=TrainConfig.MAX_SEQUENCE_LENGTH)

    # q1, q2 testing text sequence
    test_x1 = tk.texts_to_sequences(test_ori1)
    test_x1 = pad_sequences(test_x1, maxlen=TrainConfig.MAX_SEQUENCE_LENGTH)
    test_x2 = tk.texts_to_sequences(test_ori2)
    test_x2 = pad_sequences(test_x2, maxlen=TrainConfig.MAX_SEQUENCE_LENGTH)

    np.save(open(DirConfig.Q1_CACHE_TRAIN, 'wb'), train_x1)
    np.save(open(DirConfig.Q2_CACHE_TRAIN, 'wb'), train_x2)
    np.save(open(DirConfig.Q1_CACHE_TEST, 'wb'), test_x1)
    np.save(open(DirConfig.Q2_CACHE_TEST, 'wb'), test_x2)
    np.save(open(DirConfig.WORD_INDEX_CACHE, 'wb'), word_index)
    return train_x1, train_x2, test_x1, test_x2, word_index
Project: BiMPM_keras    Author: ijinmao
def load_word2vec_matrix(vec_file, word_index, config):
    if os.path.isfile(DirConfig.W2V_CACHE):
        print('---- Load word vectors from cache.')
        embedding_matrix = np.load(open(DirConfig.W2V_CACHE, 'rb'))
        return embedding_matrix

    print('---- loading word2vec ...')
    word2vec = KeyedVectors.load_word2vec_format(
        vec_file, binary=True)
    print('Found %s word vectors of word2vec' % len(word2vec.vocab))

    nb_words = min(config.MAX_NB_WORDS, len(word_index)) + 1
    embedding_matrix = np.zeros((nb_words, config.WORD_EMBEDDING_DIM))
    for word, i in word_index.items():
        if word in word2vec.vocab:
            embedding_matrix[i] = word2vec.word_vec(word)
    print('Null word embeddings: %d' % \
          np.sum(np.sum(embedding_matrix, axis=1) == 0))

    # check the words which not in embedding vectors
    not_found_words = []
    for word, i in word_index.items():
        if word not in word2vec.vocab:
            not_found_words.append(word)

    np.save(open(DirConfig.W2V_CACHE, 'wb'), embedding_matrix)
    return embedding_matrix
Project: evaluation_tools    Author: JSALT-Rosetta
def af_h5_to_np(input_path, outpath):

    files = tables.open_file(input_path, mode = 'r+')
    speaker_nodes = files.root._f_list_nodes()

    for spk in speaker_nodes:
        file_nodes = spk._f_list_nodes()
        for fls in file_nodes:
            file_name = fls._v_name
            af_nodes = fls._f_list_nodes()
            af_list = []
            for fts in af_nodes:
                features = fts[:]
                mean = numpy.mean(features,1)
                normalised_feats = list(numpy.transpose(features)/mean)
                af_list += normalised_feats
            numpy.save(outpath + file_name, numpy.array(af_list))
Project: structured-output-ae    Author: sbelharbi
def save_params(self, weights_file, catched=False):
        """Save the model's parameters."""
        f_dump = open(weights_file, "w")
        params_vls = []
        if catched:
            if self.catched_params != []:
                params_vls = self.catched_params
            else:
                raise ValueError(
                    "You asked to save catched params," +
                    "but you didn't catch any!!!!!!!")
        else:
            for param in self.params:
                params_vls.append(param.get_value())
        pkl.dump(params_vls, f_dump, protocol=pkl.HIGHEST_PROTOCOL)
        f_dump.close()
Project: histwords    Author: williamleif
def main():
    args = docopt("""
    Usage:
        text2numpy.py <path>
    """)

    path = args['<path>']

    matrix = read_vectors(path)
    iw = sorted(matrix.keys())

    new_matrix = np.zeros(shape=(len(iw), len(matrix[iw[0]])), dtype=np.float32)
    for i, word in enumerate(iw):
        if word in matrix:
            new_matrix[i, :] = matrix[word]

    np.save(path + '.npy', new_matrix)
    save_vocabulary(path + '.vocab', iw)
Project: histwords    Author: williamleif
def main():
    args = docopt("""
    Usage:
        pmi2svd.py [options] <pmi_path> <output_path>

    Options:
        --dim NUM    Dimensionality of eigenvectors [default: 500]
        --neg NUM    Number of negative samples; subtracts its log from PMI [default: 1]
    """)

    pmi_path = args['<pmi_path>']
    output_path = args['<output_path>']
    dim = int(args['--dim'])
    neg = int(args['--neg'])

    explicit = PositiveExplicit(pmi_path, normalize=False, neg=neg)

    ut, s, vt = sparsesvd(explicit.m.tocsc(), dim)

    np.save(output_path + '.ut.npy', ut)
    np.save(output_path + '.s.npy', s)
    np.save(output_path + '.vt.npy', vt)
    save_vocabulary(output_path + '.words.vocab', explicit.iw)
    save_vocabulary(output_path + '.contexts.vocab', explicit.ic)
Project: histwords    Author: williamleif
def worker(proc_num, queue, out_dir, in_dir, count_dir, words, dim, num_words, min_count=100):
    while True:
        if queue.empty():
            break
        year = queue.get()
        print "Loading embeddings for year", year
        time.sleep(random.random() * 120)
        valid_words = set(words_above_count(count_dir, year, min_count))
        print len(valid_words)
        words = list(valid_words.intersection(words[year][:num_words]))
        print len(words)
        base_embed = Explicit.load((in_dir + INPUT_FORMAT).format(year=year), normalize=False)
        base_embed = base_embed.get_subembed(words, restrict_context=True)
        print "SVD for year", year
        u, s, v = randomized_svd(base_embed.m, n_components=dim, n_iter=5)
        print "Saving year", year
        np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-u.npy", u)
        np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-v.npy", v)
        np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-s.npy", s)
        write_pickle(base_embed.iw, (out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-vocab.pkl")
Project: histwords    Author: williamleif
def align_years(years, rep_type, in_dir, out_dir, count_dir, min_count, **rep_args):
    first_iter = True
    base_embed = None
    for year in years:
        print "Loading year:", year
        year_embed =  create_representation(rep_type, in_dir + str(year), **rep_args)
        year_words = words_above_count(count_dir, year, min_count)
        year_embed.get_subembed(year_words)
        print "Aligning year:", year
        if first_iter:
            aligned_embed = year_embed
            first_iter = False
        else:
            aligned_embed = alignment.smart_procrustes_align(base_embed, year_embed)
        base_embed = aligned_embed
        print "Writing year:", year
        foutname = out_dir + str(year)
        np.save(foutname + "-w.npy",aligned_embed.m)
        write_pickle(aligned_embed.iw, foutname + "-vocab.pkl")
Project: FrankWolfe    Author: neu-spiral
def safeWrite(rdd,outputfile,dvrdump=False):
    """Save the rdd in the given directory.

    Keyword arguments:
    --rdd: given rdd to be saved
    --outputfile: desired directory to save rdd
    """
    if os.path.isfile(outputfile):
       os.remove(outputfile)    
    elif os.path.isdir(outputfile):
       shutil.rmtree(outputfile)    

    if dvrdump:
        rdd_list = rdd.collect()
        with open(outputfile,'wb') as f:
            count = 0
            for item in rdd_list:
                f.write(str(item))
                count = count+1
                if count < len(rdd_list):
                    f.write("\n")
    else:
       rdd.saveAsTextFile(outputfile)
Project: untwist    Author: IoSR-Surrey
def save(self):
        if self.index is None:
            self.index = np.array(range(self.X.shape[0]))
        metadata = {
            "index": self.index.tolist(),
            "x_shape": self.X.shape,
            "x_type": str(self.X.dtype),
            "running_mean": self.running_mean.tolist(),
            "running_dev": self.running_dev.tolist(),
            "running_min": self.running_min.tolist(),
            "running_max": self.running_max.tolist(),
        }
        if self.Y is not None:
            metadata["y_shape"] = self.Y.shape
            metadata["y_type"] = str(self.Y.dtype)

        with open(self.path+"/dataset.json", "wt") as f:
            f.write(json.dumps(metadata))
        self.X.flush()
        if self.Y is not None: self.Y.flush()
Project: esys-pbi    Author: fsxfreak
def stop(self):
        audio.say("Stopping Accuracy Test")
        logger.info('Stopping Accuracy_Test')
        self.screen_marker_state = 0
        self.active = False
        self.close_window()

        matched_data = calibrate.closest_matches_monocular(self.gaze_list,self.ref_list)
        pt_cloud = calibrate.preprocess_2d_data_monocular(matched_data)
        logger.info("Collected {} data points.".format(len(pt_cloud)))

        if len(pt_cloud) < 20:
            logger.warning("Did not collect enough data.")
            return

        pt_cloud = np.array(pt_cloud)
        np.save(os.path.join(self.g_pool.user_dir,'accuracy_test_pt_cloud.npy'),pt_cloud)
        gaze,ref = pt_cloud[:,0:2],pt_cloud[:,2:4]
        error_lines = np.array([[g,r] for g,r in zip(gaze,ref)])
        self.error_lines = error_lines.reshape(-1,2)
        self.pt_cloud = pt_cloud
Project: TGIF-Release    Author: raingo
def main():
    import sys
    save_dir = sys.argv[1]
    all_imgs = []
    all_fet = []
    for line in sys.stdin:
        fet = load_np(line.strip())
        all_fet.append(fet)
        all_imgs.append(line.strip())

    fet = np.vstack(all_fet)
    np.save(osp.join(save_dir, 'c3d.npy'), fet)
    with open(osp.join(save_dir, 'c3d.list'), 'w') as writer:
        for img in all_imgs:
            writer.write('%s\n' % img)

    pass
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko
def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase):
    try:
        transformer = TensorFlowTransformer(def_path, caffemodel_path, phase=phase)
        print_stderr('Converting data...')
        if caffemodel_path is not None:
            data = transformer.transform_data()
            print_stderr('Saving data...')
            with open(data_output_path, 'wb') as data_out:
                np.save(data_out, data)
        if code_output_path:
            print_stderr('Saving source...')
            with open(code_output_path, 'wb') as src_out:
                src_out.write(transformer.transform_source())
        print_stderr('Done.')
    except KaffeError as err:
        fatal_error('Error encountered: {}'.format(err))
Project: brats17    Author: xf4j
def batch_works(k):
    if k == n_processes - 1:
        paths = all_paths[k * int(len(all_paths) / n_processes) : ]
    else:
        paths = all_paths[k * int(len(all_paths) / n_processes) : (k + 1) * int(len(all_paths) / n_processes)]

    for path in paths:
        o_path = os.path.join(output_path, os.path.basename(path))
        if not os.path.exists(o_path):
            os.makedirs(o_path)
        x, y, z = perturb_patch_locations(base_locs, patch_size / 16)
        probs = generate_patch_probs(path, (x, y, z), patch_size, image_size)
        selections = np.random.choice(range(len(probs)), size=patches_per_image, replace=False, p=probs)
        image = read_image(path)
        for num, sel in enumerate(selections):
            i, j, k = np.unravel_index(sel, (len(x), len(y), len(z)))
            patch = image[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                          int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                          int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2), :]
            f = os.path.join(o_path, str(num))
            np.save(f, patch)
Project: quoll    Author: LanguageMachines
def run(self):
        all_file_names = []
        all_labels = []

        for n, folder_name in enumerate(os.listdir(self.in_txtdir().path)):

            full_folder_name = self.in_txtdir().path+'/'+folder_name

            if os.path.isfile(full_folder_name):
                continue

            for file_name in os.listdir(full_folder_name):
                all_labels.append(n)
                all_file_names.append(full_folder_name+'/'+file_name)

        vectorizer = CountVectorizer(input='filename')
        vector = vectorizer.fit_transform(all_file_names)
        numpy.save(self.out_npy().path,vector)
        numpy.save('labels',numpy.array(all_labels)) #Where and how do we want to save this?

#This is just to test the tasks above
Project: PyFunt    Author: dnlcrl
def make_check_point(self):
        '''
        Save the solver's current status
        '''
        checkpoints = {
            'model': self.model,
            'epoch': self.epoch,
            'best_params': self.best_params,
            'best_val_acc': self.best_val_acc,
            'loss_history': self.loss_history,
            'val_acc_history': self.val_acc_history,
            'train_acc_history': self.train_acc_history}

        name = 'check_' + str(self.epoch)
        directory = os.path.join(self.path_checkpoints, name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        try:
            np.save(checkpoints, os.path.join(
                directory, name + '.pkl'))
        except:
            print('sorry, I haven\'t fixed this line, but it should be easy to fix, if you want you can try now and make a pull request')
            raise()
Project: PyFunt    Author: dnlcrl
def export_histories(self, path):
        if not os.path.exists(path):
            os.makedirs(path)
        i = np.arange(len(self.loss_history)) + 1
        z = np.array(zip(i, i*self.batch_size, self.loss_history))
        np.savetxt(path + 'loss_history.csv', z, delimiter=',', fmt=[
                   '%d', '%d', '%f'], header='iteration, n_images, loss')

        i = np.arange(len(self.train_acc_history), dtype=np.int)

        z = np.array(zip(i, self.train_acc_history))
        np.savetxt(path + 'train_acc_history.csv', z, delimiter=',', fmt=[
            '%d', '%f'], header='epoch, train_acc')

        z = np.array(zip(i, self.val_acc_history))
        np.savetxt(path + 'val_acc_history.csv', z, delimiter=',', fmt=[
            '%d', '%f'], header='epoch, val_acc')
        np.save(path + 'loss', self.loss_history)
        np.save(path + 'train_acc_history', self.train_acc_history)
        np.save(path + 'val_acc_history', self.val_acc_history)
Project: tensorflow-action-conditional-video-prediction    Author: williamd4112
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print pred_data.shape
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: radar    Author: amoose136
def test_large_file_support():
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        raise SkipTest("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])
    except:
        raise SkipTest("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)
Project: voc-classification    Author: philkr
def dataAsImageDataLayer(voc_dir, tmp_dir, image_set='train', **kwargs):
    from caffe_all import L
    from os import path
    from python_layers import PY
    Py = PY('data')
    voc_data = VOCData(voc_dir, image_set)

    # Create a text file with all the paths
    source_file = path.join(tmp_dir, image_set+"_images.txt")
    if not path.exists( source_file ):
        f = open(source_file, 'w')
        for n in voc_data.image_names:
            print('%s 0'% voc_data.image_path[n], file=f)
        f.close()

    # Create a label file
    lbl_file = path.join(tmp_dir, image_set+"_images.lbl")
    if not path.exists( lbl_file ):
        np.save(open(lbl_file, 'wb'), [voc_data.labels[n] for n in voc_data.image_names])
    cs = kwargs.get('transform_param',{}).get('crop_size',0)
    return L.ImageData(source=source_file, ntop=2, new_width=cs, new_height=cs, **kwargs)[0], Py.LabelData(label=lbl_file, batch_size=kwargs.get('batch_size',1))
Project: rl-attack-detection    Author: yenchenlin
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print pred_data.shape
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: cortex    Author: rdevon
def save_mask(data, out_path):
    '''Save mask of data.

    Args:
        data (numpy.array): Data to mask
        out_path (str): Output path for mask.

    '''
    print 'Getting mask'

    s, n, x, y, z = data.shape
    mask = np.zeros((x, y, z))
    _data = data.reshape((s * n, x, y, z))

    mask[np.where(_data.mean(axis=0) > _data.mean())] = 1

    print 'Masked out %d out of %d voxels' % ((mask == 0).sum(), reduce(
        lambda x_, y_: x_ * y_, mask.shape))

    np.save(out_path, mask)
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper
def main():
    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)
    c=0
    last_time = time.time()
    while True:
        c+=1
        screen=grab_screen(title='')
        screenG=cv2.cvtColor(screen,cv2.COLOR_BGR2GRAY)
        screenG=cv2.resize(screenG,(80,60))
        keys=key_check()
        output=keys_to_output(keys)
        training_data.append([screenG,output])
        if c%10==0:
            print('Recording at ' + str((10 / (time.time() - last_time)))+' fps')
            last_time = time.time()

        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name,training_data)
Project: cupy    Author: cupy
def savez(file, *args, **kwds):
    """Saves one or more arrays into a file in uncompressed ``.npz`` format.

    Arguments without keys are treated as arguments with automatic keys named
    ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument
    list. The keys of arguments are used as keys in the ``.npz`` file, which
    are used for accessing NpzFile object when the file is read by
    :func:`cupy.load` function.

    Args:
        file (file or str): File or filename to save.
        *args: Arrays with implicit keys.
        **kwds: Arrays with explicit keys.

    .. seealso:: :func:`numpy.savez`

    """
    args = map(cupy.asnumpy, args)
    for key in kwds:
        kwds[key] = cupy.asnumpy(kwds[key])
    numpy.savez(file, *args, **kwds)
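
A brief usage sketch of the corresponding numpy.savez call that this wraps (array names below are illustrative):

import numpy as np

x = np.arange(10)
y = np.sin(x)

np.savez('signals.npz', x, amplitude=y)  # positional arg -> 'arr_0', keyword arg -> 'amplitude'

with np.load('signals.npz') as data:
    print(data['arr_0'], data['amplitude'])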
Project: ai-bs-summer17    Author: uchibe
def backupNetwork(self, model, backup):
        weightMatrix = []
        for layer in model.layers:
            weights = layer.get_weights()
            weightMatrix.append(weights)

        # np.save('weightMatrix.npy', weightMatrix)
        # print(weightMatrix.shape)
        i = 0
        for layer in backup.layers:
            weights = weightMatrix[i]
            layer.set_weights(weights)
            i += 1

    # def loadWeights(self,path):
    #     self.model.set_weights(load_model(path).get_weights())
Project: magenta    Author: tensorflow
def save_arrays(savedir, hparams, z_val):
  """Save arrays as npy files.

  Args:
    savedir: Directory where arrays are saved.
    hparams: Hyperparameters.
    z_val: Array to save.
  """
  z_save_val = np.array(z_val).reshape(-1, hparams.num_latent)

  name = FLAGS.tfrecord_path.split("/")[-1].split(".tfrecord")[0]
  save_name = os.path.join(savedir, "{}_%s.npy".format(name))
  with tf.gfile.Open(save_name % "z", "w") as f:
    np.save(f, z_save_val)

  tf.logging.info("Z_Save:{}".format(z_save_val.shape))
  tf.logging.info("Successfully saved to {}".format(save_name % ""))
Project: Auspex    Author: BBN-Q
def test_rabi_amp(self):
        """
        Test RabiAmpCalibration. Ideal data generated by simulate_rabiAmp.
        """

        ideal_data = [np.tile(simulate_rabiAmp(), self.nbr_round_robins)]
        np.save(self.filename, ideal_data)
        rabi_cal = cal.RabiAmpCalibration(self.q.label, num_steps = len(ideal_data[0])/(2*self.nbr_round_robins))
        cal.calibrate([rabi_cal])
        os.remove(self.filename)
        self.assertAlmostEqual(rabi_cal.pi_amp,1,places=2)
        self.assertAlmostEqual(rabi_cal.pi2_amp,0.5,places=2)
        #test update_settings
        new_settings = auspex.config.load_meas_file(cfg_file)
        self.assertAlmostEqual(rabi_cal.pi_amp, new_settings['qubits'][self.q.label]['control']['pulse_params']['piAmp'], places=4)
        self.assertAlmostEqual(rabi_cal.pi2_amp, new_settings['qubits'][self.q.label]['control']['pulse_params']['pi2Amp'], places=4)
        #restore original settings
        auspex.config.dump_meas_file(self.test_settings, cfg_file)
Project: sudokuextract    Author: hbldh
def _save_data(which, X, y, data_source):
    if data_source.lower() == 'mnist':
        data_source = 'mnist'
    else:
        data_source = 'se'

    if X.shape[0] != len(y):
        raise TypeError("Length of data samples ({0}) was not identical "
                        "to length of labels ({1})".format(X.shape[0], len(y)))

    # Convert to numpy array.
    if not isinstance(X, np.ndarray):
        X = np.array(X)
    if not isinstance(y, np.ndarray):
        y = np.array(y)

    # Write feature_data
    fname = resource_filename('sudokuextract.data', "{0}-{1}-data.gz".format(data_source, which))
    with gzip.GzipFile(fname, mode='wb') as f:
        np.save(f, X)

    # Write labels
    fname = resource_filename('sudokuextract.data', "{0}-{1}-labels.gz".format(data_source, which))
    with gzip.GzipFile(fname, mode='wb') as f:
        np.save(f, y)
Project: aueb.twitter.sentiment    Author: nlpaueb
def loadGlove(d=200):
    start = time.time()

    f1 = 'resources/words.pkl'
    f2 = 'resources/embeddings.npy'

    if (os.path.isfile(f1) and os.path.isfile(f2)):
        with open(f1, 'rb') as input:
            w = pickle.load(input)

        e = np.load(f2)
        glove = GloveDictionary.Glove(words=w, emb=e)
    else:
        glove = GloveDictionary.Glove(d)
        saveGlove(glove)

    end=time.time()
    return glove

#save trained models
Project: aueb.twitter.sentiment    Author: nlpaueb
def getConfidenceScores(features_train, labels_train, C):
    train_confidence = []
    #confidence scores for training data are computed using K-fold cross validation
    kfold = KFold(features_train.shape[0], n_folds=10)

    for train_index,test_index in kfold:
        X_train, X_test = features_train[train_index], features_train[test_index]
        y_train, y_test = labels_train[train_index], labels_train[test_index]

        #train classifier for the subset of train data
        m = SVM.train(X_train,y_train,c=C,k="linear")

        #predict confidence for test data and append it to list
        conf = m.decision_function(X_test)
        for x in conf:
                train_confidence.append(x)

    return np.array(train_confidence)

#save pos scores
Project: Cat-Segmentation    Author: ardamavi
def get_dataset(dataset_path='Data/Train_Data'):
    # Getting all data from data path:
    try:
        X = np.load('Data/npy_train_data/X.npy')
        Y = np.load('Data/npy_train_data/Y.npy')
    except:
        inputs_path = dataset_path+'/input'
        images = listdir(inputs_path) # Geting images
        X = []
        Y = []
        for img in images:
            img_path = inputs_path+'/'+img

            x_img = get_img(img_path).astype('float32').reshape(64, 64, 3)
            x_img /= 255.

            y_img = get_img(img_path.replace('input/', 'mask/mask_')).astype('float32').reshape(64, 64, 1)
            y_img /= 255.

            X.append(x_img)
            Y.append(y_img)
        X = np.array(X)
        Y = np.array(Y)
        # Create dateset:
        if not os.path.exists('Data/npy_train_data/'):
            os.makedirs('Data/npy_train_data/')
        np.save('Data/npy_train_data/X.npy', X)
        np.save('Data/npy_train_data/Y.npy', Y)
    X, X_test, Y, Y_test = train_test_split(X, Y, test_size=0.1, random_state=42)
    return X, X_test, Y, Y_test
Project: namegenderclassifier    Author: joaoalvarenga
def save(self, model_filename):
        self.__model.save("%s.model" % model_filename)
        np.save("%s.tvocab" % model_filename, np.asarray(self.__trigrams))
        np.save("%s.cvocab" % model_filename, np.asarray(self.__chars))
        np.save("%s.classes" % model_filename, np.asarray(self.__classes))
Project: variational-text-tensorflow    Author: carpedm20
def save_pkl(path, obj):
  with open(path, 'w') as f:
    cPickle.dump(obj, f)
    print(" [*] save %s" % path)
Project: variational-text-tensorflow    Author: carpedm20
def save_npy(path, obj):
  np.save(path, obj)
  print(" [*] save %s" % path)
Project: Deep360Pilot-optical-flow    Author: yenchenlin
def get_hof(name):
    print name

    FLOW_DIR = 'data/of_' + args.domain + '/' + name + '/'
    BOXES_DIR = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'

    n_frames = len(glob.glob(FLOW_DIR + '*.png'))

    # init boxes
    clip_boxes_index = 1
    clip_boxes = np.load(BOXES_DIR + 'roislist{:04d}.npy'.format(clip_boxes_index))

    # init hof
    hof_shape = (50, args.n_boxes, 12)
    hof = np.zeros(hof_shape)

    for i in xrange(1, n_frames+1):
        print "{}, Flow {}, ".format(name, i)
        # boxes
        new_clip_boxes_index = (i-1) / 50 + 1
        if clip_boxes_index != new_clip_boxes_index:
            # 1.1 save hof and init a new one
            np.save(BOXES_DIR + 'hof{:04d}.npy'.format(clip_boxes_index), hof)
            hof = np.zeros(hof_shape)

            # 2.1 update clip_boxes
            clip_boxes_index = new_clip_boxes_index
            clip_boxes = np.load(BOXES_DIR + 'roislist{:04d}.npy'.format(clip_boxes_index))

        flow_img = np.array(cv2.imread(FLOW_DIR + '{:06d}.png'.format(i)), dtype=np.float32)

        frame_boxes = clip_boxes[(i-1) % 50].astype(int)
        for box_id, (xmin, ymin, xmax, ymax) in enumerate(frame_boxes):
            xmin, ymin, xmax, ymax = preprocess_box(xmin, ymin, xmax, ymax)
            box_flow_img = flow_img[ymin:ymax, xmin:xmax, :]
            hof[(i-1) % 50][box_id], _ = flow_to_hist(box_flow_img)

    # save latest hof
    np.save(BOXES_DIR + 'hof{:04d}.npy'.format(clip_boxes_index), hof)
Project: pointnet    Author: charlesq34
def collect_point_label(anno_path, out_filename, file_format='txt'):
    """ Convert original dataset files to data_label file (each line is XYZRGBL).
        We aggregated all the points from each instance in the room.

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save collected points and labels (each line is XYZRGBL)
        file_format: txt or numpy, determines what file format to save.
    Returns:
        None
    Note:
        the points are shifted before save, the most negative point is now at origin.
    """
    points_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        labels = np.ones((points.shape[0],1)) * g_class2label[cls]
        points_list.append(np.concatenate([points, labels], 1)) # Nx7

    data_label = np.concatenate(points_list, 0)
    xyz_min = np.amin(data_label, axis=0)[0:3]
    data_label[:, 0:3] -= xyz_min

    if file_format=='txt':
        fout = open(out_filename, 'w')
        for i in range(data_label.shape[0]):
            fout.write('%f %f %f %d %d %d %d\n' % \
                          (data_label[i,0], data_label[i,1], data_label[i,2],
                           data_label[i,3], data_label[i,4], data_label[i,5],
                           data_label[i,6]))
        fout.close()
    elif file_format=='numpy':
        np.save(out_filename, data_label)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
            (file_format))
        exit()
Project: pointnet    Author: charlesq34
def collect_bounding_box(anno_path, out_filename):
    """ Compute bounding boxes from each instance in original dataset files on
        one room. **We assume the bbox is aligned with XYZ coordinate.**

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for that room.
            each line is x1 y1 z1 x2 y2 z2 label,
            where (x1,y1,z1) is the point on the diagonal closer to origin
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    bbox_label_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)

    bbox_label = np.concatenate(bbox_label_list, 0)
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min 
    bbox_label[:, 3:6] -= room_xyz_min 

    fout = open(out_filename, 'w')
    for i in range(bbox_label.shape[0]):
        fout.write('%f %f %f %f %f %f %d\n' % \
                      (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],
                       bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],
                       bbox_label[i,6]))
    fout.close()
Project: deep-learning    Author: ljanyst
def save_checkpoint(self, session, step):
        self.saver.save(session, self.logdir + "/model.ckpt", step)

#-------------------------------------------------------------------------------
Project: cloud-volume    Author: seung-lab
def encode_npz(subvol):
    """
    This file format is unrelated to np.savez
    We are just saving as .npy and the compressing
    using zlib. 
    The .npy format contains metadata indicating
    shape and dtype, instead of np.tobytes which doesn't
    contain any metadata.
    """
    fileobj = io.BytesIO()
    if len(subvol.shape) == 3:
        subvol = np.expand_dims(subvol, 0)
    np.save(fileobj, subvol)
    cdz = zlib.compress(fileobj.getvalue())
    return cdz
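
A possible inverse of encode_npz, assuming the same zlib-over-.npy layout (this helper is not part of the original snippet):

import io
import zlib
import numpy as np

def decode_npz(cdz):
    """Reverse encode_npz: decompress with zlib, then read the embedded .npy stream."""
    fileobj = io.BytesIO(zlib.decompress(cdz))
    return np.load(fileobj)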
Project: snake    Author: rhinech
def generate_data():
    """Generate grid of data for interpolation."""
    res = []
    for hopping in np.linspace(0.0, 0.12, GRID_SIZE):
        for mu in np.linspace(2.0, 3.0, GRID_SIZE):
            print(hopping, mu)
            res.append(np.concatenate([[hopping, mu], optimize(hopping, mu)]))
    res = np.array(res)
    np.save(r'data_%d' % GRID_SIZE, np.array(res))