Python progressbar module: ProgressBar() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use progressbar.ProgressBar().
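
The examples span two generations of the library: the legacy progressbar package (maxval=, .start()/.finish()) and its successor progressbar2 (max_value=, context-manager support, callable bars that wrap an iterable). A minimal sketch of the shared pattern, assuming progressbar2 is installed:

import time
import progressbar

# Wrap an iterable: the bar infers the length and updates itself.
for _ in progressbar.ProgressBar()(range(10)):
    time.sleep(0.05)

# Or drive the bar manually against a known maximum.
bar = progressbar.ProgressBar(max_value=10)
for i in range(10):
    time.sleep(0.05)
    bar.update(i + 1)
bar.finish()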

Project: TAC-GAN | Author: dashayushman
def prepare_inception_data(o_dir, i_dir):
    if not os.path.exists(o_dir):
        os.makedirs(o_dir)
        cnt = 0
        bar = progressbar.ProgressBar(redirect_stdout=True,
                                      max_value=progressbar.UnknownLength)
        for root, subFolders, files in os.walk(i_dir):
            if files:
                for f in files:
                    if 'jpg' in f:
                        f_name = str(cnt) + '_ins.' + f.split('.')[-1]
                        cnt += 1
                        file_dir = os.path.join(root, f)
                        dest_path = os.path.join(o_dir, f)
                        dest_new_name = os.path.join(o_dir, f_name)
                        copy(file_dir, o_dir)
                        os.rename(dest_path, dest_new_name)
                        bar.update(cnt)
        bar.finish()
        print('Total number of files: {}'.format(cnt))
Project: TAC-GAN | Author: dashayushman
def load_images(o_dir, i_dir, n_images=3000, size=128):
    prepare_inception_data(o_dir, i_dir)
    image_list = []
    done = False
    cnt = 0
    bar = progressbar.ProgressBar(redirect_stdout=True,
                                  max_value=progressbar.UnknownLength)
    for root, dirs, files in os.walk(o_dir):
        if files:
            for f in files:
                cnt += 1
                file_dir = os.path.join(root, f)
                image_list.append(ip.load_image_inception(file_dir, 0))
                bar.update(cnt)
                if len(image_list) == n_images:
                    done = True
                    break
        if done:
            break
    bar.finish()
    print('Finished Loading Files')
    return image_list
Project: sound-machine | Author: rhelmot
def render(self, length=None, progress=False):
        """
        Render this signal into an numpy array of floats. Return the array.

        :param length:      The length to render, in seconds. Optional.
        :param progress:    Whether to show a progress bar for rendering
        """
        if progress and not progressbar:
            print('Install the progressbar module to see a progress bar for rendering')
            progress = False

        duration = self.duration if length is None else length * SAMPLE_RATE
        if duration == float('inf'):
            duration = 3*SAMPLE_RATE
        else:
            duration = int(duration)
        out = numpy.empty((duration, 1))

        pbar = progressbar.ProgressBar(widgets=['Rendering: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()], maxval=duration-1).start() if progress else None

        for i in range(duration):
            out[i] = self.amplitude(i)
            if pbar: pbar.update(i)
        if pbar: pbar.finish()
        return out
Project: glassdoor-analysis | Author: THEdavehogue
def multi_core_scrape(num_pages, db_coll):
    '''
    Map the API scrape across number of processors - 1 for performance boost.

    INPUT:
        num_pages: int, number of pages to scrape
        db_coll: pymongo collection object, collection to add documents to

    OUTPUT:
        None, records inserted into MongoDB
    '''
    cpus = cpu_count() - 1
    pool = Pool(processes=cpus)
    pages = range(1, num_pages + 1)
    employers = pool.map(scrape_api_page, pages)
    pool.close()
    pool.join()
    print 'Inserting Employer Records into MongoDB . . .'
    pbar = ProgressBar()
    for page in pbar(employers):
        db_coll.insert_many(page)
Project: keras-molecules | Author: maxhodak
def main():
    uri, outfile, dataset = get_arguments()
    fd = tempfile.NamedTemporaryFile()
    progress = ProgressBar(widgets=[Percentage(), ' ', Bar(), ' ', ETA(), ' ', FileTransferSpeed()])

    def update(count, blockSize, totalSize):
        if progress.maxval is None:
            progress.maxval = totalSize
            progress.start()
        progress.update(min(count * blockSize, totalSize))

    urllib.urlretrieve(uri, fd.name, reporthook = update)
    if dataset == 'zinc12':
        df = pandas.read_csv(fd.name, delimiter = '\t')
        df = df.rename(columns={'SMILES':'structure'})
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
    elif dataset == 'chembl22':
        df = pandas.read_table(fd.name,compression='gzip')
        df = df.rename(columns={'canonical_smiles':'structure'})
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
    else:
        df = pandas.read_csv(fd.name, delimiter = '\t')
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
Project: Smelly-London | Author: Smelly-London
def main():

    start = timer()
    files = get_file_names()
    smell_results = []

    bar = progressbar.ProgressBar(max_value=len(files))
    processed_files = 0
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for file, smell in zip(files, executor.map(worker, files)):
            smell_results = smell_results + smell
            processed_files += 1
            bar.update(processed_files)
    smell_results = [x for x in smell_results if x]

    end = timer()
    print(end - start)
    dataminer = SmellDataMine()
    dataminer.save_to_database(smell_results)
Project: Generative-ConvACs | Author: HUJI-Deep
def knn_masked_data(trX,trY,missing_data_dir, input_shape, k):

    raw_im_data = np.loadtxt(join(script_dir,missing_data_dir,'index.txt'),delimiter=' ',dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir,missing_data_dir,'index_mask.txt'),delimiter=' ',dtype=str)
    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute',n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0],int(np.max(trY)+1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '), progressbar.Bar()], maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im=load_image(join(script_dir,missing_data_dir,raw_mask_data[i][0]), input_shape,1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps) # since mask is 1 at missing locations
        v_im=load_image(join(script_dir,missing_data_dir,raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask,(trX.shape[0],1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)        
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i,:] = knn_m.predict_proba(v_im.reshape(1,-1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
Project: jack | Author: uclmr
def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[XQAAnnotation]:

        if answers is None:
            answers = [None] * len(questions)
        preprocessed = []
        if len(questions) > 1000:
            bar = progressbar.ProgressBar(
                max_value=len(questions),
                widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '])
            for q, a in bar(zip(questions, answers)):
                preprocessed.append(self.preprocess_instance(q, a))
        else:
            for q, a in zip(questions, answers):
                preprocessed.append(self.preprocess_instance(q, a))

        return preprocessed
Project: jack | Author: uclmr
def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[MCAnnotation]:
        if answers is None:
            answers = [None] * len(questions)
        preprocessed = []
        if len(questions) > 1000:
            bar = progressbar.ProgressBar(
                max_value=len(questions),
                widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '])
            for i, (q, a) in bar(enumerate(zip(questions, answers))):
                preprocessed.append(self.preprocess_instance(i, q, a))
        else:
            for i, (q, a) in enumerate(zip(questions, answers)):
                preprocessed.append(self.preprocess_instance(i, q, a))

        return preprocessed
Project: blitznet | Author: dvornikita
def evaluate_network(self, ckpt):
        path = config.EVAL_DIR + '/Data/'
        self.filename = path + 'coco_%s_%s_%i.json' % (self.loader.split, args.run_name, ckpt)
        detections = []
        filenames = self.loader.get_filenames()

        bar = progressbar.ProgressBar()
        for i in bar(range(len(filenames))):
            img_id = filenames[i]
            detections.extend(self.process_image(img_id, i))
        with open(self.filename, 'w') as f:
            json.dump(detections, f)
        if args.segment:
            iou = self.compute_mean_iou()
        cocoEval = self.compute_ap()
        return self.compact_results(cocoEval.stats, ckpt)
Project: kripodb | Author: 3D-e-Chem
def to_pairs(self, pairs):
        """Copies labels and scores from self to pairs matrix.

        Args:
            pairs (SimilarityMatrix):

        """
        six.print_('copy labels', flush=True)
        self.build_label_cache()
        pairs.labels.update(self.cache_l2i)

        six.print_('copy matrix to pairs', flush=True)
        limit = self.scores.shape[0]
        bar = ProgressBar()
        for query_id in bar(six.moves.range(0, limit)):
            subjects = self.scores[query_id, ...]
            filled_subjects_ids = subjects.nonzero()[0]
            filled_subjects = [(query_id, i, subjects[i]) for i in filled_subjects_ids if query_id < i]
            if filled_subjects:
                pairs.pairs.table.append(filled_subjects)
Project: imap2emlbackup | Author: Noneus
def collect_mailids(server):
    folders = server.list_folders()

    #construct progressbar
    progressbar_widgets = [
        '[Searching for mails on server] ',
        progressbar.Percentage(),
        progressbar.Bar(marker=progressbar.RotatingMarker()), ' ']
    progressbar_instance = progressbar.ProgressBar(widgets=progressbar_widgets, maxval=len(folders)).start()

    #collect all mailids for all folders
    folder_contents = {}
    folder_progress = 0
    for flags, delimiter, folder in folders:
        #read all mailids for the folder
        server.select_folder(folder, readonly=True)
        folder_contents[folder] = server.search()

        #update progressbar
        folder_progress += 1
        progressbar_instance.update(folder_progress)

    progressbar_instance.finish()
    return folder_contents
Project: imap2emlbackup | Author: Noneus
def download(download_list, total_download_size):
    progressbar_widgets = [
        '[Downloading mails            ] ',
        progressbar.Percentage(),
        progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
        progressbar.ETA(), ' ',
        bitmath.integrations.BitmathFileTransferSpeed()]
    progressbar_instance = progressbar.ProgressBar(widgets=progressbar_widgets, maxval=int(total_download_size)).start()

    downloaded_size = bitmath.Byte(0)
    for folder, mails in download_list.items():
        server.select_folder(folder, readonly=True)
        for mailid, mailfilename, mailsize in mails:
            #make parent directory
            if not os.path.isdir(os.path.dirname(mailfilename)):
                os.makedirs(os.path.dirname(mailfilename))

            #download mail
            with open(mailfilename, 'wb') as mailfile:
                mailfile.write(server.fetch([mailid], ['RFC822'])[mailid][b'RFC822'])

            #update progressbar
            downloaded_size += mailsize
            progressbar_instance.update(int(downloaded_size))
    progressbar_instance.finish()
Project: gconv_experiments | Author: tscohen
def validate(test_data, test_labels, model, batchsize, silent, gpu):
    N_test = test_data.shape[0]
    pbar = ProgressBar(0, N_test)
    sum_accuracy = 0
    sum_loss = 0

    for i in range(0, N_test, batchsize):
        x_batch = test_data[i:i + batchsize]
        y_batch = test_labels[i:i + batchsize]

        if gpu >= 0:
            x_batch = cuda.to_gpu(x_batch.astype(np.float32))
            y_batch = cuda.to_gpu(y_batch.astype(np.int32))

        x = Variable(x_batch)
        t = Variable(y_batch)
        loss, acc = model(x, t, train=False)

        sum_loss += float(cuda.to_cpu(loss.data)) * y_batch.size
        sum_accuracy += float(cuda.to_cpu(acc.data)) * y_batch.size
        if not silent:
            pbar.update(i + y_batch.size)

    return sum_loss, sum_accuracy
Project: textobjdetection | Author: andfoy
def __filter_regions_by_class(self, regions):
        print("Filtering regions...")
        act_regions = []
        region_sub = {}
        bar = progressbar.ProgressBar()
        for region in bar(regions):
            try:
                reg_obj = self.region_objects[region.image.id][region.id]
                reg_obj = frozenset([x.lower()
                                     for x in reg_obj])
            except KeyError:
                reg_obj = frozenset({})

            if reg_obj in self.obj_idx:
                act_regions.append(region)
                if region.image.id not in region_sub:
                    region_sub[region.image.id] = {}
                reg_img = region_sub[region.image.id]
                global_region_img = self.region_objects[region.image.id]
                reg_img[region.id] = global_region_img[region.id]
        return act_regions, region_sub
Project: NANHM-for-GEC | Author: shinochin
def load_data(path):
    n_lines = count_lines(path)
    bar = progressbar.ProgressBar()
    train = []
    test = []
    print('loading...: %s' % path)
    with open(path) as f:
        i = 0
        for line in bar(f, max_value=n_lines):
            words = line.strip().split()
            if i < 1000:
                test.append(np.array(words))
                i+=1
            else:
                train.append(np.array(words))
    return train, test
Project: NANHM-for-GEC | Author: shinochin
def load_data(word_voc, char_voc, path):
    n_lines = count_lines(path)
    bar = progressbar.ProgressBar()
    data = []
    print('loading...: %s' % path)
    with open(path) as f:
        for line in bar(f, max_value=n_lines):
            words = line.strip().split()
            '''
            array = np.array([word_voc.get(w, UNK) for w in words], dtype=np.int32)
            unk_words = np.array(words)[array==UNK]
            unk_array = np.array([
                np.array([char_voc.get(c, UNK) for c in list(w)], dtype=np.int32)
                for w in unk_words])
            array = np.array([array, unk_array])
            if len(unk_array)!=0:
                print(array)
            '''
            data.append(np.array(words))
    return data
Project: fabric8-analytics-tagger | Author: fabric8-analytics
def progressbarize(iterable, progress=False):
    """Construct progressbar for loops if progressbar requested, otherwise return directly iterable.

    :param iterable: iterable to use
    :param progress: True if print progressbar
    """
    if progress:
        # The casting to list is due to possibly yielded value that prevents
        # ProgressBar to compute overall ETA
        return progressbar.ProgressBar(widgets=[
            progressbar.Timer(), ', ',
            progressbar.Percentage(), ', ',
            progressbar.SimpleProgress(), ', ',
            progressbar.ETA()
        ])(list(iterable))

    return iterable
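
A hypothetical call, for illustration; the input file and handle() are placeholders. Because progressbarize materializes the iterable into a list, the bar can report SimpleProgress and an overall ETA even when it is handed a generator:

names = (line.strip() for line in open('packages.txt'))  # hypothetical input
for name in progressbarize(names, progress=True):
    handle(name)  # placeholder for per-item work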
Project: 2FAssassin | Author: maxwellkoh
def bruteforce():
    import progressbar
    from time import sleep
    bar = progressbar.ProgressBar(maxval=60, \
        widgets=[progressbar.Bar('==', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for i in xrange(10):
        bar.update(i+1)
        sleep(0.05)
        wordlist = "/root/2fassassin/crack/wordlist/2fa-wordlist.txt"
        target = "/root/2fassassin/loot/*.pfx"
        sign = ""
        sign += "crackpkcs12 -v -b"
        sign += " "
        sign += target
        sign += "| tee crack.log"
        os.system(sign)
    bar.finish()
    sys.exit()
Project: 2FAssassin | Author: maxwellkoh
def bruteforce():
    import progressbar
    from time import sleep
    bar = progressbar.ProgressBar(maxval=60, \
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for i in xrange(10):
        bar.update(i+1)
        sleep(0.05)
        wordlist = "/root/2fassassin/crack/wordlist/2fa-wordlist.txt"
        target = "/root/2fassassin/loot/*.pfx"
        sign = ""
        sign += "crackpkcs12 -v -b"
        sign += " "
        sign += target
        sign += "| tee crack.log"
        os.system(sign)
    bar.finish()
    sys.exit()
Project: ML-From-Scratch | Author: eriklindernoren
def __init__(self, n_estimators, learning_rate, min_samples_split,
                 min_impurity, max_depth, regression):
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.min_samples_split = min_samples_split
        self.min_impurity = min_impurity
        self.max_depth = max_depth
        self.regression = regression
        self.bar = progressbar.ProgressBar(widgets=bar_widgets)

        # Square loss for regression
        # Log loss for classification
        self.loss = SquareLoss()
        if not self.regression:
            self.loss = CrossEntropy()

        # Initialize regression trees
        self.trees = []
        for _ in range(n_estimators):
            tree = RegressionTree(
                    min_samples_split=self.min_samples_split,
                    min_impurity=min_impurity,
                    max_depth=self.max_depth)
            self.trees.append(tree)
Project: ML-From-Scratch | Author: eriklindernoren
def __init__(self, n_estimators=100, max_features=None, min_samples_split=2,
                 min_gain=0, max_depth=float("inf")):
        self.n_estimators = n_estimators    # Number of trees
        self.max_features = max_features    # Maxmimum number of features per tree
        self.min_samples_split = min_samples_split
        self.min_gain = min_gain            # Minimum information gain req. to continue
        self.max_depth = max_depth          # Maximum depth for tree
        self.progressbar = progressbar.ProgressBar(widgets=bar_widgets)

        # Initialize decision trees
        self.trees = []
        for _ in range(n_estimators):
            self.trees.append(
                ClassificationTree(
                    min_samples_split=self.min_samples_split,
                    min_impurity=min_gain,
                    max_depth=self.max_depth))
Project: bookrat | Author: DexterLB
def precompute(db, dir):
    m = megatron.Megatron(db)
    m.database.drop_all()
    m.database.create_database()

    importer = import_book.BookImporter(m)
    progress = progressbar.ProgressBar()
    importer.import_from(dir, progress)

    counting_worker.run(m)

    tfidf = tf_idf.TFIDF(m)
    tfidf.compute_idf()

    tfidf.compute_tfidf()

    tfidf.compute_top_words()
Project: LoReAn | Author: lfaino
def assembly(overlap_length, percent_identity, threads, wd, verbose):
    """
    """
    manage = Manager()
    queue = manage.Queue()
    pool = Pool(processes=int(threads), maxtasksperchild=10)

    new_commands = []
    for root, dirs, file in os.walk(wd):
        for fasta_file in file:
            complete_data = (fasta_file, percent_identity, overlap_length, wd, verbose, queue)
            new_commands.append(complete_data)
    results = pool.map_async(iAssembler, new_commands)
    with progressbar.ProgressBar(max_value=len(new_commands)) as bar:
        while not results.ready():
            size = queue.qsize()
            bar.update(size)
            time.sleep(1)
Project: koko | Author: biggorilla-gh
def __load_embeding_model(self, file_path, max_vocab_size=100000):
        self.__embed_vectors = dict()
        if not file_path:
            print('Embeddings file not provided')
            return
        if not os.path.exists(file_path):
            print('Embeddings file not found:', file_path)
            return

        print('Loading the embedding model from:', file_path)
        bar = progressbar.ProgressBar(max_value=max_vocab_size)
        with open(file_path, "r") as embed_f:
            for line in embed_f:
                try:
                    tab = line.rstrip().split()
                    word = tab[0].lower()
                    if word not in self.__embed_vectors:
                        vec = numpy.array(tab[1:], dtype=float)
                        self.__embed_vectors[word] = vec
                except ValueError:
                    continue
                bar.update(len(self.__embed_vectors))
                if len(self.__embed_vectors) == max_vocab_size:
                    bar.finish()
                    return
Project: Steghide-Brute-Force-Tool | Author: Va5c0
def Steg_brute(ifile, dicc):
    i = 0
    ofile = ifile.split('.')[0] + "_flag.txt"
    nlines = len(open(dicc).readlines())
    with open(dicc, 'r') as passFile:
        pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=nlines).start()
        for line in passFile.readlines():
            password = line.strip('\n')
            r = commands.getoutput("steghide extract -sf %s -p '%s' -xf %s" % (ifile, password, ofile))
            if not "no pude extraer" in r and not "could not extract" in r:
                print(color.GREEN + "\n\n " + r + color.ENDC)
                print("\n\n [+] " + color.INFO + "Information obtained with password:" + color.GREEN + " %s\n" % password + color.ENDC)
                if check_file(ofile):
                    with open(ofile, 'r') as outfile:
                        for line in outfile.readlines():
                            print(line)
                break
            pbar.update(i + 1)
            i += 1
Project: mmd | Author: dougalsutherland
def emit(self, record):
        import progressbar as pb

        msg = json.loads(record.msg)
        # print(msg)
        if msg[0] == 'SET':
            self.pbar.update(msg[1])
        elif msg[0] == 'START':
            print(msg[1] + ':', file=sys.stderr)
            self.pbar = pb.ProgressBar(maxval=msg[2], **self.pbar_args)
            self.pbar.start()
        elif msg[0] == 'DONE':
            self.pbar.finish()
            del self.pbar
            print('', file=sys.stderr)
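
The handler drives a bar from log records whose msg is a JSON-encoded list, following a small START/SET/DONE protocol. A hypothetical producer side, for illustration (the logger name and the total of 100 are assumptions):

import json
import logging

log = logging.getLogger('progress')  # assumed to have this handler attached
log.info(json.dumps(['START', 'rendering', 100]))  # description and maxval
for i in range(100):
    log.info(json.dumps(['SET', i]))  # cumulative position
log.info(json.dumps(['DONE']))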
Project: esper | Author: scanner-research
def progress_bar(n):
    import progressbar
    return progressbar.ProgressBar(
        max_value=n,
        widgets=[
            progressbar.Percentage(),
            ' ',
            '(',
            progressbar.SimpleProgress(),
            ')',
            ' ',
            progressbar.Bar(),
            ' ',
            progressbar.AdaptiveETA(),
        ])


# http://code.activestate.com/recipes/577058/
Project: nengo_dl | Author: nengo
def sub(self, msg=None, **kwargs):
        """Creates a new progress bar for tracking a sub-process.

        Parameters
        ----------
        msg : str, optional
            Description of sub-process
        """

        if self.sub_bar is not None and self.sub_bar.finished is False:
            self.sub_bar.finish()

        self.sub_bar = ProgressBar(
            present="%s: %s" % (self.present, msg) if msg else self.present,
            **kwargs)
        self.sub_bar.finish = partial(self.sub_bar.finish, end="\r")

        return self.sub_bar
Project: zabbix-scripts | Author: globocom
def deleteHostsByHostgroup(groupname):
    hostgroup = zapi.hostgroup.get(output=['groupid'],filter={'name': groupname})
    if len(hostgroup) != 1:
        logger.error('Hostgroup not found: %s\n\tFound this: %s' % (groupname,hostgroup))
    groupid = int(hostgroup[0]['groupid'])
    hosts = zapi.host.get(output=['name','hostid'],groupids=groupid)
    total = len(hosts)
    logger.info('Hosts found: %d' % (total))
    if ( args.run ):
        x = 0
        bar = ProgressBar(maxval=total,widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        logger.echo = False
        for host in hosts:
            x = x + 1
            bar.update(x)
            logger.debug('(%d/%d) >> Removing >> %s' % (x, total, host))
            out = zapi.globo.deleteMonitors(host['name'])
        bar.finish()
        logger.echo = True
    else:
        logger.info('No host removed due to --no-run arg. Full list of hosts:')
        for host in hosts:
            logger.info('%s' % host['name'])
    return
Project: zabbix-scripts | Author: globocom
def hosts_disable_all():
  """
  host status 0 = enabled
  host status 1 = disabled
  """
  logger.info('Disabling all hosts, in blocks of 1000')
  hosts = zapi.host.get(output=[ 'hostid' ], search={ 'status': 0 })
  maxval = int(ceil(len(hosts) / 1000.0))
  bar = ProgressBar(maxval=maxval,widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
  i = 0
  for i in xrange(maxval):
    block = hosts[:1000]
    del hosts[:1000]
    result = zapi.host.massupdate(hosts=[ x for x in block ], status=1)
    i += 1
    bar.update(i)
  bar.finish()
  logger.info('Done')
  return
Project: zabbix-scripts | Author: globocom
def proxy_passive_to_active():
  """
  proxy status 5 = active
  proxy status 6 = passive
  """
  logger.info('Change all proxys to active')
  proxys = zapi.proxy.get(output=[ 'shorten', 'host' ],
    filter={ 'status': 6 })
  if len(proxys) == 0:
    logger.info('Done')
    return
  bar = ProgressBar(maxval=len(proxys), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
  i = 0
  for x in proxys:
    i += 1
    proxyid = x['proxyid']
    result = zapi.proxy.update(proxyid=proxyid, status=5)
    logger.echo = False
    logger.debug('Changed from passive to active proxy: %s' % (x['host']))
    bar.update(i)
  bar.finish()
  logger.echo = True
  logger.info('Done')
  return
Project: pyIceCat | Author: moonlitesolutions
def _parse(self, xml_file):
        self.xml_file = xml_file
        self.key_count = 0

        if not self.suppliers:
            self.suppliers = IceCatSupplierMapping(log=self.log, auth=self.auth, data_dir=self.data_dir)
        if not self.categories:
            self.categories = IceCatCategoryMapping(log=self.log, data_dir=self.data_dir, auth=self.auth)


        print("Parsing products from index file:", xml_file)
        with progressbar.ProgressBar(max_value=progressbar.UnknownLength) as self.bar:
            with open(self.xml_file, 'rb') as f:
                self.o = xmltodict.parse(f, attr_prefix='', postprocessor=self._postprocessor,
                    namespace_separator='', process_namespaces=True, namespaces=self._namespaces)

            # peel down to file key
            self.o = self.o['icecat-interface']['files.index']['file']
            self.log.info("Parsed {} products from IceCat catalog".format(str(len(self.o))))
        return len(self.o)
Project: cdm | Author: riptano
def load(self, table):
        cache = {}

        def save(row):
            (query, values) = self.get_insert(row, table)
            try:
                prepared = cache[query]
            except KeyError:
                prepared = self.session.prepare(query)
                cache[query] = prepared
            bound = prepared.bind(values)
            self.session.execute(bound)

        pool = Pool(100)
        i = 0
        print "Loading {}".format(table)
        with ProgressBar(max_value=len(self.dataframe)) as p:
            for _ in pool.imap_unordered(save, self.iter()):
                i += 1
                if i % 10 == 0:
                    p.update(i)
Project: VAE_tensorflow | Author: int8
def train(self, epochs, batch_size, learning_rate, save_to=None):

        self.train_step = pt.apply_optimizer(tf.train.AdamOptimizer(learning_rate, epsilon=1), losses = [self.error_function])
        init = tf.initialize_all_variables()
        self.sess.run(init)
        pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=epochs).start()
        while self.get_epoch() < epochs:
            input_data = self.hdf5reader.next()
            _, loss_value = self.sess.run(
                [self.train_step, self.error_function],
                {
                    self.encoder.input_data: input_data
                }
            )
            pbar.update(self.get_epoch())
        pbar.finish()
Project: VAE_tensorflow | Author: int8
def images_to_hdf5(dir_path, output_hdf5, size = (112,112), channels = 3, resize_to = None):
    files = sorted(os.listdir(dir_path))
    nr_of_images = len(files)
    if resize_to:
        size = resize_to
    i = 0
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=nr_of_images).start()
    data = np.empty(shape=(nr_of_images, size[0], size[1], channels), dtype=np.uint8)
    for f in files:
        datum = imread(dir_path + '/' + f)
        if resize_to:
            datum = np.asarray(Image.fromarray((datum), 'RGB').resize((size[0],size[1]), PIL.Image.ANTIALIAS))
        data[i,:,:,:] = datum
        i = i + 1
        pbar.update(i)
    pbar.finish()
    with h5py.File(output_hdf5, 'w') as hf:
        hf.create_dataset('data', data=data)
Project: pyroSAR | Author: johntruckenbrodt
def identify_many(scenes):
    """
    return metadata handlers of all valid scenes in a list, similar to function identify
    prints a progressbar
    :param scenes: a list of file names
    :return: a list of pyroSAR metadata handlers
    """
    idlist = []
    pbar = pb.ProgressBar(maxval=len(scenes)).start()
    for i, scene in enumerate(scenes):
        if isinstance(scene, ID):
            idlist.append(scene)
        else:
            try:
                id = identify(scene)
                idlist.append(id)
            except IOError:
                continue
        pbar.update(i + 1)
    pbar.finish()
    return idlist
Project: deepjets | Author: deepjets
def test(location):
    """Test with a single bar.

    Input: location - tuple (x, y) defining the position on the
                      screen of the progress bar
    """
    # fd is an object that has a .write() method
    writer = Writer(location)
    pbar = ProgressBar(fd=writer)
    # progressbar usage
    pbar.start()
    for i in range(100):
        # do stuff
        # time taken for process is function of line number
        # t_wait = location[1] / 100
        # time take is random
        t_wait = random.random() / 50
        time.sleep(t_wait)
        # update calls the write method
        pbar.update(i)

    pbar.finish()
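
Writer is defined elsewhere in the project; a minimal sketch of such a class, assuming an ANSI-capable terminal (hypothetical, not the project's exact code):

import sys

class Writer(object):
    """Write output at a fixed (x, y) screen position using ANSI cursor movement."""
    def __init__(self, location):
        self.location = location

    def write(self, string):
        x, y = self.location
        sys.stdout.write('\x1b[%d;%dH%s' % (y, x, string))  # CSI row;col H repositions the cursor
        sys.stdout.flush()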
Project: deepjets | Author: deepjets
def test_bars(locations):
    """Test with multiple bars.

    Input: locations - a list of location (x, y) tuples
    """
    writers = [Writer(loc) for loc in locations]
    pbars = [ProgressBar(fd=writer) for writer in writers]
    for pbar in pbars:
        pbar.start()

    for i in range(100):
        time.sleep(0.01)
        for pbar in pbars:
            pbar.update(i)

    for pbar in pbars:
        pbar.finish()
Project: peda-arm | Author: alset0326
def do():
    system_calls = {}
    p = progressbar.ProgressBar(maxval=500).start()
    for index, line in enumerate(open('unistd.h', 'r').readlines()):
        # print repr(line.strip())
        try:
            if '(' in line:
                p.update(index)
                num = re.search('\((.*)\)', line).group(1)
                num = int(eval(num))
                func_name = line.split('(')[0].strip()
                system_calls[num] = [func_name]
                system_calls[num].extend(get_system_call(func_name))
                # print system_calls[num][1]
        except Exception as e:
            print index, line, e.message

    p.finish()
    open('system_calls', 'w').write(zlib.compress(pickle.dumps(system_calls)))
Project: IBRel | Author: lasigeBioTM
def load_corpus(self, corenlpserver, process=True):
        """
        Use the PubMed web services to retrieve the title and abstract of each PMID
        :param corenlpserver:
        :param process:
        :return:
        """
        time_per_abs = []
        widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.AdaptiveETA(), ' ', pb.Timer()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=len(self.pmids), redirect_stdout=True).start()
        for i, pmid in enumerate(self.pmids):
            t = time.time()
            newdoc = PubmedDocument(pmid)
            if newdoc.abstract == "":
                logging.info("ignored {} due to the fact that no abstract was found".format(pmid))
                continue
            newdoc.process_document(corenlpserver, "biomedical")
            self.documents["PMID" + pmid] = newdoc
            abs_time = time.time() - t
            time_per_abs.append(abs_time)
            pbar.update(i+1)
        pbar.finish()
        abs_avg = sum(time_per_abs)*1.0/len(time_per_abs)
        logging.info("average time per abstract: %ss" % abs_avg)
Project: aws-s3-bruteforce | Author: Ucnt
def run_random_search(search):
    #Create progressbar to show how many searches have been done, removing eta
    search.progressbar = ProgressBar(1)
    search.progressbar.fmt = '''%(percent)3d%% %(bar)s %(current)s/%(total_items)s   %(items_per_sec)s   Run time: %(run_time)s'''

    #Get all public buckets that have been found so far
    search.buckets_found = get_buckets_found(search.output_file)
    #Create a string generator
    search.string_generator = createStringGenerator(search)

    my_queue = Queue.Queue()
    for i in range(search.threads):
        t = threading.Thread(target=search_instance, args=(search, ))
        my_queue.put(t)

    #Run all of the threads
    while not my_queue.empty():
        my_queue.get().start()
Project: Cortex-Analyzers | Author: CERT-BDF
def run(path, quiet=False):
    """
    Downloads all available hash files to a given path.

    :param path: Path to download directory
    :param quiet: If set to True, no progressbar is displayed
    """
    if os.path.isdir(path):
        session = requests.Session()
        session.headers = {'User-agent': 'Mozilla/5.0 Chrome/57.0.2987.110'}
        # Find the highest three-digit page number (rendered as ">NNN<") in the listing
        page_listing = session.get('https://virusshare.com/hashes.4n6').text
        max_num = max(int(re.sub(r'[\<\>]', '', token))
                      for token in re.findall(r'\>[1-9][0-9]{2}\<', page_listing))
        if not quiet:
            p = progressbar.ProgressBar(max_value=max_num)
        for i in range(max_num):
            filename = str(i).zfill(3) + '.md5'
            if os.path.exists(os.path.join(path, filename)):
                continue
            if not quiet:
                p.update(i)
            url = URL + filename
            head = session.head(url)
            if head.status_code == 200:
                body = session.get(url, stream=True)
                with io.open(os.path.join(path, str(i).zfill(3) + '.md5'), mode='wb') as afile:
                    for chunk in body.iter_content(chunk_size=1024):
                        afile.write(b'' + chunk)
                body.close()
    else:
        print('Given path is not a directory.')
        sys.exit(1)
Project: Farm-server | Author: MakersLab
def main():
    print('Looking for latest release')
    response = requests.get(RELEASES_URL)
    if(response.ok):
        release = json.loads(response.text)
        print('Found latest release with version {0}'.format(release['tag_name']))
        if(len(release['assets']) > 0):
            downloadableAssetIndex = -1
            for index,asset in enumerate(release['assets']):
                if(asset['name'][0:5] == 'build' and downloadableAssetIndex == -1):
                    downloadableAssetIndex = index
            if(downloadableAssetIndex == -1):
                print('Could not find downloadable release build, aborting')
            else:
                print('Found downloadable build with name {0}'.format(release['assets'][downloadableAssetIndex]['name']))
                print('Downloading latest client release with version {0}'.format(release['tag_name']))
                buildDownloadUrl = release['assets'][downloadableAssetIndex]['browser_download_url']
                buildFileName = release['assets'][downloadableAssetIndex]['name']
                r = requests.get(buildDownloadUrl, stream=True)
                with progressbar.ProgressBar(max_value=len(r.content)) as bar:
                    deleteContentsOfFolder(DOWNLOAD_FOLDER)
                    downloaded = 0
                    with open(join(DOWNLOAD_FOLDER, buildFileName), 'wb') as file:
                        for chunk in r.iter_content(chunk_size=1024):
                            # update() expects the cumulative byte count, not the chunk size
                            downloaded += len(chunk)
                            bar.update(downloaded)
                            file.write(chunk)
                print('Download finished')
                deleteContentsOfFolder(DESTINATION_FOLDER)

                with zipfile.ZipFile(join(DOWNLOAD_FOLDER,buildFileName), 'r') as zip:
                    print('Extracting downloaded file into {0}'.format(DESTINATION_FOLDER))
                    zip.extractall(DESTINATION_FOLDER)
                print('Finished')
                return True
    else:
        print('Could not get info about latest release')
        return False
Project: dnsbrute | Author: XiphosResearch
def run(args):
    if args.download:
        resolvers = download_resolvers()
    else:
        resolvers = load_resolvers(args.resolvers)
    random.shuffle(resolvers)

    pool = gevent.pool.Pool(args.concurrency)

    bar = progressbar.ProgressBar(redirect_stdout=True, redirect_stderr=True)
    for resolver in bar(resolvers):
        pool.add(gevent.spawn(check_resolver, args, resolver))
    pool.join()
Project: dnsbrute | Author: XiphosResearch
def __init__(self, options):
        self.wildcards = []
        self.options = options
        self.domains = []
        if options.domains:
            self.domains += filter(None, options.domains.read().split("\n"))
        self.domains += options.domain
        self.domains = list(set(self.domains))
        random.shuffle(self.domains)
        self.resolvers = map(str.strip, filter(None, options.resolvers.read().split("\n")))
        random.shuffle(self.resolvers)
        self.names = [X for X in self._load_names(options.names)]
        if options.progress:
            self.progress = progressbar.ProgressBar(
                redirect_stdout=True,
                redirect_stderr=True,
                widgets=[
                    progressbar.Percentage(),
                    progressbar.Bar(),
                    ' (', progressbar.ETA(), ') ',
                ])
        else:
            self.progress = None
        self.finished = 0
        LOG.info("%d names, %d resolvers, %d domains",
                 len(self.names), len(self.resolvers), len(self.domains))
Project: SnapStitch | Author: avikj
def compute_embeddings(images):
  """Runs inference on an image.

  Args:
    images: List of image file names.

  Returns:
    Dict mapping image file name to embedding.
  """

  # Creates graph from saved GraphDef.
  create_graph()
  filename_to_emb = {}
  config = tf.ConfigProto(device_count = {'GPU': 0})
  bar = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
  with tf.Session(config=config) as sess:
    i = 0
    for image in bar(images):
      if not tf.gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image) 
      image_data = tf.gfile.FastGFile(image, 'rb').read()
      # Some useful tensors:
      # 'softmax:0': A tensor containing the normalized prediction across
      #   1000 labels.
      # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
      #   float description of the image.
      # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
      #   encoding of the image.
      # Runs the softmax tensor by feeding the image_data as input to the graph.
      softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
      embedding_tensor = sess.graph.get_tensor_by_name('pool_3:0')
      embedding = sess.run(embedding_tensor,
                             {'DecodeJpeg/contents:0': image_data})
      filename_to_emb[image] = embedding.reshape(2048)
      i += 1
      # print(image, i, len(images))
  return filename_to_emb

# temp_dir is a subdir of temp
Project: SnapStitch | Author: avikj
def main(project_id, video_basename, sampling_rate=3):
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # or any {'0', '1', '2'}
    video_name = video_basename[:video_basename.index('.')]
    # extract video frames
    extracted_frame_dir = os.path.join('temp', project_id, video_name, 'frames')
    mkdir_p(extracted_frame_dir)
    if not os.path.isdir(extracted_frame_dir):
        os.mkdir(extracted_frame_dir)
    video_path = os.path.join('videos', project_id, video_basename)
    vidcap = cv2.VideoCapture(video_path)
    print('Extracting video frames...')
    bar = progressbar.ProgressBar(maxval=101, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    fps = vidcap.get(CV_CAP_PROP_FPS)  # TODO
    # NaN compares unequal to everything, including itself, so `fps != float('nan')`
    # was always True; the self-equality test below detects an undefined frame rate.
    fps = fps if fps == fps else 25
    print 'actual fps', fps, 'sampling rate', sampling_rate
    success, image = vidcap.read()
    frames_to_extract = range(0, int(vidcap.get(CV_CAP_PROP_FRAME_COUNT)), int(round(fps / sampling_rate)))
    frame_count = len(frames_to_extract)
    for frame_pos in bar(frames_to_extract):
        vidcap.set(CV_CAP_PROP_POS_FRAMES, frame_pos)
        success, image = vidcap.read()
        # print('Read a new frame: %f ms'% vidcap.get(CV_CAP_PROP_POS_MSEC), success)
        cv2.imwrite(os.path.join(extracted_frame_dir, "%09d.jpg" % vidcap.get(CV_CAP_PROP_POS_MSEC)), image) # TODO (might still work)

    bar.finish()
Project: chakin | Author: chakki-works
def download(number, save_dir='./'):
    """Download pre-trained word vector
    :param number: int, index of the word vector to download
    :param save_dir: str, default './'
    :return: file path for downloaded file
    """
    df = load_datasets()

    row = df.iloc[[number]]
    url = ''.join(row.URL)
    if not url:
        print('The word vector you specified was not found. Please specify correct name.')

    widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets)

    def dlProgress(count, blockSize, totalSize):
        if pbar.max_value is None:
            pbar.max_value = totalSize
            pbar.start()

        pbar.update(min(count * blockSize, totalSize))

    file_name = url.split('/')[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, file_name)
    path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
    pbar.finish()
    return path
Project: downpour | Author: openstack
def __enter__(self):
        self.bar = progressbar.ProgressBar(
            widgets=[
                progressbar.Percentage(),
                ' ',
                progressbar.Bar(),
                progressbar.FileTransferSpeed(),
                ' ',
                progressbar.ETA(),
            ],
            max_value=self.max_value,
        )
        self.fd = open(self.output_path, 'wb')
        return self