Python click module: progressbar() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use click.progressbar().
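Before the project samples, here is a minimal sketch of the two calling patterns click.progressbar() supports: wrapping an iterable, where the bar advances once per item, and declaring a length and advancing the bar manually, which suits byte streams. The item list and byte counts below are illustrative only.

import time
import click

# Pattern 1: wrap an iterable; the bar advances once per item.
with click.progressbar([1, 2, 3, 4, 5], label='Processing items') as items:
    for item in items:
        time.sleep(0.1)  # stand-in for real work

# Pattern 2: declare a total length and advance manually, e.g. by bytes handled.
total_bytes = 10 * 1024  # illustrative size
with click.progressbar(length=total_bytes, label='Downloading') as bar:
    for _ in range(10):
        time.sleep(0.1)
        bar.update(1024)  # advance by the chunk size just processed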

Project: millilauncher    Author: fhfuih
def download(url, name, path):
    print('Downloading: {0}'.format(name))
    r = requests.get(url, stream=True)
    if r.status_code != requests.codes.ok:
        logging.log(level=logging.ERROR, msg='Unable to connect {0}'.format(url))
        r.raise_for_status()
    total_size = int(r.headers.get('Content-Length'))
    dir_name = os.path.dirname(path)
    temp_name = path + '.temp'
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    if os.path.exists(temp_name):
        os.remove(temp_name)
    # drive the bar by bytes written; iterating the bar as well would double-count progress
    with click.progressbar(length=total_size) as bar, open(temp_name, 'wb') as file:
        for chunk in r.iter_content(1024):
            file.write(chunk)
            bar.update(len(chunk))
    os.rename(temp_name, path)
Project: SIDR    Author: damurdock
def readFasta(fastaFile):
    """
    Reads a FASTA file and parses contigs for GC content.

    Args:
        fastaFile: The path to the FASTA file.
    Returns:
        contigs: A dictionary mapping contigIDs to sidr.common.Contig objects with GC content as a variable.
    """
    contigs = []
    if ".gz" in fastaFile: # should support .fa.gz files in a seamless (if slow) way
        openFunc = gzip.open
    else:
        openFunc = open
    with openFunc(fastaFile) as data:
        click.echo("Reading %s" % fastaFile)
        with click.progressbar(FastaIterator(data)) as fi:
            for record in fi:  # TODO: conditional formatting
                contigs.append(common.Contig(record.id.split(' ')[0], variables={"GC": GC(record.seq)}))
    if len(contigs) != len(set([x.contigid for x in contigs])): # exit if duplicate contigs, https://stackoverflow.com/questions/5278122/checking-if-all-elements-in-a-list-are-unique
        raise ValueError("Input FASTA contains duplicate contigIDs, exiting")
    return dict((x.contigid, x) for x in contigs)  # https://stackoverflow.com/questions/3070242/reduce-python-list-of-objects-to-dict-object-id-object
Project: SIDR    Author: damurdock
def readBAM(BAMFile, contigs):
    """
    Parses an aligned BAM file for coverage.

    Args:
        BAMFile: The BAM file to parse.
        contigs: Dictionary of sidr.common.Contig objects keyed by contigID, as returned by readFasta().
    Returns:
        contigs: Input contigs updated with coverage, measured as an
                 average over the whole contig.
    """
    alignment = pysam.AlignmentFile(BAMFile, "rb")
    click.echo("Reading BAM file")
    with click.progressbar(contigs) as ci:
        for contig in ci:
            covArray = [] # coverage over contig = sum(coverage per base)/number of bases
            for pile in alignment.pileup(region=str(contig)):
                covArray.append(pile.nsegments)
            try:
                contigs[contig].variables["Coverage"] = (sum(covArray) / len(covArray))
            except ZeroDivisionError: # should only occur if 0 coverage recorded
                contigs[contig].variables["Coverage"] = 0
    return contigs
Project: cget    Author: pfultz2
def download_to(url, download_dir, insecure=False):
    name = url.split('/')[-1]
    file = os.path.join(download_dir, name)
    click.echo("Downloading {0}".format(url))
    bar_len = 1000
    with click.progressbar(length=bar_len, width=70) as bar:
        def hook(count, block_size, total_size):
            percent = int(count*block_size*bar_len/total_size)
            if 0 < percent < bar_len:
                # Hack because we can't set the position
                bar.pos = percent
                bar.update(0)
        context = None
        if insecure: context = ssl._create_unverified_context()
        CGetURLOpener(context=context).retrieve(url, filename=file, reporthook=hook, data=None)
        bar.update(bar_len)
    if not os.path.exists(file):
        raise BuildError("Download failed for: {0}".format(url))
    return file
Project: Sentences-analysis    Author: sungminoh
def retrieve_full_sentence(lda_result_dic, fname):
    f = open(fname, 'w')
    csv_writer = csv.writer(f, delimiter=',')
    with click.progressbar(lda_result_dic.keys(), label='retrieving sentences') as bar:
        for i in bar:
            db = mdb.connect(**mysql_info)
            cur = db.cursor()
            for post_id, sentence_seq, categories in lda_result_dic[i]:
                query = """
                        select full_text from sentences where post_id = %s and sentence_seq = %s
                        """ % (post_id, sentence_seq)
                cur.execute(query)
                text = cur.fetchall()[0][0].encode('utf-8')
                csv_line = [i, post_id, sentence_seq,
                            ' '.join(categories),
                            text]
                csv_writer.writerow(csv_line)
            db.close()
    f.close()
Project: readquant    Author: Teichlab
def _requests_piecewise(self, handle):

        request_url = self.biomart_base + str(self)
        print("Making request to biomart...")
        response = requests.get(request_url, stream=True)
        if not response.ok:
            print("Request failed: %d" % response.status_code)
            return
        print("Response okay. Downloading...")
        total_length = response.headers.get('content-length')
        total_downloaded = 0
        if total_length is None:
            for block in response.iter_content(1024):
                handle.write(block)
                total_downloaded += 1024
                # stdscr.addstr('%d MB Downloaded' %
                #                  (total_downloaded / 1e6))
                # stdscr.refresh()
        else:
            with click.progressbar(length=int(total_length),
                                   label="Downloading reference") as bar:
                for block in response.iter_content(1024):
                    handle.write(block)
                    bar.update(1024)
Project: k2mosaic    Author: KeplerGO
def k2mosaic_mosaic_one(cadenceno, tpf_filenames, campaign, channel,
                        output_prefix='k2mosaic-c', progressbar=False, verbose=False):
    from .mosaic import KeplerChannelMosaic
    output_fn = "{}{:02d}-ch{:02d}-cad{}.fits".format(output_prefix, campaign, channel, cadenceno)
    if verbose:
        click.echo("\nStarted writing {}".format(output_fn))
    mosaic = KeplerChannelMosaic(campaign=campaign, channel=channel, cadenceno=cadenceno)
    if progressbar:
        with click.progressbar(tpf_filenames, label='Reading TPFs', show_pos=True) as bar:
            [mosaic.add_tpf(tpf) for tpf in bar]
    else:
        [mosaic.add_tpf(tpf) for tpf in tpf_filenames]
    mosaic.add_wcs()
    mosaic.writeto(output_fn)
    if verbose:
        click.secho('Finished writing {}'.format(output_fn), fg='green')
Project: rasberrypi-trapcamera    Author: Scifabric
def upload_task_pending(config):
    """Upload and create tasks for pending photos."""
    if connected_to_internet():
        img_files = [os.path.join(config.data, f)
                     for f in os.listdir(config.data)
                     if os.path.isfile(os.path.join(config.data, f))]
        if len(img_files) > 0:
            with click.progressbar(img_files,
                                   label="Uploading and creating task for pending images",
                                   ) as bar:
                for f in bar:
                    rsp = _upload_photo(config, f)
                    _create_task(config, rsp['photo_id'])
        else:
            msg = "WARNING: No files to upload. Images folder is empty."
            click.secho(msg, fg='yellow')
Project: client    Author: wandb
def pull(project, run, kind, entity):
    project, run = api.parse_slug(run, project=project)

    urls = api.download_urls(project, run=run, entity=entity)
    if len(urls) == 0:
        raise ClickException("Run has no files")
    click.echo("Downloading: {project}/{run}".format(
        project=click.style(project, bold=True), run=run
    ))

    for name in urls:
        if api.file_current(name, urls[name]['md5']):
            click.echo("File %s is up to date" % name)
        else:
            length, response = api.download_file(urls[name]['url'])
            with click.progressbar(length=length, label='File %s' % name,
                                   fill_char=click.style('&', fg='green')) as bar:
                with open(name, "wb") as f:
                    for data in response.iter_content(chunk_size=4096):
                        f.write(data)
                        bar.update(len(data))
Project: openag_python    Author: OpenAgInitiative
def load_fixture(fixture_file):
    """
    Populate the database from a JSON file. Reads the JSON file FIXTURE_FILE
    and uses it to populate the database. Fixture files should consist of a
    dictionary mapping database names to arrays of objects to store in those
    databases.
    """
    utils.check_for_local_server()
    local_url = config["local_server"]["url"]
    server = Server(local_url)
    fixture = json.load(fixture_file)
    for db_name, _items in fixture.items():
        db = server[db_name]
        with click.progressbar(
            _items, label=db_name, length=len(_items)
        ) as items:
            for item in items:
                item_id = item["_id"]
                if item_id in db:
                    old_item = db[item_id]
                    item["_rev"] = old_item["_rev"]
                    if item == old_item:
                        continue
                db[item_id] = item
Project: pydead    Author: srgypetrov
def parse_files(basedir, paths):
    init_imports, defined, used = {}, {}, set()
    with click.progressbar(paths) as bar:
        for path in bar:
            pyfile = PyFile(basedir, path)
            pyfile.parse()
            used.update(pyfile.used)
            for name, items in pyfile.defined.items():
                defined.setdefault(name, []).extend(items)
            if path.endswith('__init__.py'):
                # str.rstrip removes a set of characters, not a suffix; slice the suffix off instead
                module = pyfile.dot_path[:-len('.__init__')]
                for item_name, item in pyfile.ast_imported.items():
                    key = '.'.join((module, item_name))
                    if key != item:
                        init_imports[key] = item
    return init_imports, defined, used
Project: jenskipper    Author: Stupeflix
def import_(jenkins_url, dest_dir):
    '''
    Import jobs from JENKINS_URL into DEST_DIR.
    '''
    if op.exists(dest_dir):
        utils.sechowrap('Destination dir "%s" already exists' % dest_dir,
                        fg='red', bold=True)
        sys.exit(1)
    jobs_names, jenkins_url = jenkins_api.handle_auth(dest_dir,
                                                      jenkins_api.list_jobs,
                                                      jenkins_url)
    with click.progressbar(jobs_names, label='Importing jobs') as bar:
        pipes_bits, jobs_templates = write_jobs_templates(dest_dir,
                                                          jenkins_url,
                                                          bar)
    write_jobs_defs(dest_dir, jobs_templates, 'w')
    write_pipelines(dest_dir, pipes_bits, 'w')
    _write_default_contexts(dest_dir)
    _write_conf(dest_dir, jenkins_url)
    utils.print_jobs_list('Imported jobs:', jobs_names, fg='green')
Project: stream2segment    Author: rizac
def get_progressbar(show, **kw):
    """Returns a `click.progressbar` if `show` is True, otherwise a no-op class, so that we can
    run programs from code (do not print progress) and from terminal (print progress) by simply
    doing:

        isterminal = True  # or False for no-op class
        with get_progressbar(isterminal, length=..., ...) as bar:
            # do your stuff in iterators and call
            bar.update(num_increments)  # will update the terminal with a progressbar, or
                                        # do nothing (no-op) if isterminal=False
    """
    # (used as a context manager; presumably decorated with @contextmanager in the original module)
    if not show or kw.get('length', 1) == 0:
        yield Nop(**kw)
    else:
        # some custom setup if missing (the original glyphs were lost in extraction;
        # a filled/empty circle pair is assumed here):
        if 'fill_char' not in kw:
            kw['fill_char'] = '●'
        if 'empty_char' not in kw:
            kw['empty_char'] = '○'
        if 'bar_template' not in kw:
            kw['bar_template'] = '%(label)s %(bar)s %(info)s'
        with click_progressbar(**kw) as bar:
            yield bar
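Neither `Nop` nor `click_progressbar` is defined in this excerpt (`click_progressbar` is presumably an import alias for `click.progressbar`). A minimal no-op stand-in consistent with how `bar.update()` is used above might look like this sketch:

from click import progressbar as click_progressbar  # presumed import alias

class Nop(object):
    """A sketch of a no-op progress bar, not stream2segment's actual class."""

    def __init__(self, **kw):
        pass  # accept and ignore all progressbar keyword arguments

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # never swallow exceptions

    def update(self, increment):
        pass  # no-op: print nothing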
Project: pgocli    Author: pgocli
def require_steps(steps):
    for step in steps:
        if step not in INIT_STEPS:
            raise Exception('Unknown "{}" initialization step'.format(step))

    def require_steps_decorator(cmd):
        def cmd_wrapper(ctx, *args, **kwargs):
            api = ctx.obj.get('pgoapi')
            config = ctx.obj.get('config')

            if len(steps):
                with click.progressbar(
                    length=len(steps) * 2,
                    label='Initializing…'
                ) as progressbar:
                    for step in steps:
                        INIT_STEPS[step](ctx, api, config)
                        progressbar.update(1)
                        time.sleep(0.3)
                        progressbar.update(1)

                    click.echo()
            return cmd(ctx, *args, **kwargs)
        return cmd_wrapper
    return require_steps_decorator
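One caveat in the decorator above: `cmd_wrapper` discards the wrapped command's `__name__` and docstring, which click uses to build help text. A sketch of the same shape with `functools.wraps` (the step-running body is elided):

import functools

def require_steps(steps):
    def require_steps_decorator(cmd):
        @functools.wraps(cmd)  # preserve cmd's metadata for click's help output
        def cmd_wrapper(ctx, *args, **kwargs):
            # ... run the INIT_STEPS with a progress bar, as above ...
            return cmd(ctx, *args, **kwargs)
        return cmd_wrapper
    return require_steps_decorator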
Project: hocrviewer-mirador    Author: jbaiter
def index_documents(ctx, hocr_files, autocomplete_min_count):
    def show_fn(hocr_path):
        if hocr_path is None:
            return ''
        else:
            return hocr_path.name
    global repository
    if repository is None:
        repository = DatabaseRepository(ctx.obj['DB_PATH'])

    hocr_files = tuple(pathlib.Path(p) for p in hocr_files)
    with click.progressbar(hocr_files, item_show_func=show_fn) as hocr_files:
        for hocr_path in hocr_files:
            try:
                repository.ingest_document(hocr_path, autocomplete_min_count)
            except Exception as e:
                logger.error("Could not ingest {}".format(hocr_path))
                logger.exception(e)
Project: ntee    Author: studio-ousia
def build(db, entity_db, min_word_count, min_entity_count):
        word_counter = Counter()
        entity_counter = Counter()

        tokenizer = RegexpTokenizer()
        with click.progressbar(db.keys()) as bar:
            for title in bar:
                obj = db[title]
                text = obj['text']
                tokens = tokenizer.tokenize(text)

                word_counter.update(t.text.lower() for t in tokens)

                for (_, title, _) in obj['links']:
                    title = entity_db.resolve_redirect(title)
                    entity_counter[title] += 1

        word_dict = Trie([w for (w, c) in word_counter.iteritems()
                          if c >= min_word_count])
        entity_dict = Trie([e for (e, c) in entity_counter.iteritems()
                            if c >= min_entity_count])

        return Vocab(word_dict, entity_dict)
Project: ntee    Author: studio-ousia
def count_valid_words(self, vocab, max_text_len):
        tokenizer = RegexpTokenizer()
        keys = self.keys()
        words = frozenset(list(vocab.words()))
        word_count = 0

        with click.progressbar(keys) as bar:
            for key in bar:
                c = 0
                for token in tokenizer.tokenize(self[key]['text']):
                    if token.text.lower() in words:
                        c += 1

                word_count += min(c, max_text_len)

        return word_count
Project: zipline-chinese    Author: zhanghan1990
def write(self, filename, calendar, assets, show_progress=False):
        """
        Parameters
        ----------
        filename : str
            The location at which we should write our output.
        calendar : pandas.DatetimeIndex
            Calendar to use to compute asset calendar offsets.
        assets : pandas.Int64Index
            The assets for which to write data.
        show_progress : bool
            Whether or not to show a progress bar while writing.

        Returns
        -------
        table : bcolz.ctable
            The newly-written table.
        """
        _iterator = self.gen_tables(assets)
        if show_progress:
            pbar = progressbar(
                _iterator,
                length=len(assets),
                item_show_func=lambda i: i if i is None else str(i[0]),
                label="Merging asset files:",
            )
            with pbar as pbar_iterator:
                return self._write_internal(filename, calendar, pbar_iterator)
        return self._write_internal(filename, calendar, _iterator)
Project: lecli    Author: rapid7
def query(**kwargs):
    """
    Post query to Logentries.
    """
    date_from = kwargs.get('date_from')
    date_to = kwargs.get('date_to')
    time_from = kwargs.get('time_from')
    time_to = kwargs.get('time_to')
    relative_time_range = kwargs.get('relative_time_range')
    saved_query_id = kwargs.get('saved_query_id')
    query_string = kwargs.get('query_string')
    log_keys = kwargs.get('log_keys')
    favorites = kwargs.get('favorites')
    logset = kwargs.get('logset')
    if not validate_query(date_from=date_from, time_from=time_from, query_string=query_string,
                          relative_time_range=relative_time_range, saved_query_id=saved_query_id,
                          log_keys=log_keys, favorites=favorites, logset=logset):
        return False

    time_range = prepare_time_range(time_from, time_to, relative_time_range, date_from, date_to)
    if favorites:
        log_keys = api_utils.get_named_logkey_group(favorites)
    if logset:
        log_keys = api.get_log_keys_from_logset(logset)
    try:
        if saved_query_id:
            response = run_saved_query(saved_query_id, time_range, log_keys)
        else:
            response = post_query(log_keys, query_string, time_range)
        with click.progressbar(length=100, label='Progress\t') as progress_bar:
            handle_response(response, progress_bar)
        return True
    except requests.exceptions.RequestException as error:
        click.echo(error)
        sys.exit(1)
Project: QUANTAXIS    Author: yutiansut
def fill(self):
        self.init_db(self.engine)
        df = pd.read_sql("select * from fundamental", self.engine).sort_values(['report_date', 'quarter'])
        df['trade_date'] = df['report_date'] = pd.to_datetime(df['report_date'])

        with click.progressbar(df.groupby('code'),
                               label='writing data',
                               item_show_func=lambda x: x[0] if x else None) as bar:
            bar.is_hidden = False  # force the bar to render even when stdout is not a tty
            for stock, group in bar:
                group = group.drop_duplicates(subset='trade_date', keep="last").set_index('trade_date')
                sessions = pd.date_range(group.index[0], group.index[-1])
                d = group.reindex(sessions, copy=False).fillna(method='pad')
                d.to_sql('full', self.engine, if_exists='append', index_label='trade_date')
Project: lopocs    Author: Oslandia
def download(label, url, dest):
    '''
    download url using requests and a progressbar
    '''
    r = requests.get(url, stream=True)
    length = int(r.headers['content-length'])

    chunk_size = 512
    iter_size = 0
    with io.open(dest, 'wb') as fd:
        with click.progressbar(length=length, label=label) as bar:
            for chunk in r.iter_content(chunk_size):
                fd.write(chunk)
                iter_size += chunk_size
                bar.update(chunk_size)
Project: SIDR    Author: damurdock
def readBLAST(classification, taxdump, classificationLevel, contigs):
    """
    Reads a BLAST result file and combines it with other known information about the contigs.

    Args:
        classification: A string containing the filename of the BLAST results. The BLAST
            results must be in the format -outfmt '6 qseqid staxids', additional information
            can be added but the first two fields must be qseqid and staxids.
        taxdump: The NCBI taxdump as processed by parseTaxdump()
        classificationLevel: The level of classification to save into the corpus. Defaults to phylum.
        contigs: Dictionary of sidr.common.Contig objects taken from the input FASTA.
    Returns:
        contigs: Input contigs updated with classification from BLAST.
        classMap: A dictionary mapping class names to their class id used by scikit-learn.
        classList: A list of class names.
    """
    classList = []
    classMap = {}
    with open(classification) as data:
        click.echo("Reading %s" % classification)
        with click.progressbar(data) as dt:
            for line in dt:
                record = line.split("\t")
                contig = record[0]
                taxid = record[1].strip()
                taxonomy = common.taxidToLineage(taxid, taxdump, classificationLevel)
                taxonomy = taxonomy.lower()
                try:
                    if not contigs[contig].classification: # assume that the first hit in blast output is best
                        contigs[contig].classification = taxonomy
                        if taxonomy not in classList:
                            classList.append(taxonomy)
                except KeyError:  # contigs is a dict, so a missing contig raises KeyError (should be impossible, but just in case)
                    continue
    for idx, className in enumerate(classList):
        classMap[className] = idx
    return contigs, classMap, classList
Project: apio    Author: FPGAwars
def start(self):
        itercontent = self._request.iter_content(chunk_size=self.CHUNK_SIZE)
        f = open(self._destination, 'wb')
        chunks = int(ceil(self.get_size() / float(self.CHUNK_SIZE)))

        with click.progressbar(length=chunks, label='Downloading') as pb:
            for _ in pb:
                f.write(next(itercontent))
        f.close()
        self._request.close()

        self._preserve_filemtime(self.get_lmtime())
Project: apio    Author: FPGAwars
def start(self):
        with click.progressbar(self._unpacker.get_items(),
                               label="Unpacking") as pb:
            for item in pb:
                self._unpacker.extract_item(item, self._dest_dir)
        return True
Project: InplusTrader_Linux    Author: zhengwsh
def _init(self):
        trading_length = len(self._env.config.base.trading_calendar)
        self.progress_bar = click.progressbar(length=trading_length, show_eta=False)
Project: InplusTrader_Linux    Author: zhengwsh
def _init(self, event):
        self._trading_length = len(self._env.config.base.trading_calendar)
        self.progress_bar = click.progressbar(length=self._trading_length, show_eta=False)
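Both `_init` methods above create the bar without a `with` block, so the event handlers have to drive it by hand. A minimal sketch of that manual pattern (the length of 100 is hypothetical):

import click

bar = click.progressbar(length=100, show_eta=False)
for _ in range(100):
    bar.update(1)       # advance one step per event, as the trading loop would
bar.render_finish()     # restore the terminal line once the run ends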
Project: panoptes-cli    Author: zooniverse
def download_classifications(
    workflow_id,
    output_file,
    generate,
    generate_timeout
):
    """
    Downloads a workflow-specific classifications export for the given workflow.

    OUTPUT_FILE will be overwritten if it already exists. Set OUTPUT_FILE to -
    to output to stdout.
    """

    workflow = Workflow.find(workflow_id)

    if generate:
        click.echo("Generating new export...", err=True)

    export = workflow.get_export(
        'classifications',
        generate=generate,
        wait_timeout=generate_timeout
    )

    with click.progressbar(
        export.iter_content(chunk_size=1024),
        label='Downloading',
        length=(int(export.headers.get('content-length')) / 1024 + 1),
        file=click.get_text_stream('stderr'),
    ) as chunks:
        for chunk in chunks:
            output_file.write(chunk)
Project: panoptes-cli    Author: zooniverse
def download(project_id, output_file, generate, generate_timeout, data_type):
    """
    Downloads project-level data exports.

    OUTPUT_FILE will be overwritten if it already exists. Set OUTPUT_FILE to -
    to output to stdout.
    """

    project = Project.find(project_id)

    if generate:
        click.echo("Generating new export...", err=True)

    export = project.get_export(
        data_type,
        generate=generate,
        wait_timeout=generate_timeout
    )

    with click.progressbar(
        export.iter_content(chunk_size=1024),
        label='Downloading',
        length=(int(export.headers.get('content-length')) / 1024 + 1),
        file=click.get_text_stream('stderr'),
    ) as chunks:
        for chunk in chunks:
            output_file.write(chunk)
Project: luminoth    Author: tryolabs
def download_checkpoint(network, network_filename, checkpoint_path,
                        checkpoint_filename):
    tarball_filename = BASE_NETWORK_FILENAMES[network]
    url = TENSORFLOW_OFFICIAL_ENDPOINT + tarball_filename
    response = requests.get(url, stream=True)
    total_size = int(response.headers.get('Content-Length'))
    tarball_path = os.path.join(checkpoint_path, tarball_filename)
    tmp_tarball = tf.gfile.Open(tarball_path, 'wb')
    tf.logging.info('Downloading {} checkpoint.'.format(network_filename))
    with click.progressbar(length=total_size) as bar:
        for data in response.iter_content(chunk_size=4096):
            tmp_tarball.write(data)
            bar.update(len(data))
    tmp_tarball.flush()

    tf.logging.info('Saving checkpoint to {}'.format(checkpoint_path))
    # Open saved tarball as readable binary
    tmp_tarball = tf.gfile.Open(tarball_path, 'rb')
    # Open tarfile object
    tar_obj = tarfile.open(fileobj=tmp_tarball)
    # Create buffer with extracted network checkpoint
    checkpoint_fp = tar_obj.extractfile(network_filename)
    # Define where to save.
    checkpoint_file = tf.gfile.Open(checkpoint_filename, 'wb')
    # Write extracted checkpoint to file
    checkpoint_file.write(checkpoint_fp.read())
    checkpoint_file.flush()
    checkpoint_file.close()
    tmp_tarball.close()
    # Remove temp tarball
    tf.gfile.Remove(tarball_path)
Project: luminoth    Author: tryolabs
def save(self):
        """
        """
        tf.logging.info('Saving split "{}" in output_dir = {}'.format(
            self._split, self._output_dir))
        if not tf.gfile.Exists(self._output_dir):
            tf.gfile.MakeDirs(self._output_dir)

        # Save classes in simple json format for later use.
        classes_file = os.path.join(self._output_dir, CLASSES_FILENAME)
        json.dump(self._reader.classes, tf.gfile.GFile(classes_file, 'w'))
        record_file = os.path.join(
            self._output_dir, '{}.tfrecords'.format(self._split))
        writer = tf.python_io.TFRecordWriter(record_file)

        tf.logging.debug('Found {} images.'.format(self._reader.total))

        with click.progressbar(self._reader.iterate(),
                               length=self._reader.total) as record_list:
            for record in record_list:
                tf_record = self._record_to_tf(record)
                if tf_record is not None:
                    writer.write(tf_record.SerializeToString())

            writer.close()

        if self._reader.yielded_records == 0:
            tf.logging.error('Data is missing. Removing record file.')
            tf.gfile.Remove(record_file)
            return
        elif self._reader.errors > 0:
            tf.logging.warning(
                'Failed on {} records ({} yielded).'.format(
                    self._reader.errors, self._reader.yielded_records
                )
            )

        tf.logging.info('Saved {} records to "{}"'.format(
            self._reader.yielded_records, record_file))
Project: sbds    Author: steemit
def task_find_missing_block_nums(database_url, last_chain_block, task_num=4):
    task_message = fmt_task_message(
        'Finding blocks missing from db',
        emoji_code_point=u'\U0001F52D',
        task_num=task_num)
    click.echo(task_message)
    with isolated_engine(database_url) as engine:

        session = Session(bind=engine)

        missing_block_nums_gen = Block.get_missing_block_num_iterator(
            session, last_chain_block, chunksize=1000000)

        with click.progressbar(
                missing_block_nums_gen,
                label='Finding missing block_nums',
                **progress_bar_kwargs) as pbar:

            all_missing_block_nums = []
            for missing_gen in pbar:
                all_missing_block_nums.extend(missing_gen())

        success_msg = fmt_success_message('found %s missing blocks',
                                          len(all_missing_block_nums))
        click.echo(success_msg)
        engine.dispose()

    return all_missing_block_nums
Project: catalyst    Author: enigmampc
def maybe_show_progress(it,
                        show_progress,
                        empty_char=DEFAULT_EMPTY_CHAR,
                        fill_char=DEFAULT_FILL_CHAR,
                        bar_template=DEFAULT_BAR_TEMPLATE,
                        **kwargs):
    """Optionally show a progress bar for the given iterator.

    Parameters
    ----------
    it : iterable
        The underlying iterator.
    show_progress : bool
        Should progress be shown.
    **kwargs
        Forwarded to the click progress bar.

    Returns
    -------
    itercontext : context manager
        A context manager whose enter is the actual iterator to use.

    Examples
    --------
    .. code-block:: python

       with maybe_show_progress([1, 2, 3], True) as ns:
            for n in ns:
                ...
    """
    if show_progress:
        kwargs['bar_template'] = bar_template
        kwargs['empty_char'] = empty_char
        kwargs['fill_char'] = fill_char
        return click.progressbar(it, **kwargs)

    # context manager that just return `it` when we enter it
    return CallbackManager(lambda it=it: it)
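`CallbackManager` is not shown in this excerpt; a stand-in consistent with its use here, a context manager whose `__enter__` simply invokes the callback and returns the result, might look like this sketch:

class CallbackManager(object):
    """A sketch consistent with the usage above, not catalyst's actual class."""

    def __init__(self, callback):
        self.callback = callback

    def __enter__(self):
        return self.callback()  # here: just hand back the plain iterator

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False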
Project: tempfiles_cmdline    Author: periket2000
def download_file(self, link, destination=None, is_directory=False):
        try:
            response = requests.get(link, stream=True)
            if 'content-disposition' in response.headers:
                filename = response.headers['content-disposition'].split("=", 1)[1]

                if destination and is_directory:
                    destination_file = destination + '/' + filename
                elif destination:
                    destination_file = destination
                else:
                    destination_file = filename

                option = 'y'
                if os.path.isfile(destination_file):
                    try:
                        option = raw_input(self.configuration_service.get('OVERWRITE').format(destination_file))
                    except NameError:
                        option = input(self.configuration_service.get('OVERWRITE').format(destination_file))
                if str(option) == 'y':
                    total_length = int(response.headers.get('content-length'))
                    with click.progressbar(length=total_length) as bar:
                        with open(destination_file, 'wb') as handle:
                            for block in response.iter_content(self.block_size):
                                handle.write(block)
                                bar.update(len(block))
                    print(ENDL)
                    self.configuration_service.log('COMPLETE', (destination_file,))
            else:
                self.configuration_service.log('NOT_FOUND')
        except requests.exceptions.MissingSchema:
            self.configuration_service.log('BAD_URL')
        except requests.exceptions.ConnectionError:
            self.configuration_service.log('SERVER_KO')
            raise
        except:
            print(self.configuration_service.get('ALIEN'), sys.exc_info()[0])
            raise
Project: tempfiles_cmdline    Author: periket2000
def create_callback(self, encoder):
        bar = click.progressbar(length=encoder.len)

        def callback(monitor):
            bar.update(monitor.bytes_read - self.prev_read)
            self.prev_read = monitor.bytes_read

        return callback
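The callback above is shaped for requests_toolbelt's `MultipartEncoderMonitor`. A hedged usage sketch (the URL, field name, and `uploader` instance are hypothetical, and `self.prev_read` is assumed to start at 0):

import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder(fields={'file': ('data.bin', open('data.bin', 'rb'))})
callback = uploader.create_callback(encoder)      # `uploader` is an instance of the class above
monitor = MultipartEncoderMonitor(encoder, callback)
requests.post('https://example.com/upload',       # hypothetical endpoint
              data=monitor,
              headers={'Content-Type': monitor.content_type})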
Project: temci    Author: parttimenerd
def _process_hist_cache(self, cache: t.Iterable[dict], title: str):
        pool = multiprocessing.Pool(4)
        pool_res = [pool.apply_async(self._process_hist_cache_entry, args=(entry,)) for entry in cache]
        if Settings().has_log_level("info"):
            with click.progressbar(pool_res, label=title) as pool_res:
                for res in pool_res:
                    res.get()
        else:
            for res in pool_res:
                res.get()
Project: temci    Author: parttimenerd
def _process_boxplot_cache(self, cache: t.Iterable[dict], title: str):
        pool = multiprocessing.Pool(4)
        pool_res = [pool.apply_async(self._process_boxplot_cache_entry, args=(entry,)) for entry in cache]
        if Settings().has_log_level("info"):
            with click.progressbar(pool_res, label=title) as pool_res:
                for res in pool_res:
                    res.get()
        else:
            for res in pool_res:
                res.get()
Project: postman-audit    Author: tylrd
def initialize(self):
        data = self.client.get_collections()
        if 'collections' in data:
            with click.progressbar(data['collections'], label="Initializing collections...") as collections:
                for collection in collections:
                    if 'name' in collection and 'uid' in collection:
                        logger.debug("Retrieving collection: %s", collection['name'])
                        collection_data = self.client.get_collection(collection['uid'])
                        self.collections.append(collection_data)
                        time.sleep(self.delay)
        else:
            logger.debug("Json response does not contain array of collections")
        return data
Project: utils    Author: ReCodEx
def evaluate_all_rs(config_path):
    """
    Request evaluation for all reference solutions
    """
    config = Config.load(Path.cwd() / (config_path or "import-config.yml"))
    api = ApiClient(config.api_url, config.api_token)

    with click.progressbar(api.get_exercises()) as bar:
        for exercise in bar:
            try:
                api.evaluate_reference_solutions(exercise["id"])
            except Exception as e:
                logging.error("Error in exercise {}: {}".format(exercise["id"], str(e)))
Project: Sentences-analysis    Author: sungminoh
def save_file(obj, path, update=False):
        if not os.path.isfile(path) or update:
            _, ext = os.path.splitext(path)
            with open(path, 'w') as f:
                if ext == '.csv':
                    csv_file = csv.writer(f, delimiter=',')
                    with click.progressbar(obj, label='Writing CSV file.') as bar:
                        for line in bar:
                            striped = list(Utils.strip_all(line))
                            csv_file.writerow(striped)
                elif ext == '.pkl':
                    pickle.dump(obj, f)
Project: k2mosaic    Author: KeplerGO
def k2mosaic_mosaic(tpf_filenames, mission, campaign, channel, cadencelist,
                    output_prefix='', verbose=True, processes=None):
    """Mosaic a set of TPF files for a set of cadences."""
    task = partial(k2mosaic_mosaic_one, tpf_filenames=tpf_filenames,
                   campaign=campaign, channel=channel,
                   output_prefix=output_prefix, verbose=verbose)
    if processes is None or processes > 1:  # Use parallel processing
        from multiprocessing import Pool
        pool = Pool(processes=processes)
        with click.progressbar(pool.imap(task, cadencelist), label='Mosaicking', show_pos=True) as iterable:
            [job for job in iterable]
    else:  # Single process
        with click.progressbar(cadencelist, label='Mosaicking', show_pos=True) as iterable:
            [task(job) for job in iterable]
Project: k2mosaic    Author: KeplerGO
def gather_pixels(self):
        """Figures out the files needed and adds the pixels."""
        print("Querying MAST to obtain a list of target pixel files...")
        from .mast import get_tpf_urls
        urls = get_tpf_urls(self.campaign, channel=self.channel)
        print("Found {} target pixel files.".format(len(urls)))
        with click.progressbar(urls, label="Reading target pixel files",
                               show_pos=True) as bar:
            for url in bar:
                if self.data_store is not None:
                    path = url.replace("http://archive.stsci.edu/missions/k2/target_pixel_files", self.data_store)
                else:
                    path = url
                self.add_tpf(path)
Project: k2mosaic    Author: KeplerGO
def export_ffi_headers(output_fn=FFI_HEADERS_FILE, ffi_store=None):
    """Writes the headers of all available K2 FFI's to a csv table.

    This will enable us to inject WCS keywords from real FFI's into the sparse
    FFI's created by k2mosaic."""
    if ffi_store is None:
        ffi_store = os.path.join(os.getenv("K2DATA"), 'ffi')
    ffi_headers = []
    ffi_filenames = glob.glob(os.path.join(ffi_store, '*cal.fits'))
    with click.progressbar(ffi_filenames, label="Reading FFI files",
                           show_pos=True) as bar:
        for filename in bar:
            basename = os.path.basename(filename)
            # Extract the campaign number from the FFI filename
            campaign = int(re.match(".*c([0-9]+)_.*", basename).group(1))
            fts = fits.open(filename)
            for ext in range(1, 85):
                try:
                    keywords = OrderedDict()
                    keywords['campaign'] = campaign
                    keywords['filename'] = basename
                    keywords['extension'] = ext
                    for kw in WCS_KEYS:
                        keywords[kw] = fts[ext].header[kw]
                    ffi_headers.append(keywords)
                except KeyError:
                    pass
    # Convert to a pandas dataframe and then export to csv
    df = pd.DataFrame(ffi_headers)
    df = df.sort_values(["campaign", "filename"])
    columns = list(ffi_headers[0].keys())  # Keep column order as in FITS files
    df[columns].to_csv(output_fn, index=False)
Project: k2mosaic    Author: KeplerGO
def export_frames(self, extension=1, cut=None):
        for fn in click.progressbar(self.mosaic_filenames, label="Reading mosaics", show_pos=True):
            try:
                frame = KeplerMosaicMovieFrame(fn)
                fig = frame.to_fig(rowrange=self.rowrange, colrange=self.colrange, extension=extension, cut=cut)
                out_fn = "movie-frame-" + os.path.basename(fn) + ".png"
                fig.savefig(out_fn, cmap='Greys_r', facecolor='#333333')
                pl.close(fig)
            except InvalidFrameException:
                print("InvalidFrameException for {}".format(fn))
Project: Rock_star    Author: RoJoHub
def make_me_a_rockstar(self):
        self.repo = git.Repo.init(self.repo_path)
        label = 'Making you a Rockstar Programmer'
        with click.progressbar(self._get_dates_list(), label=label) as bar:
            for commit_date in bar:
                self._edit_and_commit(str(uuid.uuid1()), commit_date)
        self._make_last_commit()
        print('\nYou are now a Rockstar Programmer!')
Project: elasticsearch_loader    Author: moshe
def load(lines, config):
    bulks = grouper(lines, config['bulk_size'] * 3)
    if config['progress']:
        bulks = [x for x in bulks]  # materialize the generator so the progress bar knows its length
    with click.progressbar(bulks) as pbar:
        for i, bulk in enumerate(pbar):
            try:
                single_bulk_to_es(bulk, config, config['with_retry'])
            except Exception as e:
                log('warn', 'Chunk {i} got exception ({e}) while processing'.format(e=e, i=i))
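`grouper` is not part of this excerpt; a common implementation that batches an iterable into fixed-size chunks would fit (a sketch, not necessarily the project's own helper):

from itertools import islice

def grouper(iterable, size):
    """Yield successive lists of up to `size` items from `iterable`."""
    it = iter(iterable)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk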
Project: notary    Author: sxn
def run():
    """
    Crawls https://choosealicense.com/licenses and fetches all open source license urls.
    It then crawls each individual license page and stores it in the {LICENSE_DIR}
    folder, under {slug}.md.
    """
    response = requests.get(LICENSES_URL)

    if response.status_code != 200:
        click.echo(
            "URL {0} returned status {1}".
            format(green(LICENSES_URL), red(response.status_code))
        )
        sys.exit(1)

    soup = BeautifulSoup(response.content, 'html.parser')
    url_tuples = [
        (BASE_URL, license_overview.div.h3.a.get('href'))
        for license_overview in soup.find_all('div', {'class': 'license-overview'})
    ]

    with click.progressbar(
            iterable=url_tuples, show_pos=True, label="Fetching licenses"
    ) as urls:
        for url_tuple in urls:
            click.echo()
            url = ''.join(url_tuple)
            response = requests.get(url)
            license_soup = BeautifulSoup(response.content, 'html.parser')
            try:
                lic = License(
                    url,
                    url_tuple[1].split('/')[2],
                    license_soup.h1.string,
                    license_soup.find(id='license-text').string
                )
                with lic.open('w') as f:
                    f.write(lic.content)
                click.echo("Finished crawling {0}.".format(green(url)))
            except AttributeError:
                click.echo("Could not fetch license from {0}".format(green(url)))
Project: EvadeML-Zoo    Author: mzweilin
def generate_jsma_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    """
    Targeted attack, with target classes in Y.
    """
    Y_target = Y

    nb_classes = Y.shape[1]

    jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    jsma_params = {'theta': 1., 'gamma': 0.1,
                   'nb_classes': nb_classes, 'clip_min': 0.,
                   'clip_max': 1., 'targets': y,
                   'y_val': None}
    jsma_params = override_params(jsma_params, attack_params)

    adv_x_list = []

    with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 
                           width=40, bar_template='  [%(bar)s] JSMA Attacking %(info)s', 
                           fill_char='>', empty_char='-') as bar:
        # Loop over the samples we want to perturb into adversarial examples
        for sample_ind in bar:
            sample = X[sample_ind:(sample_ind+1)]

            jsma_params['y_val'] = Y_target[[sample_ind],]
            adv_x = jsma.generate_np(sample, **jsma_params)
            adv_x_list.append(adv_x)

    return np.vstack(adv_x_list)
Project: EvadeML-Zoo    Author: mzweilin
def generate_carlini_li_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    model_wrapper = wrap_to_carlini_model(model, X, Y)

    if 'batch_size' in attack_params:
        batch_size = attack_params['batch_size']
        del attack_params['batch_size']
    else:
        batch_size = 10

    accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'confidence']
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsuporrted params in Carlini Li: %s" % k)

    attack = CarliniLi(sess, model_wrapper, **attack_params)

    X_adv_list = []

    with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 
                           width=40, bar_template='  [%(bar)s] Carlini Li Attacking %(info)s', 
                           fill_char='>', empty_char='-') as bar:
        for i in bar:
            if i % batch_size == 0:
                X_sub = X[i:min(i+batch_size, len(X)),:]
                Y_sub = Y[i:min(i+batch_size, len(X)),:]
                if not verbose:
                    disablePrint(attack_log_fpath)
                X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5
                if not verbose:
                    enablePrint()
                X_adv_list.append(X_adv_sub)

    X_adv = np.vstack(X_adv_list)
    return X_adv
Project: EvadeML-Zoo    Author: mzweilin
def generate_carlini_l0_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    model_wrapper = wrap_to_carlini_model(model, X, Y)

    if 'batch_size' in attack_params:
        batch_size = attack_params['batch_size']
        del attack_params['batch_size']
    else:
        batch_size = 10

    accepted_params = ['targeted', 'learning_rate', 'max_iterations', 'abort_early', 'initial_const', 'largest_const', 'reduce_const', 'decrease_factor', 'const_factor', 'independent_channels', 'confidence']
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsuporrted params in Carlini L0: %s" % k)

    attack = CarliniL0(sess, model_wrapper, **attack_params)

    X_adv_list = []

    with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, 
                           width=40, bar_template='  [%(bar)s] Carlini L0 Attacking %(info)s', 
                           fill_char='>', empty_char='-') as bar:
        for i in bar:
            if i % batch_size == 0:
                X_sub = X[i:min(i+batch_size, len(X)),:]
                Y_sub = Y[i:min(i+batch_size, len(X)),:]
                if not verbose:
                    disablePrint(attack_log_fpath)
                X_adv_sub = attack.attack(X_sub - 0.5, Y_sub) + 0.5
                if not verbose:
                    enablePrint()
                X_adv_list.append(X_adv_sub)

    X_adv = np.vstack(X_adv_list)
    return X_adv
Project: EvadeML-Zoo    Author: mzweilin
def generate_deepfool_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
    """
    Untargeted attack. Y is not needed.
    """

    # TODO: insert a uint8 filter to f.
    f, grad_fs = prepare_attack(sess, model, x, y, X, Y)

    params = {'num_classes': 10, 'overshoot': 0.02, 'max_iter': 50}
    params = override_params(params, attack_params)

    adv_x_list = []
    aux_info = {}
    aux_info['r_tot'] = []
    aux_info['loop_i'] = []
    aux_info['k_i'] = []

    with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True,
                           width=40, bar_template='  [%(bar)s] DeepFool Attacking %(info)s',
                           fill_char='>', empty_char='-') as bar:
        # Loop over the samples we want to perturb into adversarial examples
        for i in bar:
            image = X[i:i+1,:,:,:]

            if not verbose:
                disablePrint(attack_log_fpath)

            r_tot, loop_i, k_i, pert_image = deepfool(image, f, grad_fs, **params)

            if not verbose:
                enablePrint()

            adv_x_list.append(pert_image)

            aux_info['r_tot'].append(r_tot)
            aux_info['loop_i'].append(loop_i)
            aux_info['k_i'].append(k_i)

    return np.vstack(adv_x_list), aux_info