Python progressbar module: ETA usage examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use progressbar.ETA.

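Before the project examples, here is a minimal sketch of the widget in isolation (assuming the legacy progressbar 2.x API with maxval/start()/finish(); the newer progressbar2 package renames maxval to max_value and can also wrap an iterable directly, and both styles appear below):

import time
import progressbar

# ETA() is just another widget in the list; it is re-rendered on every update()
widgets = ['Working: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=100).start()
for i in range(100):
    time.sleep(0.01)  # stand-in for real work
    pbar.update(i + 1)
pbar.finish()
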
Project: sound-machine    Author: rhelmot    | Project source | File source
def render(self, length=None, progress=False):
        """
        Render this signal into a numpy array of floats. Return the array.

        :param length:      The length to render, in seconds. Optional.
        :param progress:    Whether to show a progress bar for rendering
        """
        if progress and not progressbar:
            print('Install the progressbar module to see a progress bar for rendering')
            progress = False

        duration = self.duration if length is None else length * SAMPLE_RATE
        if duration == float('inf'):
            duration = 3*SAMPLE_RATE
        else:
            duration = int(duration)
        out = numpy.empty((duration, 1))

        pbar = progressbar.ProgressBar(widgets=['Rendering: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()], maxval=duration-1).start() if progress else None

        for i in range(duration):
            out[i] = self.amplitude(i)
            if pbar: pbar.update(i)
        if pbar: pbar.finish()
        return out
Project: keras-molecules    Author: maxhodak    | Project source | File source
def main():
    uri, outfile, dataset = get_arguments()
    fd = tempfile.NamedTemporaryFile()
    progress = ProgressBar(widgets=[Percentage(), ' ', Bar(), ' ', ETA(), ' ', FileTransferSpeed()])

    def update(count, blockSize, totalSize):
        if progress.maxval is None:
            progress.maxval = totalSize
            progress.start()
        progress.update(min(count * blockSize, totalSize))

    urllib.urlretrieve(uri, fd.name, reporthook = update)
    if dataset == 'zinc12':
        df = pandas.read_csv(fd.name, delimiter = '\t')
        df = df.rename(columns={'SMILES':'structure'})
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
    elif dataset == 'chembl22':
        df = pandas.read_table(fd.name,compression='gzip')
        df = df.rename(columns={'canonical_smiles':'structure'})
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
    else:
        df = pandas.read_csv(fd.name, delimiter = '\t')
        df.to_hdf(outfile, 'table', format = 'table', data_columns = True)
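The update callback above is the standard urlretrieve reporthook pattern: the total size is only known once the response headers arrive, so maxval is filled in lazily on the first call. A hypothetical generalization (make_reporthook is not part of the project; legacy progressbar API assumed):

def make_reporthook(pbar):
    # Lazily set maxval from the first reporthook call, then clamp the running
    # byte count so the bar never overshoots the total
    def reporthook(count, block_size, total_size):
        if pbar.maxval is None:
            pbar.maxval = total_size
            pbar.start()
        pbar.update(min(count * block_size, total_size))
    return reporthook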
Project: jack    Author: uclmr    | Project source | File source
def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[XQAAnnotation]:

        if answers is None:
            answers = [None] * len(questions)
        preprocessed = []
        if len(questions) > 1000:
            bar = progressbar.ProgressBar(
                max_value=len(questions),
                widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '])
            for q, a in bar(zip(questions, answers)):
                preprocessed.append(self.preprocess_instance(q, a))
        else:
            for q, a in zip(questions, answers):
                preprocessed.append(self.preprocess_instance(q, a))

        return preprocessed
Project: jack    Author: uclmr    | Project source | File source
def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[MCAnnotation]:
        if answers is None:
            answers = [None] * len(questions)
        preprocessed = []
        if len(questions) > 1000:
            bar = progressbar.ProgressBar(
                max_value=len(questions),
                widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '])
            for i, (q, a) in bar(enumerate(zip(questions, answers))):
                preprocessed.append(self.preprocess_instance(i, q, a))
        else:
            for i, (q, a) in enumerate(zip(questions, answers)):
                preprocessed.append(self.preprocess_instance(i, q, a))

        return preprocessed
Project: imap2emlbackup    Author: Noneus    | Project source | File source
def download(download_list, total_download_size):
    progressbar_widgets = [
        '[Downloading mails            ] ',
        progressbar.Percentage(),
        progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
        progressbar.ETA(), ' ',
        bitmath.integrations.BitmathFileTransferSpeed()]
    progressbar_instance = progressbar.ProgressBar(widgets=progressbar_widgets, maxval=int(total_download_size)).start()

    downloaded_size = bitmath.Byte(0)
    for folder, mails in download_list.items():
        server.select_folder(folder, readonly=True)
        for mailid, mailfilename, mailsize in mails:
            #make parent directory
            if not os.path.isdir(os.path.dirname(mailfilename)):
                os.makedirs(os.path.dirname(mailfilename))

            #download mail
            with open(mailfilename, 'wb') as mailfile:
                mailfile.write(server.fetch([mailid], ['RFC822'])[mailid][b'RFC822'])

            #update progressbar
            downloaded_size += mailsize
            progressbar_instance.update(int(downloaded_size))
    progressbar_instance.finish()
Project: fabric8-analytics-tagger    Author: fabric8-analytics    | Project source | File source
def progressbarize(iterable, progress=False):
    """Construct progressbar for loops if progressbar requested, otherwise return directly iterable.

    :param iterable: iterable to use
    :param progress: True to print a progress bar
    """
    if progress:
        # The cast to list is needed because a generator has no length,
        # which prevents ProgressBar from computing an overall ETA
        return progressbar.ProgressBar(widgets=[
            progressbar.Timer(), ', ',
            progressbar.Percentage(), ', ',
            progressbar.SimpleProgress(), ', ',
            progressbar.ETA()
        ])(list(iterable))

    return iterable
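As the comment notes, a generator has no length, so it is materialized with list() before ProgressBar can compute an overall ETA. When the caller knows the length ahead of time, the input can stay lazy by passing max_value instead (a sketch assuming the progressbar2 API; progressbarize_known_length is a hypothetical variant, not part of the project):

def progressbarize_known_length(iterable, length, progress=False):
    """Like progressbarize, but avoids list() when the length is known."""
    if progress:
        return progressbar.ProgressBar(
            max_value=length,
            widgets=[
                progressbar.Timer(), ', ',
                progressbar.Percentage(), ', ',
                progressbar.SimpleProgress(), ', ',
                progressbar.ETA()
            ])(iterable)
    return iterable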
Project: django-geoware    Author: un33k    | Project source | File source
def __init__(self, *args, **kwargs):
        self.dld = FileDownloader()
        self.dld.stage(self.cmd_name)

        load_continents()
        load_oceans()
        load_currencies()
        load_languages()

        self.widgets = [
            MemoryUsage(),
            progressbar.ETA(),
            ' |Processed: ',
            progressbar.Counter(),
            ' |Done: ',
            progressbar.Percentage(),
            progressbar.Bar(),
        ]
        return super().__init__(*args, **kwargs)
Project: zabbix-scripts    Author: globocom    | Project source | File source
def deleteHostsByHostgroup(groupname):
    hostgroup = zapi.hostgroup.get(output=['groupid'],filter={'name': groupname})
    if len(hostgroup) != 1:
        logger.error('Hostgroup not found: %s\n\tFound this: %s' % (groupname, hostgroup))
        return
    groupid = int(hostgroup[0]['groupid'])
    hosts = zapi.host.get(output=['name','hostid'],groupids=groupid)
    total = len(hosts)
    logger.info('Hosts found: %d' % (total))
    if ( args.run ):
        x = 0
        bar = ProgressBar(maxval=total,widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        logger.echo = False
        for host in hosts:
            x = x + 1
            bar.update(x)
            logger.debug('(%d/%d) >> Removing >> %s' % (x, total, host))
            out = zapi.globo.deleteMonitors(host['name'])
        bar.finish()
        logger.echo = True
    else:
        logger.info('No host removed due to --no-run arg. Full list of hosts:')
        for host in hosts:
            logger.info('%s' % host['name'])
    return
Project: zabbix-scripts    Author: globocom    | Project source | File source
def hosts_disable_all():
  """
  status de host 0 = enabled
  status de host 1 = disabled
  """
  logger.info('Disabling all hosts, in blocks of 1000')
  hosts = zapi.host.get(output=[ 'hostid' ], search={ 'status': 0 })
  maxval = int(ceil(len(hosts) / 1000.0))
  bar = ProgressBar(maxval=maxval, widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
  for i in xrange(maxval):
    block = hosts[:1000]
    del hosts[:1000]
    zapi.host.massupdate(hosts=block, status=1)
    bar.update(i + 1)
  bar.finish()
  logger.info('Done')
  return
Project: zabbix-scripts    Author: globocom    | Project source | File source
def proxy_passive_to_active():
  """
  status de prxy 5 = active
  status de prxy 6 = passive
  """
  logger.info('Change all proxys to active')
  proxys = zapi.proxy.get(output=[ 'shorten', 'host' ],
    filter={ 'status': 6 })
  if ( proxys.__len__() == 0 ):
    logger.info('Done')
    return
  bar = ProgressBar(maxval=proxys.__len__(),widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
  i = 0
  for x in proxys:
    i += 1
    proxyid = x['proxyid']
    result = zapi.proxy.update(proxyid=proxyid, status=5)
    logger.echo = False
    logger.debug('Changed from passive to active proxy: %s' % (x['host']))
    bar.update(i)
  bar.finish()
  logger.echo = True
  logger.info('Done')
  return
Project: LetvCloud    Author: jiangchao0304    | Project source | File source
def getProgress(self, url, fileSize):
        status = json.loads(urllib.urlopen(url).read())
        if len(status["data"]) ==0 :
            logger.info(url + " upload done ")
            return  True
        widgets = ['Progress: ', Percentage(), ' ', Bar(
            marker=RotatingMarker('>-=')), ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=fileSize).start()
        upload_size = 0
        while upload_size < fileSize:
            _response = self.doGet(url)
            _data = json.loads(_response)
            upload_size = long(_data["data"]["upload_size"])
            total_size = long(_data["data"]["total_size"])
            if upload_size == 0 and total_size == 0:
                break
            pbar.update(upload_size)
            time.sleep(1)
        pbar.finish()
        logger.info(url + " upload done")
        return True

        """
        ??????
        """
Project: sportsball    Author: jgershen    | Project source | File source
def scrape_mlb_odds_range(min_date=None, max_date=None):
  min_date = min_date or datetime.datetime.today() - datetime.timedelta(days=1)
  max_date = max_date or datetime.datetime.today()

  if isinstance(min_date, basestring):
    min_date = parser.parse(min_date)
  if isinstance(max_date, basestring):
    max_date = parser.parse(max_date)

  date = min_date
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()],
                                 maxval=int((max_date-min_date).total_seconds() / (60*60*24)) + 1)
  pbar.start()
  saved = 0
  hit = 0
  while date <= max_date:
    day_odds = load_odds_for_day(date)
    if day_odds is not None and len(day_odds) > 0:
      save_sbr_odds_info('mlb', date, day_odds)
      saved += 1
    hit += 1
    date += datetime.timedelta(days=1)
    pbar.update(value=hit)
  pbar.finish()
  return saved
Project: sportsball    Author: jgershen    | Project source | File source
def scrape_nba_odds_range(min_date=None, max_date=None):
  min_date = min_date or datetime.datetime.today() - datetime.timedelta(days=1)
  max_date = max_date or datetime.datetime.today()

  if isinstance(min_date, basestring):
    min_date = parser.parse(min_date)
  if isinstance(max_date, basestring):
    max_date = parser.parse(max_date)

  date = min_date
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()],
                                 maxval=int((max_date-min_date).total_seconds() / (60*60*24)) + 1)
  pbar.start()
  saved = 0
  hit = 0
  while date <= max_date:
    day_odds = load_odds_for_day(date)
    if day_odds is not None and len(day_odds) > 0:
      save_sbr_odds_info('nba', date, day_odds)
      saved += 1
    hit += 1
    date += datetime.timedelta(days=1)
    pbar.update(value=hit)
  pbar.finish()
  return saved
Project: Sentry    Author: NetEaseGame    | Project source | File source
def __iter__(self):
        if self.count != 0:
            widgets = [
                '%s: ' % (self.caption,),
                progressbar.Percentage(),
                ' ',
                progressbar.Bar(),
                ' ',
                progressbar.ETA(),
            ]
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=self.count)
            pbar.start()
            for idx, item in enumerate(self.iterator):
                yield item
                pbar.update(idx)
            pbar.finish()
Project: httphose    Author: HarryR    | Project source | File source
def _setup_progress(self, options):
        if options.progress:
            if self.beanstalk:
                # With Beanstalk C&C we don't know how many...
                self.progress = progressbar.ProgressBar(
                    redirect_stdout=True,
                    redirect_stderr=True,
                    widgets=[
                        'Total: ',
                        progressbar.Counter(),
                        ', ',
                        progressbar.Timer()
                    ])
            else:
                self.progress = progressbar.ProgressBar(
                    redirect_stdout=True,
                    redirect_stderr=True,
                    widgets=[
                        progressbar.Percentage(),
                        progressbar.Bar(),
                        ' (', progressbar.ETA(), ') ',
                    ])
        else:
            self.progress = None
Project: dnsbrute    Author: XiphosResearch    | Project source | File source
def __init__(self, options):
        self.wildcards = []
        self.options = options
        self.domains = []
        if options.domains:
            self.domains += filter(None, options.domains.read().split("\n"))
        self.domains += options.domain
        self.domains = list(set(self.domains))
        random.shuffle(self.domains)
        self.resolvers = map(str.strip, filter(None, options.resolvers.read().split("\n")))
        random.shuffle(self.resolvers)
        self.names = [X for X in self._load_names(options.names)]
        if options.progress:
            self.progress = progressbar.ProgressBar(
                redirect_stdout=True,
                redirect_stderr=True,
                widgets=[
                    progressbar.Percentage(),
                    progressbar.Bar(),
                    ' (', progressbar.ETA(), ') ',
                ])
        else:
            self.progress = None
        self.finished = 0
        LOG.info("%d names, %d resolvers, %d domains",
                 len(self.names), len(self.resolvers), len(self.domains))
Project: chakin    Author: chakki-works    | Project source | File source
def download(number, save_dir='./'):
    """Download pre-trained word vector
    :param number: integer, default ``None``
    :param save_dir: str, default './'
    :return: file path for downloaded file
    """
    df = load_datasets()

    row = df.iloc[[number]]
    url = ''.join(row.URL)
    if not url:
        print('The word vector you specified was not found. Please specify a correct name.')
        return None

    widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets)

    def dlProgress(count, blockSize, totalSize):
        if pbar.max_value is None:
            pbar.max_value = totalSize
            pbar.start()

        pbar.update(min(count * blockSize, totalSize))

    file_name = url.split('/')[-1]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = os.path.join(save_dir, file_name)
    path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
    pbar.finish()
    return path
Project: downpour    Author: openstack    | Project source | File source
def __enter__(self):
        self.bar = progressbar.ProgressBar(
            widgets=[
                progressbar.Percentage(),
                ' ',
                progressbar.Bar(),
                progressbar.FileTransferSpeed(),
                ' ',
                progressbar.ETA(),
            ],
            max_value=self.max_value,
        )
        self.fd = open(self.output_path, 'wb')
        return self
Project: CNN-MNIST    Author: m516825    | Project source | File source
def train(self):

        data = Data(self.train_dat, self.train_lab)
        batch_num = self.length/self.batch_size if self.length%self.batch_size == 0 else self.length/self.batch_size + 1

        model = self.add_model()

        with self.sess as sess:

            tf.initialize_all_variables().run()

            for ite in range(self.iterations):
                print "Iteration {}".format(ite)
                cost = 0.
                pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=batch_num).start()
                for i in range(batch_num):
                    batch_x, batch_y = data.next_batch(self.batch_size)

                    c, _ = self.sess.run([model['loss'], model['optimizer']], feed_dict={model['train_x']:batch_x, model['train_y']:batch_y, model['p_keep_dens']:0.75})

                    cost += c / batch_num
                    pbar.update(i+1)
                pbar.finish()

                print ">>cost: {}".format(cost)

                t_acc, d_acc = self.eval(model, 3000)
                # early stop
                if t_acc >= 0.995 and d_acc >= 0.995:
                    break

            self.predict(model)
Project: jack    Author: uclmr    | Project source | File source
def __tag__(self):
        return "ETA"
Project: jack    Author: uclmr    | Project source | File source
def __call__(self, epoch):
        if self._batches is None:
            logger.info("Preparing evaluation data...")
            self._batches = self.reader.input_module.batch_generator(self._dataset, self._batch_size, is_eval=True)

        logger.info("Started evaluation %s" % self._info)
        metrics = defaultdict(lambda: list())
        bar = progressbar.ProgressBar(
            max_value=len(self._dataset) // self._batch_size + 1,
            widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') '])
        for i, batch in bar(enumerate(self._batches)):
            inputs = self._dataset[i * self._batch_size:(i + 1) * self._batch_size]
            predictions = self.reader.model_module(batch, self._ports)
            m = self.apply_metrics(inputs, predictions)
            for k in self._metrics:
                metrics[k].append(m[k])

        metrics = self.combine_metrics(metrics)
        super().add_to_history(metrics, self._iter, epoch)

        printmetrics = sorted(metrics.keys())
        res = "Epoch %d\tIter %d\ttotal %d" % (epoch, self._iter, self._total)
        for m in printmetrics:
            res += '\t%s: %.3f' % (m, metrics[m])
            self.update_summary(self._iter, self._info + '_' + m, metrics[m])
            if self._write_metrics_to is not None:
                with open(self._write_metrics_to, 'a') as f:
                    f.write("{0} {1} {2:.5}\n".format(datetime.now(), self._info + '_' + m,
                                                      np.round(metrics[m], 5)))
        res += '\t' + self._info
        logger.info(res)

        if self._side_effect is not None:
            self._side_effect_state = self._side_effect(metrics, self._side_effect_state)
Project: qrn    Author: uwnlp    | Project source | File source
def get_pbar(num, prefix=""):
    assert isinstance(prefix, str)
    pbar = pb.ProgressBar(widgets=[prefix, pb.Percentage(), pb.Bar(), pb.ETA()], maxval=num)
    return pbar
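A usage sketch for the helper above (items and process are hypothetical stand-ins; the legacy maxval API needs explicit start/update/finish around the loop):

pbar = get_pbar(len(items), prefix="encode: ")
pbar.start()
for i, item in enumerate(items):
    process(item)  # stand-in for real work
    pbar.update(i + 1)
pbar.finish()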
Project: imap2emlbackup    Author: Noneus    | Project source | File source
def collect_mailinfos(server, folder_contents, outpath_format):
    #construct progressbar
    progressbar_widgets = [
        '[Choosing mails for download  ] ',
        progressbar.Percentage(),
        progressbar.Bar(marker=progressbar.RotatingMarker()), ' ', progressbar.ETA()]
    total_count = 0
    for folder, mailids in folder_contents.items():
        total_count += len(mailids)
    progressbar_instance = progressbar.ProgressBar(widgets=progressbar_widgets, maxval=total_count).start()

    #collect all mailinfos
    mailinfos = {}
    mailinfo_count = 0
    for folder, mailids in folder_contents.items():
        mailinfos[folder] = []

        #get mailinfo bit by bit
        server.select_folder(folder, readonly=True)
        for mailid in mailids:
            #fetch mail information
            mailinfo = server.fetch([mailid], ['ENVELOPE', 'INTERNALDATE', 'RFC822.SIZE'])[mailid]
            mailsize = bitmath.Byte(mailinfo[b'RFC822.SIZE'])
            mailfilename = construct_mailfilename(outpath_format, mailinfo, args.outdir, folder, mailid)                

            #only add if mailfilename can be constructed
            if mailfilename:
                mailinfos[folder].append((mailid, mailfilename, mailsize))

            mailinfo_count += 1
            progressbar_instance.update(mailinfo_count)

    progressbar_instance.finish()
    return mailinfos
Project: audio-feeder    Author: pganssle    | Project source | File source
def create_app(load_db=True, populate_qr_cache=True, progressbar=False):
    # Set up logging
    log_level = os.environ.get('AF_LOGGING_LEVEL', None)
    if log_level is not None:
        log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
        if log_level.upper() in log_levels:
            log_level = getattr(log, log_level)

            log.basicConfig(level=log_level)
        else:
            log.warning('Invalid log level: {}'.format(log_level.upper()))
    else:
        log.warning('No log level set, using default level.')

    log.info('Creating Flask application')
    app = Flask(__name__)
    app.register_blueprint(root)

    # Now load the database if requested
    if load_db:
        from . import database_handler as dh
        log.info('Loading database.')
        dh.get_database()       # This loads the database into memory.
        log.info('Database loaded.')

    if populate_qr_cache:
        if progressbar:
            from progressbar import ProgressBar, Bar, Timer, ETA
            pbar = ProgressBar(widgets=['Populating QR cache: ', Bar(),
                                        ' ', Timer(), ' ', ETA()])
            kwargs = {'pbar': pbar}
        else:
            log.info('Populating QR cache.')
            kwargs = {}

        from .cache_utils import populate_qr_cache
        populate_qr_cache(**kwargs)

    return app
Project: isar    Author: ilbers    | Project source | File source
def __init__(self, msg, maxval, widgets=None, extrapos=-1):
        self.msg = msg
        self.extrapos = extrapos
        if not widgets:
            widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
                       progressbar.ETA()]
            self.extrapos = 4

        try:
            self._resize_default = signal.getsignal(signal.SIGWINCH)
        except:
            self._resize_default = None
        progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets, fd=sys.stdout)
Project: deepdream-neural-style-transfer    Author: rdcolema    | Project source | File source
def _create_pbar(self, max_iter):
        """
            Creates a progress bar.
        """

        self.grad_iter = 0
        self.pbar = pb.ProgressBar()
        self.pbar.widgets = ["Optimizing: ", pb.Percentage(),
                             " ", pb.Bar(marker=pb.AnimatedMarker()),
                             " ", pb.ETA()]
        self.pbar.maxval = max_iter
Project: baiji    Author: bodylabs    | Project source | File source
def setup_progressbar(self):
        from progressbar import ProgressBar, FileTransferSpeed, Bar, Percentage, ETA
        return ProgressBar(widgets=[FileTransferSpeed(), ' <<<', Bar(), '>>> ', Percentage(), ' ', ETA()])
Project: mmd    Author: dougalsutherland    | Project source | File source
def __init__(self, widgets=None, **kwargs):
        import progressbar as pb

        logging.Handler.__init__(self)

        if widgets is None:
            class CommaProgress(pb.widgets.WidgetBase):
                def __call__(self, progress, data):
                    return '{value:,} of {max_value:,}'.format(**data)

            widgets = [' ', CommaProgress(), ' (', pb.Percentage(), ') ',
                       pb.Bar(), ' ', pb.ETA()]

        self.pbar_args = {'widgets': widgets}
        self.pbar_args.update(kwargs)
Project: django-geoware    Author: un33k    | Project source | File source
def _get_progress_widgets(self):
        """
        Returns the progress widgets for a file download.
        """
        format_custom_text = progressbar.FormatCustomText(
            'Fetching [ %(file)s ] :: ', dict(file=self.remote_file_name),
        )

        widgets = [
            format_custom_text,
            progressbar.ETA(),
            progressbar.Percentage(),
            progressbar.Bar(),
        ]
        return widgets
Project: Deep_Learning_In_Action    Author: SunnyMarkLiu    | Project source | File source
def transfer(self):
        image_reshape = np.ndarray(shape=(self.pre_images.shape[0], self.output_rows, self.output_cols, 3),
                                   dtype=np.float16)

        widgets = ['Transfer: ', pbar.Percentage(), ' ', pbar.Bar('>'), ' ', pbar.ETA()]
        image_bar = pbar.ProgressBar(widgets=widgets, maxval=self.pre_images.shape[0]).start()

        for i in range(0, self.pre_images.shape[0]):
            image = self.pre_images[i].reshape(self.pre_img_rows, self.pre_img_cols)
            image = image.astype('uint8')
            im = Image.fromarray(image)  # monochromatic image
            imrgb = im.convert('RGB')
            imrgb = imrgb.resize((self.output_rows, self.output_cols), Image.ANTIALIAS)

            im = np.array(imrgb, dtype=np.float16)
            im[:, :, 0] -= imagenet_mean['R']
            im[:, :, 1] -= imagenet_mean['G']
            im[:, :, 2] -= imagenet_mean['B']
            # 'RGB'->'BGR', historical reasons in OpenCV
            im = im[:, :, ::-1]
            image_reshape[i] = im

            # test for correct convert!
            # if i < 3:
            #     img = Image.fromarray(np.uint8(im))
            #     img.save(str(i) + '.jpeg', 'jpeg')
            image_bar.update(i + 1)
        image_bar.finish()
        print('image_reshape:', image_reshape.shape)

        return image_reshape
Project: object-detector    Author: penny4860    | Project source | File source
def _generate_negative_patches(self, negative_image_files, window_size, step, pyramid_scale, threshold_prob):

        widgets = ["Generating negative samples which represent high probability: ", 
        progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
        pbar = progressbar.ProgressBar(maxval=len(negative_image_files), widgets=widgets).start()

        for i, image_file in enumerate(negative_image_files):
            image = cv2.imread(image_file)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # detect objects in the image
            (boxes, probs) = self.run(image, 
                                      window_size, step, pyramid_scale, 
                                      threshold_prob, 
                                      do_nms=False, 
                                      show_result=False, 
                                      show_operation=False)

            pbar.update(i)

            for (y1, y2, x1, x2), prob in zip(boxes, probs):
                negative_patch = cv2.resize(image[y1:y2, x1:x2], (window_size[1], window_size[0]), interpolation=cv2.INTER_AREA)
                yield negative_patch, prob

        pbar.finish()

    # todo: code review
Project: zabbix-scripts    Author: globocom    | Project source | File source
def createSQL(table,values,name='insert'):
    '''
    Generate the SQL insert lines, breaking each INSERT into up to ~1k values
    and each file into up to ~1k INSERTs (~1M values total per SQL file)
    '''
    logger.info('Generating SQL file')
    queryInsert='INSERT INTO %s (itemid,clock,num,value_min,value_avg,value_max) VALUES' % table
    i=0 # Controls the progress bar
    x=0 # Controls number of inserts in one line
    y=0 # Controls number of lines in one file
    z=0 # Controls number of file name
    valuesLen=values.__len__()
    sqlFile='%s.sql.%d' % (name,z)
    logger.debug('Total itens for %s: %d' % (name,valuesLen))

    if valuesLen > 0:
        bar=ProgressBar(maxval=valuesLen,widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        for value in values:
            i+=1
            x+=1
            if x != 1: # First line only
                sqlInsert='%s,%s' % (sqlInsert,value)
            else:
                sqlInsert=value
            if y >= 1000: # If there is more than 1k lines, write to new file
                z+=1
                y=0
            if x >= 1000 or i == valuesLen: # If there is more than 1k values or we finished our list, write to file
                sqlFile='%s.sql.%d' % (name,z)
                fileAppend(f=sqlFile,content='%s %s;\n' % (queryInsert,sqlInsert))
                x=0
                y+=1
                sqlInsert=''
            if args.loglevel.upper() != 'DEBUG': # Dont print progressbar if in debug mode
                bar.update(i)
        bar.finish()
    else:
        logger.warning('No values received')
Project: zabbix-scripts    Author: globocom    | Project source | File source
def discovery_disable_all(status=0):
  """
  Alterar status de todos os discoveries *auto*
  Status 0 = enable
  Status 1 = disable
  """ 
  logger.info('Disabling all network discoveries')
  druleids = zapi.drule.get(output=[ 'druleid', 'iprange', 'name', 'proxy_hostid', 'status' ],
      selectDChecks='extend', filter={ 'status': 0 })
  if ( druleids.__len__() == 0 ):
    logger.info('Done')
    return
  bar = ProgressBar(maxval=druleids.__len__(),widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
  i = 0
  for x in druleids:
    params_disable = {
      'druleid': x['druleid'],
      'iprange': x['iprange'],
      'name': x['name'],
      'dchecks': x['dchecks'],
      'status': 1
    }
    out = zapi.drule.update(**params_disable)
    logger.echo = False
    if out:
      logger.debug('\tNew status: %s (%s) --> %d' % (x['name'],out['druleids'],status))
    else:
      logger.warning('\tFAILED to change status: %s (%s) --> %d' % (x['name'],out['druleids'],status))
    i += 1
    bar.update(i)
  logger.echo = True
  bar.finish()
  logger.info('Done')
  return
Project: ZabbixTuner    Author: janssenlima    | Project source | File source
def desabilitaItensNaoSuportados():
    query = {
            "output": "extend",
            "filter": {
                "state": 1
            },
            "monitored": True
        }

    filtro = raw_input('Search filter for key_? [empty = ENTER] ')
    if len(filtro) > 0:
        query['search']={'key_': filtro}

    limite = raw_input('Item limit? [empty = ENTER] ')
    if len(limite) > 0:
        try:
            query['limit']=int(limite)
        except:
            print 'Invalid limit'
            raw_input("Press ENTER to go back")
            main()

    opcao = raw_input("Confirma operação? [s/n]")
    if opcao == 's' or opcao == 'S':
        itens = zapi.item.get(query)
        print 'Found {} items'.format(len(itens))
        bar = ProgressBar(maxval=len(itens), widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
        i = 0
        for x in itens:
            result = zapi.item.update({"itemid": x['itemid'], "status": 1})
            i += 1
            bar.update(i)
        bar.finish()
        print "Itens desabilitados!!!"
        raw_input("Pressione ENTER para continuar")
    main()
Project: cohda    Author: ambimanus    | Project source | File source
def __init__(self, maxval):
        pbar.ProgressBar.__init__(self, widgets=[pbar.Percentage(), ' ',
                pbar.Bar(), ' ', pbar.ETA(), ' ', GeneratorSpeed()],
                maxval=maxval)

    # def update(self, value=None):
    #     if value is None:
    #         pbar.ProgressBar.update(self, self.currval + 1)
    #     else:
    #         pbar.ProgressBar.update(self, value)
Project: Stylus    Author: amaneureka    | Project source | File source
def find_samples_bounding_rect(path):

    min_w = 0
    min_h = 0

    print ('finding bounding box:')
    bar = progressbar.ProgressBar(maxval=num_classes*num_samples,
        widgets=[
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.ETA(), ') ',
    ])
    bar.start()
    counter = 0

    for i in range(1, num_classes + 1):
        for j in range(1, num_samples + 1):

            filename = '{0}/Sample{1:03d}/img{1:03d}-{2:03d}.png'.format(path, i, j)

            # opencv read -> Gray Image -> Bounding Rect
            im = cv2.imread(filename)
            imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            imgray = cv2.bitwise_not(imgray)
            _, contours, _ = cv2.findContours(imgray, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
            _, _, w, h = cv2.boundingRect(contours[len(contours) - 1])

            # find maximum resolution
            min_w = max(min_w, w)
            min_h = max(min_h, h)

            # update progress bar
            counter = counter + 1
            bar.update(counter)

    bar.finish()
    return min_w, min_h
Project: nway    Author: JohannesBuchner    | Project source | File source
def bar(ndigits=3, **kwargs):
    if progressbar.__version__ > '3':
        counterfmt = '%(value)'+str(ndigits)+'d'
    else:
        counterfmt = '%'+str(ndigits)+'d'

    pbar = IncrementingProgressBar(widgets=[
        progressbar.Percentage(), '|', progressbar.Counter(counterfmt),
        progressbar.Bar(), progressbar.ETA()], **kwargs)
    return pbar
Project: fang    Author: rgrosse    | Project source | File source
def pbar(maxval):
    widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), progressbar.ETA()]
    return progressbar.ProgressBar(widgets=widgets, maxval=maxval).start()
Project: InstaLooter    Author: althonos    | Project source | File source
def _init_pbar(self, ini_val, max_val, label):
        self._pbar = progressbar.ProgressBar(
            min_value=0,
            max_value=max_val,
            initial_value=ini_val,
            widgets=[
                label,
                progressbar.Percentage(),
                '(', progressbar.SimpleProgress(), ')',
                progressbar.Bar(),
                progressbar.Timer(), ' ',
                '|', progressbar.ETA(),
            ]
        )
        self._pbar.start()
Project: IBRel    Author: lasigeBioTM    | Project source | File source
def load_corpus(self, corenlpserver, process=True):
        """Load the CHEMDNER corpus file on the dir element"""
        # open filename and parse lines
        total_lines = sum(1 for line in open(self.path))
        widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.Timer()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=total_lines).start()
        n_lines = 1
        time_per_abs = []
        with io.open(self.path, 'r', encoding="utf-8") as inputfile:
            for line in inputfile:
                t = time.time()
                # each line is PMID  title   abs
                tsv = line.split('\t')
                doctext = tsv[1].strip().replace("<", "(").replace(">", ")").replace(". ", ", ") + ". "
                doctext += tsv[2].strip().replace("<", "(").replace(">", ")")
                newdoc = Document(doctext, process=False,
                                  did=tsv[0], title=tsv[1].strip() + ".")
                newdoc.sentence_tokenize("biomedical")
                if process:
                    newdoc.process_document(corenlpserver, "biomedical")
                self.documents[newdoc.did] = newdoc
                abs_time = time.time() - t
                time_per_abs.append(abs_time)
                pbar.update(n_lines)
                n_lines += 1
        pbar.finish()
        abs_avg = sum(time_per_abs)*1.0/len(time_per_abs)
        logging.info("average time per abstract: %ss" % abs_avg)
Project: IBRel    Author: lasigeBioTM    | Project source | File source
def load_corpus(self, corenlpserver, process=True):

        soup = BeautifulSoup(codecs.open(self.path, 'r', "utf-8"), 'html.parser')
        docs = soup.find_all("article")
        widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.Timer()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=len(docs)).start()
        n_lines = 1
        time_per_abs = []
        for doc in docs:
            did = "GENIA" + doc.articleinfo.bibliomisc.text.split(":")[1]
            title = doc.title.sentence.get_text()
            sentences = doc.abstract.find_all("sentence")
            doc_sentences = []
            doc_text = title + " "
            doc_offset = 0
            for si, s in enumerate(sentences):
                t = time.time()
                stext = s.get_text()
                sid = did + ".s" + str(si)
                doc_text += stext + " "
                this_sentence = Sentence(stext, offset=doc_offset, sid=sid, did=did)
                doc_offset = len(doc_text)
                doc_sentences.append(this_sentence)
            newdoc = Document(doc_text, process=False, did=did)
            newdoc.sentences = doc_sentences[:]
            newdoc.process_document(corenlpserver, "biomedical")
            #logging.info(len(newdoc.sentences))
            self.documents[newdoc.did] = newdoc
            abs_time = time.time() - t
            time_per_abs.append(abs_time)
            logging.debug("%s sentences, %ss processing time" % (len(newdoc.sentences), abs_time))
            pbar.update(n_lines)
            n_lines += 1
        pbar.finish()
        abs_avg = sum(time_per_abs)*1.0/len(time_per_abs)
        logging.info("average time per abstract: %ss" % abs_avg)
Project: LetvCloud    Author: jiangchao0304    | Project source | File source
def doUploadFileProgress(self,filePath, url):
        startTime = getNow()
        result = False
        try:
            widgets = ['Progress: ', Percentage(), ' ', Bar(
                marker=RotatingMarker('>-=')), ' ', ETA(), ' ', FileTransferSpeed()]
            pbar = ProgressBar(widgets=widgets, maxval=os.path.getsize(filePath)).start()
            progress = Progress()
            fileSizeStr = formatSize(os.path.getsize(filePath))
            logger.info("??????{0} ?? {1}".format(filePath,fileSizeStr))
            stream = file_with_callback(filePath, 'rb', progress.update,os.path.basename(filePath),pbar)
            params = {"filedata": stream}
            datagen, headers = multipart_encode(params)
            upload_request =urllib2.Request(url, datagen, headers)
            response = urllib2.urlopen(upload_request).read()
            endTime = getNow()
            totalTime = caltime(startTime, endTime)
            logger.info("Upload of {0} finished: start {1}, end {2}, size {3}, elapsed {4}, response {5}"
                        .format(filePath, startTime, endTime, fileSizeStr, totalTime, response))
            # The response sometimes contains trailing data that breaks json.loads with
            # errmsg("Extra data", s, end, len(s)), so check the raw string before parsing
            if "code\":0" in response.replace(' ', ''):
                result = True
            else:
                result = json.loads(response)["code"] == 0
        except Exception as e:
            logger.error("??????{0} exception: {1}".format(filePath,e))
        return result
Project: angrop    Author: salls    | Project source | File source
def _addresses_to_check_with_caching(self, show_progress=True):
        num_addrs = len(list(self._addresses_to_check()))
        widgets = ['ROP: ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()),
                   ' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
        progress = progressbar.ProgressBar(widgets=widgets, maxval=num_addrs)
        if show_progress:
            progress.start()
        self._cache = dict()
        seen = dict()
        for i, a in enumerate(self._addresses_to_check()):
            if show_progress:
                progress.update(i)
            try:
                bl = self.project.factory.block(a)
                if bl.size > self._max_block_size:
                    continue
                block_data = bl.bytes
            except (SimEngineError, SimMemoryError):
                continue
            if block_data in seen:
                self._cache[seen[block_data]].add(a)
                continue
            else:
                if len(bl.vex.constant_jump_targets) == 0 and not self._block_has_ip_relative(a, bl):
                    seen[block_data] = a
                    self._cache[a] = set()
                yield a
        if show_progress:
            progress.finish()
Project: sportsball    Author: jgershen    | Project source | File source
def update_salary_history(sport, min_date=None, max_date=None):
  min_date = min_date or datetime.datetime.today() - datetime.timedelta(days=1)
  max_date = max_date or datetime.datetime.today()

  if isinstance(min_date, basestring):
    min_date = parser.parse(min_date)
  if isinstance(max_date, basestring):
    max_date = parser.parse(max_date)

  date = min_date
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()],
                                 maxval=int((max_date-min_date).total_seconds() / (60*60*24)) + 1)
  pbar.start()
  saved = 0
  hit = 0
  while date <= max_date:
    time.sleep(1)
    day_salaries = load_positions_for_day(sport, date)
    if len(day_salaries) > 0:
      save_rg_salary_info(sport, date, day_salaries)
      saved += 1
    hit += 1
    date += datetime.timedelta(days=1)
    pbar.update(value=hit)
  pbar.finish()
  return saved
Project: sportsball    Author: jgershen    | Project source | File source
def load_overview_pages(players):
  """
  Hit the overview page and load gamelog_url_list for each of the players in the player dict.
  Maybe this should be in the webio submodule? I am leaving it here since it controls scraping program flow.
  :param players: player dict
  :return dict: player dict
  """
  # Helper function to guess which position a player plays from the overview table of stats.
  # Just grab the position from the most recent year in which it was defined, and return that.
  def quick_position_guess(overview_table):
    return overview_table.dropna(subset=['Pos'])['Pos'].iloc[-1]
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
  print 'Accessing and parsing overview pages...'
  for i, (bref_id, player_dict) in pbar(list(enumerate(players.items()))):
    overview_soup = getSoupFromURL(players[bref_id]['overview_url'])
    players[bref_id]['overview_url_content'] = overview_soup.text
    # the links to each year's game logs are in <li> tags, and the text contains 'Game Logs'
    # so we can use those to pull out our urls.
    for li in overview_soup.find_all('li'):
      if 'Game Logs' in li.getText():
        game_log_links = li.findAll('a')
        for game_log_link in game_log_links:
          players[bref_id]['gamelog_url_list'].append('http://www.basketball-reference.com' + game_log_link.get('href'))
    player_name = overview_soup.find('h1').text
    players[bref_id]['info']['name'] = player_name
    # Read (guess?) player's position
    overview_table = dfFromOverviewPage(overview_soup)
    if len(overview_table.dropna(subset=['Pos'])) > 0:
      players[bref_id]['info']['pos'] = quick_position_guess(overview_table)
    else:
      players[bref_id]['info']['pos'] = '?'  # this will only happen for chumps but by defining a value we should block exceptions
  return players
Project: sportsball    Author: jgershen    | Project source | File source
def update_players(year=None):
  year_to_update = year
  if not year_to_update:
    year_to_update = datetime.date.today().year
    if datetime.date.today().month > 8: # it's really the 201X-201(x+1) season, we should use x+1 as year
      year_to_update += 1
  logging.info("update_players: Loading all stats for new players and re-examining stats from %d" % year_to_update)
  scrape_overview_for_new_players()
  players = load_overview_dict()
  players = load_dataframes(players)
  # Identify players we know of, but haven't loaded full stats for.
  # This will include any players we just found with scrape_overview_for_new_players.
  players_to_load = [p for p in players if 'gamelog_data' not in players[p]]
  if players_to_load:
    logging.info("update_players: loading first-time stats for %d players", len(players_to_load))
    pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
    for player in pbar(players_to_load):
      logging.info('update_players: loading first-time stats for %s', player)
      players = load_player(players, player)
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
  for player in pbar(players.keys()):
    # found name, load player data
    logging.info('update_players: updating player data for %s...', player)
    players = update_player(players, player, year=year_to_update)
  save_dataframes(players)
  update_mapping_df(players)
Project: sportsball    Author: jgershen    | Project source | File source
def get_active_players(letters=list('abcdefghijklmnopqrstuvwxyz')):
  players = []
  print 'Loading currently active players from basketball-reference.com...'
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
  for letter in pbar(letters):
    letter_page = getSoupFromURL('http://www.basketball-reference.com/players/%s/' % (letter))
    # we know that all the currently active players have <strong> tags, so we'll limit our names to those
    current_names = letter_page.findAll('strong')
    for n in current_names:
      name_data = n.children.next()
      full_url = 'http://www.basketball-reference.com' + name_data.attrs['href']
      bref_id = bbr_id_regex.match(full_url).group('pid')
      players.append((bref_id, full_url))
  players = dict(players)
  return players
Project: sportsball    Author: jgershen    | Project source | File source
def get_active_players():
  letters = list('abcdefghijklmnopqrstuvwxyz')
  player_and_url_list = []
  print 'Checking currently active players on baseball-reference.com...'
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
  for letter in pbar(letters):
    letter_page = getSoupFromURL('http://www.baseball-reference.com/players/%s/' % (letter))
    # we don't just need active players (<b> tags), we need anyone who played in 2015!
    prefix_sections = letter_page.findAll('pre')
    for section in prefix_sections:
      player_and_url_list += list(_parse_bsbr_prefix_section(section))
  bref_id_dict = dict(player_and_url_list)
  return bref_id_dict
Project: sportsball    Author: jgershen    | Project source | File source
def update_numberfire_history():
  # Start by updating our slug dict and overall numberfire player information
  overall_stats = scrape_numberfire_overview_page()
  save_nf_overview_data(sport, overall_stats)

  # We only load & update numberfire slug information for players appearing in the most recent batch of overview data
  # and only if we are also able to match this player to a BREF ID. A side effect of this is that we will make no
  # predictions for any NBA players who haven't played yet this year.
  pids_to_load = []
  for ix, row in overall_stats.iterrows():
    pid, confidence = name2nbaid(row['name_player'], player_team=row['name_team'], get_confidence=True)
    if confidence > 75:
      pids_to_load.append((pid, row['slug_player']))
  old_predictions = load_nf_histplayerinfo(sport, identifiers_to_load=pids_to_load)
  scraped_salaries = {}

  new_dataframes, updated_dataframes = 0, 0
  print "Scraping updated player predictions from Numberfire..."
  pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])
  for pid, slug in pbar(pids_to_load):
    time.sleep(1)
    player_df, salary_df = load_stats_tables_from_history_page(nf_player_url.format(slug=slug))
    old_player_df = old_predictions.get(pid)
    if old_player_df is None:
      old_predictions[pid] = player_df
      new_dataframes += 1
    else:
      try:
        new_data = old_player_df.combine_first(player_df)
        old_predictions[pid] = new_data
      except ValueError as ex:
        ipdb.set_trace()
      updated_dataframes += 1
    scraped_salaries[pid] = salary_df
  logging.info('Saving scraped predictions (%d updated, %d added)', updated_dataframes, new_dataframes)
  save_nf_histplayerinfo(sport, old_predictions)
  save_nf_salary_info(sport, scraped_salaries)
Project: git-big    Author: vertexai    | Project source | File source
def make_progress_bar(name, size):
    widgets = [
        '%s: ' % name,
        progressbar.Percentage(),
        ' ',
        progressbar.Bar(),
        ' ',
        progressbar.ETA(),
        ' ',
        progressbar.DataSize(),
    ]
    return progressbar.ProgressBar(widgets=widgets, max_value=size)