Python timeit module: default_timer() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use timeit.default_timer().
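
Every excerpt below follows the same basic pattern: capture timeit.default_timer() before the work, capture it again afterwards, and subtract. As a quick orientation, here is a minimal self-contained sketch of that pattern (measure_sum is purely illustrative):

import timeit

def measure_sum(n=1000000):
    # default_timer() is the highest-resolution wall clock available on the platform
    start = timeit.default_timer()
    total = sum(range(n))
    elapsed = timeit.default_timer() - start
    print("sum(range({})) = {} in {:.6f} s".format(n, total, elapsed))
    return elapsed

if __name__ == "__main__":
    measure_sum()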

Project: run_lambda    Author: ethantkoenig
def build(self):
            end_time = timeit.default_timer()
            end_mem = memory_profiler.memory_usage()[0]

            sys.stdout = self._previous_stdout

            self._log.write("END RequestId: {r}\n".format(
                r=self._context.aws_request_id))

            duration_in_millis = int(math.ceil(1000 * (end_time - self._start_time)))
            # The memory overhead of setting up the AWS Lambda environment
            # (when actually run in AWS) is roughly 14 MB
            max_memory_used_in_mb = (end_mem - self._start_mem) / 1048576 + 14

            self._log.write(
                "REPORT RequestId: {r}\tDuration: {d} ms\t"
                "Max Memory Used: {m} MB\n"
                .format(r=self._context.aws_request_id,
                        d=duration_in_millis,
                        m=max_memory_used_in_mb))

            log = self._log.getvalue()
            return LambdaCallSummary(duration_in_millis, max_memory_used_in_mb, log)
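
The duration and memory bookkeeping above, distilled into a self-contained sketch (assuming the memory_profiler package is installed; the list allocation stands in for real handler work):

import math
import timeit
import memory_profiler

start_mem = memory_profiler.memory_usage()[0]  # resident memory of this process, in MiB
start_time = timeit.default_timer()

payload = [0] * 10**6  # stand-in for the handler's work

duration_in_millis = int(math.ceil(1000 * (timeit.default_timer() - start_time)))
mem_delta_in_mib = memory_profiler.memory_usage()[0] - start_mem
print("Duration: {} ms\tMemory delta: {:.1f} MiB".format(duration_in_millis, mem_delta_in_mib))
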
Project: Harmonbot    Author: Harmon758
def reaction_time(self, ctx):
        '''Reaction time game'''
        response, embed = await self.bot.say("Please choose 10 reactions")
        while len(response.reactions) < 10:
            await self.bot.wait_for_reaction(message = response)
            response = await self.bot.get_message(ctx.message.channel, response.id)
        reactions = response.reactions
        reaction = random.choice(reactions)
        await self.bot.edit_message(response, "Please wait..")
        for _reaction in reactions:
            try:
                await self.bot.add_reaction(response, _reaction.emoji)
            except discord.errors.HTTPException:
                await self.bot.edit_message(response, ":no_entry: Error: Please don't deselect your reactions before I've selected them")
                return
        for countdown in range(10, 0, -1):
            await self.bot.edit_message(response, "First to select the reaction _ wins.\nMake sure to have all the reactions deselected.\nGet ready! {}".format(countdown))
            await asyncio.sleep(1)
        await self.bot.edit_message(response, "First to select the reaction {} wins. Go!".format(reaction.emoji))
        start_time = timeit.default_timer()
        winner = await self.bot.wait_for_reaction(message = response, emoji = reaction.emoji)
        elapsed = timeit.default_timer() - start_time
        await self.bot.edit_message(response, "{} was the first to select {} and won with a time of {:.5} seconds!".format(winner.user.display_name, reaction.emoji, elapsed))
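
The same measure-the-gap idea outside Discord: a minimal asyncio sketch timing how long an awaited event takes to fire (all names here are illustrative):

import asyncio
import timeit

async def timed_event_wait(event):
    # start the clock, then block until someone sets the event
    start = timeit.default_timer()
    await event.wait()
    return timeit.default_timer() - start

async def demo():
    event = asyncio.Event()
    asyncio.get_running_loop().call_later(0.5, event.set)
    elapsed = await timed_event_wait(event)
    print("event fired after {:.3f} s".format(elapsed))

asyncio.run(demo())
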
Project: aesop    Author: BioMoDeL
def run(self):
        """Summary
        Perform a computational alanine scan on the initialized Alascan class.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made optional
            in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genTruncatedPQR()
        self.calcAPBS()
        self.calcCoulomb()
        self.status = 1
        stop = ti.default_timer()
        print '%s:\tAESOP alanine scan completed in %.2f seconds' % (
            self.jobname, stop - start)
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print 'WARNINGS detected, please view log files!'
        if err != 0:
            print 'ERRORS detected, please view log files!'
Project: aesop    Author: BioMoDeL
def run(self):
        """Summary
        Perform a directed mutagenesis scan on the initialized class.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made optional
            in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genPDB()
        self.genPQR()
        self.calcAPBS()
        self.calcCoulomb()
        stop = ti.default_timer()
        print '%s:\tAESOP directed mutagenesis scan completed' \
            ' in %.2f seconds' % (self.jobname, stop - start)
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print 'WARNINGS detected, please view log files!'
        if err != 0:
            print 'ERRORS detected, please view log files!'
Project: SmartSocks    Author: waylybaye
def run(self):
        request = self.request
        try:
            if ((timeit.default_timer() - self.starttime) <= self.timeout and
                    not SHUTDOWN_EVENT.isSet()):
                try:
                    f = urlopen(request)
                except TypeError:
                    # PY24 expects a string or buffer
                    # This also causes issues with Ctrl-C, but we will concede
                    # for the moment that Ctrl-C on PY24 isn't immediate
                    request = build_request(self.request.get_full_url(),
                                            data=request.data.read(self.size))
                    f = urlopen(request)
                f.read(11)
                f.close()
                self.result = sum(self.request.data.total)
            else:
                self.result = 0
        except (IOError, SpeedtestUploadTimeout):
            self.result = sum(self.request.data.total)
Project: KATE    Author: hugochan
def train(args):
    vocab = load_json(args.vocab)
    # import pdb;pdb.set_trace()
    # load corpus
    corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False)
    # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
    # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False)
    # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False)
    # print len([1 for x in corpus])
    corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus)
    w2v = Word2Vec(args.n_dim, window=args.window_size, \
        negative=args.negative, epoches=args.n_epoch)

    start = timeit.default_timer()
    w2v.train(corpus_iter)
    print 'runtime: %ss' % (timeit.default_timer() - start)

    save_w2v(w2v.model, args.save_model)
    import pdb;pdb.set_trace()
Project: Wall-EEG    Author: neurotechuoft
def run(self):
        while True:
            # check FPS + listen for new connections
            new_tick = timeit.default_timer()
            elapsed_time = new_tick - self.tick
            current_samples_in = nb_samples_in
            current_samples_out = nb_samples_out
            print "--- at t: ", (new_tick - self.start_tick), " ---"
            print "elapsed_time: ", elapsed_time
            print "nb_samples_in: ", current_samples_in - self.nb_samples_in
            print "nb_samples_out: ", current_samples_out - self.nb_samples_out
            self.tick = new_tick
            self.nb_samples_in = nb_samples_in
            self.nb_samples_out = nb_samples_out
            # time to watch for connection
            # FIXME: not so great with threads
            server.check_connections()
            time.sleep(1)
Project: Wall-EEG    Author: neurotechuoft
def __call__(self, sample):
        t = timeit.default_timer() - self.start_time

        # print timeSinceStart|Sample Id
        if self.verbose:
            print("CSV: %f | %d" % (t, sample.id))

        row = ''
        row += str(t)
        row += self.delim
        row += str(sample.id)
        row += self.delim
        for i in sample.channel_data:
            row += str(i)
            row += self.delim
        for i in sample.aux_data:
            row += str(i)
            row += self.delim
        # note: the trailing delimiter is left in place before the newline
        row += '\n'
        with open(self.file_name, 'a') as f:
            f.write(row)
Project: Eskapade    Author: KaveIO
def stop_timer(self, start_time=None):
        """Stop the run timer

        Stop the timer.  The timer is used to compute the run time.  The
        elapsed time since the timer start is returned.

        :param float start_time: function start_time input
        :returns: time difference with start in seconds
        :rtype: float
        """

        self._stop_time = timeit.default_timer()

        diff_time = self._stop_time - (start_time if start_time is not None else self._start_time)
        self._total_time += diff_time

        return diff_time
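
A self-contained sketch of the same start/stop/accumulate pattern (class and attribute names are illustrative, not Eskapade's API):

import timeit

class RunTimer:
    def __init__(self):
        self._start_time = None
        self._total_time = 0.0

    def start_timer(self):
        self._start_time = timeit.default_timer()

    def stop_timer(self, start_time=None):
        # elapsed time since the given start, defaulting to the last start_timer() call
        diff_time = timeit.default_timer() - (start_time if start_time is not None else self._start_time)
        self._total_time += diff_time
        return diff_time

t = RunTimer()
t.start_timer()
sum(range(10**6))
print("run took {:.4f} s".format(t.stop_timer()))
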
Project: borgbench    Author: dragetd
def runConfig(comp, cmin, cmax, cavg, data):
    # make sure there is no leftover repository. This will throw a warning on the shell if there is no folder, but it can be ignored
    subprocess.call(["rm", "-r", "/tmp/borgbench/"+comp])
    # run borg
    subprocess.call(["borg", "init", "-e", "none", "/tmp/borgbench/"+comp])
    start = timer()
    proc=subprocess.Popen(["borg", "create", "/tmp/borgbench/"+comp+"::test", "-v", "-s", "-C", comp, data], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.stderr.read()
    duration = timer() - start
    # parse output
    m = re.match(".*This archive: +(\d+\.?\d+ ..) +(\d+\.?\d+ ..) +(\d+\.?\d+ ..).*Chunk index: +(\d+) +(\d+)", str(output))
    if m:
        print(comp+";"+str(cmin)+";"+str(cmax)+";"+str(cavg)+";"+m.group(1)+";"+m.group(2)+";"+m.group(3)+";"+m.group(4)+";"+m.group(5)+";"+str(duration))
    else:
        print("Error")
    # and clean up
    subprocess.call(["rm", "-r", "/tmp/borgbench/"+comp])


# Benchmark calls
# For speed reasons, this should be a tmpfs
Project: Smelly-London    Author: Smelly-London
def main():

    start = timer()
    files = get_file_names()
    smell_results = []

    bar = progressbar.ProgressBar(max_value=len(files))
    processed_files = 0
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for file, smell in zip(files, executor.map(worker, files)):
            smell_results = smell_results + smell
            processed_files += 1
            bar.update(processed_files)
    smell_results = [x for x in smell_results if x]

    end = timer()
    print(end - start)
    dataminer = SmellDataMine()
    dataminer.save_to_database(smell_results)
Project: QXSConsolas    Author: qxsch
def remove(self, app):
        self._affectedServers = {}
        self.app = app
        if not "--env:" in self.app.options:
            self.app.options["--env:"] = "ALL"
        else:
            self.app.options["--env:"] = self.app.options["--env:"].upper()

        if "ALL"  in self.app.options['--app:']:
            raise AppNotFoundException("Cannot create an app called ALL, because it is a reserved word")

        t = timeit.default_timer()

        ssh = SSH()
        for appname in self.app.options['--app:']:
            self._removeApp(ssh, appname)

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Removal took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Removal took: {:.4f} seconds".format(t))

        self._deleteInventoryEntries()
Project: QXSConsolas    Author: qxsch
def backup(self, app):
        self._affectedServers = {}
        self.app = app
        self._checkNodeConfiguration()
        ssh = SSH()

        t = timeit.default_timer()

        envs = self.app.configuration.get("SplunkNodes.envs")
        role = self._roles[envs[self.app.options["--env:"]][self.app.options["--role:"]]["role"]]
        role.setRoleInfo(self.app.logger, self.app.options["--env:"], envs[self.app.options["--env:"]], self.app.options["--role:"], envs[self.app.options["--env:"]][self.app.options["--role:"]])
        self.app.logger.info("Taking a backup for the selected apps (" + ", ".join(self.app.options["--app:"]) + ") from environment \"" + self.app.options["--env:"] + "\" and role \"" + self.app.options["--role:"] + "\" to local path \"" + self.app.options["--path:"] + "\"")
        role.backup(list(self.app.options["--app:"]), ssh, self.app.options["--path:"])

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Backup took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Backup took: {:.4f} seconds".format(t))
Project: QXSConsolas    Author: qxsch
def restore(self, app):
        self._affectedServers = {}
        self.app = app
        self._checkNodeConfiguration()
        ssh = SSH()

        t = timeit.default_timer()

        envs = self.app.configuration.get("SplunkNodes.envs")
        for appName in self.app.options["--app:"]:
            assert os.path.exists(os.path.join(self.app.options["--path:"], appName)), "The app \"" + appName + "\" does not exist under: " + self.app.options["--path:"]

        role = self._roles[envs[self.app.options["--env:"]][self.app.options["--role:"]]["role"]]
        role.setRoleInfo(self.app.logger, self.app.options["--env:"], envs[self.app.options["--env:"]], self.app.options["--role:"], envs[self.app.options["--env:"]][self.app.options["--role:"]])
        self.app.logger.info("Restoring a backup for the selected apps (" + ", ".join(self.app.options["--app:"]) + ") from local path \"" + self.app.options["--path:"] + "\" to environment \"" + self.app.options["--env:"] + "\" and role \"" + self.app.options["--role:"] + "\"")
        role.restore(list(self.app.options["--app:"]), ssh, self.app.options["--path:"])

        t = timeit.default_timer() - t
        if t < self.WarnDeploymentTime:
            self.app.logger.info("Restore took: {:.4f} seconds".format(t))
        else:
            self.app.logger.warning("Restore took: {:.4f} seconds".format(t))
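
remove(), backup() and restore() above all share one idiom: time the operation with default_timer, then log at INFO or WARNING depending on a duration threshold. Distilled into a self-contained sketch (names are illustrative):

import logging
import timeit

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("deploy")
WARN_DEPLOYMENT_TIME = 5.0  # seconds

def run_timed(label, func, *args, **kwargs):
    t = timeit.default_timer()
    result = func(*args, **kwargs)
    t = timeit.default_timer() - t
    # escalate to WARNING when the operation is slower than the threshold
    level = logging.INFO if t < WARN_DEPLOYMENT_TIME else logging.WARNING
    logger.log(level, "%s took: %.4f seconds", label, t)
    return result

run_timed("Removal", sum, range(10**6))
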
Project: tiny-png    Author: Waterstrong
def compress_images(target_images):
    current = 0
    total_number = len(target_images)
    total_time = 0
    for image_file in target_images:
        current += 1
        write_log('Start compressing image: {}'.format(realpath(image_file)))
        if os.path.exists(image_file):
            time_start = timeit.default_timer()
            tinify_image(image_file)
            time_diff = round(timeit.default_timer() - time_start, 2)
            total_time += time_diff
            write_log('Compression done takes {} seconds! ({}/{})\n'.format(time_diff, current, total_number))
        else:
            write_log('Ignored: target image does not exist! ({}/{})\n'.format(current, total_number))
    if total_time > 0:
        write_log('Totally takes {} seconds to complete!'.format(total_time))
Project: Learning-Concurrency-in-Python    Author: PacktPublishing
def main():

  t1 = timeit.default_timer()
  with ProcessPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))

  print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))

  t2 = timeit.default_timer()
  with ThreadPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
  print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

  t3 = timeit.default_timer()
  for number in PRIMES:
    isPrime = is_prime(number)
    print("{} is prime: {}".format(number, isPrime))
  print("{} Seconds needed for single threaded execution".format(timeit.default_timer()-t3))
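
The excerpt assumes PRIMES and is_prime are defined elsewhere in the project; a plausible self-contained pair that makes the comparison runnable (the specific values are illustrative):

import math

PRIMES = [
    112272535095293,
    112582705942171,
    112272535095293,
    115280095190773,
    115797848077099,
    1099726899285419]

def is_prime(n):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # math.isqrt needs Python 3.8+; use int(math.sqrt(n)) on older versions
    for i in range(3, math.isqrt(n) + 1, 2):
        if n % i == 0:
            return False
    return True
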
Project: scikit-kge    Author: mnick
def _optim(self, xys):
        idx = np.arange(len(xys))
        self.batch_size = np.ceil(len(xys) / self.nbatches)
        batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)

        for self.epoch in range(1, self.max_epochs + 1):
            # shuffle training examples
            self._pre_epoch()
            shuffle(idx)

            # store epoch for callback
            self.epoch_start = timeit.default_timer()

            # process mini-batches
            for batch in np.split(idx, batch_idx):
                # select indices for current batch
                bxys = [xys[z] for z in batch]
                self._process_batch(bxys)

            # check callback function, if false return
            for f in self.post_epoch:
                if not f(self):
                    break
Project: tomato    Author: sertansenturk
def filter_pitch(self, pitch, aligned_notes):
        tic = timeit.default_timer()
        self.vprint(u"- Filtering predominant melody of {0:s} after "
                    u"audio-score alignment.".format(pitch['source']))
        aligned_notes_ = [IO.dict_keys_to_camel_case(n)
                          for n in deepcopy(aligned_notes)]

        pitch_temp, notes_filtered, synth_pitch = \
            self._aligned_pitch_filter.filter(pitch['pitch'], aligned_notes_)

        notes_filtered = [IO.dict_keys_to_snake_case(n)
                          for n in notes_filtered]

        pitch_filtered = deepcopy(pitch)
        pitch_filtered['pitch'] = pitch_temp
        pitch_filtered['citation'] = 'SenturkThesis'
        pitch_filtered['procedure'] = 'Pitch filtering according to ' \
                                      'audio-score alignment'

        # print elapsed time, if verbose
        self.vprint_time(tic, timeit.default_timer())

        return pitch_filtered, notes_filtered
Project: tomato    Author: sertansenturk
def compute_note_models(self, pitch, aligned_notes, tonic_symbol):
        tic = timeit.default_timer()
        self.vprint(u"- Computing the note models for {0:s}".
                    format(pitch['source']))

        aligned_notes_ = [IO.dict_keys_to_camel_case(n)
                          for n in deepcopy(aligned_notes)]

        note_models, pitch_distribution, tonic = self._aligned_note_model.\
            get_models(pitch['pitch'], aligned_notes_, tonic_symbol)

        for note in note_models.keys():
            note_models[note] = IO.dict_keys_to_snake_case(
                note_models[note])

        tonic = IO.dict_keys_to_snake_case(tonic['alignment'])
        tonic['source'] = pitch['source']

        # print elapsed time, if verbose
        self.vprint_time(tic, timeit.default_timer())
        return note_models, pitch_distribution, tonic
Project: tomato    Author: sertansenturk
def compute_melodic_progression(self, pitch):
        tic = timeit.default_timer()
        self.vprint(u"- Computing the melodic progression model of {0:s}"
                    .format(pitch['source']))

        if self._mel_prog_params['frame_dur'] is None:
            # compute number of frames from some simple "rule of thumb"
            duration = pitch['pitch'][-1][0]
            frame_dur = duration / self._mel_prog_params['min_num_frames']
            frame_dur = int(5 * round(float(frame_dur) / 5))  # round to 5sec

            # force to be between 5 and max_frame_dur
            if frame_dur < 5:
                frame_dur = 5
            elif frame_dur > self._mel_prog_params['max_frame_dur']:
                frame_dur = self._mel_prog_params['max_frame_dur']
        else:
            frame_dur = self._mel_prog_params['frame_dur']

        melodic_progression = self._melodic_progression_analyzer.analyze(
            pitch['pitch'], frame_dur=frame_dur,
            hop_ratio=self._mel_prog_params['hop_ratio'])
        self.vprint_time(tic, timeit.default_timer())

        return melodic_progression
Project: pymapd    Author: mapd
def benchmark(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        warmup = kwargs.pop('warmup', False)

        if warmup:
            func(*args, **kwargs)

        kind = args[0]
        t0 = timer()
        try:
            result = func(*args, **kwargs)
        except Exception:
            logger.warning("finished,%s,%s,%s", func.__name__, kind,
                           float('nan'))
        else:
            t1 = timer()
            logger.info("finished,%s,%s,%s", func.__name__, kind, t1 - t0)
            return result

    _benchmarks.append(wrapper)
    return wrapper
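
Given the decorator above (which presumes a module-level logger and a _benchmarks registry), usage might look like this hypothetical sketch; note that the wrapped function's first positional argument is logged as the benchmark kind:

@benchmark
def count_rows(kind, n):
    return sum(1 for _ in range(n))

count_rows("python-loop", 10**6, warmup=True)  # one untimed warm-up call, then a timed call
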
Project: nimo    Author: wolfram2012
def detect(self, det_iter, show_timer=False):

        num_images = det_iter._size
        # if not isinstance(det_iter, mx.io.PrefetchingIter):
        #     det_iter = mx.io.PrefetchingIter(det_iter)
        start = timer()
        detections = self.mod.predict(det_iter).asnumpy()
        time_elapsed = timer() - start
        if show_timer:
            print("Detection time for {} images: {:.4f} sec".format(
                num_images, time_elapsed))
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        return result
Project: dask-ml    Author: dask
def fit(data, use_scikit_learn=False):
    logger.info("Starting to cluster")
    # Cluster
    n_clusters = 8
    oversampling_factor = 2
    if use_scikit_learn:
        km = sk.KMeans(n_clusters=n_clusters, random_state=0)
    else:
        km = KMeans(n_clusters=n_clusters,
                    oversampling_factor=oversampling_factor,
                    random_state=0)
    t0 = tic()
    logger.info("Starting n_clusters=%2d, oversampling_factor=%2d",
                n_clusters, oversampling_factor)
    km.fit(data)
    t1 = tic()
    logger.info("Finished in %.2f", t1 - t0)
Project: krafters    Author: GianlucaBortoli
def rethinkdb_append_entry(connection):

    global ITERATION
    value = {"id": DEFAULT_VALUE, "value": ITERATION}


    try:
        t = timeit.default_timer()
        r.table(RETHINKDB_TABLE_NAME).insert(value, conflict='replace').run(connection, durability="hard", read_mode='majority')
        v = r.table(RETHINKDB_DB_NAME, read_mode='majority').run(connection, durability="hard", read_mode='majority')
        ITERATION += 1
        ITERATION %= 100
        logging.info('key added')
    except:
        logging.error('{} not added'.format(value))
    finally:
        return timeit.default_timer() - t
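
One caveat worth noting in the excerpt above: a return inside finally suppresses any exception raised in the try block, so the function always reports a duration even when the insert failed. A minimal demonstration of that Python behavior:

def swallow():
    try:
        raise ValueError("boom")
    finally:
        return "swallowed"  # the ValueError never propagates

print(swallow())  # -> swallowed
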
Project: ngraph    Author: NervanaSystems
def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            self.total_iterations = callback_data['config'].attrs['total_iterations']
            num_intervals = self.total_iterations // self.frequency
            for loss_name in self.interval_loss_comp.output_keys:
                callback_data.create_dataset("cost/{}".format(loss_name), (num_intervals,))
            callback_data.create_dataset("time/loss", (num_intervals,))
        elif phase == CallbackPhase.train_post:
            losses = loop_eval(self.dataset, self.interval_loss_comp)
            tqdm.write("Training complete.  Avg losses: {}".format(losses))
        elif phase == CallbackPhase.minibatch_post and ((idx + 1) % self.frequency == 0):
            start_loss = default_timer()
            interval_idx = idx // self.frequency

            losses = loop_eval(self.dataset, self.interval_loss_comp)

            for loss_name, loss in losses.items():
                callback_data["cost/{}".format(loss_name)][interval_idx] = loss

            callback_data["time/loss"][interval_idx] = (default_timer() - start_loss)
            tqdm.write("Interval {} Iteration {} complete.  Avg losses: {}".format(
                interval_idx + 1, idx + 1, losses))
Project: pyrpl    Author: lneuhaus
def _new_point_arrived(self, point):
        if self._paused:
            return
        self._update_benchmark()
        try:
            point = point.result()
        except CancelledError:
            self._point_cancelled()
            return  # exit the loop (could be restarted later for RunFuture)
        self._add_point(point)

        # if zero span mode, data_x is time measured, not frequency
        if self._module.is_zero_span():
            if self.current_avg==1:
                time_now = timeit.default_timer() - self._time_first_point
                self.data_x[self.current_point] = time_now
                self._module._data_x[self.current_point] = time_now

        self.current_point+=1
        if self.current_point==self.n_points:
            self._scan_finished()
        else:
            self._setup_next_point()
Project: bifrost    Author: ledatelescope
def run_benchmark(self):
        with bf.Pipeline() as pipeline:
            datafile = "numpy_data0.bin"

            bc = bf.BlockChainer()
            bc.blocks.binary_read(
                    [datafile], gulp_size=GULP_SIZE, gulp_nframe=GULP_FRAME, dtype='cf32')
            bc.blocks.copy('cuda', gulp_nframe=GULP_FRAME)
            for _ in range(NUMBER_FFT):
                bc.blocks.fft(['gulped'], axis_labels=['ft_gulped'], gulp_nframe=GULP_FRAME_FFT)
                bc.blocks.fft(['ft_gulped'], axis_labels=['gulped'], inverse=True, gulp_nframe=GULP_FRAME_FFT)

            start = timer()
            pipeline.run()
            end = timer()
            self.total_clock_time = end-start
Project: bifrost    Author: ledatelescope
def timeit(self, method):
        """ Decorator for timing execution of a method 

        Returns:

            function: the original function, wrapped
                       with a time accumulator
        """
        def timed(*args, **kw):
            ts = timer()
            result = method(*args, **kw)
            te = timer()

            self.relevant_clock_time += te-ts
            return result
        return timed
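
A minimal sketch of how such an accumulating method decorator might be used (the Benchmarker class is illustrative, not bifrost's API):

from timeit import default_timer as timer

class Benchmarker:
    def __init__(self):
        self.relevant_clock_time = 0.0

    def timeit(self, method):
        def timed(*args, **kw):
            ts = timer()
            result = method(*args, **kw)
            self.relevant_clock_time += timer() - ts
            return result
        return timed

b = Benchmarker()
timed_sum = b.timeit(sum)
timed_sum(range(10**6))
print("accumulated: {:.4f} s".format(b.relevant_clock_time))
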
Project: k8scntkSamples    Author: weehyong
def init():
    """ Initialise ResNet 152 model
    """
    global trainedModel, labelLookup, mem_after_init

    start = t.default_timer()

    # Load the model and labels from disk
    with open('synset.txt', 'r') as f:
        labelLookup = [l.rstrip() for l in f]

    # Load model and load the model from brainscript (3rd index)
    trainedModel = load_model('ResNet_152.model')
    trainedModel = combine([trainedModel.outputs[3].owner])
    end = t.default_timer()

    loadTimeMsg = "Model loading time: {0} ms".format(round((end-start)*1000, 2))
    logger.info(loadTimeMsg)
Project: yadll    Author: pchavanne
def timer(what_to_show="Function execution"):
    """
    decorator that send the execution time of the argument function to the logger

    Parameters
    ----------
    what_to_show : `string`, optional
        message displayed after execution

    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start_time = timeit.default_timer()
            res = func(*args, **kwargs)
            end_time = timeit.default_timer()
            s = end_time - start_time
            try:
                msg = what_to_show + ' ' + args[0].name
            except (AttributeError, IndexError, TypeError):
                msg = what_to_show
            logger.info('%s took %s' % (msg, format_sec(s)))
            return res
        return wrapper
    return func_wrapper
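
Given the factory above (logger and format_sec come from the surrounding yadll module), usage might look like:

@timer(what_to_show="Sorting")
def sort_data(data):
    return sorted(data)

sort_data(list(range(100000, 0, -1)))  # logs something like "Sorting took <formatted time>"
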
Project: vinci    Author: Phylliade
def on_episode_end(self, episode, logs):
        duration = timeit.default_timer() - self.starts[episode]

        metrics = self.metrics[episode]
        if np.isnan(metrics).all():
            mean_metrics = np.array([np.nan for _ in self.metrics_names])
        else:
            mean_metrics = np.nanmean(metrics, axis=0)
        assert len(mean_metrics) == len(self.metrics_names)

        data = list(zip(self.metrics_names, mean_metrics))
        data += list(logs.items())
        data += [('episode', episode), ('duration', duration)]
        for key, value in data:
            if key not in self.data:
                self.data[key] = []
            self.data[key].append(value)

        if self.interval is not None and episode % self.interval == 0:
            self.save_data()

        # Clean up.
        del self.metrics[episode]
        del self.starts[episode]
Project: sporco    Author: bwohlberg
def elapsed(self, total=True):
        """Return the elapsed time for the timer.

        Parameters
        ----------
        total : bool, optional (default True)
          If ``True`` return the total elapsed time since the first
          call of :meth:`start` for the selected timer, otherwise
          return the elapsed time since the most recent call of
          :meth:`start` for which there has not been a corresponding
          call to :meth:`stop`.

        Returns
        -------
        dlt : float
          Elapsed time
        """

        return self.timer.elapsed(self.label, total=total)
Project: py-vanitygen    Author: 1200wd
def address_search(pipeout, search_for='12o'):
    privkey = random.randrange(2**256)
    address = ''
    count = 0
    start = timeit.default_timer()

    os.write(pipeout, "Searching for %s (pid %s)" % (search_for, os.getpid()))

    while not search_for in address:
        privkey += 1
        pubkey_point = fast_multiply(G, privkey)
        address = pubkey_to_address(pubkey_point)
        count += 1
        if not count % 1000:
            os.write(pipeout, "Searched %d in %d seconds (pid %d)" % (count, timeit.default_timer()-start, os.getpid()))

    os.write(pipeout, "Found address %s" % address)
    os.write(pipeout, "Private key HEX %s" % encode_privkey(privkey,'hex'))
Project: zippy    Author: securesystemslab
def test_zetazero():
    cases = [\
    (399999999, 156762524.6750591511),
    (241389216, 97490234.2276711795),
    (526196239, 202950727.691229534),
    (542964976, 209039046.578535272),
    (1048449112, 388858885.231056486),
    (1048449113, 388858885.384337406),
    (1048449114, 388858886.002285122),
    (1048449115, 388858886.00239369),
    (1048449116, 388858886.690745053)
    ]
    for n, v in cases:
        print(n, v)
        t1 = clock()
        ok = zetazero(n).ae(complex(0.5,v))
        t2 = clock()
        print("ok =", ok, ("(time = %s)" % round(t2-t1,3)))
    print("Now computing two huge zeros (this may take hours)")
    print("Computing zetazero(8637740722917)")
    ok = zetazero(8637740722917).ae(complex(0.5,2124447368584.39296466152))
    print("ok =", ok)
    ok = zetazero(8637740722918).ae(complex(0.5,2124447368584.39298170604))
    print("ok =", ok)
Project: zippy    Author: securesystemslab
def doctests(filter=[]):
    try:
        import psyco; psyco.full()
    except ImportError:
        pass
    import sys
    from timeit import default_timer as clock
    for i, arg in enumerate(sys.argv):
        if '__init__.py' in arg:
            filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")]
            break
    import doctest
    globs = globals().copy()
    for obj in globs: #sorted(globs.keys()):
        if filter:
            if not sum([pat in obj for pat in filter]):
                continue
        sys.stdout.write(str(obj) + " ")
        sys.stdout.flush()
        t1 = clock()
        doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv))
        t2 = clock()
        print(round(t2-t1, 3))
Project: treecat    Author: posterior
def __enter__(self):
        self.elapsed -= default_timer()
Project: treecat    Author: posterior
def __exit__(self, type, value, traceback):
        self.elapsed += default_timer()
        self.count += 1
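
Together these two methods form an accumulating context-manager timer: __enter__ subtracts the current clock reading and __exit__ adds it back, so elapsed nets out to exactly the time spent inside the with block. A self-contained sketch of the idea (the class name is assumed):

from timeit import default_timer

class CumulativeTimer:
    def __init__(self):
        self.elapsed = 0.0
        self.count = 0

    def __enter__(self):
        self.elapsed -= default_timer()
        return self

    def __exit__(self, type, value, traceback):
        self.elapsed += default_timer()
        self.count += 1

timer = CumulativeTimer()
for _ in range(3):
    with timer:
        sum(range(10**5))
print(timer.elapsed, timer.count)  # total seconds inside the blocks, and 3
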
Project: CubeSter    Author: BlendingJake
def execute(self, context):
        verts, faces = [], []

        start = timeit.default_timer()         
        scene = bpy.context.scene
        error = False

        if scene.cubester_audio_image == "image":
            if create_mesh_from_image(self, scene, verts, faces) == -1:
                error = True

            frames = find_sequence_images(self, context)
            created = len(frames[0])
        else:
            create_mesh_from_audio(self, scene, verts, faces)
            created = int(scene.cubester_audio_file_length)

        stop = timeit.default_timer()    

        if not error:
            if scene.cubester_mesh_style == "blocks" or scene.cubester_audio_image == "audio":
                self.report({"INFO"}, "CubeSter: {} blocks and {} frame(s) in {}s".format(str(int(len(verts) / 8)),
                                                                                          str(created),
                                                                                          str(round(stop - start, 4))))
            else:
                self.report({"INFO"}, "CubeSter: {} points and {} frame(s) in {}s" .format(str(len(verts)),
                                                                                           str(created),
                                                                                           str(round(stop - start, 4))))

        return {"FINISHED"}
Project: run_lambda    Author: ethantkoenig
def __init__(self, context):
            self._context = context

            self._start_mem = memory_profiler.memory_usage()[0]

            self._log = StringIO()
            self._log.write("START RequestId: {r} Version: {v}\n".format(
                r=context.aws_request_id, v=context.function_version
            ))
            self._start_time = timeit.default_timer()
            self._previous_stdout = sys.stdout

            handler = logging.StreamHandler(stream=self._log)
            logging.getLogger().addHandler(handler)
            sys.stdout = self._log
Project: public-dns    Author: ssut
def main():
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from publicdns.client import PublicDNS

    domains = []
    filename = os.path.join(os.path.dirname(__file__), 'google_domains.txt')
    with open(filename, 'r') as f:
        domains = f.read().split('\n')
    size = len(domains)

    tqdmargs = {
        'total': 100,
        'unit': 'it',
        'unit_scale': True,
        'leave': True,
    }

    with ThreadPoolExecutor(max_workers=4) as pool:
        print('- dns.resolver')
        started = timeit.default_timer()
        resolver = dns_resolver.Resolver()
        resolver.nameservers = ['8.8.8.8', '8.8.4.4']
        futures = [pool.submit(resolver.query, domains[i % size], 'A')
                   for i in range(100)]
        for _ in tqdm(as_completed(futures), **tqdmargs):
            pass
        elapsed = timeit.default_timer() - started
        print('dns.resolver * 100 - took {}s'.format(elapsed))

    with ThreadPoolExecutor(max_workers=4) as pool:
        print('- PublicDNS')
        started = timeit.default_timer()
        client = PublicDNS()
        futures = [pool.submit(client.query, domains[i % size], 'A')
                   for i in range(100)]
        for _ in tqdm(as_completed(futures), **tqdmargs):
            pass
        elapsed = timeit.default_timer() - started
        print('\nPublicDNS * 100 - took {}s'.format(elapsed))
Project: metadataproxy    Author: lyft
def __enter__(self):
        self.start_time = timeit.default_timer()
        return self
Project: metadataproxy    Author: lyft
def __exit__(self, *args):
        self.end_time = timeit.default_timer()
        self.exec_duration = self.end_time - self.start_time
Project: saapy    Author: ashapochka
def export_scitools(ctx, udb_path, output_path):
    if os.path.exists(output_path):
        try:
            os.remove(output_path)
        except OSError as e:
            print("Error: %s - %s." % (e.filename, e.strerror))
    scitools_db = scitools_to_structs(udb_path)
    start = timer()
    with open(output_path, 'w') as output_stream:
        yaml.dump(scitools_db, output_stream)
    end = timer()
    execution_time = end - start
    print('transfer time:', timedelta(seconds=execution_time))
Project: deb-python-cassandra-driver    Author: openstack
def test_nts_token_performance(self):
        """
        Tests to ensure that when rf exceeds the number of nodes available, we don't
        needlessly iterate trying to construct tokens for nodes that don't exist.

        @since 3.7
        @jira_ticket PYTHON-379
        @expected_result timing with 1500 rf should be same/similar to 3rf if we have 3 nodes

        @test_category metadata
        """

        token_to_host_owner = {}
        ring = []
        dc1hostnum = 3
        current_token = 0
        vnodes_per_host = 500
        for i in range(dc1hostnum):

            host = Host('dc1.{0}'.format(i), SimpleConvictionPolicy)
            host.set_location_info('dc1', "rack1")
            for vnode_num in range(vnodes_per_host):
                md5_token = MD5Token(current_token+vnode_num)
                token_to_host_owner[md5_token] = host
                ring.append(md5_token)
            current_token += 1000

        nts = NetworkTopologyStrategy({'dc1': 3})
        start_time = timeit.default_timer()
        nts.make_token_replica_map(token_to_host_owner, ring)
        elapsed_base = timeit.default_timer() - start_time

        nts = NetworkTopologyStrategy({'dc1': 1500})
        start_time = timeit.default_timer()
        nts.make_token_replica_map(token_to_host_owner, ring)
        elapsed_bad = timeit.default_timer() - start_time
        difference = elapsed_bad - elapsed_base
        self.assertTrue(difference < 1 and difference > -1)
Project: code    Author: ActiveState
def __init__(self, timer=None, disable_gc=False, verbose=True):
        if timer is None:
            timer = timeit.default_timer
        self.timer = timer
        self.disable_gc = disable_gc
        self.verbose = verbose
        self.start = self.end = self.interval = None
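
Only __init__ of this timing recipe is excerpted; a plausible self-contained completion of the context manager it implies (an assumption, not the recipe's verbatim code):

import gc
import timeit

class Timer:
    def __init__(self, timer=None, disable_gc=False, verbose=True):
        self.timer = timer if timer is not None else timeit.default_timer
        self.disable_gc = disable_gc
        self.verbose = verbose
        self.start = self.end = self.interval = None

    def __enter__(self):
        if self.disable_gc:
            self.gc_was_enabled = gc.isenabled()
            gc.disable()  # keep collection pauses out of the measurement
        self.start = self.timer()
        return self

    def __exit__(self, *exc_info):
        self.end = self.timer()
        if self.disable_gc and self.gc_was_enabled:
            gc.enable()
        self.interval = self.end - self.start
        if self.verbose:
            print('time taken: %f seconds' % self.interval)
        return False

with Timer(disable_gc=True):
    sum(range(10**6))
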
Project: aesop    Author: BioMoDeL
def run_parallel(self, n_workers=None):
        """Summary
        Perform a computational alanine scan on the initialized Alascan class
        using multiple processes in parallel.

        Parameters
        ----------
        n_workers : int
            Number of processes to run. If None, method will use all available
            threads.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made optional
            in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genTruncatedPQR()
        self.calcAPBS_parallel(n_workers)
        self.calcCoulomb_parallel(n_workers)
        self.status = 1
        stop = ti.default_timer()
        print '%s:\tAESOP alanine scan completed in %.2f seconds' % (
            self.jobname, stop - start)
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print 'WARNINGS detected, please view log files!'
        if err != 0:
            print 'ERRORS detected, please view log files!'
Project: aesop    Author: BioMoDeL
def run_parallel(self, n_workers=None):
        """Summary
        Perform a computational directed mutagenesis scan on the initialized
        class using multiple processes in parallel.

        Parameters
        ----------
        n_workers : int
            Number of processes to run. If None, method will use all
            available threads.

        Returns
        -------
        None
            Outputs text to STDOUT when run is complete, will be made
            optional in the future.
        """
        start = ti.default_timer()
        self.logs = []
        self.genPDB()
        self.genPQR()
        self.calcAPBS_parallel()
        self.calcCoulomb_parallel()
        stop = ti.default_timer()
        print '%s:\tAESOP directed mutagenesis scan completed' \
            ' in %.2f seconds' % (self.jobname, stop - start)
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print 'WARNINGS detected, please view log files!'
        if err != 0:
            print 'ERRORS detected, please view log files!'
Project: aesop    Author: BioMoDeL
def run(self,
            center=False,
            superpose=False,
            esi=False,
            esd=True,
            selstr=None,
            idx=0,
            minim=False):
        start = ti.default_timer()
        self.logs = []
        if center:
            self.centerPDB()
        if self.minim or minim:
            self.minimPDB()
        if superpose:
            self.superposePDB()
        self.initializeGrid()
        self.genPQR()
        if selstr is not None:
            self.mutatePQR(selstr=selstr)
        if len(self.pdbfiles) == 1 and selstr is None:
            self.mutatePQR()
        self.genDX()
        if esd:
            self.calcESD()
        if esi:
            self.calcESI(idx=idx)
        stop = ti.default_timer()
        print '%s:\tAESOP electrostatic similarity comparison ' \
            'completed in %.2f seconds' % (self.jobname, stop - start)
        warn = self.checkwarnings()
        err = self.checkerrors()
        if warn != 0:
            print 'WARNINGS detected, please view log files!'
        if err != 0:
            print 'ERRORS detected, please view log files!'
Project: SmartSocks    Author: waylybaye
def run(self):
        try:
            if (timeit.default_timer() - self.starttime) <= self.timeout:
                f = urlopen(self.request)
                while (not SHUTDOWN_EVENT.isSet() and
                        (timeit.default_timer() - self.starttime) <=
                        self.timeout):
                    self.result.append(len(f.read(10240)))
                    if self.result[-1] == 0:
                        break
                f.close()
        except IOError:
            pass
Project: SmartSocks    Author: waylybaye
def read(self, n=10240):
        if ((timeit.default_timer() - self.start) <= self.timeout and
                not SHUTDOWN_EVENT.isSet()):
            chunk = self.data.read(n)
            self.total.append(len(chunk))
            return chunk
        else:
            raise SpeedtestUploadTimeout()
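
The three SmartSocks excerpts all follow the same deadline idiom: record one start timestamp, then compare elapsed wall time against the timeout on every iteration. Distilled into a self-contained sketch:

import timeit

def read_with_deadline(chunks, timeout):
    start = timeit.default_timer()
    total = 0
    for chunk in chunks:
        if timeit.default_timer() - start > timeout:
            raise TimeoutError("deadline exceeded")
        total += len(chunk)
    return total

print(read_with_deadline([b"x" * 10240] * 5, timeout=1.0))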