Python time module: clock() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use time.clock().
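Note that time.clock() was deprecated in Python 3.3 and removed in Python 3.8; time.perf_counter() is the usual replacement for interval timing and time.process_time() for CPU time. As a baseline for the snippets below, here is a minimal stopwatch sketch (not taken from any of the listed projects) that falls back to the newer API when time.clock() is unavailable:

import time

try:
    clock = time.clock          # Python 2 and Python <= 3.7
except AttributeError:
    clock = time.perf_counter   # Python 3.8+, where time.clock() was removed

start = clock()
total = sum(i * i for i in range(100000))   # the work being timed
elapsed = clock() - start
print('elapsed: %.6fs' % elapsed)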

Project: pycos    Author: pgiri    | Project source | File source
def acquire(self, blocking=True, timeout=-1):
        """Must be used with 'yield' as 'yield lock.acquire()'.
        """
        if not blocking and self._owner is not None:
            raise StopIteration(False)
        if not self._scheduler:
            self._scheduler = Pycos.scheduler()
        task = Pycos.cur_task(self._scheduler)
        if timeout < 0:
            timeout = None
        while self._owner is not None:
            if timeout is not None:
                if timeout <= 0:
                    raise StopIteration(False)
                start = _time()
            self._waitlist.append(task)
            if (yield task._await_(timeout)) is None:
                try:
                    self._waitlist.remove(task)
                except ValueError:
                    pass
            if timeout is not None:
                timeout -= (_time() - start)
        self._owner = task
        raise StopIteration(True)
Project: tsproxy    Author: WPO-Foundation    | Project source | File source
def SendMessage(self, message, main_thread = True):
    global connections, in_pipe, out_pipe
    message_sent = False
    now = time.clock()
    if message['message'] == 'closed':
      message['time'] = now
    else:
      message['time'] = time.clock() + self.latency
    message['size'] = .0
    if 'data' in message:
      message['size'] = float(len(message['data']))
    try:
      connection_id = message['connection']
      # Send messages directly, bypassing the queues, if throttling is disabled and we are on the main thread
      if main_thread and connection_id in connections and self.peer in connections[connection_id] and self.latency == 0 and self.kbps == .0:
        message_sent = self.SendPeerMessage(message)
    except:
      pass
    if not message_sent:
      try:
        self.queue.put(message)
      except:
        pass
Project: tsproxy    Author: WPO-Foundation    | Project source | File source
def SendPeerMessage(self, message):
    global last_activity
    last_activity = time.clock()
    message_sent = False
    connection_id = message['connection']
    if connection_id in connections:
      if self.peer in connections[connection_id]:
        try:
          connections[connection_id][self.peer].handle_message(message)
          message_sent = True
        except:
          # Clean up any disconnected connections
          try:
            connections[connection_id]['server'].close()
          except:
            pass
          try:
            connections[connection_id]['client'].close()
          except:
            pass
          del connections[connection_id]
    return message_sent
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def trace_dispatch(self, frame, event, arg):
        timer = self.timer
        t = timer()
        t = t[0] + t[1] - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame,t):
            t = timer()
            self.t = t[0] + t[1]
        else:
            r = timer()
            self.t = r[0] + r[1] - t # put back unrecorded delta

    # Dispatch routine for best timer program (return = scalar, fastest if
    # an integer but float works too -- and time.clock() relies on that).
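For reference, the scalar-timer dispatch routine that this comment introduces looks roughly like the following (a sketch modelled on CPython's profile.py; the two-element timer above returns a (user, system) tuple, while this variant assumes the timer returns a single number):

def trace_dispatch_i(self, frame, event, arg):
    timer = self.timer
    t = timer() - self.t - self.bias

    if event == "c_call":
        self.c_func_name = arg.__name__

    if self.dispatch[event](self, frame, t):
        self.t = timer()
    else:
        self.t = timer() - t  # put back unrecorded delta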
Project: stackimpact-python    Author: stackimpact    | Project source | File source
def process_sample(self, signal_frame, sample_time, main_thread_id):
        if self.profile:
            start = time.clock()

            current_frames = sys._current_frames()
            items = current_frames.items()
            for thread_id, thread_frame in items:
                if thread_id == main_thread_id:
                    thread_frame = signal_frame

                stack = self.recover_stack(thread_frame)
                if stack:
                    current_node = self.profile
                    for frame in reversed(stack):
                        current_node = current_node.find_or_add_child(str(frame))
                    current_node.increment(sample_time, 1)

                thread_id, thread_frame, stack = None, None, None

            items = None
            current_frames = None

            self.profile._overhead += (time.clock() - start)
Project: code    Author: ActiveState    | Project source | File source
def build_graph():
    # Build GUI environment.
    global frame_handle, y, x, start, sec, timer_text, clock_handle
    frame_handle = graph.after(1000 / FRAMES_PER_SEC, update)
    graph.bind('<1>', change)
    graph['background'] = GAME_COLOR
    # Draw environment.
    y = HEIGHT - WALL + BALL_RADIUS + 2
    graph.create_rectangle((0, 0, WALL - BALL_RADIUS, y), fill=FORCE_COLOR)
    graph.create_rectangle((WIDTH - WALL + BALL_RADIUS, 0, WIDTH, y), fill=FORCE_COLOR)
    graph.create_line((0, y, WIDTH, y), fill=FLOOR_COLOR, width=3)
    # Prepare timer data.
    x = (WALL - BALL_RADIUS) / 2
    y = (y + HEIGHT) / 2
    start = time.clock()
    sec = 0
    timer_text = graph.create_text(x, y, text=f_time(TIME_LIMIT))
    clock_handle = graph.after(1000, update_clock)

################################################################################

# ANIMATION LOOP FUNCTIONS
Project: code    Author: ActiveState    | Project source | File source
def update():
    'Run physics and update screen.'
    global frame
    try:
        for mutate in wall, floor, gravity, friction, governor:
            for ball in balls:
                mutate(ball)
        for index, ball_1 in enumerate(balls[:-1]):
            for ball_2 in balls[index+1:]:
                ball_1.crash(ball_2)
        for ball in balls:
            ball.move(FPS)
        screen.delete('animate')
        for ball in balls:
            x1 = ball.pos.x - ball.rad
            y1 = ball.pos.y - ball.rad
            x2 = ball.pos.x + ball.rad
            y2 = ball.pos.y + ball.rad
            screen.create_oval(x1, y1, x2, y2, fill=BALL_COLOR, tag='animate')
        frame += 1
        screen.after(int((start + frame / FPS - time.clock()) * 1000), update)
    except:
        screen.delete(Tkinter.ALL)
        screen.create_text(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2, text=traceback.format_exc(), font='Courier 10', fill='red', tag='animate')
Project: quartz-browser    Author: ksharindam    | Project source | File source
def print_status(progress, file_size, start):
    """
    This function - when passed as `on_progress` to `Video.download` - prints
    out the current download progress.

    :params progress:
        The number of bytes downloaded so far.
    :params file_size:
        The total size of the video.
    :params start:
        The time when the download started.
    """

    percent_done = int(progress) * 100. / file_size
    done = int(50 * progress / int(file_size))
    dt = (clock() - start)
    if dt > 0:
        stdout.write("\r  [%s%s][%3.2f%%] %s at %s/s " %
                     ('=' * done, ' ' * (50 - done), percent_done,
                      sizeof(file_size), sizeof(progress // dt)))
    stdout.flush()
Project: TrackToTrip    Author: ruipgil    | Project source | File source
def score(train_labels, train_features, test_labels, test_features, save_file, use_tree=False):
    if use_tree:
        train_clf = Classifier(tree.DecisionTreeClassifier())
    else:
        train_clf = Classifier()

    print train_clf.clf
    print ''

    t_start = time.clock()
    train_clf.learn(train_features, train_labels)
    t_end = time.clock()
    if save_file:
        train_clf.save_to_file(open(save_file, 'w'))

    p_start = time.clock()
    predicted = train_clf.clf.predict(test_features)
    p_end = time.clock()

    test_labels_t = train_clf.labels.transform(test_labels)
    print classification_report(test_labels_t, predicted, target_names=train_clf.labels.classes_)
    print 'Training time: %fs' % (t_end - t_start)
    print 'Predicting time: %fs' % (p_end - p_start)
    print 'Mean squared error: %f' % mean_squared_error(test_labels_t, predicted)
    return train_clf.score(test_features, test_labels)
Project: python-ceph-cfg    Author: oms4suse    | Project source | File source
def _create_check_retry(self, **kwargs):
        """
        Check the mon service is started and responding with time out.

        On heavily overloaded hardware it can take a while for the mon service
        to start
        """
        # Number of seconds before a time out.
        timeout = 60
        time_start = time.clock()
        time_end = time_start + timeout
        if self._create_check_responding(**kwargs):
            return True
        while time.clock() < time_end:
            log.info("Mon service did not start up, waiting.")
            time.sleep(5)
            log.info("Retrying mon service.")
            if self._create_check_responding(**kwargs):
                return True
        log.error("Timed out starting mon service")
        raise Error("Failed to get mon service status after '%s' seconds." % (timeout))
Project: mobot    Author: JokerQyou    | Project source | File source
def __call__(self, func, *args, **kwargs):
        '''Used to process callbacks in throughput-limiting thread
        through queue.
        Args:
            func (:obj:`callable`): the actual function (or any callable) that
                is processed through queue.
            *args: variable-length `func` arguments.
            **kwargs: arbitrary keyword-arguments to `func`.
        Returns:
            None
        '''
        if not self.is_alive() or self.__exit_req:
            raise DelayQueueError('Could not process callback in stopped thread')
        self._queue.put((func, args, kwargs))


# The most straightforward way to implement this is to use 2 sequential delay
# queues, like on classic delay chain schematics in electronics.
# So, message path is:
# msg --> group delay if group msg, else no delay --> normal msg delay --> out
# This way the OS threading scheduler takes care of timing accuracy.
# (see time.time, time.clock, time.perf_counter, time.sleep @ docs.python.org)
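As an illustration of the delay-chain idea described in the comment above, here is a minimal, hypothetical sketch (the names, delays, and message format are invented for the example and are not taken from the mobot project):

import queue
import threading
import time

GROUP_DELAY = 0.5    # hypothetical spacing applied only to group messages, seconds
NORMAL_DELAY = 0.1   # hypothetical spacing applied to every message, seconds

def stage(in_q, forward, delay_for):
    # Forward every item from in_q to `forward`, sleeping delay_for(item) first.
    while True:
        item = in_q.get()
        if item is None:             # sentinel: shut the stage down
            break
        time.sleep(delay_for(item))
        forward(item)

group_q, normal_q = queue.Queue(), queue.Queue()
# msg --> group delay if group msg, else no delay --> normal msg delay --> out
threading.Thread(target=stage,
                 args=(group_q, normal_q.put,
                       lambda m: GROUP_DELAY if m.get('group') else 0.0),
                 daemon=True).start()
threading.Thread(target=stage,
                 args=(normal_q, print,
                       lambda m: NORMAL_DELAY),
                 daemon=True).start()

group_q.put({'group': True, 'text': 'hello group'})
group_q.put({'group': False, 'text': 'hello user'})
time.sleep(1.5)  # let the chain drain before the interpreter exits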
Project: monogreedy    Author: jinjunqi    | Project source | File source
def validate(beam_searcher, dataset, logger=None, res_file=None):
    if logger is None:
        logger = Logger(None)
    # generating captions
    all_candidates = []
    tic = time.clock()
    for i in xrange(dataset.n_image):
        data = dataset.iterate_batch()  # data: id, img, scene...
        sent = beam_searcher.generate(data[1:])
        cap = ' '.join([dataset.vocab[word] for word in sent])
        print '[{}], id={}, \t\t {}'.format(i, data[0], cap)
        all_candidates.append({'image_id': data[0], 'caption': cap})
    toc = time.clock() - tic
    running_time = toc / 5000.0

    if res_file is None:
        res_file = 'tmp.json'
    json.dump(all_candidates, open(res_file, 'w'))
    gt_file = osp.join(dataset.data_dir, 'captions_'+dataset.data_split+'.json')
    scores = evaluate(gt_file, res_file, logger)
    if res_file == 'tmp.json':
        os.system('rm -rf %s' % res_file)

    return scores, running_time
Project: hostapd-mana    Author: adde88    | Project source | File source
def trace_dispatch(self, frame, event, arg):
        timer = self.timer
        t = timer()
        t = t[0] + t[1] - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame,t):
            t = timer()
            self.t = t[0] + t[1]
        else:
            r = timer()
            self.t = r[0] + r[1] - t # put back unrecorded delta

    # Dispatch routine for best timer program (return = scalar, fastest if
    # an integer but float works too -- and time.clock() relies on that).
Project: dragonchain    Author: dragonchain    | Project source | File source
def calc_latency(self, node_to_calc):
        """ calculate latency of given node, remove if node is not connected """
        start = time.clock()
        success = False
        try:
            for i in range(5):
                node_to_calc.client.ping()

            node_to_calc.latency = ((time.clock() - start) / 5) * 1000  # converting to ms
            success = True
        except:  # node not connected
            print(str(sys.exc_info()))
            logger().warning("error attempting to ping an unregistered node: disconnecting node")
            if node_to_calc in self.connections:  # if a registered node disconnects
                self.connections.remove(node_to_calc)
                self.remove_from_peer_dict(node_to_calc)
            try:
                net_dao.update_failed_ping(node_to_calc)
            except Exception as ex:
                template = "An exception of type {0} occurred. Arguments:\n{1!r}"
                message = template.format(type(ex).__name__, ex.args)
                logger().warning(message)

        return success
Project: geocoder-ie    Author: devgateway    | Project source | File source
def extract(sentences, ignore_entities=get_ignore_entities()):
    tic = time.clock()
    nlp = pycorenlp.corenlp.StanfordCoreNLP("http://{0}:{1}/".format(get_ner_host(), get_ner_port()))
    extraction = []

    for s in sentences:
        output = nlp.annotate(s, properties={"annotators": "ner", "outputFormat": "json"})
        locations_found = [(t['originalText']) for t in output["sentences"][0]["tokens"] for item in output if
                           t['ner'] in ['LOCATION', 'PERSON'] and t[
                               'originalText'].lower() not in ignore_entities]
        if len(locations_found) > 0:
            extraction.append(({'text': s, 'entities': locations_found}))

    tac = time.clock()
    logger.info('NER extraction took {time}ms'.format(time=tac - tic))
    return extraction


# Perform natural language processing to text, get annotated entities and entities relations
Project: geocoder-ie    Author: devgateway    | Project source | File source
def extract_ner(sentences, ignore_entities=get_ignore_entities()):
    try:
        tagger = Ner(host=get_ner_host(), port=get_ner_port())
        tic = time.clock()
        extraction = []

        for s, file in sentences:
            output = tagger.get_entities(s.replace('\n', ' ').replace('\r', ''))
            locations_found = [text for text, tag in output if
                               tag in ['LOCATION', 'PERSON'] and text.lower() not in ignore_entities]

            if len(locations_found) > 0:
                extraction.append(({'text': {'text': s, 'file': file}, 'entities': locations_found}))

        tac = time.clock()
        logger.info('NER extraction took {time}ms'.format(time=tac - tic))
        return extraction
    except Exception as detail:
        logger.error('Error during ner extraction {}'.format(detail))
        raise
Project: danmu-bilibili    Author: saberxxy    | Project source | File source
def main():
    startTime = time.clock()
    start = getMaxAvId()
    if start == None:  # no av id stored yet, start from the beginning
        start = 1
    # print(start)
    print ("av start: ", start)
    stop = int(input("av stop: "))

    # print(123)

    for i in range(start+1, stop+1):
        saveDanmu(i)
        # print(i)
    stopTime = time.clock()
    print ((stopTime - startTime)/60,)
    print ("mins")
Project: tvlinker    Author: ozmartian    | Project source | File source
def download_file(self) -> None:
        req = requests.get(self.download_link, stream=True, proxies=self.proxy)
        filesize = int(req.headers['Content-Length'])
        filename = os.path.basename(self.download_path)
        downloadedChunk = 0
        blockSize = 8192
        start = time.clock()
        with open(self.download_path, 'wb') as f:
            for chunk in req.iter_content(chunk_size=blockSize):
                if self.cancel_download or not chunk:
                    req.close()
                    break
                f.write(chunk)
                downloadedChunk += len(chunk)
                progress = float(downloadedChunk) / filesize
                self.dlProgress.emit(progress * 100)
                dlspeed = downloadedChunk//(time.clock() - start) / 1000
                progressTxt = '<b>Downloading {0}</b>:<br/>{1} of <b>{3}</b> [{2:.2%}] [{4} kbps]' \
                    .format(filename, downloadedChunk, progress, size(filesize, system=alternative), dlspeed)
                self.dlProgressTxt.emit(progressTxt)
        self.dlComplete.emit()
Project: muscle-plotter    Author: PedroLopes    | Project source | File source
def nextFrame(self, arg):
        # (arg is the frame number, which we don't need)
        self.frame_c += 1
        print("computed frame:" + str(self.frame_c))
        if WindSim.performanceData and (arg % 30 == 0) and (arg > 0):
            endTime = clock()
            print ("Took {0} seconds".format(endTime - self.startTime))
            print ("%1.1f" % (30 / (endTime - self.startTime)),
                   'frames per second')
            self.startTime = endTime
            if (WindSim.save):
                frameName = 'output/frame%04d.png' % arg
                plt.savefig(frameName)
                self.frameList.write(frameName + '\n')
        for step in range(WindSim.step_range):
            self.stream()
            self.collide()
        self.fluidImage.set_array(self.curl(WindSim.ux, WindSim.uy))
        return (self.fluidImage, self.barrierImage)
Project: OSPTF    Author: xSploited    | Project source | File source
def TestConnection(dbname):
    # Create the ADO connection object, and link the event
    # handlers into it
    c = DispatchWithEvents("ADODB.Connection", ADOEvents)

    # Initiate the asynchronous open
    dsn = "Driver={Microsoft Access Driver (*.mdb)};Dbq=%s" % dbname
    user = "system"
    pw = "manager"
    c.Open(dsn, user, pw, constants.adAsyncConnect)

    # Sit in a loop, until our event handler (above) sets the
    # "finished" flag or we time out.
    end_time = time.clock() + 10
    while time.clock() < end_time:
        # Pump messages so that COM gets a look in
        pythoncom.PumpWaitingMessages()
    if not finished:
        print "XXX - Failed to connect!"
Project: OSPTF    Author: xSploited    | Project source | File source
def WaitWhileProcessingMessages(event, timeout = 2):
    start = time.clock()
    while True:
        # Wake 4 times a second - we can't just specify the
        # full timeout here, as then it would reset for every
        # message we process.
        rc = win32event.MsgWaitForMultipleObjects( (event,), 0,
                                250,
                                win32event.QS_ALLEVENTS)
        if rc == win32event.WAIT_OBJECT_0:
            # event signalled - stop now!
            return True
        if (time.clock() - start) > timeout:
            # Timeout expired.
            return False
        # must be a message.
        pythoncom.PumpWaitingMessages()
Project: nojs    Author: chrisdickinson    | Project source | File source
def WaitUntil(predicate, timeout_seconds=1):
  """Blocks until the provided predicate (function) is true.

  Returns:
    Whether the provided predicate was satisfied once (before the timeout).
  """
  start_time = time.clock()
  sleep_time_sec = 0.025
  while True:
    if predicate():
      return True

    if time.clock() - start_time > timeout_seconds:
      return False

    time.sleep(sleep_time_sec)
    sleep_time_sec = min(1, sleep_time_sec * 2)  # Don't wait more than 1 sec.


# Implementation of chrome_test_server_spawner.PortForwarder that doesn't
# forward ports. Instead the tests are expected to connect to the host IP
# address inside the virtual network provided by qemu. qemu will forward
# these connections to the corresponding localhost ports.
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def __init__(self, video_src = 0, 
                 interactive = True, 
                 video = 'chess.avi', fallback = 'synth:bg=./data/hi.jpg:noise=0.05', nFrames = 500):
        cam = create_capture(video_src, fallback=fallback)
        if not cam:
            print("Problem initialising cam")

        vwriter = VideoWriter(video)
        run = True
        t = clock()
        frameCounter = nFrames
        while frameCounter>0:
            ret, img = cam.read()
            vwriter.addFrame(img, width=1920)
            frameCounter-=1
            print("%d" % frameCounter)
        print("Created chessboard video : saved %d frames to %s" % (nFrames, video))
        vwriter.finalise()


#####################################################################################################################
# Tutorials
#####################################################################################################################

# Placeholder for snippets used in development taken from the tutorials.
Project: cupy    Author: cupy    | Project source | File source
def seed(self, seed=None):
        """Resets the state of the random number generator with a seed.

        .. seealso::
            :func:`cupy.random.seed` for full documentation,
            :meth:`numpy.random.RandomState.seed`

        """
        if seed is None:
            try:
                seed_str = binascii.hexlify(os.urandom(8))
                seed = numpy.uint64(int(seed_str, 16))
            except NotImplementedError:
                seed = numpy.uint64(time.clock() * 1000000)
        else:
            seed = numpy.asarray(seed).astype(numpy.uint64, casting='safe')

        curand.setPseudoRandomGeneratorSeed(self._generator, seed)
        curand.setGeneratorOffset(self._generator, 0)
Project: cupy    Author: cupy    | Project source | File source
def seed(seed=None):
    """Resets the state of the random number generator with a seed.

    This function resets the state of the global random number generator for
    the current device. Be careful that generators for other devices are not
    affected.

    Args:
        seed (None or int): Seed for the random number generator. If ``None``,
            it uses :func:`os.urandom` if available or :func:`time.clock`
            otherwise. Note that this function does not support seeding by an
            integer array.

    """
    get_random_state().seed(seed)


# CuPy specific functions
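A quick usage sketch of the seeding API shown above (assuming a working CuPy installation with a CUDA device; the values are illustrative):

import cupy

cupy.random.seed(0)               # reset the generator for the current device
x = cupy.random.uniform(size=3)   # subsequent draws are deterministic for a fixed seed
print(x)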
Project: bpy_lambda    Author: bcongdon    | Project source | File source
def flatwrite(me, uvs, matimage = False) :
    #t = clock()
    newuvs = []
    #print('uv funcinput : %s'%(len(uvs)))
    # uvi : uvlayer id  uvlist : uv coordinates list
    for uvi, uvlist in enumerate(uvs) :
        #print('uvlist input : %s'%(len(uvlist)))
        #print(uvlist[0:5])
        uv = me.uv_textures.new()
        uv.name = 'UV%s'%uvi
        uvlayer = me.uv_layers[-1].data
        # flatuv = awaited uvlist length
        #flatuv = list( range(len(uvlayer) * 2) )
        #print('uvlist need : %s'%(len(flatuv)))
        uvlayer.foreach_set('uv',uvlist)

        newuvs.append(uv)
    #print('uvs in ',clock() - t)
    return newuvs

# faces are square or rectangular,
# any orientation
# vert order width then height 01 and 23 = x 12 and 03 = y
# normal default when face has been built
Project: bpy_lambda    Author: bcongdon    | Project source | File source
def BENCH():
    debug_prints(func="BENCH", text="BEGIN BENCHMARK")
    bt0 = time.clock()
    # make a big list
    tsize = 25
    tlist = []
    for x in range(tsize):
        for y in range(tsize):
            for z in range(tsize):
                tlist.append((x, y, z))
                tlist.append((x, y, z))

    # function to test
    bt1 = time.clock()
    bt2 = time.clock()
    btRUNb = bt2 - bt1
    btRUNa = bt1 - bt0

    debug_prints(func="BENCH", text="SETUP TIME", var=btRUNa)
    debug_prints(func="BENCH", text="BENCHMARK TIME", var=btRUNb)
    debug_print_vars(
            "\n[BENCH]\n",
            "GRIDSIZE: ", tsize, ' - ', tsize * tsize * tsize
            )
Project: kripodb    Author: 3D-e-Chem    | Project source | File source
def _ingest_pairs(self, pairs, oid2nid, frame_size, limit, single_sided):
        oid2nid_v = np.vectorize(oid2nid.get)
        # the whole pairs set does not fit in memory, so split it into frames of `frame_size` pairs each.
        for start in range(0, limit, frame_size):
            stop = frame_size + start
            t1 = process_time()
            six.print_('Fetching pairs {0}:{1} of {2} ... '.format(start, stop, limit), end='', flush=True)
            raw_frame = pairs.read(start=start, stop=stop)
            t2 = process_time()
            six.print_('{0}s, Parsing ... '.format(int(t2 - t1)), flush=True)
            frame = self._translate_frame(raw_frame, oid2nid_v, single_sided)
            t3 = process_time()
            six.print_('Writing ... '.format(int(t3 - t2)), flush=True)
            # alternate direction, to make use of cached chunks of prev frame
            self._ingest_pairs_frame(frame)
            del frame
            t4 = process_time()
            six.print_('{0}s, Done with {1}:{2} in {3}s'.format(int(t4 - t3), start, stop, int(t4 - t1)), flush=True)
Project: Parallel-Processing-Nadig    Author: madhug-nadig    | Project source | File source
def parallel_cosine_similarity(self,x,y):

        pool = mp.Pool(processes= 16)
        s = time.clock()
        nums = pool.starmap(self.multplierr, zip(x,y))
        numerator = sum(nums)

        #x_sqr = pool.starmap( self.multplierr, zip(x,x))
        #y_sqr = pool.starmap( self.multplierr, zip(y,y))

        #denominator = round(sqrt(sum(x_sqr))) * round(sqrt(sum(y_sqr)))
        denominator = self.square_rooted(x)*self.square_rooted(y)

        e = time.clock()
        print("Parallel Cosine Exec Time: ", e-s)
        return round(numerator/float(denominator),3)

    #JACCARD SIMILARITY

    #Serial Jaccard Similarity
Project: Parallel-Processing-Nadig    Author: madhug-nadig    | Project source | File source
def parallel_jaccard_similarity(self,x,y):

        p = 16
        pool = mp.Pool(processes= p)

        chunk_X = []
        chunk_Y = []

        for i in range(0, len(x), p):

            chunk_X.append(x[int(i):int((i+1)*p)])
            chunk_Y.append(y[int(i):int((i+1)*p)])

        s = time.clock()

        intersection_cardinality = sum(pool.starmap(self.interc_card_locl, zip(chunk_X,chunk_Y)))
        union_cardinality = sum(pool.starmap(self.union_card_locl, zip(chunk_X,chunk_Y)))
        print(intersection_cardinality, union_cardinality)
        e = time.clock()
        print("Parallel Jaccard Exec Time: ", e-s)
        return intersection_cardinality/float(union_cardinality)
Project: Taigabot    Author: FrozenPigs    | Project source | File source
def wolframalpha(inp, bot=None):
    """wa <query> -- Computes <query> using Wolfram Alpha."""
    server = 'http://api.wolframalpha.com/v2/query.jsp'
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return formatting.output('WolframAlpha', ['error: missing api key'])

    import time
    start = time.clock()

    scantimeout = '3.0'
    podtimeout = '4.0'
    formattimeout = '8.0'
    async = 'True'

    waeo = WolframAlphaEngine(api_key, server)

    waeo.ScanTimeout = scantimeout
    waeo.PodTimeout = podtimeout
    waeo.FormatTimeout = formattimeout
    waeo.Async = async
Project: raiden    Author: raiden-network    | Project source | File source
def greenlet_profiler(event, args):
    if event in ('switch', 'throw'):  # both events are in the target context
        now = clock()

        try:
            # we need to account the time for the user function
            frame = sys._getframe(1)
        except ValueError:
            # the first greenlet.switch() and when the greenlet is being
            # destroyed there is nothing more in the stack, so this function is
            # the first function called
            frame = sys._getframe(0)

        origin, target = args

        origin_state = _state[origin]
        target_state = ensure_thread_state(target, frame)

        origin_state.switch_enter(now)  # origin is entering the "sleep" state
        target_state.switch_exit(now)   # target might be leaving the "sleep"
Project: pandachaika    Author: pandabuilder    | Project source | File source
def write_file_update_progress(self, cmd: str, callback: Callable, filesize: int=0, blocksize: int=8192, rest: bool=None) -> str:
        self.ftps.voidcmd('TYPE I')  # type: ignore
        with self.ftps.transfercmd(cmd, rest) as conn:  # type: ignore
            self.current_download['filesize'] = filesize
            self.current_download['downloaded'] = 0
            self.current_download['filename'] = cmd.replace('RETR ', '')
            start = time.clock()
            while 1:
                data = conn.recv(blocksize)
                if not data:
                    break
                downloaded = len(data)
                self.current_download['downloaded'] += downloaded
                current = time.clock()
                if current > start:
                    self.current_download['speed'] = self.current_download['downloaded'] / ((current - start) * 1024)
                callback(data)
            self.current_download['filename'] = ''
            self.current_download['speed'] = 0
            self.current_download['filesize'] = 0
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.ftps.voidresp()  # type: ignore
Project: pycos    Author: pgiri    | Project source | File source
def _add_timeout(self, fd):
                if fd._timeout:
                    self._lock.acquire()
                    fd._timeout_id = _time() + fd._timeout + 0.0001
                    i = bisect_left(self._timeouts, (fd._timeout_id, fd))
                    self._timeouts.insert(i, (fd._timeout_id, fd))
                    if self._polling:
                        self.interrupt()
                    self._lock.release()
                else:
                    fd._timeout_id = None
Project: pycos    Author: pgiri    | Project source | File source
def _add_timeout(self, fd):
            if fd._timeout:
                fd._timeout_id = _time() + fd._timeout + 0.0001
                i = bisect_left(self._timeouts, (fd._timeout_id, fd))
                self._timeouts.insert(i, (fd._timeout_id, fd))
            else:
                fd._timeout_id = None
Project: pycos    Author: pgiri    | Project source | File source
def wait(self, timeout=None):
        """Must be used with 'yield' as 'yield cv.wait()'.
        """
        task = Pycos.cur_task(self._scheduler)
        if self._owner != task:
            raise RuntimeError('"%s"/%s: invalid lock release - owned by "%s"/%s' %
                               (task._name, task._id, self._owner._name, self._owner._id))
        assert self._depth > 0
        depth = self._depth
        self._depth = 0
        self._owner = None
        if self._waitlist:
            wake = self._waitlist.pop(0)
            wake._proceed_(True)
        self._notifylist.append(task)
        start = _time()
        if (yield task._await_(timeout)) is None:
            try:
                self._notifylist.remove(task)
            except ValueError:
                pass
            raise StopIteration(False)
        while self._owner is not None:
            self._waitlist.insert(0, task)
            if timeout is not None:
                timeout -= (_time() - start)
                if timeout <= 0:
                    raise StopIteration(False)
                start = _time()
            if (yield task._await_(timeout)) is None:
                try:
                    self._waitlist.remove(task)
                except ValueError:
                    pass
                raise StopIteration(False)
        assert self._depth == 0
        self._owner = task
        self._depth = depth
        raise StopIteration(True)
Project: pycos    Author: pgiri    | Project source | File source
def _suspend(self, task, timeout, alarm_value, state):
        """Internal use only. See sleep/suspend in Task.
        """
        self._lock.acquire()
        if self.__cur_task != task:
            self._lock.release()
            logger.warning('invalid "suspend" - "%s" != "%s"', task, self.__cur_task)
            return -1
        tid = task._id
        if state == Pycos._AwaitMsg_ and task._msgs:
            s, update = task._msgs[0]
            if s == state:
                task._msgs.popleft()
                self._lock.release()
                return update
        if timeout is None:
            task._timeout = None
        else:
            if not isinstance(timeout, (float, int)):
                logger.warning('invalid timeout %s', timeout)
                self._lock.release()
                return -1
            if timeout <= 0:
                self._lock.release()
                return alarm_value
            else:
                task._timeout = _time() + timeout + 0.0001
                heappush(self._timeouts, (task._timeout, tid, alarm_value))
        self._scheduled.discard(tid)
        self._suspended.add(tid)
        task._state = state
        self._lock.release()
        return 0
Project: pycos    Author: pgiri    | Project source | File source
def _add_timeout(self, fd):
                if fd._timeout:
                    self._lock.acquire()
                    fd._timeout_id = _time() + fd._timeout + 0.0001
                    i = bisect_left(self._timeouts, (fd._timeout_id, fd))
                    self._timeouts.insert(i, (fd._timeout_id, fd))
                    if self._polling:
                        self.interrupt()
                    self._lock.release()
                else:
                    fd._timeout_id = None
Project: pycos    Author: pgiri    | Project source | File source
def acquire(self, blocking=True, timeout=-1):
        """Must be used with 'yield' as 'yield rlock.acquire()'.
        """
        if not self._scheduler:
            self._scheduler = Pycos.scheduler()
        task = Pycos.cur_task(self._scheduler)
        if self._owner == task:
            assert self._depth > 0
            self._depth += 1
            raise StopIteration(True)
        if not blocking and self._owner is not None:
            raise StopIteration(False)
        if timeout < 0:
            timeout = None
        while self._owner is not None:
            if timeout is not None:
                if timeout <= 0:
                    raise StopIteration(False)
                start = _time()
            self._waitlist.append(task)
            if (yield task._await_(timeout)) is None:
                try:
                    self._waitlist.remove(task)
                except ValueError:
                    pass
            if timeout is not None:
                timeout -= (_time() - start)
        assert self._depth == 0
        self._owner = task
        self._depth = 1
        raise StopIteration(True)
Project: pycos    Author: pgiri    | Project source | File source
def acquire(self, blocking=True, timeout=-1):
        """Must be used with 'yield' as 'yield cv.acquire()'.
        """
        if not self._scheduler:
            self._scheduler = Pycos.scheduler()
        task = Pycos.cur_task(self._scheduler)
        if self._owner == task:
            self._depth += 1
            raise StopIteration(True)
        if not blocking and self._owner is not None:
            raise StopIteration(False)
        if timeout < 0:
            timeout = None
        while self._owner is not None:
            if timeout is not None:
                if timeout <= 0:
                    raise StopIteration(False)
                start = _time()
            self._waitlist.append(task)
            if (yield task._await_(timeout)) is None:
                try:
                    self._waitlist.remove(task)
                except ValueError:
                    pass
            if timeout is not None:
                timeout -= (_time() - start)
        assert self._depth == 0
        self._owner = task
        self._depth = 1
        raise StopIteration(True)
Project: pycos    Author: pgiri    | Project source | File source
def wait(self, timeout=None):
        """Must be used with 'yield' as 'yield cv.wait()'.
        """
        task = Pycos.cur_task(self._scheduler)
        if self._owner != task:
            raise RuntimeError('"%s"/%s: invalid lock release - owned by "%s"/%s' %
                               (task._name, task._id, self._owner._name, self._owner._id))
        assert self._depth > 0
        depth = self._depth
        self._depth = 0
        self._owner = None
        if self._waitlist:
            wake = self._waitlist.pop(0)
            wake._proceed_(True)
        self._notifylist.append(task)
        start = _time()
        if (yield task._await_(timeout)) is None:
            try:
                self._notifylist.remove(task)
            except ValueError:
                pass
            raise StopIteration(False)
        while self._owner is not None:
            self._waitlist.insert(0, task)
            if timeout is not None:
                timeout -= (_time() - start)
                if timeout <= 0:
                    raise StopIteration(False)
                start = _time()
            if (yield task._await_(timeout)) is None:
                try:
                    self._waitlist.remove(task)
                except ValueError:
                    pass
                raise StopIteration(False)
        assert self._depth == 0
        self._owner = task
        self._depth = depth
        raise StopIteration(True)
Project: pycos    Author: pgiri    | Project source | File source
def receive(self, category=None, timeout=None, alarm_value=None):
        """Similar to 'receive' of Task, except it retrieves (waiting, if
        necessary) messages in given 'category'.
        """
        # assert Pycos.cur_task() == self._task
        c = self._categories.get(category, None)
        if c:
            msg = c.popleft()
            raise StopIteration(msg)
        if timeout:
            start = _time()
        while 1:
            msg = yield self._task.receive(timeout=timeout, alarm_value=alarm_value)
            if msg == alarm_value:
                raise StopIteration(msg)
            for categorize in self._categorize:
                c = categorize(msg)
                if c == category:
                    raise StopIteration(msg)
                if c is not None:
                    bucket = self._categories.get(c, None)
                    if not bucket:
                        bucket = self._categories[c] = collections.deque()
                    bucket.append(msg)
                    break
            else:
                self._categories[None].append(msg)
            if timeout:
                now = _time()
                timeout -= now - start
                start = now
Project: tsproxy    Author: WPO-Foundation    | Project source | File source
def __init__(self, direction, latency, kbps):
    self.direction = direction
    self.latency = latency
    self.kbps = kbps
    self.queue = Queue.Queue()
    self.last_tick = time.clock()
    self.next_message = None
    self.available_bytes = .0
    self.peer = 'server'
    if self.direction == self.PIPE_IN:
      self.peer = 'client'
Project: tsproxy    Author: WPO-Foundation    | Project source | File source
def tick(self):
    global connections
    global flush_pipes
    processed_messages = False
    now = time.clock()
    try:
      if self.next_message is None:
        self.next_message = self.queue.get_nowait()

      # Accumulate bandwidth if an available packet/message was waiting since our last tick
      if self.next_message is not None and self.kbps > .0 and self.next_message['time'] <= now:
        elapsed = now - self.last_tick
        accumulated_bytes = elapsed * self.kbps * 1000.0 / 8.0
        self.available_bytes += accumulated_bytes

      # process messages as long as the next message is sendable (latency or available bytes)
      while (self.next_message is not None) and\
          (flush_pipes or ((self.next_message['time'] <= now) and
                          (self.kbps <= .0 or self.next_message['size'] <= self.available_bytes))):
        self.queue.task_done()
        processed_messages = True
        if self.kbps > .0:
          self.available_bytes -= self.next_message['size']
        self.SendPeerMessage(self.next_message)
        self.next_message = None
        self.next_message = self.queue.get_nowait()
    except:
      pass

    # Only accumulate bytes while we have messages that are ready to send
    if self.next_message is None or self.next_message['time'] > now:
      self.available_bytes = .0
    self.last_tick = now

    return processed_messages


########################################################################################################################
#   Threaded DNS resolver
########################################################################################################################
Project: yolo_tensorflow    Author: hizhangp    | Project source | File source
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()
Project: socket-http    Author: thisforeda    | Project source | File source
def test_module(module,url,times):
        timelist = []
        for index in range(times):
                try :
                        start = time.clock()
                        obj = module.urlopen(url)
                        timelist.append((time.clock()-start))
                except:
                        continue
                #if isinstance(obj,httpx.ResponseHandler):
                #        print(obj.http_header('statuscode'))
        return timelist
Project: tripletloss    Author: luhaofang    | Project source | File source
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def _get_time_times(timer=os.times):
        t = timer()
        return t[0] + t[1]

# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution
# Furthermore, on a POSIX system, clock(3) returns microseconds, which
# wrap around after about 36 minutes.
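For comparison, a resource-based timer along the lines this comment describes might look like the following on a POSIX system (a sketch modelled on profile.py; ru_utime and ru_stime are the first two fields of the getrusage result):

import resource

def _get_time_resource(timer=resource.getrusage, RUSAGE_SELF=resource.RUSAGE_SELF):
    t = timer(RUSAGE_SELF)
    return t[0] + t[1]   # user CPU time + system CPU time, in seconds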
Project: stackimpact-python    Author: stackimpact    | Project source | File source
def process_sample(self, signal_frame):
        if self.profile:
            start = time.clock()
            if signal_frame:
                stack = self.recover_stack(signal_frame)
                if stack:
                    self.update_profile(self.profile, stack)

                stack = None

            self.profile._overhead += (time.clock() - start)
Project: pycraft    Author: traverseda    | Project source | File source
def process_queue(self, ticks_per_sec):
        """Process the entire queue while taking periodic breaks. This allows
        the game loop to run smoothly. The queue contains calls to
        _show_block() and _hide_block() so this method should be called if
        add_block() or remove_block() was called with immediate=False
        """
        start = time.clock()
        while self.show_hide_queue and time.clock() - start < 1.0 / ticks_per_sec:
            self._dequeue()
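A hypothetical usage sketch of the pattern above (the surrounding update() method and the TICKS_PER_SEC constant are invented for illustration and are not taken from pycraft):

TICKS_PER_SEC = 60  # hypothetical tick rate of the game loop

def update(self, dt):
    # Called once per frame: drain deferred _show_block()/_hide_block() work
    # queued by add_block()/remove_block() with immediate=False, but never for
    # longer than one tick, so rendering stays smooth.
    self.process_queue(TICKS_PER_SEC)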