Python concurrent.futures module: FIRST_COMPLETED example source code

We extracted the following 37 code examples from open-source Python projects to illustrate how to use concurrent.futures.FIRST_COMPLETED.
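
A minimal, self-contained sketch of the basic call before the project samples (the slow_task helper is illustrative and not taken from any of the projects): with return_when=FIRST_COMPLETED, concurrent.futures.wait returns as soon as any one of the given futures finishes and hands back a (done, not_done) pair.

import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def slow_task(seconds):
    time.sleep(seconds)
    return seconds

with ThreadPoolExecutor(max_workers=2) as executor:
    futures = [executor.submit(slow_task, s) for s in (0.1, 2.0)]
    # returns once the 0.1s task completes; the 2.0s task is still pending
    done, not_done = wait(futures, return_when=FIRST_COMPLETED)
    print(len(done), len(not_done))  # typically prints: 1 1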

Project: batch-scoring    Author: datarobot    | Project source | File source
def perform_requests(self):
        signal.signal(signal.SIGINT, self.exit_fast)
        signal.signal(signal.SIGTERM, self.exit_fast)

        self.state = b'E'
        for q_batch in self.get_batch():
            for (_, _) in self.split_batch(q_batch):
                if self.state != b"R":
                    self.state = b'R'
                yield
                continue

        #  wait for all batches to finish before returning
        self.state = b'W'
        while self.futures:
            f_len = len(self.futures)
            self.futures = [i for i in self.futures if not i.done()]
            if f_len != len(self.futures):
                self.ui.debug('Waiting for final requests to finish. '
                              'remaining requests: {}'
                              ''.format(len(self.futures)))
            wait(self.futures, return_when=FIRST_COMPLETED)
        self.state = b'D'
        yield True
Project: deb-python-cassandra-driver    Author: openstack    | Project source | File source
def __init__(self, cluster, hosts, keyspace=None):
        self.cluster = cluster
        self.hosts = hosts
        self.keyspace = keyspace

        self._lock = RLock()
        self._pools = {}
        self._profile_manager = cluster.profile_manager
        self._metrics = cluster.metrics
        self._request_init_callbacks = []
        self._protocol_version = self.cluster.protocol_version

        self.encoder = Encoder()

        # create connection pools in parallel
        self._initial_connect_futures = set()
        for host in hosts:
            future = self.add_or_renew_pool(host, is_host_addition=False)
            if future:
                self._initial_connect_futures.add(future)

        futures = wait_futures(self._initial_connect_futures, return_when=FIRST_COMPLETED)
        while futures.not_done and not any(f.result() for f in futures.done):
            futures = wait_futures(futures.not_done, return_when=FIRST_COMPLETED)

        if not any(f.result() for f in self._initial_connect_futures):
            msg = "Unable to connect to any servers"
            if self.keyspace:
                msg += " using keyspace '%s'" % self.keyspace
            raise NoHostAvailable(msg, [h.address for h in hosts])
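
The driver above keeps re-waiting on the not_done set until either one connection pool comes up or every future has finished. A stripped-down sketch of that retry loop, assuming a hypothetical try_connect stand-in for add_or_renew_pool:

import random
import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def try_connect(host):
    time.sleep(random.random())
    return random.random() < 0.5  # True means the pool came up

with ThreadPoolExecutor() as executor:
    futs = [executor.submit(try_connect, h) for h in ("a", "b", "c")]
    outcome = wait(futs, return_when=FIRST_COMPLETED)
    # keep waiting while nothing has succeeded yet and futures remain
    while outcome.not_done and not any(f.result() for f in outcome.done):
        outcome = wait(outcome.not_done, return_when=FIRST_COMPLETED)
    print("any pool up:", any(f.result() for f in futs if f.done()))
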
Project: uchroma    Author: cyanogen    | Project source | File source
def _get_layers(self):
        """
        Wait for renderers to produce new layers, yields until at least one
        layer is active.
        """
        # schedule tasks to wait on each renderer queue
        for r_idx in range(0, len(self.layers)):
            layer = self.layers[r_idx]

            if layer.waiter is None or layer.waiter.done():
                layer.waiter = ensure_future(self._dequeue(r_idx))

        # async wait for at least one completion
        waiters = [layer.waiter for layer in self.layers]
        if len(waiters) == 0:
            return

        await asyncio.wait(waiters, return_when=futures.FIRST_COMPLETED)

        # check the rest without waiting
        for r_idx in range(0, len(self.layers)):
            layer = self.layers[r_idx]

            if layer.waiter is not None and not layer.waiter.done():
                self._dequeue_nowait(r_idx)
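
The renderer loop above can pass futures.FIRST_COMPLETED to asyncio.wait because asyncio re-exports that constant from concurrent.futures. A minimal standalone sketch of the same pattern (the task names are illustrative):

import asyncio
from concurrent.futures import FIRST_COMPLETED

async def main():
    fast = asyncio.ensure_future(asyncio.sleep(0.1, result="fast"))
    slow = asyncio.ensure_future(asyncio.sleep(2.0, result="slow"))
    # resumes as soon as the first task finishes
    done, pending = await asyncio.wait({fast, slow}, return_when=FIRST_COMPLETED)
    print([t.result() for t in done])  # ['fast']
    for task in pending:
        task.cancel()

asyncio.run(main())
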
Project: deb-python-concurrent.futures    Author: openstack    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: deb-python-concurrent.futures    Author: openstack    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: rci    Author: seecloud    | Project source | File source
def run(self):
        for job in self.jobs:
            task = self.root.loop.create_task(job.run())
            self.task_job_map[task] = job
            self.job_started_cb(job, task)

        while self.task_job_map:
            done, pending = await asyncio.wait(list(self.task_job_map.keys()),
                                               return_when=FIRST_COMPLETED)
            for task in done:
                job = self.task_job_map.pop(task)
                self.job_finished_cb(job, task)
                self.root.start_coro(job.cleanup())
        self.status = "finished"
        logging.info("%s: all jobs finished.", self)
Project: rci    Author: seecloud    | Project source | File source
def wait_fs(self, fs):
        """Wait for futures.

        :param fs: dict where key is future and value is related object
        """
        self.log.debug("Waiting for %s" % fs.values())
        while fs:
            done, pending = yield from asyncio.wait(
                    list(fs.keys()), return_when=futures.FIRST_COMPLETED)
            for fut in done:
                if fut in fs:
                    del fs[fut]
            self.log.debug("Pending %s" % pending)
Project: flasky    Author: RoseOu    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
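
The gthread worker above combines FIRST_COMPLETED with a timeout, so wait can also return with an empty done set when nothing finishes in time. A reduced sketch of that prune-as-they-finish loop, with plain time.sleep standing in for request handling:

import time
from concurrent import futures

pool = futures.ThreadPoolExecutor(max_workers=4)
pending = [pool.submit(time.sleep, s) for s in (0.1, 0.5, 2.0)]

while pending:
    # returns when at least one future finishes or the timeout expires;
    # after a bare timeout, result.done is empty and the loop simply retries
    result = futures.wait(pending, timeout=1.0,
                          return_when=futures.FIRST_COMPLETED)
    for fut in result.done:
        pending.remove(fut)
    print("remaining:", len(pending))
pool.shutdown()
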
Project: zippy    Author: securesystemslab    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: zippy    Author: securesystemslab    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: Price-Comparator    Author: Thejas-1    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
Project: web_ctp    Author: molebot    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: web_ctp    Author: molebot    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: ouroboros    Author: pybee    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: ouroboros    Author: pybee    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: logo-gen    Author: jellene4eva    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
Project: compatify    Author: hatooku    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
Project: Data-visualization    Author: insta-code1    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: Lixiang_zhaoxin    Author: hejaxian    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(0.02)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

            # if the number of connections is less than the max we can handle
            # at the same time, there is no need to wait for one
            if len(self.futures) < self.cfg.threads:
                continue

            result = futures.wait(self.futures, timeout=timeout,
                    return_when=futures.FIRST_COMPLETED)

            if not result.done:
                break
            else:
                for f in result.done:
                    self.futures.remove(f)

        self.tpool.shutdown(False)
        self.poller.close()
Project: iCompleteMe    Author: jerrymarino    | Project source | File source
def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)

        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Project: iCompleteMe    Author: jerrymarino    | Project source | File source
def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)

        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)

        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
Project: python-dse-driver    Author: datastax    | Project source | File source
def __init__(self, cluster, hosts, keyspace=None):
        self.cluster = cluster
        self.hosts = hosts
        self.keyspace = keyspace

        self._lock = RLock()
        self._pools = {}
        self._profile_manager = cluster.profile_manager
        self._metrics = cluster.metrics
        self._request_init_callbacks = []
        self._protocol_version = self.cluster.protocol_version

        self.encoder = Encoder()

        # create connection pools in parallel
        self._initial_connect_futures = set()
        for host in hosts:
            future = self.add_or_renew_pool(host, is_host_addition=False)
            if future:
                self._initial_connect_futures.add(future)

        futures = wait_futures(self._initial_connect_futures, return_when=FIRST_COMPLETED)
        while futures.not_done and not any(f.result() for f in futures.done):
            futures = wait_futures(futures.not_done, return_when=FIRST_COMPLETED)

        if not any(f.result() for f in self._initial_connect_futures):
            msg = "Unable to connect to any servers"
            if self.keyspace:
                msg += " using keyspace '%s'" % self.keyspace
            raise NoHostAvailable(msg, [h.address for h in hosts])
Project: loman    Author: janusassetallocation    | Project source | File source
def _compute_nodes(self, names, raise_exceptions=False):
        LOG.debug('Computing nodes {}'.format(list(map(str, names))))

        futs = {}

        def run(name):
            f, executor_name, args, kwds = self._get_func_args_kwds(name)
            if executor_name is None:
                executor = self.default_executor
            else:
                executor = self.executor_map[executor_name]
            fut = executor.submit(self._eval_node, name, f, args, kwds, raise_exceptions)
            futs[fut] = name

        computed = set()

        for name in names:
            node0 = self.dag.node[name]
            state = node0[_AN_STATE]
            if state == States.COMPUTABLE:
                run(name)

        while len(futs) > 0:
            done, not_done = wait(futs.keys(), return_when=FIRST_COMPLETED)
            for fut in done:
                name = futs.pop(fut)
                node0 = self.dag.node[name]
                value, exc, tb, start_dt, end_dt = fut.result()
                delta = (end_dt - start_dt).total_seconds()
                if exc is None:
                    self._set_state_and_value(name, States.UPTODATE, value)
                    node0[_AN_TIMING] = TimingData(start_dt, end_dt, delta)
                    self._set_descendents(name, States.STALE)
                    for n in self.dag.successors(name):
                        logging.debug(str(name) + ' ' + str(n) + ' ' + str(computed))
                        if n in computed:
                            raise LoopDetectedException("Calculating {} for the second time".format(name))
                        self._try_set_computable(n)
                        node0 = self.dag.node[n]
                        state = node0[_AN_STATE]
                        if state == States.COMPUTABLE and n in names:
                            run(n)
                else:
                    self._set_state_and_value(name, States.ERROR, Error(exc, tb))
                    self._set_descendents(name, States.STALE)
                computed.add(name)
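
loman uses FIRST_COMPLETED to drive a scheduler: each future is mapped back to a node name, and every completion can unlock and submit more work. A compact sketch of that shape, with compute and follow_ups as illustrative stand-ins for the graph logic:

from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def compute(key):
    return key * key

follow_ups = {1: [4], 2: [5]}  # work that becomes runnable once a key finishes

with ThreadPoolExecutor() as executor:
    futs = {executor.submit(compute, k): k for k in (1, 2, 3)}
    while futs:
        done, _ = wait(futs, return_when=FIRST_COMPLETED)
        for fut in done:
            key = futs.pop(fut)
            print(key, "->", fut.result())
            for nxt in follow_ups.get(key, []):
                futs[executor.submit(compute, nxt)] = nxt
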
Project: chihu    Author: yelongyu    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: ShelbySearch    Author: Agentscreech    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: tabmaster    Author: NicolasMinghetti    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: infiblog    Author: RajuKoushik    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: metrics    Author: Jeremy-Friedman    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: metrics    Author: Jeremy-Friedman    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: liberator    Author: libscie    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: djanoDoc    Author: JustinChavez    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: zenchmarks    Author: squeaky-pl    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: django-next-train    Author: bitpixdigital    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
Project: batch-scoring    Author: datarobot    | Project source | File source
def perform_requests(self):
        signal.signal(signal.SIGINT, self.exit_fast)
        signal.signal(signal.SIGTERM, self.exit_fast)

        self.state = b'E'
        for q_batch in self.get_batch():
            for (batch, data) in self.split_batch(q_batch):

                hook = partial(self._response_callback, batch=batch)
                r = requests.Request(
                    method='POST',
                    url=self.endpoint,
                    headers=self.headers,
                    data=data,
                    auth=(self.user, self.api_token),
                    hooks={'response': hook})

                self.n_requests += 1

                while True:
                    self.futures = [i for i in self.futures if not i.done()]
                    if len(self.futures) < self.concurrency:
                        self.state = b'R'
                        f = self._executor.submit(self._request, r)
                        f.add_done_callback(self.request_cb)
                        self.futures.append(f)
                        break
                    else:
                        self.state = b'F'
                        wait(self.futures, return_when=FIRST_COMPLETED)
                yield
        #  wait for all batches to finish before returning
        self.state = b'W'
        while self.futures:
            f_len = len(self.futures)
            self.futures = [i for i in self.futures if not i.done()]
            if f_len != len(self.futures):
                self.ui.debug('Waiting for final requests to finish. '
                              'remaining requests: {}'
                              ''.format(len(self.futures)))
            wait(self.futures, return_when=FIRST_COMPLETED)
        self.state = b'D'
        yield True
Project: Alfred    Author: jkachhadia    | Project source | File source
def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                        return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                        return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)