Python multiprocessing module: SimpleQueue() example source code

We extracted the following 7 code examples from open-source Python projects to illustrate how to use multiprocessing.SimpleQueue().
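
Before looking at the project code, here is a minimal, self-contained sketch of the basic pattern (illustrative only, not taken from any of the projects below). SimpleQueue is an unbounded queue built on a pipe that supports only put(), get(), and empty(), which makes it a lightweight way to pass results from a child process back to the parent.

# Minimal SimpleQueue sketch (illustrative, not from the projects below)
from multiprocessing import Process, SimpleQueue


def worker(queue):
    # Child process: push a few results back to the parent.
    for i in range(3):
        queue.put(i * i)
    queue.put(None)  # sentinel telling the parent we are done


if __name__ == "__main__":
    queue = SimpleQueue()
    p = Process(target=worker, args=(queue,))
    p.start()
    while True:
        item = queue.get()  # blocks until an item is available
        if item is None:
            break
        print(item)
    p.join()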

Project: async-ipython-magic    Author: leriomaggio    | project source | file source
def __init__(self):
        super(ExecutionHandler, self).__init__(factory=SimpleQueue)
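
The base class that consumes the factory argument is not shown in this snippet. A hypothetical sketch of how such a handler might use it (the BaseHandler name and its behaviour are assumptions, not the actual async-ipython-magic code):

class BaseHandler:
    def __init__(self, factory):
        # Call the factory with no arguments to build the backing queue, so any
        # queue-like constructor (SimpleQueue, multiprocessing.Queue, ...) works.
        self.queue = factory()
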
Project: GulpIO    Author: TwentyBN    | project source | file source
def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            # index_queue carries batch indices to the worker processes;
            # data_queue carries collated batches back to this iterator.
            self.index_queue = SimpleQueue()
            self.data_queue = SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                Process(
                    target=_worker_loop,
                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
                for _ in range(self.num_workers)]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
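
The _worker_loop target referenced above is not included in this snippet. A simplified, hypothetical sketch of what such a loop typically does (the real GulpIO/PyTorch version also handles exceptions and shutdown): read batch indices from index_queue, build a batch with collate_fn, and push it onto data_queue.

# Simplified sketch of a worker loop; not the actual GulpIO implementation.
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    while True:
        job = index_queue.get()  # (idx, batch_indices), or None to stop
        if job is None:
            break
        idx, batch_indices = job
        samples = collate_fn([dataset[i] for i in batch_indices])
        data_queue.put((idx, samples))
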
Project: ouroboros    Author: pybee    | project source | file source
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
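
None of this internal plumbing is visible to callers: work items go into the bounded call queue and results come back through the SimpleQueue above, while user code only deals with futures. Typical usage of the executor looks like this (illustrative example, not part of the ouroboros source):

from concurrent.futures import ProcessPoolExecutor


def square(x):
    return x * x


if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as executor:
        print(list(executor.map(square, range(10))))  # [0, 1, 4, ..., 81]
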
Project: RD-MCL    Author: biologyguy    | project source | file source
def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
        self.db_file = db_file
        self.connection = sqlite3.connect(self.db_file)
        self.broker_cursor = self.connection.cursor()
        self.broker_queue = SimpleQueue()
        self.broker = None
        self.lock_wait_time = lock_wait_time
        # ToDo: Set up a process pool to limit number of query threads
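
The broker process itself is not shown in this snippet. One plausible (hypothetical) shape for it is a loop that drains SQL statements from broker_queue and executes them on the single shared connection, serialising database access across worker processes:

# Hypothetical broker loop; not the actual RD-MCL implementation.
def broker_loop(broker_queue, cursor, connection):
    while True:
        sql = broker_queue.get()  # blocks until a statement arrives
        if sql is None:  # sentinel used here to stop the broker
            break
        cursor.execute(sql)
        connection.commit()
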
Project: kbe_server    Author: xiaohaoppy    | project source | file source
def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
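
On the other end of self._result_queue, a queue-management thread repeatedly calls get() and resolves the matching future. A heavily simplified sketch of that loop (not the actual CPython code, which also handles shutdown and broken pools) could look like:

# Simplified result-draining loop, assuming (work_id, exception, result) tuples.
def drain_results(result_queue, pending_work_items):
    while pending_work_items:
        work_id, exception, result = result_queue.get()  # blocks on the SimpleQueue
        work_item = pending_work_items.pop(work_id)
        if exception is not None:
            work_item.future.set_exception(exception)
        else:
            work_item.future.set_result(result)
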
Project: misp-privacy-aware-exchange    Author: MISP    | project source | file source
def argument_matching(crypto, values=args.attribute):
    attributes = OrderedDict(pair.split("=") for pair in values)
    match = SimpleQueue()
    matching(attributes, match, Lock(), crypto)

    # Print matches (Easy to modify)
    for item in iterator_result(match):
        print(item)
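
iterator_result is a project helper that is not shown here. A plausible (hypothetical) implementation simply drains the SimpleQueue until it is empty:

# Hypothetical iterator_result; the actual MISP helper is not shown.
def iterator_result(queue):
    while not queue.empty():
        yield queue.get()
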
Project: misp-privacy-aware-exchange    Author: MISP    | project source | file source
def redis_matching(crypto):
    # Data is enriched in logstash
    conf = Configuration()
    r = redis.StrictRedis(host=conf['redis']['host'], port=conf['redis']['port'], db=conf['redis']['db'])

    lock = Lock()
    match = SimpleQueue()
    if args.multiprocess > 0:
        n = min(args.multiprocess, cpu_count()-1)
        processes = list()
        for i in range(n):
            process = Process(target=redis_matching_process, args=(r, match, lock, crypto))
            process.start()
            processes.append(process)

        # Print match(es)
        print_process = Process(target=print_queue_process, args=(match,))
        print_process.start()
        for process in processes:
            process.join()
        print_process.terminate()
    else:
        redis_matching_process(r, match, lock, crypto)
        for item in iterator_result(match):
            print(item)
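
Neither redis_matching_process nor print_queue_process is shown in this snippet. A hedged sketch of the printer process, which simply blocks on the match queue until it is terminated by the parent, might be:

# Hypothetical print_queue_process; the real MISP implementation is not shown.
def print_queue_process(match):
    while True:
        print(match.get())  # blocks until a worker puts a match on the queue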

# For Benchmarking