Python multiprocessing module, Process() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use multiprocessing.Process().

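Before the project examples, here is a minimal baseline sketch (ours, not drawn from any project below): a target function is handed to multiprocessing.Process, the child is started with start(), and the parent blocks on join(). The __main__ guard matters on platforms that spawn rather than fork, since the module is re-imported in the child.

import multiprocessing

def work(name, count):
    # Hypothetical worker; runs in the child process.
    for i in range(count):
        print('%s: %d' % (name, i))

if __name__ == '__main__':
    p = multiprocessing.Process(target=work, args=('worker-1', 3))
    p.start()   # launch the child and invoke work() there
    p.join()    # block until the child exits
    print('child exit code:', p.exitcode)
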
Project: fccforensics    Author: RagtagOpen    | project source | file source
def run(self):
        self.total = self.get_total()
        if not self.total:
            print('error loading document total; using estimate')
            self.total = 5000000

        index_queue = multiprocessing.Queue()

        bulk_index_process = multiprocessing.Process(
            target=self.bulk_index, args=(index_queue,),
        )
        bulk_index_process.start()

        for comment in self.iter_comments():
            self.stats['fetched'] += 1
            if not self.stats['fetched'] % 500:
                print('fetched %s/%s\t%s%%\t%s' % (self.stats['fetched'], self.total,
                    int(self.stats['fetched'] / self.total * 100),
                    comment['date_disseminated']))
            index_queue.put(comment)

        index_queue.put(None)
        bulk_index_process.join()
        return self.stats['fetched']
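
The example above uses a common shutdown idiom: the producer pushes items onto a Queue consumed by a bulk-indexing process, then puts a None sentinel so the consumer knows no more items are coming. Distilled into a minimal sketch (hypothetical names, not from fccforensics):

import multiprocessing

def consumer(queue):
    # iter(queue.get, None) yields items until the None sentinel arrives
    for item in iter(queue.get, None):
        print('consumed', item)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    proc = multiprocessing.Process(target=consumer, args=(q,))
    proc.start()
    for item in range(5):
        q.put(item)
    q.put(None)   # sentinel: tell the consumer to stop
    proc.join()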
Project: PyJFuzz    Author: mseclab    | project source | file source
def __init__(self, configuration):
        self.client_queue = multiprocessing.Queue(0)
        self.apply_patch()
        self.logger = self.init_logger()
        if ["debug", "html", "content_type", "notify", "ports"] not in configuration:
            raise PJFMissingArgument()
        if configuration.debug:
            print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format(
                configuration.ports["servers"]["HTTP_PORT"],
                configuration.ports["servers"]["HTTPS_PORT"]
            ))
        if not configuration.content_type:
            configuration.content_type = "application/json"
        self.config = configuration
        self.json = PJFFactory(configuration)
        self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"])
        self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"])
        self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True})
        self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True})
        if self.config.fuzz_web:
            self.request_checker = Thread(target=self.request_pool, args=())
        self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S")))
Project: ipwb    Author: oduwsdl    | project source | file source
def startReplay(warcFilename):
    global p
    pathOfWARC = os.path.join(os.path.dirname(moduleLocation),
                              'samples', 'warcs', warcFilename)
    tempFilePath = '/tmp/' + ''.join(random.sample(
        string.ascii_uppercase + string.digits * 6, 6)) + '.cdxj'
    print('B2' + tempFilePath)
    p = Process(target=replay.start, args=[tempFilePath])
    p.start()
    sleep(5)

    cdxjList = indexer.indexFileAt(pathOfWARC, quiet=True)
    cdxj = '\n'.join(cdxjList)

    with open(tempFilePath, 'w') as f:
        f.write(cdxj)
Project: oscars2016    Author: 0x0ece    | project source | file source
def main(argv):
    parser = argparse.ArgumentParser(
        description='A command line interface to move tweets from pubsub to bigquery')
    parser.add_argument('project_name', help='Project name in console')
    parser.add_argument('subscription', help='subscription to read from')
    parser.add_argument('-w','--workers', help='change the number of workers',
        default = 10, type=int)

    args = parser.parse_args(argv[1:])
    pool = [Process(target = worker, args = (args,)) for i in xrange(args.workers)]
    print("Starting pool of %d worker"%args.workers)
    for i in pool:
        i.start()

    for i in pool:
        i.join()
Project: sphinxcontrib-versioning    Author: Robpol86    | project source | file source
def build(source, target, versions, current_name, is_root):
    """Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str target: Destination directory to write documentation to (passed to sphinx-build).
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    log = logging.getLogger(__name__)
    argv = ('sphinx-build', source, target)
    config = Config.from_context()

    log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
    child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
    child.start()
    child.join()  # Block.
    if child.exitcode != 0:
        log.error('sphinx-build failed for branch/tag: %s', current_name)
        raise HandledError
Project: Tensormodels    Author: asheshjain399    | project source | file source
def _launch_pipeline(self):
    """This method creates two queues.
    filename_queue: stores the list of filesnames in data_file and label_file
    data_queue: stores the mini-batch
    """

    self.data_processes = [] # Holds process handles

    queue_size = 2 * self.num_preprocess_threads + 2 * self.num_gpu_towers
    self.data_queue = Queue(queue_size)  # This queue stores the data
    image_files = open(self.data_file, 'r').readlines()
    labels = open(self.label_file, 'r').readlines()
    print 'Size of queue: ', queue_size

    self.filename_queue = Queue(len(image_files))  # This queue stores the filenames
    p = Process(target=self._create_filename_queue, args=(self.filename_queue, image_files, labels, self.num_epochs))
    p.start()
    self.data_processes.append(p)

    print 'Data feeder started'
    for each_worker in range(self.num_preprocess_threads):
      p = Process(target=self._each_worker_process, args=(self.data_queue,))
      p.start()
      self.data_processes.append(p)
Project: crankycoin    Author: cranklin    | project source | file source
def __init__(self, host, reward_address, **kwargs):
        self.host = host
        self.request_nodes_from_all()
        self.reward_address = reward_address
        self.broadcast_node(host)
        self.full_nodes.add(host)

        block_path = kwargs.get("block_path")
        if block_path is None:
            self.blockchain = Blockchain()
        else:
            self.load_blockchain(block_path)

        mining = kwargs.get("mining")
        if mining is True:
            self.NODE_TYPE = "miner"
            self.mining_process = Process(target=self.mine)
            self.mining_process.start()
            logger.debug("mining node started on %s with reward address of %s...", host, reward_address)
        logger.debug("full node server starting on %s with reward address of %s...", host, reward_address)
        self.node_process = Process(target=self.app.run, args=(host, self.FULL_NODE_PORT))
        self.node_process.start()
        logger.debug("full node server started on %s with reward address of %s...", host, reward_address)
Project: RSPET    Author: panagiks    | project source | file source
def udp_flood(self):
        """Get target ip and port from server, start UPD flood wait for 'KILL'."""
        en_data = self.receive(3) # Max ip+port+payload length 999 chars
        en_data = self.receive(int(en_data))
        en_data = en_data.split(":")
        target_ip = en_data[0]
        target_port = int(en_data[1])
        msg = en_data[2]
        proc = Process(target=udp_flood_start, args=(target_ip, target_port, msg))
        proc.start()
        killed = False
        while not killed:
            en_data = self.receive(5)
            try:
                en_data = self.comm_dict[en_data]
            except KeyError:
                continue
            if en_data == 'KILL':
                proc.terminate()
                killed = True
        return 0
Project: RSPET    Author: panagiks    | project source | file source
def udp_spoof(self):
        """Get target/spoofed ip and port from server, start UPD spoof wait for 'KILL'."""
        en_data = self.receive(3) # Max ip+port+spoofedip+spoofed port+payload length 999 chars
        en_data = self.receive(int(en_data))
        en_data = en_data.split(":")
        target_ip = en_data[0]
        target_port = int(en_data[1])
        spoofed_ip = en_data[2]
        spoofed_port = int(en_data[3])
        payload = en_data[4].encode('UTF-8')
        proc = Process(target=udp_spoof_start, args=(target_ip, target_port,
                                                     spoofed_ip, spoofed_port,
                                                     payload))
        proc.start()
        killed = False
        while not killed:
            en_data = self.receive(5)
            try:
                en_data = self.comm_dict[en_data]
            except KeyError:
                continue
            if en_data == 'KILL':
                proc.terminate()
                killed = True
        return 0
Project: supremm    Author: ubccr    | project source | file source
def main():
    """
    main entry point for script
    """
    opts = getoptions(False)

    setuplogger(opts['log'])

    config = Config()

    threads = opts['threads']

    if threads <= 1:
        processjobs(config, opts, None)
        return
    else:
        proclist = []
        for procid in xrange(threads):
            p = Process(target=processjobs, args=(config, opts, procid))
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
Project: deep-prior    Author: moberweger    | project source | file source
def processFilesThreaded(self, filenames):
        """
        Run detector from files
        :param filenames: filenames to load
        :return: None
        """

        allstart = time.time()
        if not isinstance(filenames, list):
            raise ValueError("Files must be list of filenames.")

        p = Process(target=self.threadProducerFiles, args=[filenames])
        p.daemon = True
        c = Process(target=self.threadConsumer, args=[])
        c.daemon = True
        p.start()
        c.start()

        c.join()
        p.join()

        print("DONE in {}s".format((time.time() - allstart)))
Project: deep-prior    Author: moberweger    | project source | file source
def processVideoThreaded(self, device):
        """
        Use video as input
        :param device: device id
        :return: None
        """

        p = Process(target=self.threadProducerVideo, args=[device])
        p.daemon = True
        c = Process(target=self.threadConsumer, args=[])
        c.daemon = True
        p.start()
        c.start()

        c.join()
        p.join()
Project: adversarial-frcnn    Author: xiaolonw    | project source | file source
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
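
The comment describes the workaround without showing it; the shape of the pattern is to run each stage in a throwaway child so the operating system reclaims all GPU memory when that process exits. A minimal sketch, with a hypothetical train_stage function standing in for the per-stage trainer:

import multiprocessing

def train_stage(solver_path, max_iters):
    # Hypothetical stage runner; all pycaffe/GPU state dies with this process.
    pass

if __name__ == '__main__':
    stages = [('stage1_rpn_solver60k80k.pt', 80000),
              ('stage1_fast_rcnn_solver30k40k.pt', 40000)]
    for solver, iters in stages:
        p = multiprocessing.Process(target=train_stage, args=(solver, iters))
        p.start()
        p.join()   # memory held by this stage is released when the child exits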
Project: Tktr    Author: Intuity    | project source | file source
def run_wsgi(cls):
        if cls.wsgi_process is not None:
            cls.make_browser()
            return
        cls.wsgi_process = Process(target=cls._run_wsgi)
        cls.wsgi_process.start()
        # Wait for it to come up
        success = False
        for i in range(10):
            try:
                if urllib.urlopen("http://localhost:%i/" % cls.port_num).getcode() == 200:
                    success = True
                    break
            except Exception:
                pass
            time.sleep(2)
        # Create a second app for routing etc
        cls.app = cls._make_app()
        # If we failed to run WSGI then clean-up
        if not success:
            cls.stop_wsgi()
            cls.wsgi_process = None
            raise Exception("Couldn't bring up WSGI server")
        cls.make_browser()
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def buffered_gen_mp(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    buffer = mp.Queue(maxsize=buffer_size - 1)

    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.

    def _buffered_generation_process(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        buffer.close()  # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block forever.

    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    process.start()

    for data in iter(buffer.get, None):
        yield data
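
A possible usage sketch for buffered_gen_mp (slow_source is hypothetical): consumption overlaps with generation because the child process keeps the buffer topped up. Note that this relies on the fork start method (the default on Linux); generator objects are not picklable, so it will not work under spawn.

import time

def slow_source():
    for i in range(10):
        time.sleep(0.5)   # stand-in for slow I/O or preprocessing
        yield i

for item in buffered_gen_mp(slow_source(), buffer_size=4):
    print(item)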
Project: oshino    Author: CodersOfTheNight    | project source | file source
def stub_server(request):
    from multiprocessing import Process
    from stubilous.server import run
    from stubilous.builder import Builder
    builder = Builder()
    builder.server(host="localhost", port=9998)
    builder.route("GET", "/health")("Ok", 200)
    config = builder.build()
    proc = Process(target=run, args=(config,))

    def on_close():
        proc.terminate()
        proc.join()

    request.addfinalizer(on_close)
    proc.start()
    return proc
Project: faster-rcnn-resnet    Author: Eniac-Xie    | project source | file source
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
Project: Static-UPnP    Author: nigelb    | project source | file source
def start(self):
        self.setup_sockets()
        import StaticUPnP_Settings
        permissions = Namespace(**StaticUPnP_Settings.permissions)
        print(permissions)
        if permissions.drop_permissions:
            self.drop_privileges(permissions.user, permissions.group)

        self.running = Value(ctypes.c_int, 1)
        self.queue = Queue()
        self.reciever_thread = Process(target=self.socket_handler, args=(self.queue, self.running))
        self.reciever_thread.start()
        self.schedule_thread = Process(target=self.schedule_handler, args=(self.running,))
        self.schedule_thread.start()
        self.response_thread = Process(target=self.response_handler, args=(self.queue, self.running))
        self.response_thread.start()
Project: MUTE    Author: K4YT3X    | project source | file source
def aireplay():
    """
    Controls the attacks function
    Starts attack function in a sub-process
    """
    start_attack_proc = multiprocessing.Process(target=attack)
    start_attack_proc.start()
    while True:
        try:
            print('', end='', flush=True)
            pass
        except KeyboardInterrupt:
            os.system('clear')
            print(G + "[+] INFO: Stopping Attack..." + W)
            start_attack_proc.terminate()
            break


# --------------------------------ICON--------------------------------
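
The loop above busy-waits just to give KeyboardInterrupt somewhere to land. A variant of the same idea without the busy loop (a sketch, with a hypothetical stand-in for attack()) is to join the child and terminate it when Ctrl+C interrupts the join:

import multiprocessing

def attack_stub():
    # Hypothetical long-running task standing in for attack().
    while True:
        pass

if __name__ == '__main__':
    proc = multiprocessing.Process(target=attack_stub)
    proc.start()
    try:
        proc.join()        # sleep until the child exits on its own
    except KeyboardInterrupt:
        proc.terminate()   # Ctrl+C: stop the child
        proc.join()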
Project: histwords    Author: williamleif    | project source | file source
def run_parallel(num_processes, out_dir, source):
    page = requests.get("http://storage.googleapis.com/books/ngrams/books/datasetsv2.html")
    pattern = re.compile('href=\'(.*%s-%s-%s-.*\.gz)' % (source, TYPE, VERSION))
    urls = pattern.findall(page.text)
    del page
    queue = Queue()
    for url in urls:
        queue.put(url)
    ioutils.mkdir(out_dir + '/' + source + '/raw')
    download_dir = out_dir + '/' + source + '/raw/'
    ioutils.mkdir(download_dir)
    procs = [Process(target=split_main, args=[i, queue, download_dir]) for i in range(num_processes)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
Project: hostapd-mana    Author: adde88    | project source | file source
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        #keep the server process created by the manager (it holds Python
        #objects and allows other processes to manipulate them using proxies)
        #from being interrupted on SIGINT (KeyboardInterrupt), so that the
        #communication channel between subprocesses and the main process is
        #still usable after ctrl+C is received in the main process.
        old=signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        #reset it back so main process will receive a KeyboardInterrupt
        #exception on ctrl+c
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning)
Project: hostapd-mana    Author: adde88    | project source | file source
def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
        currentaddr = Value('c',bytes_(''))
        currentstart = Value('d',time.time())
        keyboardCaught = Event()
        p = Process(target=runner,
                   args=(iworker, testQueue,
                         resultQueue,
                         currentaddr,
                         currentstart,
                         keyboardCaught,
                         shouldStop,
                         self.loaderClass,
                         result.__class__,
                         pickle.dumps(self.config)))
        p.currentaddr = currentaddr
        p.currentstart = currentstart
        p.keyboardCaught = keyboardCaught
        old = signal.signal(signal.SIGILL, signalhandler)
        p.start()
        signal.signal(signal.SIGILL, old)
        return p
Project: aqbot    Author: aqisnotliquid    | project source | file source
def __init__(self):
        try:
            with open(self.AQBOT_CONF) as f:
                config = json.load(f)
        except IOError:
            exit(1)
        except:
            exit(1)

        try:
            for network in config['networks']:
                for channel in network['channels']:
                    worker = mp.Process(target=self._connect, args=(network, channel,))
                    worker.start()
        except:
            exit(1)
Project: highfive    Author: abau171    | project source | file source
def run_worker_pool(job_handler, host="localhost", port=48484,
                      *, max_workers=None):
    """
    Runs a pool of workers which connect to a remote HighFive master and begin
    executing calls.
    """

    if max_workers is None:
        max_workers = multiprocessing.cpu_count()

    processes = []
    for _ in range(max_workers):
        p = multiprocessing.Process(target=worker_main,
                args=(job_handler, host, port))
        p.start()
        processes.append(p)

    logger.debug("workers started")

    for p in processes:
        p.join()

    logger.debug("all workers completed")
Project: IPProxy    Author: yutian2011    | project source | file source
def main():
    ip_queue = multiprocessing.Queue()
    msg_queue = multiprocessing.Queue()
    p1 = multiprocessing.Process(target=get_proxy,args=(ip_queue,msg_queue))
    p2 = multiprocessing.Process(target=test_and_verify.verify_db_data,args=(ip_queue,msg_queue))
    p3 = [multiprocessing.Process(target=test_and_verify.gevent_queue,args=(ip_queue,msg_queue)) for i in range(settings.TEST_PROCESS_NUM)]
    p4 = multiprocessing.Process(target=web_cache_run,args=(ip_queue,))
    p1.start()
    p2.start()
    for p in p3:
        p.start()
    pid_list = [os.getpid(),p1.pid,p2.pid,]
    pid_list.extend(p.pid for p in p3)
    if WEB_USE_REDIS_CACHE:
        p4.start()
        pid_list.append(p4.pid)
    with open(PID,"w") as f:
        f.write(json.dumps(pid_list))
    p1.join()
    p2.join()
    for p in p3:
        p.join()
    if WEB_USE_REDIS_CACHE:
        p4.join()
Project: zatt    Author: simonacca    | project source | file source
def create_server(self, is_leader=True):
        def server_factory(config):
            serverSetup(config)
            loop = asyncio.get_event_loop()
            loop.run_forever()

        self.server = Process(target=server_factory, args=(self.config,))
        self.server.start()

        if is_leader and self.address == self.leader_address:
            sleep(1)
            print('Restarting Leader to increment term')
            self.server.terminate()
            self.create_server(is_leader=False)  # prevents recursion
            sleep(1)
        else:
            sleep(3)
        self.leader = DistributedDict(*self.leader_address)
        self.leader.config_cluster('add', *self.address)
Project: zatt    Author: simonacca    | project source | file source
def test_5_add_server(self):
        print('Add new server')
        d = DistributedDict('127.0.0.1', 9110)
        d['test'] = 0
        self.pool.stop(self.pool.ids)
        self.pool.start(self.pool.ids)

        self.pool.configs[10] = {'address': ('127.0.0.1', 9120),
                                 'cluster': {('127.0.0.1', 9120), },
                                 'storage': '20.persist', 'debug': False}
        self.pool.servers[10] = Process(target=self.pool._run_server,
                                        args=(self.pool.configs[10],))
        self.pool.start(10)
        sleep(1)

        d.config_cluster('add', '127.0.0.1', 9120)
        sleep(1)

        del d
        d = DistributedDict('127.0.0.1', 9120)

        self.assertEqual(d, {'test': 0})
Project: Reconnoitre    Author: codingo    | project source | file source
def target_file(target_hosts, output_directory, dns_server, quiet, quick):
    targets = load_targets(target_hosts, output_directory, quiet)
    try:
        target_file = open(targets, 'r')
        print("[*] Loaded targets from: %s" % targets)
    except IOError:
        print("[!] Unable to load: %s" % targets)
        return

    for ip_address in target_file:
        ip_address = ip_address.strip()
        create_dir_structure(ip_address, output_directory)

        host_directory = output_directory + "/" + ip_address
        nmap_directory = host_directory + "/scans"

        jobs = []
        p = multiprocessing.Process(target=nmap_scan, args=(ip_address, nmap_directory, dns_server, quick))
        jobs.append(p)
        p.start()
    target_file.close()
Project: Reconnoitre    Author: codingo    | project source | file source
def target_file(target_hosts, output_directory, quiet):
    targets = load_targets(target_hosts, output_directory, quiet)
    try:
        target_file = open(targets, 'r')
        print("[*] Loaded targets from: %s" % targets)
    except IOError:
        print("[!] Unable to load: %s" % targets)
        return

    for ip_address in target_file:
        ip_address = ip_address.strip()

        snmp_directory = output_directory + '/' + ip_address + '/scans/snmp/'
        check_directory(snmp_directory)

        jobs = []
        p = multiprocessing.Process(target=snmp_scans, args=(ip_address, snmp_directory))
        jobs.append(p)
        p.start()
    target_file.close()
Project: driller    Author: shellphish    | project source | file source
def driller_callback(self, fuzz):
        l.warning("Driller stuck callback triggered!")
        # remove any workers that aren't running
        self._running_workers = [x for x in self._running_workers if x.is_alive()]

        # get the files in queue
        queue = self._queue_files(fuzz)
        #for i in range(1, fuzz.fuzz_id):
        #    fname = "fuzzer-%d" % i
        #    queue.extend(self.queue_files(fname))

        # start drilling
        not_drilled = set(queue) - self._already_drilled_inputs
        if len(not_drilled) == 0:
            l.warning("no inputs left to drill")

        while len(self._running_workers) < self._num_workers and len(not_drilled) > 0:
            to_drill_path = list(not_drilled)[0]
            not_drilled.remove(to_drill_path)
            self._already_drilled_inputs.add(to_drill_path)

            proc = multiprocessing.Process(target=_run_drill, args=(self, fuzz, to_drill_path))
            proc.start()
            self._running_workers.append(proc)
Project: Brightside    Author: BrighterCommand    | project source | file source
def run(self, started_event: Event) -> Process:

        p = Process(target=_sub_process_main, args=(
            started_event,
            self._channel_name,
            self._connection,
            self._consumer_configuration,
            self._consumer_factory,
            self._command_processor_factory,
            self._mapper_func))

        self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
                           self._channel_name, self._connection.exchange, self._connection.amqp_uri)

        p.start()

        started_event.wait(timeout=1)

        return p
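
The started_event handshake above — the child sets an Event once it is ready, and the parent waits with a timeout — can be sketched in isolation (hypothetical names, not Brightside's API):

import multiprocessing
import time

def server_stub(started):
    time.sleep(0.5)   # stand-in for slow startup work
    started.set()     # signal the parent that we are ready
    time.sleep(5)     # ... serve ...

if __name__ == '__main__':
    started = multiprocessing.Event()
    p = multiprocessing.Process(target=server_stub, args=(started,))
    p.start()
    if started.wait(timeout=1):   # False if the child never signalled in time
        print('worker is ready')
    else:
        print('worker failed to start in time')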
Project: dpspider    Author: doupengs    | project source | file source
def multiWorker(processNum=4,serverHost='127.0.0.1',serverAuthkey='',serverPort=5000,logFile=None,color=True,debug=4):
    '''
    :param processNum: default=4 <class int>
    :param serverHost: default='127.0.0.1' <class str>
    :param serverAuthkey: default='' <class bytes>
    :param serverPort: default=5000 <class int>
    :param logFile: default=None <class str>
    :param color: default=True <class bool>
    :param debug: default=4 <class int|0 NONE,1 [Error],2 [Error][WARING],3 [Error][WARING][INFO],4 ALL>
    :function: multiprocessing download
    '''
    while True:
        pools = []
        for num in range(processNum):
            pools.append(Process(target=DownloadWorker(serverHost,serverAuthkey,serverPort,logFile,color,debug).run,args=('Worker%d'%num,)))
        for p in pools:
            p.start()
        for p in pools:
            p.join()
Project: geppetto    Author: datosio    | project source | file source
def __init__(self, *args, **kwargs):
        test_notes = global_vars['test_notes']
        pause_reporting = global_vars['pause_reporting']

        def wrapper(func, test_notes, pause_reporting, **kwargs):
            """

            :param func: function to pass to multiprocessing.Process.
            :param test_notes: multiprocessing Queue() instance. Allows us to add notes to
            :param pause_reporting: multiprocessing Event() instance. Turns off reporting to terminal when input is needed.
            :param kwargs: dictionary that contains all args and kwargs being sent to wrapped function.
            :return:
            """
            global_vars['test_notes'] = test_notes
            global_vars['pause_reporting'] = pause_reporting
            args_ = kwargs['args'] if 'args' in kwargs else ()
            kwargs_ = kwargs['kwargs'] if 'kwargs' in kwargs else {}
            return func(*args_, **kwargs_)

        wrapper_args = [kwargs['target'], test_notes, pause_reporting]
        wrapper_kwargs = kwargs

        multiprocessing.Process.__init__(self, target=wrapper, args=wrapper_args, kwargs=wrapper_kwargs)
Project: osbrain    Author: opensistemas-hub    | project source | file source
def _process_single_event(self, socket):
        """
        Process a socket's event.

        Parameters
        ----------
        socket : zmq.Socket
            Socket that generated the event.
        """
        data = socket.recv()
        address = self.address[socket]
        if address.kind == 'SUB':
            self._process_sub_event(socket, address, data)
        elif address.kind == 'PULL':
            self._process_pull_event(socket, address, data)
        elif address.kind == 'REP':
            self._process_rep_event(socket, address, data)
        else:
            self._process_single_event_complex(address, socket, data)
Project: osbrain    Author: opensistemas-hub    | project source | file source
def _process_single_event_complex(self, address, socket, data):
        """
        Process a socket's event for complex sockets (channels).

        Parameters
        ----------
        address : AgentAddress or AgentChannel
            Agent address or channel associated to the socket.
        socket : zmq.Socket
            Socket that generated the event.
        data
            Received in the socket.
        """
        if address.kind == 'ASYNC_REP':
            self._process_async_rep_event(socket, address, data)
        elif address.kind == 'PULL_SYNC_PUB':
            self._process_sync_pub_event(socket, address.channel, data)
        else:
            raise NotImplementedError('Unsupported kind %s!' % address.kind)
Project: osbrain    Author: opensistemas-hub    | project source | file source
def _process_rep_event(self, socket, addr, data):
        """
        Process a REP socket's event.

        Parameters
        ----------
        socket : zmq.Socket
            Socket that generated the event.
        addr : AgentAddress
            AgentAddress associated with the socket that generated the event.
        data : bytes
            Data received on the socket.
        """
        message = deserialize_message(message=data, serializer=addr.serializer)
        handler = self.handler[socket]
        if inspect.isgeneratorfunction(handler):
            generator = handler(self, message)
            socket.send(serialize_message(next(generator), addr.serializer))
            execute_code_after_yield(generator)
        else:
            reply = handler(self, message)
            socket.send(serialize_message(reply, addr.serializer))
Project: osbrain    Author: opensistemas-hub    | project source | file source
def _process_pull_event(self, socket, addr, data):
        """
        Process a PULL socket's event.

        Parameters
        ----------
        socket : zmq.Socket
            Socket that generated the event.
        addr : AgentAddress
            AgentAddress associated with the socket that generated the event.
        data : bytes
            Data received on the socket.
        """
        message = deserialize_message(message=data, serializer=addr.serializer)
        handler = self.handler[socket]
        if not isinstance(handler, (list, dict, tuple)):
            handler = [handler]
        for h in handler:
            h(self, message)
Project: osbrain    Author: opensistemas-hub    | project source | file source
def test_nameserver_spawn_process(nsproxy):
    """
    A name server should be able to spawn child processes.

    It is a way to make sure name servers are run as non-daemonic processes,
    which are not allowed to have children.
    """
    class Spawner(NameServer):
        def spawn_process(self):
            p = multiprocessing.Process()
            p.start()
            return True

    ns = run_nameserver(base=Spawner)
    assert ns.spawn_process()
    ns.shutdown()
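
The restriction the docstring mentions is easy to demonstrate: calling start() on a grandchild from inside a daemonic process raises AssertionError ("daemonic processes are not allowed to have children"). A minimal sketch:

import multiprocessing

def spawn_child():
    child = multiprocessing.Process(target=print, args=('hello',))
    child.start()   # raises AssertionError when this process is daemonic
    child.join()

if __name__ == '__main__':
    p = multiprocessing.Process(target=spawn_child)
    p.daemon = True   # daemonic processes may not have children
    p.start()
    p.join()
    print(p.exitcode)   # non-zero: start() failed inside the daemonic child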
Project: son-mano-framework    Author: sonata-nfv    | project source | file source
def setUpClass(self):

        self.smr_proc = Process(target=SpecificManagerRegistry)

        self.smr_proc.daemon = True

        self.manoconn = ManoBrokerRequestResponseConnection('son-plugin.SpecificManagerRegistry')


        self.wait_for_ssm_event = threading.Event()
        self.wait_for_ssm_event.clear()

        self.wait_for_fsm_event = threading.Event()
        self.wait_for_fsm_event.clear()

        self.event1 = False
        self.event2 = False

        self.smr_proc.start()
        time.sleep(4)
Project: hienoi    Author: christophercrouzet    | project source | file source
def _main_process(logger, downwards, upwards, process_count, configs):
    """Main process."""
    try:
        while True:
            message = _receive_message(upwards, block=True)
            if message.type == _MESSAGE_ERROR:
                logger.error("Process '%s' [%d]:\n%s" % (
                    message.process_name, message.process_id, message.message))
                return Status.FAILURE
            elif message.type == _MESSAGE_QUIT:
                break
    finally:
        for _ in _range(process_count):
            _send_message(downwards, _MESSAGE_STOP)

        downwards.close()

    return Status.SUCCESS
Project: ParlAI    Author: facebookresearch    | project source | file source
def test_concurrent_access(self):
        st = SharedTable({'cnt': 0})

        def inc():
            for _ in range(50):
                with st.get_lock():
                    st['cnt'] += 1
                time.sleep(random.randint(1, 5) / 10000)

        threads = []
        for _ in range(5):  # numthreads
            threads.append(Process(target=inc))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert st['cnt'] == 250
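
The st.get_lock() call is what makes this test pass: += on shared state is a read-modify-write, so two processes can read the same value and one increment gets lost. The same pattern using only the standard library's multiprocessing.Value (a sketch):

import multiprocessing

def inc(counter):
    for _ in range(50):
        with counter.get_lock():   # without the lock, increments get lost
            counter.value += 1

if __name__ == '__main__':
    counter = multiprocessing.Value('i', 0)
    procs = [multiprocessing.Process(target=inc, args=(counter,)) for _ in range(5)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    assert counter.value == 250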
Project: Learning-Concurrency-in-Python    Author: PacktPublishing    | project source | file source
def main():
  print("Starting number crunching")
  t0 = time.time()

  procs = []

  # Here we create our processes and kick them off
  for i in range(10):
    proc = Process(target=executeProc, args=())
    procs.append(proc)
    proc.start()

  # Again we use the .join() method in order to wait for 
  # execution to finish for all of our processes
  for proc in procs:
    proc.join()

  t1 = time.time()
  totalTime = t1 - t0
  # we print out the total execution time for our 10
  # procs.
  print("Execution Time: {}".format(totalTime))
Project: ringbuffer    Author: bslatkin    | project source | file source
def main():
    ring = ringbuffer.RingBuffer(slot_bytes=50000, slot_count=10)
    ring.new_writer()

    processes = [
        multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
        multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
        multiprocessing.Process(target=writer, args=(ring, 1, 1000)),
    ]

    for p in processes:
        p.daemon = True
        p.start()

    for p in processes:
        p.join(timeout=20)
        assert not p.is_alive()
        assert p.exitcode == 0
Project: ringbuffer    Author: bslatkin    | project source | file source
def new_reader(self):
        """Returns a new unique reader into the buffer.

        This must only be called in the parent process. It must not be
        called in a child multiprocessing.Process. See class docstring. To
        enforce this policy, no readers may be allocated after the first
        write has occurred.
        """
        with self.lock.for_write():
            writer_position = self.writer.get()
            if writer_position.counter > 0:
                raise MustCreatedReadersBeforeWritingError

            reader = Pointer(self.slot_count, start=writer_position.counter)
            self.readers.append(reader)
            return reader
Project: ringbuffer    Author: bslatkin    | project source | file source
def main():
    ring = ringbuffer.RingBuffer(slot_bytes=50000, slot_count=100)
    ring.new_writer()

    processes = [
        multiprocessing.Process(target=writer, args=(ring,)),
    ]
    for i in range(10):
        processes.append(multiprocessing.Process(
            target=reader, args=(ring, ring.new_reader())))

    for p in processes:
        p.start()

    for p in processes:
        p.join(timeout=20)
        assert not p.is_alive()
        assert p.exitcode == 0
Project: OSPTF    Author: xSploited    | project source | file source
def main():
    while True:
        indata = raw_input("Enter domain name and port, example: google.com:443 : ")
        print indata
        domain = indata.split(':')[0]
        try:
            port = int(indata.split(':')[1])
        except:
            port = 443
        if len(domain) < 5:
            print 'wrong domain'
            continue
        print 'fucking ' + domain + ' @ port ' + str(port) + '...'
        fn = open(domain + '.bin', 'ab')
        for j in xrange(THREADS):
            t = Process(target=fuckit, args=(domain, port, fn))
            t.daemon = True
            t.start()
Project: py-faster-rcnn-tk1    Author: joeking11829    | project source | file source
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.ROOT_DIR, 'models', *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.ROOT_DIR, 'models', net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
Project: Pillage    Author: kostrin    | project source | file source
def addProcess(self, method, arguments):
    p = multiprocessing.Process(target=method, args=(arguments,))   
    p.start()
Project: Pillage    Author: kostrin    | project source | file source
def addProcess(self, method, arguments):
    p = multiprocessing.Process(target=method, args=(arguments,))
    p.start()