Python multiprocessing module: Lock() example source code

The code examples below, extracted from open-source Python projects, illustrate how to use multiprocessing.Lock().
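
Before the project excerpts, here is a minimal, self-contained sketch of the basic pattern (the increment worker and counter below are illustrative, not taken from any of the projects): the parent process creates a single Lock, passes it to every child, and each child holds the lock across its read-modify-write of shared state so that updates cannot interleave.

import multiprocessing

def increment(lock, counter, n):
    # Holding the lock makes each read-modify-write of the shared counter atomic.
    for _ in range(n):
        with lock:  # equivalent to lock.acquire() ... lock.release()
            counter.value += 1

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    # lock=False: the Value carries no lock of its own; we synchronize explicitly.
    counter = multiprocessing.Value('i', 0, lock=False)
    procs = [multiprocessing.Process(target=increment, args=(lock, counter, 1000))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # always 4000; without the lock, updates could be lost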

Project: clopure    Author: vbkaisetsu    | Project source | File source
def iter_split_evaluate_wrapper(self, fn, local_vars, in_size, q_in, q_out):
        l = Lock()
        idx_q = Queue()
        def split_iter():
            try:
                while True:
                    l.acquire()
                    i, data_in = q_in.get()
                    idx_q.put(i)
                    if data_in is EOFMessage:
                        return
                    yield data_in
            except BaseException:
                traceback.print_exc(file=sys.stdout)
        gs = itertools.tee(split_iter(), in_size)
        for data_out in self.evaluate((fn,) + tuple((lambda i: (x[i] for x in gs[i]))(i) for i in range(in_size)), local_vars=local_vars):
            q_out.put((idx_q.get(), data_out))
            l.release()
        q_out.put((0, EOFMessage))
Project: appcompatprocessor    Author: mbevilacqua    | Project source | File source
def removeProducer(self, noLock = False):
        if self.num_producers > 0:
            # Lock internal
            if not noLock: self.__internalLock__.acquire()

            # Remove last worker from worker pool
            (worker_num, producer, extra_arg_list) = self.producer_pool.pop()
            logger.debug("Removing Producer-%d" % worker_num)
            # Remove last worker's exitFlag
            producer_exitEvent = self.producer_pool_exitEvent.pop()

            # Set the worker's exit event
            if not producer_exitEvent.is_set():
                logger.debug("Producer-%d exitEvent SET" % worker_num)
                producer_exitEvent.set()

            # Update producer count
            self.num_producers -= 1

            # Release internal
            if not noLock: self.__internalLock__.release()
        else:
            logger.error("Attempted to remove producer from empty pool.")
Project: shellbot    Author: bernard357    | Project source | File source
def __init__(self, bot=None, machines=None, **kwargs):
        """
        Implements a sequence of multiple machines

        :param machines: the sequence of machines to be run
        :type machines: list of Machine

        """
        self.bot = bot
        self.machines = machines

        self.lock = Lock()

        # prevent the Manager() process from being interrupted
        handler = signal.signal(signal.SIGINT, signal.SIG_IGN)

        self.mutables = Manager().dict()

        # restore the previous handler for the rest of the program
        signal.signal(signal.SIGINT, handler)

        self.on_init(**kwargs)
Project: shellbot    Author: bernard357    | Project source | File source
def __init__(self, settings=None, filter=None):
        """
        Stores settings across multiple independent processing units

        :param settings: the set of variables managed in this context
        :type settings: dict

        :param filter: a function to interpret values on check()
        :type filter: callable

        """

        # prevent the Manager() process from being interrupted
        handler = signal.signal(signal.SIGINT, signal.SIG_IGN)

        self.lock = Lock()
        self.values = Manager().dict()

        # restore the previous handler for the rest of the program
        signal.signal(signal.SIGINT, handler)

        self.filter = filter if filter else self._filter

        if settings:
            self.apply(settings)
Project: photo2map    Author: eatfears    | Project source | File source
def main():
    threads = []

    output_image = Image.new('RGB', (output_width, output_height))
    output_image.save(output_filename)

    output_image_mutex = multiprocessing.Lock()

    for i in range(threads_num):
        t = multiprocessing.Process(target=worker, args=(i, output_image_mutex,))
        threads.append(t)
        t.start()

    for thread in threads:
        thread.join()

    print("Image saved")
Project: idascripts    Author: ctfhacker    | Project source | File source
def __init__(self):
        '''Execute a function asynchronously in another thread.'''

        # management of execution queue
        res = multiprocessing.Lock()
        self.queue = multiprocessing.Condition(res)
        self.state = []

        # results
        self.result = Queue.Queue()

        # thread management
        self.ev_unpaused = multiprocessing.Event()
        self.ev_terminating = multiprocessing.Event()
        self.thread = threading.Thread(target=self.__run__, name="Thread-{:s}-{:x}".format(self.__class__.__name__, id(self)))

        # FIXME: we can support multiple threads, but since this is
        #        being bound by a single lock due to my distrust for IDA
        #        and race-conditions...we only use one.
        self.lock = multiprocessing.Lock()

        return self.__start()
Project: cebl    Author: idfah    | Project source | File source
def __init__(self, mgr, pollSize=2):
        """
        Construct a new source for replaying EEG saved to file.

            pollSize:   Number of data samples collected during each poll.
                        Lower values result in better timing and marker
                        resolution but higher CPU usage, while higher values
                        typically use less CPU at the cost of timing accuracy.
        """
        self.pollSize = pollSize
        self.lock = mp.Lock()

        self.replayData = None
        self.startIndex = 0

        Source.__init__(self, mgr=mgr, sampRate=256, chans=[str(i) for i in range(8)],
            configPanelClass=ReplayConfigPanel)
Project: dbnet_tensorflow    Author: yuanluya    | Project source | File source
def __init__(self, num_processor, batch_size, phase,
                 batch_idx_init = 0, data_ids_init = train_ids, capacity = 10):
        self.num_processor = num_processor
        self.batch_size = batch_size
        self.data_load_capacity = capacity
        self.manager = Manager()
        self.batch_lock = Lock()
        self.mutex = Lock()
        self.cv_full = Condition(self.mutex)
        self.cv_empty = Condition(self.mutex)
        self.data_load_queue = self.manager.list()
        self.cur_batch = self.manager.list([batch_idx_init])
        self.processors = []
        if phase == 'train':
            self.data_ids = self.manager.list(data_ids_init)
        elif phase == 'test':
            self.data_ids = self.manager.list(test_ids)
        else:
            raise ValueError('Could not set phase to %s' % phase)
Project: ouroboros    Author: pybee    | Project source | File source
def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                           args=(self.barrier, passes, child_conn, lock))
            p.start()

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

Project: ouroboros    Author: pybee    | Project source | File source
def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
Project: Triplet_Loss_SBIR    Author: TuBui    | Project source | File source
def load_next_batch(self):
    res = {}
    #7
    lock = Lock()
    threads = [self.pool.apply_async(self.load_next_pair,(lock,)) for \
                i in range (self.batch_size)]
    thread_res = [thread.get() for thread in threads]
    res['data_s'] = np.asarray([tri['sketch'] for tri in thread_res])[:,None,:,:]
    res['data_i'] = np.asarray([tri['image'] for tri in thread_res])[:,None,:,:]
    res['label_s'] = np.asarray([tri['label_s'] for tri in thread_res],dtype=np.float32)[:,None]
    res['label_i'] = np.asarray([tri['label_i'] for tri in thread_res],dtype=np.float32)[:,None]
    return res
#==============================================================================
#     res['data_s'] = np.zeros((self.batch_size,1,self.outshape[0],\
#                             self.outshape[1]),dtype = np.float32)
#     res['data_i'] = np.zeros_like(res['data_a'],dtype=np.float32)
#     res['label'] = np.zeros((self.batch_size,1),dtype = np.float32)
#     for itt in range(self.batch_size):
#       trp = self.load_next_pair(1)
#       res['data_s'][itt,...] = trp['sketch']
#       res['data_i'][itt,...] = trp['image']
#       res['label'][itt,...] = trp['label']
#     return res
#==============================================================================
Project: Triplet_Loss_SBIR    Author: TuBui    | Project source | File source
def load_next_batch(self):
    res = {}
    #7
    lock = Lock()
    threads = [self.pool.apply_async(self.load_next_image,(lock,)) for \
                i in range (self.batch_size)]
    thread_res = [thread.get() for thread in threads]
    res['data'] = np.asarray([datum[0] for datum in thread_res])[:,None,:,:]
    res['label'] = np.asarray([datum[1] for datum in thread_res],dtype=np.float32)
    return res

#==============================================================================
#     res['data'] = np.zeros((self.batch_size,1,self.outshape[0],self.outshape[1]),dtype = np.float32)
#     res['label'] = np.zeros(self.batch_size,dtype = np.float32)
#     for itt in range(self.batch_size):
#       img, label = self.load_next_image(1)
#       res['data'][itt,...] = img
#       res['label'][itt] = label
#     return res
#==============================================================================
Project: ccdetection    Author: tommiu    | Project source | File source
def printResults(flow):
    try:
        stats_path = getArg(flow, 's', 'statsfile')
    except:
        stats_path = DEFAULT_STATS_PATH

    if os.path.isfile(stats_path):
        clones = SharedData(stats_path, multiprocessing.Lock()).getClones()
        print "Found %d code clones saved in file '%s':" % (
                                                        len(clones), stats_path
                                                        )
        for i, clone in enumerate(clones):
            print str(i) + ".", clone

    else:
        print "Given path is not a valid file: '%s'" % (stats_path)
Project: memex_ad_features    Author: giantoak    | Project source | File source
def create_phone_files(dataframe):
    # Drop all rows without a phone number
    dataframe = dataframe.dropna(subset=['phone'])
    dataframe['phone'] = dataframe['phone'].map(lambda x: re.sub('[^0-9]', '', str(x)))

    # Break file up by phone
    phone_numbers = dataframe.phone.unique()
    phone_dataframe = {phone_number: pandas.DataFrame() for phone_number in phone_numbers}
    for key in phone_dataframe.keys():
        phone_dataframe[key] = dataframe[:][dataframe.phone == key]

    # Check if file already exists for each location, if so then append, if not then create a new file
    print 'Appending location data to existing files'

    # Lock all processes while work is being done to save files
    for key, value in phone_dataframe.iteritems():
        if os.path.isfile('{0}phone_{1}.csv'.format(config['phone_data'], str(key))):
            lock.acquire()
            print 'lock has been set for file {0}'.format(key)
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), mode='a', header=False, encoding='utf-8')
            lock.release()
        else:
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), header=True, encoding='utf-8')
    print 'finished file {0}'.format(file)
Project: crawler_old    Author: salmonx    | Project source | File source
def manager():

    cpus = mp.cpu_count()
    que = mp.Queue()

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()     

    ret = set()
    while que.qsize() > 0:
        item = que.get()
        ret.add(item)
    print ret
Project: crawler_old    Author: salmonx    | Project source | File source
def manager(dbs):
    # leave one cpu 
    tasks = mp.cpu_count() -1 
    tasks = 1
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1 ))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
Project: crawler_old    Author: salmonx    | Project source | File source
def manager(dbs):

    cpus = mp.cpu_count()
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
Project: crawler_old    Author: salmonx    | Project source | File source
def manager(dbs, urldict):

    cpus = mp.cpu_count()
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1, urldict))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
Project: crawler_old    Author: salmonx    | Project source | File source
def manager(dbs):
    tasks = mp.cpu_count()-1

    # que holds the db tasks; outque collects the result from each cpu
    que = mp.Queue()
    outque = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1, outque))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()

    # here all extract tasks are done
    # then merge the results and insert them into redis
Project: crawler_old    Author: salmonx    | Project source | File source
def manager():

    tasks = mp.cpu_count() - 1
    que = mp.Queue()
    lock = mp.Lock()
    plist = []

    initque(que)    

    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
Project: crawler_old    Author: salmonx    | Project source | File source
def manager(dbs):
    # leave one cpu 
    tasks = mp.cpu_count() -1 
    #tasks = 1
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
Project: PBSuite    Author: dbrowneup    | Project source | File source
def __init__(self, filename, version=None, columns=None, parameters=None, mode='r'):
        if mode not in ['r', 'w', 'a']:
            logging.error("Mode must be r(ead), w(rite), or (a)ppend!")
            logging.error("Forcing 'r'")
        self.filename = filename
        if mode == 'w' and os.path.exists(self.filename):
            logging.error("Ouput H5 %s already exists!" % self.filename)
            exit(1)
        if mode == 'r' and not os.path.exists(self.filename):
            logging.error("Output H5 %s doesn't exist!" % self.filename)
            exit(1)
        if mode != 'r':
            self.__reopen(mode)
            self.__results.attrs["version"] = version
            self.__results.attrs["columns"] = columns
            self.__results.attrs["parameters"] = parameters
            self.__close()   

        self.__lock = multiprocessing.Lock()
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                           args=(self.barrier, passes, child_conn, lock))
            p.start()

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
Project: AutoTrade    Author: changye    | Project source | File source
def __init__(self, account, password, notifier, ocr_service, debug_single_step=False):
        self.__account = account
        self.__password = password
        self.__notifier = notifier
        self.__ocr_service = ocr_service

        self.__manager = Manager()

        self.__job_list = self.__manager.list()
        self.__job_list_lock = Lock()

        self.__map = self.__manager.dict()
        self.__entrust_map = self.__manager.dict()

        self.__process = None
        self.__keep_working = Value('i', 1)

        if debug_single_step:
            self.__debug_single_step = Value('i', 1)
        else:
            self.__debug_single_step = Value('i', 0)

        self.__debug_single_step_go = Value('i', 0)
        self.__debug_single_step_lock = Lock()
Project: More-I-O    Author: koltafrickenfer    | Project source | File source
def trainPool(population,envNum,species,queue,env): 
    before = time.time()
    results = []
    jobs = Queue()
    lock = multiprocessing.Lock()
    s = 0
    for specie in species:
        g=0
        for genome in specie.genomes:
            genome.generateNetwork()
            jobs.put((s,g,genome))
            g+=1
        s+=1

    mPool = multiprocessing.Pool(processes=envNum,initializer = poolInitializer,initargs=(jobs,lock,))
    results = mPool.map(jobTrainer,[env]*envNum)
    mPool.close()
    mPool.join()
    after = time.time()
    killFCEUX()

    print("next generation")

    queue.put(results)
Project: fuzzinator    Author: renatahodovan    | Project source | File source
def __init__(self, controller, style):
        # Shared objects to help event handling.
        self.events = Queue()
        self.lock = Lock()

        self.view = MainWindow(controller)
        self.screen = raw_display.Screen()
        self.screen.set_terminal_properties(256)

        self.loop = MainLoop(widget=self,
                             palette=style,
                             screen=self.screen,
                             unhandled_input=Tui.exit_handler,
                             pop_ups=True)

        self.pipe = self.loop.watch_pipe(self.update_ui)
        self.loop.set_alarm_in(0.1, Tui.update_timer, self.view.logo.timer)
        super(Tui, self).__init__(self.view)

        connect_signal(self.view.issues_table, 'refresh', lambda source: self.loop.draw_screen())
        connect_signal(self.view.stat_table, 'refresh', lambda source: self.loop.draw_screen())
Project: yelp_kafka    Author: Yelp    | Project source | File source
def __init__(self, topics, config, consumer_factory):
        self.config = config
        self.termination_flag = None
        self.partitioner = Partitioner(
            config,
            topics,
            self.acquire,
            self.release,
        )
        self.consumers = None
        self.consumers_lock = Lock()
        self.consumer_procs = {}
        self.consumer_factory = consumer_factory
        self.log = logging.getLogger(self.__class__.__name__)
        self.pre_rebalance_callback = config.pre_rebalance_callback
        self.post_rebalance_callback = config.post_rebalance_callback
Project: pymp    Author: classner    | Project source | File source
def array(shape, dtype=_np.float64, autolock=False):
    """Factory method for shared memory arrays supporting all numpy dtypes."""
    assert _NP_AVAILABLE, (
        "To use the shared array object, numpy must be available!")
    if not isinstance(dtype, _np.dtype):
        dtype = _np.dtype(dtype)
    # Not bothering to translate the numpy dtypes to ctype types directly,
    # because they're only partially supported. Instead, create a byte ctypes
    # array of the right size and use a view of the appropriate datatype.
    shared_arr = _multiprocessing.Array(
        'b', int(_np.prod(shape) * dtype.alignment), lock=autolock)
    with _warnings.catch_warnings():
        # For more information on why this is necessary, see
        # https://www.reddit.com/r/Python/comments/j3qjb/parformatlabpool_replacement
        _warnings.simplefilter('ignore', RuntimeWarning)
        data = _np.ctypeslib.as_array(shared_arr).view(dtype).reshape(shape)
    return data
Project: pyparadox_alarm    Author: PollieKrismis    | Project source | File source
def connect(self):
        '''
        Opens a serial connection to the Paradox Alarm Panel.
        To do: Add a loop to attempt a connection several times before giving up.
        '''
        self._lock = Lock() #Does this do anything?

        try:
            self._pipe = serial.Serial(self._port, self._speed, timeout=1)
            self._pipe.flushInput() #Gets rid of /X0 after being disconnected for long?
        except SerialException:
            if self._port is None:
                _LOGGER.error(str.format('Port not configured yet.'))
            else:
                self.reconnect()
        else:
            #Connection should now be open
            self._shutdown = False
            _LOGGER.info(str.format("Connected to Paradox on port: {0}, speed: {1}",
                                    self._port, self._speed))
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
Project: code    Author: ActiveState    | Project source | File source
def __init__(self, maxsize=0):
        '''initialize the queue'''
        self.mutex = multiprocessing.Lock()
        self.not_empty = multiprocessing.Condition(self.mutex)
        self.not_full = multiprocessing.Condition(self.mutex)
        self.maxsize = maxsize
        self._tags = {}  # list of refid's for each tag
        self._queue = {}  # the actual queue data
        self._refcount = {}  # how many tags refer to a given refid in the queue
        self.id_generator = id_generator()
Project: kAFL    Author: RUB-SysSec    | Project source | File source
def __init__(self, num_processes=1, tasks_per_requests=1, bitmap_size=(64 << 10)):
        self.to_update_queue = multiprocessing.Queue()
        self.to_master_queue = multiprocessing.Queue()
        self.to_master_from_mapserver_queue = multiprocessing.Queue()
        self.to_master_from_slave_queue = multiprocessing.Queue()
        self.to_mapserver_queue = multiprocessing.Queue()

        self.to_slave_queues = []
        for i in range(num_processes):
            self.to_slave_queues.append(multiprocessing.Queue())

        self.slave_locks_A = []
        self.slave_locks_B = []
        for i in range(num_processes):
            self.slave_locks_A.append(multiprocessing.Lock())
            self.slave_locks_B.append(multiprocessing.Lock())
            self.slave_locks_B[i].acquire()

        self.reload_semaphore = multiprocessing.Semaphore(multiprocessing.cpu_count()/2)
        self.num_processes = num_processes
        self.tasks_per_requests = tasks_per_requests

        self.stage_abortion_notifier = multiprocessing.Value('b', False)
        self.slave_termination = multiprocessing.Value('b', False, lock=False)
        self.sampling_failed_notifier = multiprocessing.Value('b', False)
        self.effector_mode = multiprocessing.Value('b', False)

        self.files = ["/dev/shm/kafl_fuzzer_master_", "/dev/shm/kafl_fuzzer_mapserver_", "/dev/shm/kafl_fuzzer_bitmap_"]
        self.sizes = [(65 << 10), (65 << 10), bitmap_size]
        self.tmp_shm = [{}, {}, {}]
Project: crankycoin    Author: cranklin    | Project source | File source
def __init__(self, blocks=None):
        self.blocks_lock = Lock()
        self.unconfirmed_transactions_lock = Lock()
        self.unconfirmed_transactions = Manager().list()
        if blocks is None:
            genesis_block = self.get_genesis_block()
            self.add_block(genesis_block)
        else:
            for block in blocks:
                self.add_block(block)
Project: pymotw3    Author: reingart    | Project source | File source
def worker_with(lock, stream):
    with lock:
        stream.write('Lock acquired via with\n')
Project: pymotw3    Author: reingart    | Project source | File source
def worker_no_with(lock, stream):
    lock.acquire()
    try:
        stream.write('Lock acquired directly\n')
    finally:
        lock.release()
Project: pymotw3    Author: reingart    | Project source | File source
def __init__(self):
        super(ActivePool, self).__init__()
        self.mgr = multiprocessing.Manager()
        self.active = self.mgr.list()
        self.lock = multiprocessing.Lock()
Project: zeronet-debian    Author: bashrc    | Project source | File source
def __init__(self, database):
        self._handle = open(database, 'rb')
        self._size = os.fstat(self._handle.fileno()).st_size
        if not hasattr(os, 'pread'):
            self._lock = Lock()
Project: ParlAI    Author: facebookresearch    | Project source | File source
def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
        # super() call initiates stream in self.data by calling _load()
        super().__init__(opt, data_loader, cands, shared, **kwargs)
        self.cycle = kwargs['cycle'] if 'cycle' in kwargs else True
        if shared:
            # auxiliary instances hold pointer to main datastream (in self.data)
            self.reset_data = shared['reset']
            # Share datafile and data_loader for computing num_exs and num_eps
            self.datafile = shared['datafile']
            self.data_loader = shared['data_loader']
            if 'lock' in shared:
                self.lock = shared['lock']
        else:
            # main instance holds the stream and shares pointer to it
            self.data_loader = data_loader
            self.datafile = opt['datafile']
            self.reset_data = None
            self.is_reset = True
            if opt.get('numthreads', 1) > 1:
                print('WARNING: multithreaded streaming will process every '
                      'example numthreads times.')
                self.lock = Lock()
        self.entry_idx = 0
        self.next_episode = None
        self.num_eps = None
        self.num_exs = None
Project: ringbuffer    Author: bslatkin    | Project source | File source
def __init__(self):
        self.lock = multiprocessing.Lock()
        self.readers_condition = multiprocessing.Condition(self.lock)
        self.writer_condition = multiprocessing.Condition(self.lock)
        self.readers = multiprocessing.RawValue(ctypes.c_uint, 0)
        self.writer = multiprocessing.RawValue(ctypes.c_bool, False)
Project: chromecastslack    Author: sh0oki    | Project source | File source
def __init__(self, player, bot):
        self._song = None
        self._player = player
        self._bot = bot
        self._lock = Lock()
Project: aquests    Author: hansroh    | Project source | File source
def __init__(self, out, cacheline = 100, flushnow = 0):
        self.out = out
        self.cacheline = cacheline
        self.flushnow = flushnow
        self.lock = multiprocessing.Lock()
        self.filter = []
        self.__cache = []
Project: osm_rg    Author: Scitator    | Project source | File source
def __init__(self, ndata, nprocs):
        self._ndata = mp.RawValue(ctypes.c_int, ndata)
        self._start = mp.RawValue(ctypes.c_int, 0)
        self._lock = mp.Lock()
        min_chunk = ndata // nprocs
        min_chunk = ndata if min_chunk <= 2 else min_chunk
        self._chunk = min_chunk
Project: pupy    Author: ru-faraon    | Project source | File source
def __init__(self, dstconf, transport_class, transport_kwargs):
        super(PupyAsyncStream, self).__init__()
        self.active=True
        #buffers for streams
        self.buf_in=Buffer()
        self.buf_out=Buffer()
        self.buf_tmp=Buffer()
        self.cookie=''.join(random.SystemRandom().choice("abcdef0123456789") for _ in range(32))
        self.buf_in.cookie=self.cookie
        self.buf_out.cookie=self.cookie
        self.buf_tmp.cookie=self.cookie
        #buffers for transport
        self.upstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.downstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.upstream_lock=multiprocessing.Lock()
        self.downstream_lock=multiprocessing.Lock()
        self.transport=transport_class(self, **transport_kwargs)

        self.max_pull_interval=2
        self.pull_interval=0
        self.pull_event=multiprocessing.Event()
        self.MAX_IO_CHUNK=32000*100 # ~3 MB, since this is an async transport

        self.client_side=self.transport.client
        if self.client_side:
            self.poller_thread=multiprocessing.Process(target=self.poller_loop)
            self.poller_thread.daemon=True
            self.poller_thread.start()
        self.on_connect()
Project: pupy    Author: ru-faraon    | Project source | File source
def __init__(self, transport_class, transport_kwargs):
        self.bufin=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.bufout=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.upstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.downstream=Buffer(transport_func=addGetPeer(("127.0.0.1", 443)))
        self.transport=transport_class(self, **transport_kwargs)
        self.lockin=multiprocessing.Lock()
        self.lockout=multiprocessing.Lock()