Python multiprocessing module: Array() usage examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use multiprocessing.Array().
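
Before the project excerpts, here is a minimal, self-contained sketch of the core pattern they all build on: allocate a typed array in shared memory, hand it to a child process, and optionally view the same buffer through numpy. The names below are illustrative and not taken from any of the projects:

import multiprocessing
import numpy as np

def _double_all(shared):
    # Runs in the child process; the Array lives in shared memory,
    # so in-place writes are visible to the parent.
    with shared.get_lock():            # Array() carries a lock unless lock=False
        for i in range(len(shared)):
            shared[i] *= 2

if __name__ == '__main__':
    shared = multiprocessing.Array('d', range(10))     # typecode 'd' = C double
    p = multiprocessing.Process(target=_double_all, args=(shared,))
    p.start()
    p.join()
    view = np.frombuffer(shared.get_obj())              # zero-copy numpy view
    print(list(shared), view)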

Project: ouroboros    Author: pybee    | project source | file source
def test_sharedctypes(self, lock=False):
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))
Project: bmlingam    Author: taku-y    | project source | file source
def _create_cache_shared(xs, hparamss):
    """Create shared cache. 
    """
    cache_source = create_cache_source(xs, hparamss)
    cache_shared = {}

    # for k, v in cache_source.iteritems():
    for k, v in six.iteritems(cache_source):
        assert(v.dtype == np.float32 or v.dtype == np.float64 or 
               v.dtype == float)
        n = len(v.reshape(-1))
        shared_array_base = multiprocessing.Array(ctypes.c_double, n)
        shape = v.shape

        view = np.ctypeslib.as_array(shared_array_base.get_obj())
        view = view.reshape(shape)
        view[:] = v[:]
        del view

        cache_shared.update({k: (shared_array_base, shape)})

    return cache_shared
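
The excerpt only builds the cache; a worker receiving cache_shared would presumably rebuild the numpy view the same way. That reverse step is not part of the bmlingam code shown here, so the helper below is an assumption (it relies only on numpy, imported as np as in the excerpt):

def _read_cache_shared(cache_shared, key):
    # Hypothetical helper (not from bmlingam): wrap the shared buffer back
    # into a writable numpy view with the original shape.
    shared_array_base, shape = cache_shared[key]
    return np.ctypeslib.as_array(shared_array_base.get_obj()).reshape(shape)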
Project: pymp    Author: classner    | project source | file source
def array(shape, dtype=_np.float64, autolock=False):
    """Factory method for shared memory arrays supporting all numpy dtypes."""
    assert _NP_AVAILABLE, (
        "To use the shared array object, numpy must be available!")
    if not isinstance(dtype, _np.dtype):
        dtype = _np.dtype(dtype)
    # Not bothering to translate the numpy dtypes to ctype types directly,
    # because they're only partially supported. Instead, create a byte ctypes
    # array of the right size and use a view of the appropriate datatype.
    shared_arr = _multiprocessing.Array(
        'b', int(_np.prod(shape) * dtype.alignment), lock=autolock)
    with _warnings.catch_warnings():
        # For more information on why this is necessary, see
        # https://www.reddit.com/r/Python/comments/j3qjb/parformatlabpool_replacement
        _warnings.simplefilter('ignore', RuntimeWarning)
        data = _np.ctypeslib.as_array(shared_arr).view(dtype).reshape(shape)
    return data
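
A hedged usage sketch of the factory defined above; it assumes the caller has numpy imported as np and that the processes that later touch the array are forked, so they inherit the shared buffer:

shared = array((4, 3), dtype=np.float32)   # calls the factory above
shared[0, :] = 1.0                         # ordinary numpy indexing on the shared view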
Project: mx    Author: graalvm    | project source | file source
def initSharedMemoryState(self):
        ProjectBuildTask.initSharedMemoryState(self)
        self._newestBox = multiprocessing.Array('c', 2048)
Project: Dragonfly    Author: duaneloh    | project source | file source
def predict(self, event=None):
        try:
            first = int(self.predict_first.text())
            last = int(self.predict_last.text())
            num_proc = int(self.num_proc.text())
        except ValueError:
            sys.stderr.write('Integers only\n')
            return

        if last < 0:
            last = self.parent.num_frames
        if self.get_and_convert(first).shape[0] != self.parent.converted.shape[1]:
            sys.stderr.write('Wrong length for converted image (expected %d, got %d). You may need to update converter.\n' %
                (self.parent.converted.shape[1], self.get_and_convert(first).shape[0]))
            return

        predictions = multiprocessing.Array(ctypes.c_char, self.parent.num_frames)
        jobs = []
        for i in range(num_proc):
            p = multiprocessing.Process(target=self.predict_worker, args=(i, num_proc, np.arange(first, last, dtype='i4'), predictions))
            jobs.append(p)
            p.start()
        for j in jobs:
            j.join()
        sys.stderr.write('\r%d/%d\n' % (last, last))

        self.predictions = np.frombuffer(predictions.get_obj(), dtype='S1')
        self.gen_predict_summary()
Project: Dragonfly    Author: duaneloh    | project source | file source
def convert_frames(self, event=None):
        try:
            start = int(self.first_frame.text())
            end = int(self.last_frame.text())
            num_proc = int(self.num_proc.text())
        except ValueError:
            sys.stderr.write('Integers only for frame range and number of processors\n')
            return

        self.indices = np.arange(start, end, dtype='i4')
        clist = self.parent.classes.clist[start:end]
        if self.class_chars.text() != '':
            sel = np.array([clist==c for c in self.class_chars.text()]).any(axis=0)
            self.indices = self.indices[sel]
        if len(self.indices) == 0:
            sys.stderr.write('No frames of class %s in frame range\n'%self.class_chars.text())
            return
        else:
            sys.stderr.write('Converting %d frames with %d processors\n' % (len(self.indices), num_proc))

        arr = self.get_and_convert(0)
        converted = multiprocessing.Array(ctypes.c_double, arr.size*len(self.indices))
        jobs = []
        for i in range(num_proc):
            p = multiprocessing.Process(target=self.convert_worker, args=(i, num_proc, self.indices, arr.size, converted))
            jobs.append(p)
            p.start()
        for j in jobs:
            j.join()
        sys.stderr.write('\r%d/%d\n' % (len(self.indices), len(self.indices)))

        self.parent.converted = np.frombuffer(converted.get_obj()).reshape(len(self.indices), -1)
        if self.save_flag.isChecked():
            sys.stderr.write('Saving angular correlations to %s\n'%self.save_fname.text())
            np.save(self.save_fname.text(), self.parent.converted)
Project: Dragonfly    Author: duaneloh    | project source | file source
def class_powder(self, event=None):
        cnum = self.class_num.checkedId() - 1
        if cnum == self.old_cnum:
            powder = self.class_powder
        elif cnum == -1:
            powder = self.emc_reader.get_powder()
            self.class_powder = powder
            self.old_cnum = cnum
        else:
            points = np.where(self.classes.key_pos == cnum)[0]
            num_proc = int(self.num_proc.text())
            powders = multiprocessing.Array(ctypes.c_double, num_proc*self.parent.geom.mask.size)
            pshape = (num_proc,) + self.parent.geom.mask.shape 
            print 'Calculating powder sum for class %s using %d threads' % (self.class_num.checkedButton().text(), num_proc)
            jobs = []
            for i in range(num_proc):
                p = multiprocessing.Process(target=self.powder_worker, args=(i, points[i::num_proc], pshape, powders))
                jobs.append(p)
                p.start()
            for j in jobs:
                j.join()
            sys.stderr.write('\r%d/%d\n'%(len(points), len(points)))
            powder = np.frombuffer(powders.get_obj()).reshape(pshape).sum(0)
            self.class_powder = powder
            self.old_cnum = cnum
        self.plot_frame(frame=powder)
Project: annotated-py-sanic    Author: hhstore    | project source | file source
def skip_test_multiprocessing():
    app = Sanic('test_json')

    response = Array('c', 50)
    @app.route('/')
    async def handler(request):
        return json({"test": True})

    stop_event = Event()
    async def after_start(*args, **kwargs):
        http_response = await local_request('get', '/')
        response.value = http_response.text.encode()
        stop_event.set()

    def rescue_crew():
        sleep(5)
        stop_event.set()

    rescue_process = Process(target=rescue_crew)
    rescue_process.start()

    app.serve_multiple({
        'host': HOST,
        'port': PORT,
        'after_start': after_start,
        'request_handler': app.handle_request,
        'request_max_size': 100000,
    }, workers=2, stop_event=stop_event)

    rescue_process.terminate()

    try:
        results = json_loads(response.value)
    except:
        raise ValueError("Expected JSON response but got '{}'".format(response))

    assert results.get('test') == True
Project: osm_rg    Author: Scitator    | project source | file source
def __init__(self, data_list, leafsize=30):
        data = np.array(data_list)
        n, m = data.shape
        self.shmem_data = mp.Array(ctypes.c_double, n * m)

        _data = shmem_as_nparray(self.shmem_data).reshape((n, m))
        _data[:, :] = data

        self._leafsize = leafsize
        super(cKDTree_MP, self).__init__(_data, leafsize=leafsize)
Project: osm_rg    Author: Scitator    | project source | file source
def pquery(self, x_list, k=1, eps=0, p=2,
               distance_upper_bound=np.inf):
        x = np.array(x_list)
        nx, mx = x.shape
        shmem_x = mp.Array(ctypes.c_double, nx * mx)
        shmem_d = mp.Array(ctypes.c_double, nx * k)
        shmem_i = mp.Array(ctypes.c_double, nx * k)

        _x = shmem_as_nparray(shmem_x).reshape((nx, mx))
        _d = shmem_as_nparray(shmem_d).reshape((nx, k))

        _i = shmem_as_nparray(shmem_i)
        if k != 1:
            _i = _i.reshape((nx, k))

        _x[:, :] = x

        nprocs = num_cpus()
        scheduler = Scheduler(nx, nprocs)

        ierr = mp.Value(ctypes.c_int, 0)

        query_args = (scheduler,
                      self.shmem_data, self.n, self.m, self.leafsize,
                      shmem_x, nx, shmem_d, shmem_i,
                      k, eps, p, distance_upper_bound,
                      ierr
                      )
        pool = [mp.Process(target=_pquery, args=query_args) for n in
                range(nprocs)]
        for p in pool: p.start()
        for p in pool: p.join()
        if ierr.value != 0:
            raise RuntimeError('%d errors in worker processes' % (ierr.value))

        return _d.copy(), _i.astype(int).copy()
Project: aiotools    Author: achimnol    | project source | file source
def test_server_extra_proc(set_timeout, restore_signal):

    extras = mp.Array('i', [0, 0])

    def extra_proc(key, _, pidx, args):
        assert _ is None
        extras[key] = 980 + key
        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            print(f'extra[{key}] interrupted', file=sys.stderr)
        except Exception as e:
            print(f'extra[{key}] exception', e, file=sys.stderr)
        finally:
            print(f'extra[{key}] finish', file=sys.stderr)
            extras[key] = 990 + key

    @aiotools.actxmgr
    async def myworker(loop, pidx, args):
        yield

    def interrupt():
        os.kill(0, signal.SIGINT)

    set_timeout(0.2, interrupt)
    aiotools.start_server(myworker, extra_procs=[
                              functools.partial(extra_proc, 0),
                              functools.partial(extra_proc, 1)],
                          num_workers=3, args=(123, ))

    assert extras[0] == 990
    assert extras[1] == 991
Project: DataMining    Author: lidalei    | project source | file source
def find_nearest_instances(training_data_instances, training_data_labels, test_data_instances, test_data_labels):
    start_time = time.time()
    # speed using multiple processes
    NUMBER_OF_PROCESSES = 4
    processes = []
    # shared by different processes, to be mentioned is that
    # global variable is only read within processes
    # the update of global variable within a process will not be submitted 
    classified_results = multiprocessing.Array('i', len(test_data_instances), lock = False)
    test_data_subdivisions = range(0, len(test_data_instances) + 1,\
                                    int(len(test_data_instances) / NUMBER_OF_PROCESSES))
    test_data_subdivisions[-1] = len(test_data_instances)
    for process_index in range(NUMBER_OF_PROCESSES):
        process = multiprocessing.Process(target = find_nearest_instances_subprocess,
                                          args = (training_data_instances,
                                                  training_data_labels,
                                                  test_data_instances,
                                                  test_data_subdivisions[process_index],
                                                  test_data_subdivisions[process_index + 1],
                                                  classified_results))
        process.start()
        processes.append(process)

    print "Waiting..."
    # wait until all processes are finished
    for process in processes:
        process.join()
    print "Complete."
    print "--- %s seconds ---" % (time.time() - start_time)

    error_count = 0
    confusion_matrix = np.zeros((10, 10), dtype=np.int)
    for test_instance_index, classified_label in zip(range(len(test_data_instances)),\
                                                      classified_results):        
        if test_data_labels[test_instance_index] != classified_label:
            error_count += 1
        confusion_matrix[test_data_labels[test_instance_index]][classified_label] += 1        

    error_rate = 100.0 * error_count / len(test_data_instances)

    return classified_results, error_rate, confusion_matrix
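
The worker function find_nearest_instances_subprocess is not shown in this excerpt. Purely as an illustration of the pattern (each process writing only its own slice of the lock-free 'i' Array, assuming numpy is imported as np as in the surrounding module), a hypothetical worker might look like the sketch below; it is not the project's actual implementation:

def classify_range_worker(train_x, train_y, test_x, begin, end, results):
    # Hypothetical 1-nearest-neighbour worker: writes only into results[begin:end],
    # so the lock=False shared Array needs no synchronization.
    for i in range(begin, end):
        diffs = train_x - test_x[i]
        nearest = np.argmin((diffs * diffs).sum(axis=1))
        results[i] = int(train_y[nearest])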
Project: cebl    Author: idfah    | project source | file source
def __init__(self, mgr, sampRate=128,
                 chans=[str(n)+'x' for n in np.power(2, np.arange(8))/2.0],
                 waveform='sinusoid', freq=1.0, mix='none', pollSize=2):
        """
        Construct a new wave generator source.

        Args:
            sampRate:   Floating point value of the initial sampling frequency.

            chans:      Tuple of strings containing the initial channel
                        configuration.

            waveform:   String describing the type of waveform to produce.
                        May be 'sinusoid' or 'sawtooth' or 'square'

            freq:       Base frequency.  Each channel is a power-of-two
                        multiple of this frequency.

            pollSize:   Number of data samples collected during each poll.
                        Lower values result in better timing and marker
                        resolution but more CPU usage, while higher values
                        typically use less CPU but give worse timing resolution.
        """

        self.waveform = mp.Value('I', 0)
        self.freq = mp.Value('d', freq)
        self.t0 = mp.Value('d', 0.0)
        self.t0.value = 0.0
        self.pollSize = pollSize
        self.lock = mp.Lock()

        Source.__init__(self, mgr=mgr, sampRate=sampRate, chans=chans,
            configPanelClass=WaveGenConfigPanel)

        self.setWaveform(waveform)

        self.mixArr = mp.Array('d', self.getNChan()*self.getNChan())
        self.mixMat = (np.frombuffer(self.mixArr.get_obj())
                        .reshape((-1,self.getNChan())))
        self.setMix(mix)
Project: cebl    Author: idfah    | project source | file source
def initWalk(self):
        self.walk0Array = mp.Array('d', self.getNChan())
        self.walk0 = np.frombuffer(self.walk0Array.get_obj())
        self.walk0[:] = 0.0 # set start of random walk to zero
Project: tensorflow_yolo2    Author: wenxichen    | project source | file source
def prepare_multithread(self):
        """Preperation for mutithread processing."""

        self.reset = False
        # num_batch_left should always be -1 until the last batch block of the epoch
        self.num_batch_left = -1
        self.num_child = 10
        self.child_processes = [None] * self.num_child
        self.batch_cursor_read = 0
        self.batch_cursor_fetched = 0
        # TODO: add this to cfg file
        self.prefetch_size = 5  # in terms of batch
        # TODO: may not need readed_batch after validating everything
        self.read_batch_array_size = self.total_batch + self.prefetch_size * self.batch_size
        self.readed_batch = Array('i', self.read_batch_array_size)
        for i in range(self.read_batch_array_size):
            self.readed_batch[i] = 0
        self.prefetched_images = np.zeros((self.batch_size * self.prefetch_size
                                           * self.num_child,
                                           self.image_size, self.image_size, 3))
        self.prefetched_labels = np.zeros(
            (self.batch_size * self.prefetch_size * self.num_child))
        self.queue_in = []
        self.queue_out = []
        for i in range(self.num_child):
            self.queue_in.append(Queue())
            self.queue_out.append(Queue())
            self.start_process(i)
            self.start_prefetch(i)

        # fetch the first one
        desc = 'receive the first half: ' + \
            str(self.num_child * self.prefetch_size / 2) + ' batches'
        for i in trange(self.num_child / 2, desc=desc):
            #     print "collecting", i
            self.collect_prefetch(i)
Project: tensorflow_yolo2    Author: wenxichen    | project source | file source
def prepare_multithread(self):
        """Preperation for mutithread processing."""

        self.reset = False
        # num_batch_left should always be -1 until the last batch block of the epoch
        self.num_batch_left = -1
        self.num_child = 10
        self.child_processes = [None] * self.num_child
        self.batch_cursor_read = 0
        self.batch_cursor_fetched = 0
        # TODO: add this to cfg file
        self.prefetch_size = 5  # in terms of batch
        # TODO: may not need readed_batch after validating everything
        self.read_batch_array_size = self.total_batch + self.prefetch_size * self.batch_size
        self.readed_batch = Array('i', self.read_batch_array_size)
        for i in range(self.read_batch_array_size):
            self.readed_batch[i] = 0
        self.prefetched_images = np.zeros((self.batch_size * self.prefetch_size
                                           * self.num_child,
                                           self.image_size, self.image_size, 3))
        self.prefetched_labels = np.zeros(
            (self.batch_size * self.prefetch_size * self.num_child))
        self.queue_in = []
        self.queue_out = []
        for i in range(self.num_child):
            self.queue_in.append(Queue())
            self.queue_out.append(Queue())
            self.start_process(i)
            self.start_prefetch(i)

        # fetch the first one
        desc = 'receive the first half: ' + \
            str(self.num_child * self.prefetch_size / 2) + ' batches'
        for i in trange(self.num_child / 2, desc=desc):
            #     print "collecting", i
            self.collect_prefetch(i)
Project: shelter    Author: seznam    | project source | file source
def __init__(self, config):
        self._value = multiprocessing.Array(ctypes.c_char, 8)
        super(Context, self).__init__(config)
Project: cellstar    Author: Fafa87    | project source | file source
def conv_single_image(image):
    shared_array_base = Array(ctypes.c_double, image.size)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(image.shape)
    shared_array[:] = image

    return shared_array
Project: icing    Author: slipguru    | project source | file source
def _dense_distance_dual(lock, list1, list2, global_idx, shared_arr, dist_function):
    """Parallelize a general computation of a distance matrix.

    Parameters
    ----------
    lock : multiprocessing.synchronize.Lock
        Value returned from multiprocessing.Lock().
    input_list : list
        List of values to compare to input_list[idx] (from 'idx' on).
    shared_arr : array_like
        Numpy array created as a shared object. Iteratively updated with the
        result.
        Example:
            shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n,n))

    Returns
    -------

    """
    list_len = len(list1)
    # PID = os.getpid()
    # print("PID {} takes index {}".format(PID, index_i))
    while global_idx.value < list_len:
        with lock:
            if not global_idx.value < list_len: return
            idx = global_idx.value
            global_idx.value += 1
            # if idx % 100 == 0: progressbar(idx, list_len)
        elem_1 = list1[idx]
        for idx_j in range(len(list2)):
            shared_arr[idx, idx_j] = dist_function(elem_1, list2[idx_j])
Project: icing    Author: slipguru    | project source | file source
def dense_dm_dual(list1, list2, dist_function, condensed=False):
    """Compute in a parallel way a distance matrix for a 1-d array.

    Parameters
    ----------
    input_array : array_like
        1-dimensional array for which to compute the distance matrix.
    dist_function : function
        Function to use for the distance computation.

    Returns
    -------
    dist_matrix : array_like
        Symmetric NxN distance matrix for each input_array element.
    """
    n, m = len(list1), len(list2)
    n_proc = min(mp.cpu_count(), n)
    index = mp.Value('i', 0)
    shared_array = np.frombuffer(mp.Array('d', n*m).get_obj()).reshape((n,m))
    ps = []
    lock = mp.Lock()
    try:
        for _ in range(n_proc):
            p = mp.Process(target=_dense_distance_dual,
                        args=(lock, list1, list2, index, shared_array, dist_function))
            p.start()
            ps.append(p)

        for p in ps:
            p.join()
    except (KeyboardInterrupt, SystemExit): _terminate(ps,'Exit signal received\n')
    except Exception as e: _terminate(ps,'ERROR: %s\n' % e)
    except: _terminate(ps,'ERROR: Exiting with unknown exception\n')

    dist_matrix = shared_array.flatten() if condensed else shared_array
    # progressbar(n,n)
    return dist_matrix
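
As written, the sharing works because the numpy view is built in the parent and inherited by forked children; under the 'spawn' start method the view passed in args would be pickled by value, so the workers would write into private copies. Below is a minimal sketch of a more portable variant that passes the Array itself and rebuilds the view in each worker (assuming the same mp/np imports as the excerpt):

def _fill_row(shared_arr, n, m, row):
    # Rebuild the numpy view inside the worker so writes land in shared memory
    # regardless of the start method.
    dm = np.frombuffer(shared_arr.get_obj()).reshape((n, m))
    dm[row, :] = row                     # stand-in for dist_function results

if __name__ == '__main__':
    n, m = 3, 4
    shared_arr = mp.Array('d', n * m)
    workers = [mp.Process(target=_fill_row, args=(shared_arr, n, m, r)) for r in range(n)]
    for w in workers: w.start()
    for w in workers: w.join()
    print(np.frombuffer(shared_arr.get_obj()).reshape((n, m)))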
Project: icing    Author: slipguru    | project source | file source
def _dense_distance(lock, input_list, global_idx, shared_arr, dist_function):
    """Parallelize a general computation of a distance matrix.

    Parameters
    ----------
    lock : multiprocessing.synchronize.Lock
        Value returned from multiprocessing.Lock().
    input_list : list
        List of values to compare to input_list[idx] (from 'idx' on).
    shared_arr : array_like
        Numpy array created as a shared object. Iteratively updated with the result.
        Example:
            shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n,n))

    Returns
    -------

    """
    list_len = len(input_list)
    # PID = os.getpid()
    # print("PID {} takes index {}".format(PID, index_i))
    while global_idx.value < list_len:
        with lock:
            if not global_idx.value < list_len: return
            idx = global_idx.value
            global_idx.value += 1
            if (idx) % 100 == 0: progressbar(idx, list_len)

        elem_1 = input_list[idx]
        for idx_j in range(idx+1, list_len):
            shared_arr[idx, idx_j] = dist_function(elem_1, input_list[idx_j])
Project: icing    Author: slipguru    | project source | file source
def dense_dm(input_array, dist_function, condensed=False):
    """Compute in a parallel way a distance matrix for a 1-d array.

    Parameters
    ----------
    input_array : array_like
        1-dimensional array for which to compute the distance matrix.
    dist_function : function
        Function to use for the distance computation.

    Returns
    -------
    dist_matrix : array_like
        Symmetric NxN distance matrix for each input_array element.
    """
    n = len(input_array)
    n_proc = min(mp.cpu_count(), n)
    index = mp.Value('i', 0)
    shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n,n))
    # np.savetxt("shared_array", shared_array, fmt="%.2f", delimiter=',')
    ps = []
    lock = mp.Lock()
    try:
        for _ in range(n_proc):
            p = mp.Process(target=_dense_distance,
                        args=(lock, input_array, index, shared_array, dist_function))
            p.start()
            ps.append(p)

        for p in ps:
            p.join()
    except (KeyboardInterrupt, SystemExit): _terminate(ps,'Exit signal received\n')
    except Exception as e: _terminate(ps,'ERROR: %s\n' % e)
    except: _terminate(ps,'ERROR: Exiting with unknown exception\n')

    dist_matrix = shared_array + shared_array.T
    if condensed: dist_matrix = scipy.spatial.distance.squareform(dist_matrix)
    progressbar(n,n)
    return dist_matrix
Project: icing    Author: slipguru    | project source | file source
def _sparse_distance(lock, input_list, global_idx, rows, cols, data, dist_function):
    """Parallelize a general computation of a sparse distance matrix.

    Parameters
    ----------
    lock : multiprocessing.synchronize.Lock
        Value returned from multiprocessing.Lock().
    input_list : list
        List of values to compare to input_list[idx] (from 'idx' on).
    shared_arr : array_like
        Numpy array created as a shared object. Iteratively updated with the result.
        Example:
            shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n,n))

    Returns
    -------

    """
    list_len = len(input_list)
    # PID = os.getpid()
    # print("PID {} takes index {}".format(PID, index_i))
    while global_idx.value < list_len:
        with lock:
            if not global_idx.value < list_len: return
            idx = global_idx.value
            global_idx.value += 1
            if (idx) % 100 == 0: progressbar(idx, list_len)

        elem_1 = input_list[idx]
        for idx_j in range(idx+1, list_len):
             _res = dist_function(elem_1, input_list[idx_j])
             if _res > 0:
                 i, j, d = idx, idx_j, list_len
                 c_idx = d*(d-1)/2 - (d-i)*(d-i-1)/2 + j - i - 1
                 data[c_idx] = _res
                 rows[c_idx] = i
                 cols[c_idx] = j
Project: icing    Author: slipguru    | project source | file source
def _sparse_distance_opt(lock, input_list, global_idx, rows, cols, data, func):
    """Parallelize a general computation of a sparse distance matrix.

    Parameters
    ----------
    lock : multiprocessing.synchronize.Lock
        Value returned from multiprocessing.Lock().
    input_list : list
        List of values to compare to input_list[idx] (from 'idx' on).
    shared_arr : array_like
        Numpy array created as a shared object. Iteratively updated with the
        result.
        Example:
            shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n,n))

    Returns
    -------

    """
    list_len = input_list.shape[0]
    # PID = os.getpid()
    # print("PID {} takes index {}".format(PID, index_i))
    while global_idx.value < list_len:
        with lock:
            if not global_idx.value < list_len: return
            idx = global_idx.value
            global_idx.value += 1
            if (idx) % 100 == 0: progressbar(idx, list_len)

        for i in range(idx, list_len-1):
             _res = func(input_list[i], input_list[i + 1])
             if _res > 0:
                 j, d = i+1, list_len
                 c_idx = d*(d-1)/2 - (d-i)*(d-i-1)/2 + j - i - 1
                 data[c_idx] = _res
                 rows[c_idx] = i
                 cols[c_idx] = j
Project: TICC    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TICC    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
Project: TICC    Author: davidhallac    | project source | file source
def writeValue(sharedarr, index, nparr, length):
    if length == 1:
        nparr = [nparr]
    sharedarr[index:(index + length)] = nparr

# Write the values for all of the Variables involved in a given Objective to
# the given shared Array.
# variables should be an entry from the node_values structure.
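
Taken together, getValue and writeValue treat one flat shared Array as a pool of fixed-size slots addressed by (index, length). A small usage sketch exercising just the two helpers above (the array name is illustrative):

import multiprocessing
import numpy

node_vals = multiprocessing.Array('d', 6)                 # flat pool of doubles
writeValue(node_vals, 2, numpy.array([1.0, 2.0, 3.0]), 3)
print(getValue(node_vals, 2, 3))                          # -> [1. 2. 3.]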
Project: fabricio    Author: renskiy    | project source | file source
def once_per_command(func=None, block=False, default=None):
    if func is None:
        return functools.partial(once_per_command, block=block, default=default)

    @functools.wraps(func)
    def _func(*args, **kwargs):
        lock = last_hash.get_lock()
        if lock.acquire(block):
            try:
                command = fab.env.command or ''
                infrastructure = fab.env.infrastructure or ''
                current_session = hashlib.md5()
                current_session.update(command.encode('utf-16be'))
                current_session.update(infrastructure.encode('utf-16be'))
                for host in fab.env.all_hosts:
                    current_session.update(host.encode('utf-16be'))
                current_hash = current_session.digest()
                if current_hash != last_hash.raw:
                    last_hash.raw = current_hash
                    return func(*args, **kwargs)
                return default
            finally:
                lock.release()

    last_hash = multiprocessing.Array(ctypes.c_char, hashlib.md5().digest_size)
    return _func
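
A hedged usage sketch of the decorator above; the task is hypothetical, the point being that the body runs once per Fabric command/infrastructure/host-list combination even when several parallel workers invoke it:

@once_per_command
def migrate_database():
    # Hypothetical task (not from fabricio): executed a single time per command,
    # no matter how many hosts the command fans out to.
    ...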
Project: ouroboros    Author: pybee    | project source | file source
def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)
Project: ouroboros    Author: pybee    | project source | file source
def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr
Project: ouroboros    Author: pybee    | project source | file source
def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))

#
#
#
Project: bmlingam    Author: taku-y    | project source | file source
def _create_xs_shared(xs):
    """Create shared variable for data (xs).
    """
    n = len(xs.reshape(-1))
    xs_shared_base = multiprocessing.Array(ctypes.c_double, n)
    shape = xs.shape

    view = np.ctypeslib.as_array(xs_shared_base.get_obj())
    view = view.reshape(shape)
    view[:] = xs[:]
    del view

    xs_shared = (xs_shared_base, shape)

    return xs_shared
Project: ngram2vec    Author: zhezhaoa    | project source | file source
def init_net(size, words_num, contexts_num):
    tmp = np.random.uniform(low=-0.5/size, high=0.5/size, size=(words_num, size))
    syn0 = np.ctypeslib.as_ctypes(tmp)
    syn0 = Array(syn0._type_, syn0, lock=False)

    tmp = np.zeros(shape=(contexts_num, size))
    syn1 = np.ctypeslib.as_ctypes(tmp)
    syn1 = Array(syn1._type_, syn1, lock=False)
    return (syn0, syn1)
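
Because lock=False makes Array() return the raw ctypes object, callers can presumably wrap syn0 and syn1 back into zero-copy numpy views; the sketch below rests on that assumption and is not part of ngram2vec (numpy imported as np, as in the excerpt):

syn0, syn1 = init_net(size=100, words_num=5000, contexts_num=5000)
syn0_view = np.ctypeslib.as_array(syn0)   # shape (words_num, size), shares memory
syn1_view = np.ctypeslib.as_array(syn1)
syn0_view[0, :] += 0.1                    # visible to forked worker processes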
Project: gymexperiments    Author: tambetm    | project source | file source
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # create fifos and threads for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
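
The runner side is not shown in this excerpt; presumably it deserializes the weights from the same buffer. A sketch of that read path, assuming the buffer always holds one complete pickle written by the trainer:

import pickle

def load_shared_weights(shared_buffer):
    # Hypothetical counterpart to the writer above (not from gymexperiments):
    # for a 'c'-typed Array, .raw exposes the buffer contents as bytes.
    return pickle.loads(shared_buffer.raw)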
Project: urh    Author: jopohl    | project source | file source
def test_continuous_send_dialog(self):
        self.add_signal_to_form("esaver.complex")
        self.__add_first_signal_to_generator()

        port = self.__get_free_port()

        gframe = self.form.generator_tab_controller
        expected = np.zeros(gframe.total_modulated_samples, dtype=np.complex64)
        expected = gframe.modulate_data(expected)
        current_index = Value("L", 0)
        buffer = Array("f", 4 * len(expected))

        process = Process(target=receive, args=(port, current_index, 2*len(expected), buffer))
        process.daemon = True
        process.start()
        time.sleep(0.1)  # ensure server is up

        ContinuousModulator.BUFFER_SIZE_MB = 10

        continuous_send_dialog = self.__get_continuous_send_dialog()
        continuous_send_dialog.device.set_client_port(port)
        continuous_send_dialog.ui.spinBoxNRepeat.setValue(2)
        continuous_send_dialog.ui.btnStart.click()
        QTest.qWait(100)
        time.sleep(1)
        process.join(1)

        # CI sometimes swallows a sample
        self.assertGreaterEqual(current_index.value, len(expected)  - 1)

        buffer = np.frombuffer(buffer.get_obj(), dtype=np.complex64)
        for i in range(len(expected)):
            self.assertEqual(buffer[i], expected[i], msg=str(i))

        continuous_send_dialog.ui.btnStop.click()
        continuous_send_dialog.ui.btnClear.click()
        QTest.qWait(1)

        self.__close_dialog(continuous_send_dialog)
Project: urh    Author: jopohl    | project source | file source
def __init__(self, size: int):
        self.__data = Array("f", 2*size)
        self.size = size
        self.__left_index = Value("L", 0)
        self.__right_index = Value("L", 0)
        self.__length = Value("L", 0)
Project: urh    Author: jopohl    | project source | file source
def perform_filter(result_array: Array, data, f_low, f_high, filter_bw):
    result_array = np.frombuffer(result_array.get_obj(), dtype=np.complex64)
    result_array[:] = Filter.apply_bandpass_filter(data, f_low, f_high, filter_bw=filter_bw)
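
The urh snippets above and below rely on one layout detail: an Array("f", 2*n) of C floats and an n-element complex64 buffer are byte-for-byte identical (interleaved I/Q pairs), so np.frombuffer can reinterpret the shared buffer in place. A short round-trip sketch:

from multiprocessing import Array
import numpy as np

n = 4
shared = Array("f", 2 * n)                                   # interleaved I, Q, I, Q, ...
view = np.frombuffer(shared.get_obj(), dtype=np.complex64)   # n complex samples, zero copy
view[:] = np.arange(1, n + 1) + 1j * np.arange(1, n + 1)
print(shared[:2])                                            # first sample -> [1.0, 1.0]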
Project: urh    Author: jopohl    | project source | file source
def on_bandpass_filter_triggered(self, f_low: float, f_high: float):
        self.filter_abort_wanted = False

        QApplication.instance().setOverrideCursor(Qt.WaitCursor)
        filter_bw = Filter.read_configured_filter_bw()
        filtered = Array("f", 2 * self.signal.num_samples)
        p = Process(target=perform_filter, args=(filtered, self.signal.data, f_low, f_high, filter_bw))
        p.daemon = True
        p.start()

        while p.is_alive():
            QApplication.instance().processEvents()

            if self.filter_abort_wanted:
                p.terminate()
                p.join()
                QApplication.instance().restoreOverrideCursor()
                return

            time.sleep(0.1)

        filtered = np.frombuffer(filtered.get_obj(), dtype=np.complex64)
        signal = self.signal.create_new(new_data=filtered.astype(np.complex64))
        signal.name = self.signal.name + " filtered with f_low={0:.4n} f_high={1:.4n} bw={2:.4n}".format(f_low, f_high,
                                                                                                         filter_bw)
        self.signal_created.emit(signal)
        QApplication.instance().restoreOverrideCursor()
Project: TVGL    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TVGL    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
Project: TVGL    Author: davidhallac    | project source | file source
def writeValue(sharedarr, index, nparr, length):
    if length == 1:
        nparr = [nparr]
    sharedarr[index:(index + length)] = nparr

# Write the values for all of the Variables involved in a given Objective to
# the given shared Array.
# variables should be an entry from the node_values structure.
Project: TVGL    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TVGL    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
Project: TVGL    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TVGL    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
Project: TVGL    Author: davidhallac    | project source | file source
def writeValue(sharedarr, index, nparr, length):
    if length == 1:
        nparr = [nparr]
    sharedarr[index:(index + length)] = nparr

# Write the values for all of the Variables involved in a given Objective to
# the given shared Array.
# variables should be an entry from the node_values structure.
Project: TVGL    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TVGL    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
Project: TVGL    Author: davidhallac    | project source | file source
def SetRhoUpdateFunc(Func=None):
    global rho_update_func
    rho_update_func = Func if Func else __default_rho_update_func

# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
#   Information for each neighbor is two entries, appended in order.
#   Starting index of the corresponding z-value in edge_z_vals. Then for u.
Project: TVGL    Author: davidhallac    | project source | file source
def getValue(arr, index, length):
    return numpy.array(arr[index:(index + length)])

# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.