Python multiprocessing module: RawArray() example source code

We extracted the following 23 code examples from open-source Python projects to illustrate how to use multiprocessing.RawArray().
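
RawArray allocates a ctypes array in shared memory without the synchronization wrapper that multiprocessing.Array provides, which makes it cheap but leaves any locking to the caller. A minimal sketch of the basic pattern (the names below are illustrative):

import multiprocessing as mp

def double_all(arr):
    # Writes go directly into shared memory, so the parent sees them.
    for i in range(len(arr)):
        arr[i] *= 2

if __name__ == '__main__':
    shared = mp.RawArray('d', [1.0, 2.0, 3.0])  # 'd' = C double
    p = mp.Process(target=double_all, args=(shared,))
    p.start()
    p.join()
    print(list(shared))  # [2.0, 4.0, 6.0]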

Project: sporco    Author: bwohlberg    | project source | file source
def mpraw_as_np(shape, dtype):
    """Construct a numpy array of the specified shape and dtype for which the
    underlying storage is a multiprocessing RawArray in shared memory.

    Parameters
    ----------
    shape : tuple
      Shape of numpy array
    dtype : data-type
      Data type of array

    Returns
    -------
    arr : ndarray
      Numpy array
    """

    sz = int(np.prod(shape))
    csz = sz * np.dtype(dtype).itemsize
    raw = mp.RawArray('c', csz)
    return np.frombuffer(raw, dtype=dtype, count=sz).reshape(shape)
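
A hedged usage sketch for the helper above (the worker function is illustrative, and sharing this way relies on the fork start method: pickling the ndarray for a spawned process would copy the data rather than share it):

def fill_ones(arr):
    arr[:] = 1.0  # lands in the shared RawArray backing the view

if __name__ == '__main__':
    x = mpraw_as_np((4, 4), np.float64)
    p = mp.Process(target=fill_ones, args=(x,))
    p.start()
    p.join()
    assert (x == 1.0).all()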
Project: denet    Author: lachlants    | project source | file source
def __init__(self, shape, dtype = numpy.float32):
        num_elems = numpy.prod(shape)

        if dtype == numpy.int32:
            c_type = ctypes.c_int
        elif dtype == numpy.float32:
            c_type = ctypes.c_float
        elif dtype == numpy.float64:
            c_type = ctypes.c_double
        else:
            assert(0)

        #shared storage for numpy array
        self.shape = shape
        self.dtype = dtype
        self.base = mp.RawArray(c_type, int(num_elems))
        self.lock = mp.RLock()

    # overloaded operators for convenience
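
The snippet ends before the operators themselves; a hypothetical helper in the same spirit (an assumption, not the project's code) would expose the shared buffer as a numpy view:

    def as_numpy(self):
        # frombuffer shares memory with self.base rather than copying it
        return numpy.frombuffer(self.base, dtype=self.dtype).reshape(self.shape)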
Project: a3c    Author: hercky    | project source | file source
def extract_params_as_shared_arrays(model):
    """
    Converts model parameters to shared arrays.
    """
    # can get in the form of list -> shared + policy + value
    shared_arrays = []

    weights_dict = model.get_all_weights()
    weight_list = []

    for k,v in weights_dict.items():
        weight_list += v

    for weights in weight_list:
        shared_arrays.append(mp.RawArray('f', weights.ravel()))
    return shared_arrays
Project: lustre_task_driven_monitoring_framework    Author: GSI-HPC    | project source | file source
def __init__(self):

        # # RETURNS STDOUT: self._state = "TEXT" + str(NUMBER)
        # # RETURNS BAD VALUE: self._timestamp.value = 1234567890.99
        # self._state = multiprocessing.RawValue(ctypes.c_char_p)
        # self._ost_name = multiprocessing.RawValue(ctypes.c_char_p)
        # self._timestamp = multiprocessing.RawValue(ctypes.c_float)

        self._state = multiprocessing.RawValue(ctypes.c_int, WorkerState.NOT_READY)
        self._ost_name = multiprocessing.RawArray('c', 64)
        self._timestamp = multiprocessing.RawValue(ctypes.c_uint, 0)
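
The commented-out attempts document real pitfalls: RawValue(ctypes.c_char_p) stores a raw pointer, which is meaningless in another process's address space, and ctypes.c_float lacks the precision to hold a value like 1234567890.99, hence the fixed-size character array and the c_uint timestamp. A standalone usage sketch (illustrative; ctypes character arrays expose .value and .raw):

ost_name = multiprocessing.RawArray('c', 64)
ost_name.value = b'OST0000'       # must fit within the 64-byte buffer
print(ost_name.value.decode())    # 'OST0000'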
Project: ringbuffer    Author: bslatkin    | project source | file source
def __init__(self, *, slot_bytes, slot_count):
        """Initializer.

        Args:
            slot_bytes: How big each buffer in the array should be.
            slot_count: How many buffers should be in the array.
        """
        self.slot_bytes = slot_bytes
        self.slot_count = slot_count
        self.length_bytes = 4
        slot_type = ctypes.c_byte * (slot_bytes + self.length_bytes)
        self.array = multiprocessing.RawArray(slot_type, slot_count)
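
Each slot is thus a (slot_bytes + 4)-byte ctypes array whose first four bytes can carry the payload length. A sketch of filling one slot (a hypothetical helper, not the project's code):

import struct

def write_slot(array, index, payload):
    # Prefix the payload with its length, then copy the bytes in;
    # the payload must fit within slot_bytes.
    view = memoryview(array[index]).cast('B')
    view[:4] = struct.pack('<I', len(payload))
    view[4:4 + len(payload)] = payload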
Project: chainerrl    Author: chainer    | project source | file source
def extract_params_as_shared_arrays(link):
    assert isinstance(link, chainer.Link)
    shared_arrays = {}
    for param_name, param in link.namedparams():
        shared_arrays[param_name] = mp.RawArray('f', param.data.ravel())
    return shared_arrays
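
The reverse direction (a hedged sketch, not chainerrl's actual API, assuming numpy is imported as np) rebinds each parameter's memory onto the shared buffers so every process sees the same weights:

def set_params_from_shared_arrays(link, shared_arrays):
    for param_name, param in link.namedparams():
        param.data = np.frombuffer(
            shared_arrays[param_name],
            dtype=np.float32).reshape(param.data.shape)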
Project: chainerrl    Author: chainer    | project source | file source
def extract_states_as_shared_arrays(optimizer):
    assert isinstance(optimizer, chainer.Optimizer)
    assert hasattr(optimizer, 'target'), 'Optimizer.setup must be called first'
    shared_arrays = {}
    for param_name, param in optimizer.target.namedparams():
        shared_arrays[param_name] = {}
        ensure_initialized_update_rule(param)
        state = param.update_rule.state
        for state_name, state_val in state.items():
            shared_arrays[param_name][
                state_name] = mp.RawArray('f', state_val.ravel())
    return shared_arrays
Project: sporco    Author: bwohlberg    | project source | file source
def init_mpraw(mpv, npv):
    """Set a global variable as a multiprocessing RawArray in shared
    memory with a numpy array wrapper and initialise its value.

    Parameters
    ----------
    mpv : string
      Name of global variable to set
    npv : ndarray
      Numpy array to use as initialiser for global variable value
    """

    globals()[mpv] = mpraw_as_np(npv.shape, npv.dtype)
    globals()[mpv][:] = npv
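
A possible usage pattern (illustrative; it assumes mpraw_as_np from the sporco snippet above is in scope and relies on the fork start method, so that pool workers inherit the module-level global):

def worker(i):
    return X[i].sum()  # X is the global created by init_mpraw

if __name__ == '__main__':
    init_mpraw('X', np.ones((8, 8)))
    with mp.Pool(4) as pool:
        print(pool.map(worker, range(8)))  # [8.0, 8.0, ...]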
Project: async-rl    Author: muupan    | project source | file source
def extract_params_as_shared_arrays(link):
    assert isinstance(link, chainer.Link)
    shared_arrays = {}
    for param_name, param in link.namedparams():
        shared_arrays[param_name] = mp.RawArray('f', param.data.ravel())
    return shared_arrays
Project: async-rl    Author: muupan    | project source | file source
def extract_states_as_shared_arrays(optimizer):
    assert isinstance(optimizer, chainer.Optimizer)
    assert hasattr(optimizer, 'target'), 'Optimizer.setup must be called first'
    shared_arrays = {}
    for state_name, state in optimizer._states.items():
        shared_arrays[state_name] = {}
        for param_name, param in state.items():
            shared_arrays[state_name][
                param_name] = mp.RawArray('f', param.ravel())
    return shared_arrays
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def createSharedNumpyArray(dimensions, ctype = ctypes.c_double):
  # create array in shared memory segment
  shared_array_base = multiprocessing.RawArray(ctype, int(np.prod(dimensions)))

  # convert to numpy array via ctypeslib
  shared_array = np.ctypeslib.as_array(shared_array_base)

  return shared_array.reshape(dimensions)
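
Because np.ctypeslib.as_array keeps a reference to the underlying RawArray, the returned view behaves like any ndarray; under the fork start method a child process that inherits it writes into the same memory. Illustrative use:

frame = createSharedNumpyArray((480, 640))
frame[:] = 0.0  # zero the shared buffer before handing it to workers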
Project: pyndl    Author: quantling    | project source | file source
def _activation_matrix(indices_list, weights, number_of_threads):
    """
    Estimate activation for indices in weights

    Memory overhead for multiprocessing is one copy of weights
    plus a copy of cues for each thread.

    Parameters
    ----------
    indices_list : list[int]
        events as cue indices in weights
    weights : numpy.array
        weight matrix with shape (outcomes, cues)
    number_of_threads : int

    Returns
    -------
    activation_matrix : numpy.array
        estimated activations as matrix with shape (outcomes, events)

    """
    assert number_of_threads >= 1, "Can't run with fewer than 1 thread"

    activations_dim = (weights.shape[0], len(indices_list))
    if number_of_threads == 1:
        activations = np.empty(activations_dim, dtype=np.float64)
        for row, event_cues in enumerate(indices_list):
            activations[:, row] = weights[:, event_cues].sum(axis=1)
        return activations
    else:
        shared_activations = mp.RawArray(ctypes.c_double, int(np.prod(activations_dim)))
        weights = np.ascontiguousarray(weights)
        shared_weights = mp.sharedctypes.copy(np.ctypeslib.as_ctypes(np.float64(weights)))
        initargs = (shared_weights, weights.shape, shared_activations, activations_dim)
        with mp.Pool(number_of_threads, initializer=_init_mp_activation_matrix, initargs=initargs) as pool:
            pool.starmap(_run_mp_activation_matrix, enumerate(indices_list))
        activations = np.ctypeslib.as_array(shared_activations)
        activations.shape = activations_dim
        return activations
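
The pool initializer and worker referenced above are not shown here; a hypothetical reconstruction (names and details are assumptions inferred from initargs) rebuilds numpy views over the shared buffers in each worker process:

def _init_mp_activation_matrix(shared_weights_, weights_shape,
                               shared_activations_, activations_dim):
    global weights, activations
    weights = np.ctypeslib.as_array(shared_weights_)
    weights.shape = weights_shape
    activations = np.ctypeslib.as_array(shared_activations_)
    activations.shape = activations_dim

def _run_mp_activation_matrix(row, event_cues):
    # Same per-event computation as the single-threaded branch above.
    activations[:, row] = weights[:, event_cues].sum(axis=1)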
Project: tensorflow-rl    Author: steveKapturowski    | project source | file source
def malloc_contiguous(self, size, initial_val=None):
        if initial_val is None:
            return RawArray(ctypes.c_float, size)
        else:
            return RawArray(ctypes.c_float, initial_val)
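
Both branches work because RawArray's second argument is either an element count or an initializing sequence whose length fixes the array size:

zeros = RawArray(ctypes.c_float, 8)           # eight zero-initialized floats
warm = RawArray(ctypes.c_float, [0.5, 1.5])   # length 2, inferred from the values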
Project: tensorflow-rl    Author: steveKapturowski    | project source | file source
def __init__(self, num_actors):
        self.updated = RawArray(ctypes.c_int, num_actors)
Project: blog    Author: sopticek    | project source | file source
def test_can_pass_custom_create_storage(self):
        create_storage = functools.partial(
            multiprocessing.RawArray,
            ctypes.c_int
        )
        m = SM(3, create_storage)
        m[1, 2] = 5
        self.assertEqual(m[2, 1], 5)
Project: ouroboros    Author: pybee    | project source | file source
def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)
Project: ouroboros    Author: pybee    | project source | file source
def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))

Project: async-deep-rl    Author: traai    | project source | file source
def malloc_contiguous(self, size, initial_val=None):
        if initial_val is None:
            return RawArray(ctypes.c_float, size)
        else:
            return RawArray(ctypes.c_float, initial_val)
Project: async-deep-rl    Author: traai    | project source | file source
def __init__(self, num_actors):
        self.updated = RawArray(ctypes.c_int, num_actors)
Project: DeepJet    Author: mstoye    | project source | file source
def __createArr(self,shapeinfo):
        import ctypes
        import multiprocessing
        fulldim=1
        for d in shapeinfo:
            fulldim*=d 
        if fulldim < 0: #catch some weird things that happen when there is a file IO error
            fulldim=0 
        # reserve memory for array
        shared_array_base = multiprocessing.RawArray(ctypes.c_float, int(fulldim))
        shared_array = numpy.ctypeslib.as_array(shared_array_base)
        #print('giving shape',shapeinfo)
        shared_array = shared_array.reshape(shapeinfo)
        #print('gave shape',shapeinfo)
        return shared_array
Project: Synkhronos    Author: astooke    | project source | file source
def main():

    x = np.ctypeslib.as_array(mp.RawArray('f', N * C * H * W)).reshape(N, C, H, W)
    print(x.shape)

    b = mp.Barrier(G)

    workers = [mp.Process(target=worker, args=(x, b, rank)) for rank in range(1, G)]
    for w in workers:
        w.start()

    worker(x, b, 0)

    for w in workers:
        w.join()
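
N, C, H, W, G and worker are defined elsewhere in the project; a hypothetical stand-in (assumed values and logic, for illustration only) could be:

N, C, H, W, G = 8, 3, 32, 32, 2  # array dimensions and number of workers

def worker(x, barrier, rank):
    x[rank::G] += rank  # each rank writes a disjoint slice, so no lock is needed
    barrier.wait()      # synchronize before anyone reads the result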
Project: Synkhronos    Author: astooke    | project source | file source
def np_mp_arr(t_or_tc, size_or_init):
    return np.ctypeslib.as_array(mp.RawArray(t_or_tc, size_or_init))
Project: ParlAI    Author: facebookresearch    | project source | file source
def __init__(self, init_dict=None):
        """Create a shared memory version of each element of the initial
        dictionary. Creates an empty array otherwise, which will extend
        automatically when keys are added.

        Each different type (all supported types listed in the ``types`` array
        above) has its own array. For each key we store an index into the
        appropriate array as well as the type of value stored for that key.
        """
        # idx is dict of {key: (array_idx, value_type)}
        self.idx = {}
        # arrays is dict of {value_type: array_of_ctype}
        self.arrays = {}
        self.tensors = {}

        if init_dict:
            sizes = {typ: 0 for typ in self.types.keys()}
            for k, v in init_dict.items():
                if 'Tensor' in str(type(v)):
                    # add tensor to tensor dict--don't try to put in rawarray
                    self.tensors[k] = v
                    continue
                elif type(v) not in sizes:
                    raise TypeError('SharedTable does not support values of ' +
                                    'type ' + str(type(v)))
                sizes[type(v)] += 1
            # pop tensors from init_dict
            for k in self.tensors.keys():
                init_dict.pop(k)
            # create raw arrays for each type
            for typ, sz in sizes.items():
                self.arrays[typ] = RawArray(self.types[typ], sz)
            # track indices for each key, assign them to their typed rawarray
            idxs = {typ: 0 for typ in self.types.keys()}
            for k, v in init_dict.items():
                val_type = type(v)
                self.idx[k] = (idxs[val_type], val_type)
                if val_type == str:
                    v = sys.intern(v)
                self.arrays[val_type][idxs[val_type]] = v
                idxs[val_type] += 1
        # initialize any needed empty arrays
        for typ, ctyp in self.types.items():
            if typ not in self.arrays:
                self.arrays[typ] = RawArray(ctyp, 0)
        self.lock = Lock()
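
A hedged usage sketch (assuming the class is named SharedTable, as in ParlAI, and supports the dict-style access its docstring implies; the item syntax is hypothetical):

table = SharedTable({'episodes': 0, 'reward': 0.0})
with table.lock:
    table['episodes'] += 1  # guarded update visible to all processes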