Python mpi4py.MPI module: COMM_WORLD usage examples (source code)

The following 50 code examples, extracted from open-source Python projects, illustrate how to use mpi4py.MPI.COMM_WORLD.

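For orientation, here is a minimal, self-contained sketch of the basic COMM_WORLD pattern that the examples below build on (launch it with something like mpirun -np 4 python script.py; the script name is only an illustration):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD      # communicator spanning all launched processes
rank = comm.Get_rank()     # this process's index within the communicator
size = comm.Get_size()     # total number of processes

# Pickle-based broadcast of an arbitrary Python object from rank 0
config = {"seed": 0} if rank == 0 else None
config = comm.bcast(config, root=0)

# Buffer-based reduction of a NumPy array across all ranks
local = np.full(3, rank, dtype='float64')
total = np.zeros(3, 'float64')
comm.Allreduce(local, total, op=MPI.SUM)
print(rank, size, config, total)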
Project: baselines | Author: openai
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape,dtype=int)
    totalvec = np.zeros(n*2+1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(), 
        np.square(x).sum(axis=axis).ravel(), 
        np.array([x.shape[axis]],dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2*n]
    count = totalvec[2*n]
    if count == 0:
        mean = np.empty(newshape); mean[:] = np.nan
        std = np.empty(newshape); std[:] = np.nan
    else:
        mean = sum/count
        std = np.sqrt(np.maximum(sumsq/count - np.square(mean),0))
    return mean, std, count
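A usage sketch for the helper above (assuming it is importable and the script is launched under MPI, e.g. mpirun -np 4): every rank passes its local batch and receives the statistics of the data concatenated across all ranks.

local_batch = np.random.randn(100, 5)             # different data on every rank
mean, std, count = mpi_moments(local_batch, axis=0)
# mean and std have shape (5,); count is the total number of rows over all ranks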
Project: planetplanet | Author: rodluger
def __init__(self, comm=None, loadbalance=False, debug=False,
                 wait_on_start = True, exit_on_end = True, 
                 cores_per_task = 1, **kwargs):
        if MPI is None:
            raise ImportError("Please install mpi4py")

        self.comm = MPI.COMM_WORLD if comm is None else comm
        self.rank = self.comm.Get_rank()
        if cores_per_task > 1:
          self.size  = max(1, self.comm.Get_size() // cores_per_task)
        else:
          self.size = self.comm.Get_size() - 1
        self.function = _error_function
        self.loadbalance = loadbalance
        self.debug = debug
        if self.size == 0:
            raise ValueError("Tried to create an MPI pool, but there "
                             "was only one MPI process available. "
                             "Need at least two.")
        self.exit_on_end = exit_on_end

        # Enter main loop for workers?
        if wait_on_start:
            if self.is_worker():
                self.wait()
Project: baselines | Author: openai
def test_runningmeanstd():
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple,axis) in [
        ((np.random.randn(3), np.random.randn(4), np.random.randn(5)),0),
        ((np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),0),
        ((np.random.randn(2,3), np.random.randn(2,4), np.random.randn(2,4)),1),
        ]:


        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]


        ms2 = mpi_moments(triple[comm.Get_rank()],axis=axis)

        for (a1,a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
            print("ok!")
Project: pytomo3d | Author: computational-seismology
def is_mpi_env():
    """
    Test if current environment is MPI or not
    """
    try:
        import mpi4py
    except ImportError:
        return False

    try:
        import mpi4py.MPI
    except ImportError:
        return False

    if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:
        return False

    return True
Project: pytomo3d | Author: computational-seismology
def smart_read_json(mpi_mode, json_file):
    """
    read json file under mpi and multi-processing environment
    """
    if not mpi_mode:
        json_obj = read_json_file(json_file)
    else:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        if rank == 0:
            json_obj = read_json_file(json_file)
        else:
            json_obj = None
        json_obj = comm.bcast(json_obj, root=0)
    return json_obj
Project: Fluid2d | Author: pvthinker
def smooth(self,x,b,nite):
        for l in range(self.nbsmooth):
            for k in range(nite):
                t0 = time()
#                MPI.COMM_WORLD.Barrier()
                # we apply the smoothing twice
                smoothtwicewitha(self.msk,self.A,x,b,self.omega,self.yo)
#                smoothoncewitha(self.msk,self.A,x,b,self.omega,self.yo)
#                smoothoncewitha(self.msk,self.A,x,b,self.omega,self.yo)
                t1 = time()
                self.time['smooth']+=t1-t0
                self.ncalls['smooth']+=1

#                MPI.COMM_WORLD.Barrier()
                t0 = time()
                self.time['barrier']+=t0-t1
                self.ncalls['barrier']+=1

                self.halo.fill(x)
                t1 = time()
                self.time['halo']+=t1-t0
                self.ncalls['halo']+=1
Project: Fluid2d | Author: pvthinker
def residual(self,x,b,r):
        for l in range(self.nbresidual):
            t0 = time()
#            MPI.COMM_WORLD.Barrier()
            computeresidualwitha(self.msk,self.A,x,b,r)
            t1 = time()
            self.time['res']+=t1-t0
            self.ncalls['res']+=1

#            MPI.COMM_WORLD.Barrier()
            t0 = time()
            self.time['barrier']+=t0-t1
            self.ncalls['barrier']+=1

            self.halo.fill(r)
            t1 = time()
            self.time['halo']+=t1-t0
            self.ncalls['halo']+=1
Project: Fluid2d | Author: pvthinker
def sum(self,x):
        """  sum(x) """
        t0 = time() 
#        MPI.COMM_WORLD.Barrier()
        nbduplicates = (self.np0*self.mp0)/(self.np*self.mp)
        local_sum = computesum(self.msk,x,self.nh)
        t1 = time()
        self.time['sum']+=t1-t0
        self.ncalls['sum']+=1
        global_sum=array(MPI.COMM_WORLD.allgather(local_sum))
        t0 = time()
        self.time['reduce']+=t0-t1
        self.ncalls['reduce']+=1

        return global_sum.sum() / nbduplicates

#----------------------------------------
Project: Fluid2d | Author: pvthinker
def coarsetofine(coarse,fine,x,y):
    """ input = x is on coarse / output = y is on fine """
    # these are function outside the object Grid
    # because it works on two different grids ...
    for k in range(coarse.nbcoarsetofine):
        if coarse.flag=='peak':
            t0 = time()  
            coarse.subdom.split(x,y) 
            t1 = time()
            coarse.time['split']+=t1-t0
            coarse.ncalls['split']+=1
        else:
            t0 = time()  
            interpolate(fine.msk,coarse.msk,x,fine.nh,y)
            t1 = time()
            fine.time['interpolate']+=t1-t0
            fine.ncalls['interpolate']+=1
            if coarse.nh<=2:
                fine.halo.fill(y)
                t0 = time() 
                fine.time['halo']+=t0-t1
                fine.ncalls['halo']+=1
            # else there are enough points in the halo to skip filling!
#        MPI.COMM_WORLD.Barrier()
Project: abcpy | Author: eth-cscs
def __init__(self, master_node_ranks=[0]):
        self.comm = MPI.COMM_WORLD
        self.size = self.comm.Get_size()
        self.rank = self.comm.Get_rank()

        if self.size < 2:
            raise ValueError('A minimum of 2 ranks are required for the MPI backend')


        #Set the global backend
        globals()['backend'] = self


        #Call the appropriate constructors and pass the required data
        if self.rank == 0:
            super().__init__(master_node_ranks)
        else:
            super().__init__()
            raise Exception("Slaves exitted main loop.")
Project: abcpy | Author: eth-cscs
def setUpModule():
    '''
    If an exception is raised in a setUpModule then none of 
    the tests in the module will be run. 

    This is useful because the slaves run in a while loop on initialization
    only responding to the master's commands and will never execute anything else.

    On termination of master, the slaves call quit() that raises a SystemExit(). 
    Because of the behaviour of setUpModule, it will not run any unit tests
    for the slave and we now only need to write unit-tests from the master's 
    point of view. 
    '''
    global rank,backend_mpi
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    backend_mpi = BackendMPI()
Project: CAAPR | Author: Stargrazer82301
def __init__(self):

        # Delayed ImportError of mpi4py
        if not HAS_MPI4PY:
            raise ImportError("No module named mpi4py, you must install mpi4py to use MPIMigration!")

        super(MPIMigration, self).__init__()

        self.comm = MPI.COMM_WORLD
        self.pid = self.comm.rank

        if self.pid == 0:
            self.source = self.comm.size - 1
        else:
            self.source = self.comm.rank - 1

        self.dest = (self.comm.rank + 1) % (self.comm.size)

        self.all_stars = None

    # -----------------------------------------------------------------
Project: DL4NMT_Theano | Author: fyabc
def all_reduce_params(sent_shared_params, rec_buffers, average_cnt = 1):
    from mpi4py import MPI
    mpi_communicator = MPI.COMM_WORLD
    commu_time = 0.0
    gpu2cpu_cp_time = 0.0
    for (sent_model, rec_model) in zip(sent_shared_params, rec_buffers):
        cp_start = time.time()
        model_val = sent_model.get_value()
        gpu2cpu_cp_time += time.time() - cp_start

        commu_start = time.time()
        mpi_communicator.Allreduce([model_val, MPI.FLOAT], [rec_model, MPI.FLOAT], op=MPI.SUM)
        commu_time += time.time() - commu_start
        if average_cnt != 1:  # skip the division when possible, since it is costly
            rec_model = rec_model / average_cnt

        cp_start = time.time()
        sent_model.set_value(rec_model)
        gpu2cpu_cp_time += time.time() - cp_start
    return commu_time,  gpu2cpu_cp_time
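The snippet above uses mpi4py's buffer interface ([array, MPI.FLOAT]) to sum parameters across workers and then averages them. A stripped-down sketch of the same pattern, assuming float32 NumPy buffers of identical shape on every rank:

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
params = np.random.rand(10).astype('float32')   # stand-in for one model parameter
summed = np.empty_like(params)
comm.Allreduce([params, MPI.FLOAT], [summed, MPI.FLOAT], op=MPI.SUM)
averaged = summed / comm.Get_size()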
Project: eddylicious | Author: timofeymukha
def get_times(reader, readPath):
    """Read the time values associated with the precursor database."""

    # Grab the existing times and sort them
    if reader == "foamFile":
        dataDir = os.path.join(readPath, "postProcessing", "sampledSurface")
        times = os.listdir(dataDir)
        times = np.sort(times)
    elif reader == "hdf5":
        # Set the readPath to the file itself
        readPath = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)
        times = readPath["velocity"]["times"][:]
        readPath.close()
    else:
        raise ValueError("Unknown reader: "+reader)

    return times
Project: eddylicious | Author: timofeymukha
def get_y_prec(reader, readPath):
    """Read the mean velocity profile of the precursor
       and the total number of points in the y direction.

    """
    if reader == "foamFile":
        uMeanTimes = os.listdir(os.path.join(readPath, "postProcessing",
                                             "collapsedFields"))
        y = np.genfromtxt(os.path.join(readPath, "postProcessing",
                                       "collapsedFields",
                                       uMeanTimes[-1],
                                       "UMean_X.xy"))[:, 0]
    elif reader == "hdf5":
        readPath = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)
        y = readPath["points"]["pointsY"][:, 0]
        readPath.close()
    else:
        raise ValueError("Unknown reader: "+reader)

    return y
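Both readers above open the HDF5 file collectively with driver='mpio', which requires an h5py build with parallel HDF5 support; every rank in the communicator has to take part in the open and close. A minimal sketch of that pattern (the file name is illustrative, the dataset path mirrors the snippet above):

import h5py
from mpi4py import MPI

with h5py.File('precursor.h5', 'r', driver='mpio', comm=MPI.COMM_WORLD) as f:
    times = f["velocity"]["times"][:]   # every rank reads the same dataset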
Project: cykdtree | Author: cykdtree
def test_consolidate(periodic=False, ndim=2):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    pts, le, re, ls = make_points(20, ndim, leafsize=3)
    Tpara0 = cykdtree.PyParallelKDTree(pts, le, re, leafsize=ls,
                                       periodic=periodic)
    Tpara = Tpara0.consolidate()
    if rank == 0:
        if False:
            from cykdtree.plot import plot2D_serial
            plot2D_serial(Tpara, label_boxes=True,
                          plotfile='test_consolidate.png')
        Tseri = cykdtree.PyKDTree(pts, le, re, leafsize=ls,
                                  periodic=periodic)
        Tpara.assert_equal(Tseri, strict_idx=False)
    else:
        assert(Tpara is None)
Project: cykdtree | Author: cykdtree
def test_consolidate_edges(periodic=False, ndim=2):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    pts, le, re, ls = make_points(20, ndim, leafsize=3)
    Tpara = cykdtree.PyParallelKDTree(pts, le, re, leafsize=ls,
                                      periodic=periodic)
    LEpara, REpara = Tpara.consolidate_edges()
    if rank == 0:
        Tseri = cykdtree.PyKDTree(pts, le, re, leafsize=ls,
                                  periodic=periodic)
        LEseri, REseri = Tseri.consolidate_edges()
    else:
        LEseri, REseri = None, None
    LEseri, REseri = comm.bcast((LEseri, REseri), root=0)
    np.testing.assert_allclose(LEpara, LEseri)
    np.testing.assert_allclose(REpara, REseri)
Project: cykdtree | Author: cykdtree
def test_parallel_distribute(ndim=2):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    npts = 50
    if rank == 0:
        pts = np.random.rand(npts, ndim).astype('float64')
    else:
        pts = None
    total_pts = comm.bcast(pts, root=0)
    local_pts, local_idx = parallel_utils.py_parallel_distribute(pts)
    npts_local = npts // size
    if rank < (npts % size):
        npts_local += 1
    assert_equal(local_pts.shape, (npts_local, ndim))
    assert_equal(local_idx.shape, (npts_local, ))
    np.testing.assert_array_equal(total_pts[local_idx], local_pts)
Project: cykdtree | Author: cykdtree
def test_parallel_pivot_value(ndim=2, npts=50):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    if rank == 0:
        pts = np.random.rand(npts, ndim).astype('float64')
    else:
        pts = None
    total_pts = comm.bcast(pts, root=0)
    local_pts, local_idx = parallel_utils.py_parallel_distribute(pts)
    pivot_dim = ndim-1

    piv = parallel_utils.py_parallel_pivot_value(local_pts, pivot_dim)

    nmax = (7*npts/10 + 6)
    assert(np.sum(total_pts[:, pivot_dim] < piv) <= nmax)
    assert(np.sum(total_pts[:, pivot_dim] > piv) <= nmax)

    # Not equivalent because each processes does not have multiple of 5 points
    # if rank == 0:
    #     pp, idx = utils.py_pivot(total_pts, pivot_dim)
    #     np.testing.assert_approx_equal(piv, total_pts[idx[pp], pivot_dim])
Project: cykdtree | Author: cykdtree
def test_calc_rounds():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    # Get answers
    ans_nrounds = int(np.ceil(np.log2(size))) + 1
    ans_src_round = 0
    curr_rank = rank
    curr_size = size
    while curr_rank != 0:
        split_rank = parallel_utils.py_calc_split_rank(curr_size)
        if curr_rank < split_rank:
            curr_size = split_rank
            curr_rank = curr_rank
        else:
            curr_size = curr_size - split_rank
            curr_rank = curr_rank - split_rank
        ans_src_round += 1
    # Test
    nrounds, src_round = parallel_utils.py_calc_rounds()
    assert_equal(nrounds, ans_nrounds)
    assert_equal(src_round, ans_src_round)
Project: cykdtree | Author: cykdtree
def test_kdtree_parallel_distribute(ndim=2, npts=50):
    total_npts = npts
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    if rank == 0:
        total_pts = np.random.rand(total_npts, ndim).astype('float64')
    else:
        total_pts = None
    pts, idx, le, re, ple, pre = parallel_utils.py_kdtree_parallel_distribute(total_pts)
    total_pts = comm.bcast(total_pts, root=0)
    assert_equal(pts.shape[0], idx.size)
    np.testing.assert_array_equal(pts, total_pts[idx,:])
    for d in range(ndim):
        assert_less_equal(pts[:,d], re[d])
        assert_less_equal(le[d], pts[:,d])
Project: schwimmbad | Author: adrn
def __init__(self, comm=None):
        _import_mpi()

        if comm is None:
            comm = MPI.COMM_WORLD
        self.comm = comm

        self.master = 0
        self.rank = self.comm.Get_rank()
        self.workers = set(range(self.comm.size))
        self.workers.discard(self.master)

        self.size = self.comm.Get_size() - 1

        if self.size == 0:
            raise ValueError("Tried to create an MPI pool, but there "
                             "was only one MPI process available. "
                             "Need at least two.")
Project: rl-teacher | Author: nottombrown
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape,dtype=int)
    totalvec = np.zeros(n*2+1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(), 
        np.square(x).sum(axis=axis).ravel(), 
        np.array([x.shape[axis]],dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2*n]
    count = totalvec[2*n]
    if count == 0:
        mean = np.empty(newshape); mean[:] = np.nan
        std = np.empty(newshape); std[:] = np.nan
    else:
        mean = sum/count
        std = np.sqrt(np.maximum(sumsq/count - np.square(mean),0))
    return mean, std, count
Project: rl-teacher | Author: nottombrown
def test_runningmeanstd():
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple,axis) in [
        ((np.random.randn(3), np.random.randn(4), np.random.randn(5)),0),
        ((np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),0),
        ((np.random.randn(2,3), np.random.randn(2,4), np.random.randn(2,4)),1),
        ]:


        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]


        ms2 = mpi_moments(triple[comm.Get_rank()],axis=axis)

        for (a1,a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
            print("ok!")
Project: chainer-pspnet | Author: mitmul
def create_result_dir(prefix='result'):
    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        result_dir = 'results/{}_{}_0'.format(
            prefix, time.strftime('%Y-%m-%d_%H-%M-%S'))
        while os.path.exists(result_dir):
            i = int(result_dir.split('_')[-1])
            # bump the trailing index until an unused directory name is found
            result_dir = re.sub('_[0-9]+$', '_{}'.format(i + 1), result_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
    else:
        result_dir = None
    result_dir = comm.bcast(result_dir, root=0)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    return result_dir
Project: supremm | Author: ubccr
def main():
    """
    main entry point for script
    """

    comm = MPI.COMM_WORLD

    opts = getoptions(True)

    opts['threads'] = comm.Get_size()

    logout = "mpiOutput-{}.log".format(comm.Get_rank())

    # For MPI jobs, do something sane with logging.
    setuplogger(logging.ERROR, logout, opts['log'])

    config = Config()

    if comm.Get_size() < 2:
        logging.error("Must run MPI job with at least 2 processes")
        sys.exit(1)

    myhost = MPI.Get_processor_name()
    logging.info("Nodename: %s", myhost)

    processjobs(config, opts, comm.Get_rank(), comm)

    logging.info("Rank: %s FINISHED", comm.Get_rank())
Project: maggma | Author: materialsproject
def get_mpi():
    """
    Helper that returns the mpi communicator, rank and size.
    """
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    return comm, rank, size
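A short usage sketch for the helper above:

comm, rank, size = get_mpi()
if rank == 0:
    print('running on', size, 'MPI processes')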
Project: planetplanet | Author: rodluger
def enabled():
        '''

        '''

        if MPI is not None:
            if MPI.COMM_WORLD.size > 1:
                return True
        return False
Project: baselines | Author: openai
def update(self, x):
        x = x.astype('float64')
        n = int(np.prod(self.shape))
        totalvec = np.zeros(n*2+1, 'float64')
        addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')])
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n])
Project: baselines | Author: openai
def test_dist():
    np.random.seed(0)
    p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1))
    q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1))

    # p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5))
    # q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8))

    comm = MPI.COMM_WORLD
    assert comm.Get_size()==2
    if comm.Get_rank()==0:
        x1,x2,x3 = p1,p2,p3
    elif comm.Get_rank()==1:
        x1,x2,x3 = q1,q2,q3
    else:
        assert False

    rms = RunningMeanStd(epsilon=0.0, shape=(1,))
    U.initialize()

    rms.update(x1)
    rms.update(x2)
    rms.update(x3)

    bigvec = np.concatenate([p1,p2,p3,q1,q2,q3])

    def checkallclose(x,y):
        print(x,y)
        return np.allclose(x,y)

    assert checkallclose(
        bigvec.mean(axis=0),
        U.eval(rms.mean)
    )
    assert checkallclose(
        bigvec.std(axis=0),
        U.eval(rms.std)
    )
Project: baselines | Author: openai
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
        self.var_list = var_list
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.scale_grad_by_procs = scale_grad_by_procs
        size = sum(U.numel(v) for v in var_list)
        self.m = np.zeros(size, 'float32')
        self.v = np.zeros(size, 'float32')
        self.t = 0
        self.setfromflat = U.SetFromFlat(var_list)
        self.getflat = U.GetFlat(var_list)
        self.comm = MPI.COMM_WORLD if comm is None else comm
Project: brainiak | Author: brainiak
def run(self, clf):
        """Run correlation-based voxel selection in master-worker model.

        Sort the voxels based on the cross-validation accuracy
        of their correlation vectors

        Parameters
        ----------
        clf: classification function
            the classifier to be used in cross validation

        Returns
        -------
        results: list of tuple (voxel_id, accuracy)
            the accuracy numbers of all voxels, in accuracy descending order
            the length of array equals the number of voxels
        """
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == self.master_rank:
            results = self._master()
            # Sort the voxels
            results.sort(key=lambda tup: tup[1], reverse=True)
        else:
            self._worker(clf)
            results = []
        return results
Project: brainiak | Author: brainiak
def _worker(self, clf):
        """Worker node's operation.

        Receiving tasks from the master to process and sending the result back

        Parameters
        ----------
        clf: classification function
            the classifier to be used in cross validation

        Returns
        -------
        None
        """
        logger.debug(
            'worker %d is running, waiting for tasks from master at rank %d' %
            (MPI.COMM_WORLD.Get_rank(), self.master_rank)
        )
        comm = MPI.COMM_WORLD
        status = MPI.Status()
        while 1:
            task = comm.recv(source=self.master_rank,
                             tag=MPI.ANY_TAG,
                             status=status)
            if status.Get_tag():
                break
            comm.send(self._voxel_scoring(task, clf),
                      dest=self.master_rank)
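The worker loop above exits when it receives a message with a nonzero tag. A minimal master-side counterpart (a sketch of the protocol implied here, not brainiak's actual _master implementation; it assumes it runs on the master rank only and hands one illustrative task to each worker):

from mpi4py import MPI

comm = MPI.COMM_WORLD
n_workers = comm.Get_size() - 1
tasks = list(range(n_workers))                      # one illustrative task per worker
results = []
for worker_rank, task in zip(range(1, n_workers + 1), tasks):
    comm.send(task, dest=worker_rank, tag=0)        # tag 0: process this task
for worker_rank in range(1, n_workers + 1):
    results.append(comm.recv(source=worker_rank))   # collect one result per worker
    comm.send(None, dest=worker_rank, tag=1)        # nonzero tag tells the worker to stop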
Project: brainiak | Author: brainiak
def __init__(self, sl_rad=1, max_blk_edge=10, shape=Cube,
                 min_active_voxels_proportion=0):
        """Constructor

        Parameters
        ----------

        sl_rad: radius, in voxels, of the sphere inscribed in the
                   searchlight cube, not counting the center voxel

        max_blk_edge: max edge length, in voxels, of the 3D block

        shape: brainiak.searchlight.searchlight.Shape indicating the
        shape in voxels of the searchlight region

        min_active_voxels_proportion: float
            If a searchlight region does not have more than this minimum
            proportion of active voxels in the mask, it is not processed by the
            searchlight function. The mask used for the test is the
            intersection of the global (brain) mask and the `Shape` mask. The
            seed (central) voxel of the searchlight region is taken into
            consideration.
        """
        self.sl_rad = sl_rad
        self.max_blk_edge = max_blk_edge
        self.min_active_voxels_proportion = min_active_voxels_proportion
        self.comm = MPI.COMM_WORLD
        self.shape = shape(sl_rad).mask_
        self.bcast_var = None
Project: brainiak | Author: brainiak
def test_searchlight_with_cube():
    sl = Searchlight(sl_rad=3)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = True

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(cube_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
Project: brainiak | Author: brainiak
def test_searchlight_with_diamond():
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
Project: veros | Author: dionhaefner
def __init__(self, fortran=None, *args, **kwargs):
        """
        To use the pyOM2 legacy interface point the fortran argument to the Veros fortran library:

        > simulation = GlobalOneDegree(fortran = "pyOM_code.so")

        """
        if fortran:
            self.legacy_mode = True
            try:
                self.fortran = LowercaseAttributeWrapper(imp.load_dynamic("pyOM_code", fortran))
                self.use_mpi = False
            except ImportError:
                self.fortran = LowercaseAttributeWrapper(imp.load_dynamic("pyOM_code_MPI", fortran))
                self.use_mpi = True
                from mpi4py import MPI
                self.mpi_comm = MPI.COMM_WORLD
            self.main_module = LowercaseAttributeWrapper(self.fortran.main_module)
            self.isoneutral_module = LowercaseAttributeWrapper(self.fortran.isoneutral_module)
            self.idemix_module = LowercaseAttributeWrapper(self.fortran.idemix_module)
            self.tke_module = LowercaseAttributeWrapper(self.fortran.tke_module)
            self.eke_module = LowercaseAttributeWrapper(self.fortran.eke_module)
        else:
            self.legacy_mode = False
            self.use_mpi = False
            self.fortran = self
            self.main_module = self
            self.isoneutral_module = self
            self.idemix_module = self
            self.tke_module = self
            self.eke_module = self
        self.modules = (self.main_module, self.isoneutral_module, self.idemix_module,
                        self.tke_module, self.eke_module)

        if self.use_mpi and self.mpi_comm.Get_rank() != 0:
            kwargs["loglevel"] = "critical"
        super(VerosLegacy, self).__init__(*args, **kwargs)
Project: uncover-ml | Author: GeoscienceAustralia
def main(tiff, outfile, overlayname):
    """
    Turn a geotiff into a KMZ that can be dragged onto an instance of Terria
    Map. This also constructs a JPEG of the Geotiff, as it is required for the
    KMZ.
    """

    # MPI globals
    comm = MPI.COMM_WORLD
    chunk_index = comm.Get_rank()
    # This runs on the root node only
    if chunk_index != 0:
        return

    # Get tiff info
    I = geoio.Image(tiff)

    # Save tiff as jpeg
    if outfile is not None:
        outfile = os.path.splitext(outfile)[0]
    else:
        outfile = os.path.splitext(tiff)[0]
    jpg = outfile + ".jpg"

    # Convert tiff to jpg
    Im = Image.open(tiff)
    Im.save(jpg)

    # Construct KMZ
    kml = simplekml.Kml()
    if overlayname is None:
        overlayname = os.path.basename(outfile)
    ground = kml.newgroundoverlay(name=overlayname)
    ground.icon.href = jpg
    ground.latlonbox.west = I.xmin
    ground.latlonbox.east = I.xmax
    ground.latlonbox.north = I.ymax
    ground.latlonbox.south = I.ymin

    kml.savekmz("{}.kmz".format(outfile))
Project: uncover-ml | Author: GeoscienceAustralia
def convert_files(files, output_dir, opt, mask_file):
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    if mask_file:
        # temporary cropped mask file
        cropped_mask_file = tempfile.mktemp(suffix='.tif', dir=TMPDIR)

        # crop/reproject/resample the mask
        crop_reproject_resample(opt.mask_file,
                                cropped_mask_file,
                                sampling='near',
                                extents=opt.extents,
                                reproject=opt.reproject)
    else:
        cropped_mask_file = opt.mask_file

    for i in range(rank, len(files), size):
        in_file = files[i]
        print('============file no: {} of {}==========='.format(i, len(files)))
        log.info("operating on {file} in process {rank}".format(file=in_file,
                                                                rank=rank))
        out_file = join(output_dir, basename(in_file))
        log.info('Output file: {}'.format(out_file))
        do_work(input_file=in_file,
                mask_file=cropped_mask_file,
                output_file=out_file,
                options=options)

    if mask_file:
        os.remove(cropped_mask_file)
        log.info('removed intermediate cropped '
                 'mask file {}'.format(cropped_mask_file))

    comm.Barrier()


# TODO: use click here
Project: ngraph | Author: NervanaSystems
def write_server_info(filename, port):
    pid = os.getpid()
    rank = MPI.COMM_WORLD.Get_rank()
    server_info = '{}:{}:{}:{}:{}'.format(LINE_TOKEN, rank, pid, port, LINE_TOKEN).strip()
    logger.debug("write_server_info: line %s, filename %s", server_info, filename)

    time.sleep(0.1 * rank)
    with open(filename, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.write(server_info + '\n')
        f.flush()
        os.fsync(f.fileno())
        fcntl.lockf(f, fcntl.LOCK_UN)
    return server_info
Project: ngraph | Author: NervanaSystems
def serve():
    parser = argparse.ArgumentParser()
    parser.add_argument("-tf", "--tmpfile", nargs=1)
    parser.add_argument("-p", "--ports", nargs='+')
    args = parser.parse_args()
    comm = MPI.COMM_WORLD

    options = [('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1)]
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=options)
    hetr_pb2_grpc.add_HetrServicer_to_server(HetrServer(comm, server), server)
    logger.debug("server: rank %d, tmpfile %s, ports %s",
                 comm.Get_rank(), args.tmpfile[0], args.ports if args.ports is not None else "")

    if args.ports is not None and len(args.ports) > comm.Get_rank():
        p = args.ports[comm.Get_rank()]
        if is_port_open(p):
            port = server.add_insecure_port('[::]:' + p)
        else:
            raise RuntimeError("port %s is already in use!", p)
    else:
        port = server.add_insecure_port('[::]:0')

    server.start()
    write_server_info(args.tmpfile[0], port)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
Project: nanopores | Author: mitschabaude
def __init__(self, target=None, comm=None, debug=False, loadbalance=False):
        self.comm = MPI.COMM_WORLD if comm is None else comm
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size() - 1
        self.debug = debug
        self.function = _error_function if target is None else target
        self.loadbalance = loadbalance
        if self.size == 0:
            raise ValueError("Tried to create an MPI pool, but there "
                             "was only one MPI process available. "
                             "Need at least two.")
Project: Theano-MPI | Author: uoguelph-mlrg
def get_internode_comm(self):

        self.comm=MPI.COMM_WORLD

        self.rank=self.comm.rank
        self.size=self.comm.size
Project: Theano-MPI | Author: uoguelph-mlrg
def get_internode_comm():

    from mpi4py import MPI
    comm=MPI.COMM_WORLD

    return comm

# intra-node comm
Project: gaft | Author: PytLab
def bcast(self, data):
        if MPI_INSTALLED:
            mpi_comm = MPI.COMM_WORLD
            bdata = mpi_comm.bcast(data, root=0)
        else:
            bdata = data
        return bdata

    # Wrapper for common MPI interfaces.
Project: gaft | Author: PytLab
def barrier(self):
        if MPI_INSTALLED:
            mpi_comm = MPI.COMM_WORLD
            mpi_comm.barrier()
Project: gaft | Author: PytLab
def rank(self):
        if MPI_INSTALLED:
            mpi_comm = MPI.COMM_WORLD
            return mpi_comm.Get_rank()
        else:
            return 0
Project: gaft | Author: PytLab
def size(self):
        if MPI_INSTALLED:
            mpi_comm = MPI.COMM_WORLD
            return mpi_comm.Get_size()
        else:
            return 1
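The gaft wrappers above all branch on a module-level MPI_INSTALLED flag so they degrade gracefully to single-process behaviour. A common way to define such a flag (an assumption for illustration, not necessarily gaft's exact code) is an import guard:

try:
    from mpi4py import MPI
    MPI_INSTALLED = True          # mpi4py is available; use COMM_WORLD
except ImportError:
    MPI_INSTALLED = False         # fall back to serial behaviour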