1、hello world (helloWorld_MPI.py)
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
print("hello world from process ", rank)
C:> mpiexec -n 5 python helloWorld_MPI.py
Output:
hello world from process  1
hello world from process  0
hello world from process  2
hello world from process  3
hello world from process  4
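Each process can also ask the communicator how many processes were launched in total; a minimal companion sketch (Get_size is used again in the gather example later):

from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()   # id of this process within COMM_WORLD
size = comm.Get_size()   # total number of processes started by mpiexec
print("process %d of %d" % (rank, size))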
2、Inter-process communication (send/recv)
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.rank
print("my rank is :", rank)

if rank == 0:
    data = 10000000
    destination_process = 4
    comm.send(data, dest=destination_process)
    print("sending data %s " % data + "to process %d" % destination_process)

if rank == 1:
    destination_process = 8
    data = "hello"
    comm.send(data, dest=destination_process)
    print("sending data %s " % data + "to process %d" % destination_process)

if rank == 4:
    data = comm.recv(source=0)
    print("data received is = %s" % data)

if rank == 8:
    data1 = comm.recv(source=1)
    print("data1 received is = %s" % data1)
C:>mpiexec -n 9 python pointToPointCommunication.py
3、Avoiding deadlock with sendrecv
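With blocking send/recv, two processes that each send first and then receive can deadlock as soon as the messages are too large for the MPI library to buffer: both sit in send() waiting for a recv() that is never reached. A minimal sketch of the problematic pattern (the ranks 1 and 5 mirror the sendrecv example below; the large list is only there to defeat eager buffering):

from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# both processes block in send(), so neither reaches its recv(): deadlock
if rank == 1:
    comm.send(list(range(1000000)), dest=5)
    data = comm.recv(source=5)
if rank == 5:
    comm.send(list(range(1000000)), dest=1)
    data = comm.recv(source=1)

comm.sendrecv combines the send and the receive into a single call and lets the MPI library order them safely: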
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# needs at least 6 processes, e.g. mpiexec -n 6 python sendrecv.py
if rank == 1:
    data_send = "a"
    destination_process = 5
    source_process = 5
    data_received = comm.sendrecv(data_send, dest=destination_process, source=source_process)
    print("process %d sent %s and received %s" % (rank, data_send, data_received))
if rank == 5:
    data_send = "b"
    destination_process = 1
    source_process = 1
    data_received = comm.sendrecv(data_send, dest=destination_process, source=source_process)
    print("process %d sent %s and received %s" % (rank, data_send, data_received))
4、Broadcasting a shared variable with bcast
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    variable_to_share = 100
else:
    variable_to_share = None

variable_to_share = comm.bcast(variable_to_share, root=0)
print("process = %d" % rank + " variable shared = %d" % variable_to_share)
C:>mpiexec -n 10 python broadcast.py
process = 0 variable shared = 100
process = 8 variable shared = 100
process = 2 variable shared = 100
process = 3 variable shared = 100
process = 4 variable shared = 100
process = 5 variable shared = 100
process = 9 variable shared = 100
process = 6 variable shared = 100
process = 1 variable shared = 100
process = 7 variable shared = 100
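The lowercase bcast pickles arbitrary Python objects. For NumPy arrays the uppercase, buffer-based Bcast is the faster choice (the same lowercase/uppercase split applies to the Alltoall and Reduce examples further below). A minimal sketch, with the array length chosen arbitrarily:

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# every process allocates the receive buffer; only the root fills it
data = numpy.zeros(5, dtype=int)
if rank == 0:
    data[:] = numpy.arange(5)
comm.Bcast(data, root=0)
print("process %d has %s" % (rank, data))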
5、Sending different data to each process with scatter
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    array_to_share = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
else:
    array_to_share = None

recvbuf = comm.scatter(array_to_share, root=0)
print("process = %d" % rank + " recvbuf = %d" % recvbuf)
C:>mpiexec -n 10 python scatter.py
process = 0 recvbuf = 1
process = 4 recvbuf = 5
process = 6 recvbuf = 7
process = 2 recvbuf = 3
process = 5 recvbuf = 6
process = 3 recvbuf = 4
process = 7 recvbuf = 8
process = 1 recvbuf = 2
process = 8 recvbuf = 9
process = 9 recvbuf = 10
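Note that the lowercase scatter expects the root to pass exactly one item per process (10 items for -n 10 above), so a longer array has to be split into size pieces first. A rough sketch, with the array length and the use of numpy.array_split chosen purely for illustration:

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    big_array = numpy.arange(100)
    chunks = numpy.array_split(big_array, size)  # one chunk per process
else:
    chunks = None

my_chunk = comm.scatter(chunks, root=0)
print("process %d got %d elements" % (rank, len(my_chunk)))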
6、gather: the inverse of scatter
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

data = (rank+1)**2                  # each process contributes its square
data = comm.gather(data, root=0)    # the root ends up with the full list
if rank == 0:
    print("rank = %s " % rank + "...receiving data from other processes")
    for i in range(1, size):
        value = data[i]
        print(" process %s receiving %s from process %s" % (rank, value, i))
C:>mpiexec -n 5 python gather.py
rank = 0 ...receiving data from other processes
process 0 receiving 4 from process 1
process 0 receiving 9 from process 2
process 0 receiving 16 from process 3
process 0 receiving 25 from process 4
7、Alltoall
from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

a_size = 1
senddata = (rank+1)*numpy.arange(size, dtype=int)
recvdata = numpy.empty(size*a_size, dtype=int)
# element j of senddata goes to process j; recvdata[i] arrives from process i
comm.Alltoall(senddata, recvdata)
print(" process %s sending %s receiving %s" % (rank, senddata, recvdata))
C:>mpiexec -n 5 python alltoall.py
process 0 sending [0 1 2 3 4] receiving [0 0 0 0 0]
process 1 sending [0 2 4 6 8] receiving [1 2 3 4 5]
process 2 sending [0 3 6 9 12] receiving [2 4 6 8 10]
process 3 sending [0 4 8 12 16] receiving [3 6 9 12 15]
process 4 sending [0 5 10 15 20] receiving [4 8 12 16 20]
8、Reduction with comm.Reduce
comm.Reduce(sendbuf, recvbuf, op=type_of_reduction_operation, root=rank_of_root_process)
Commonly used reduction operations:
MPI.MAX : returns the maximum element
MPI.MIN : returns the minimum element
MPI.SUM : sums the elements
MPI.PROD : multiplies all the elements
MPI.LAND : performs a logical AND across the elements
MPI.MAXLOC : returns the maximum value and the rank of the process that owns it
MPI.MINLOC : returns the minimum value and the rank of the process that owns it (a MAXLOC/MINLOC sketch follows the Reduce example below)
import numpy
from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.size
rank = comm.rank

array_size = 3            # must equal the sender length (= size), so run with -n 3
recvdata = numpy.zeros(array_size, dtype=int)
senddata = (rank+1)*numpy.arange(size, dtype=int)
print("process %s sending %s " % (rank, senddata))
comm.Reduce(senddata, recvdata, root=0, op=MPI.SUM)
print('on task', rank, 'after Reduce: data = ', recvdata)
C:>mpiexec -n 3 python reduction2.py
process 2 sending [0 3 6]
on task 2 after Reduce: data = [0 0 0]
process 1 sending [0 2 4]
on task 1 after Reduce: data = [0 0 0]
process 0 sending [0 1 2]
on task 0 after Reduce: data = [ 0 6 12]
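MPI.MAXLOC and MPI.MINLOC from the list above reduce (value, location) pairs rather than plain values. A minimal sketch using the pickle-based lowercase reduce, with each process contributing an arbitrary per-rank value:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

value = (rank + 1) ** 2                    # some per-process value
pair = (value, rank)                       # (value, owning rank)
result = comm.reduce(pair, op=MPI.MAXLOC, root=0)
if rank == 0:
    print("max value %s owned by process %s" % result)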
9、Virtual topologies with comm.Create_cart, covered in the cookbook chapter on optimizing communication:
https://python-parallel-programmning-cookbook.readthedocs.io/zh_CN/latest/chapter3/19_How_to_optimize_communication.html
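That chapter arranges the processes of a communicator on a Cartesian grid so that neighbours can be addressed by coordinates instead of raw ranks. A minimal sketch, assuming a 2x2 grid and therefore mpiexec -n 4:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# 2x2 non-periodic grid; run with exactly 4 processes here
cart = comm.Create_cart(dims=[2, 2], periods=[False, False], reorder=True)
coords = cart.Get_coords(cart.Get_rank())      # (row, col) of this process
src0, dst0 = cart.Shift(direction=0, disp=1)   # previous/next rank along dim 0
src1, dst1 = cart.Shift(direction=1, disp=1)   # previous/next rank along dim 1
print("rank %d at coords %s, dim0 neighbours (%d, %d), dim1 neighbours (%d, %d)"
      % (rank, coords, src0, dst0, src1, dst1))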