我有以下代碼來計算質數的數量,並且已經把循環中的工作劃分到各個處理器上。問題是:子例程按等級(rank)把循環的子區間分配給各個處理器之後,我似乎無法控制它們輸出到達的順序。應該如何在 MPI 的進程之間劃分工作?
也就是說,我希望輸出按 0, 1, 2, 3… 的順序依次出現,而不是像 2, 1, 0, 3… 那樣亂序。
例如,如果循環共有 500 次迭代,並且使用 5 個處理器:
- 等級(rank)爲 0 的進程執行 [1 - 100]
- 等級 1 的進程執行 [101 - 200],依此類推……
! Count primes in [2, n] by splitting the iteration range across MPI ranks.
! Fixes vs. the original:
!   * main called "loop_range" but the subroutine is named "para_range"
!     (unresolved external at link time) -- the call now matches the definition,
!     and the subroutine is internal so the compiler checks the interface.
!   * "sum = 0.0" assigned a real literal to an integer counter.
!   * the trial-division loop now exits at the first divisor found.
program main
  implicit none
  include 'mpif.h'

  integer(4), parameter :: n = 36500   ! upper bound of the prime search
  integer(4) :: i, j
  integer(4) :: ista, iend             ! this rank's closed sub-range [ista, iend]
  integer(4) :: nprimes                ! per-rank prime count
  integer(4) :: total_primes           ! global count (valid on rank 0 only)
  integer(4) :: is_composite           ! flag: 1 if a divisor of i was found
  integer(4) :: ierr, iproc, nproc

  call MPI_INIT(ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, iproc, ierr)

  ! Fixed: the original called "loop_range", which is never defined.
  call para_range(2, n, nproc, iproc, ista, iend)

  nprimes = 0
  print *, ista, "-", iend, ">", iproc

  do i = ista, iend
    is_composite = 0
    ! Trial division up to sqrt(i); for i = 2 and 3 the loop body never
    ! executes, so they are (correctly) counted as prime.
    do j = 2, int(sqrt(real(i)))
      if (mod(i, j) == 0) then
        is_composite = 1
        exit            ! one divisor is enough; stop scanning
      end if
    end do
    if (is_composite == 0) nprimes = nprimes + 1
  end do

  ! Sum the per-rank counts onto rank 0. Note: rank ordering of the
  ! earlier print statements is NOT defined by MPI; only the reduction
  ! result is deterministic.
  call MPI_REDUCE(nprimes, total_primes, 1, MPI_INTEGER, MPI_SUM, 0, &
                  MPI_COMM_WORLD, ierr)
  if (iproc == 0) write(6, *) 'Total No of primes=', total_primes

  call MPI_FINALIZE(ierr)

contains

  ! Split iterations [n1, n2] as evenly as possible over nprocs ranks and
  ! return the closed sub-range [ista, iend] owned by rank irank.
  ! The first mod(n2 - n1 + 1, nprocs) ranks each receive one extra iteration.
  subroutine para_range(n1, n2, nprocs, irank, ista, iend)
    integer(4), intent(in)  :: n1      ! lowest value of the iteration variable
    integer(4), intent(in)  :: n2      ! highest value of the iteration variable
    integer(4), intent(in)  :: nprocs  ! number of ranks sharing the work
    integer(4), intent(in)  :: irank   ! this process's rank
    integer(4), intent(out) :: ista    ! first iteration assigned to irank
    integer(4), intent(out) :: iend    ! last iteration assigned to irank
    integer(4) :: chunk, leftover

    print *, irank
    chunk    = (n2 - n1 + 1) / nprocs
    leftover = mod(n2 - n1 + 1, nprocs)
    ista = irank * chunk + n1 + min(irank, leftover)
    iend = ista + chunk - 1
    ! The first "leftover" ranks absorb the remainder, one iteration each.
    if (leftover > irank) iend = iend + 1
  end subroutine para_range

end program main
我使用開放MPI。
可能重複:[Open MPI 的 ranks 輸出不按順序](http://stackoverflow.com/questions/20633008/open-mpi-ranks-are-not-in-order) –