
Correct way to create ghost zones in MPI [halos]

Good evening! I'm taking a parallel programming course. The professor gave us an assignment that involves domain partitioning for stencil computations. For this kind of computation (finite differences), the most common way to parallelize the code is to partition the domain and create ghost zones (halos).

To better understand how ghost zones are created in MPI, I programmed this simple example. It initializes an array with interior values of 123 and boundary (ghost) values of 8. At the end of all the communication, every ghost value should still be 8; instead, on one of the nodes I am getting 123.

Serial (no ghosts):

123 - 123 - ... - 123 - 123 

Two partitions:

123 - 123 - ... - 88 ||| 88 - ... - 123 - 123 

Three partitions:

123 - 123 - ... - 88 ||| 88 - ... - 123 - 123 - 88 ||| 88 - ... - 123 - 123 
Apart from this bug, the main question here is what the correct way is to create and keep up to date the ghost zones. Is there a cleaner implementation than my messy chain of if(myid == ...) else if(myid == ...)? How do people usually implement this kind of parallelism?

#include<mpi.h> 
#include<stdio.h> 
#include<stdlib.h> 

int WhichSize(int mpiId, int numProc, int tam); 

int main(int argc, char *argv[]){ 

    int i; 
    int localSize; 
    int numProc; 
    int myid; 

    int leftProc; 
    int rightProc; 

    int * myArray; 
    int fullDomainSize = 16; 

    MPI_Request request; 

    MPI_Init(&argc, &argv); 
    MPI_Comm_size(MPI_COMM_WORLD, &numProc); 
    MPI_Comm_rank(MPI_COMM_WORLD, &myid); 


    // Let's get each partition size. 
    localSize = WhichSize(myid, numProc, fullDomainSize); 


    // Allocate arrays according to proc number. 
    if(numProc == 1){ 

     //printf("Allocating Array for serial usage\n"); 
     myArray = (int*)malloc(localSize*sizeof(int)); 

    } else if(numProc == 2) { 

     //printf("Allocating Array for 2 proc usage\n"); 
     myArray = (int*)malloc((localSize+ 1)*sizeof(int)); 

    } else if(numProc > 2) { 

     if (myid == 0 || myid == numProc - 1){ 

      //printf("Allocating array for boundary nodes usage\n"); 
      myArray = (int*)malloc((localSize+ 1)*sizeof(int)); 

     } else { 

      //printf("Allocating array for inner nodes usage\n"); 
      myArray = (int*)malloc((localSize+ 2)*sizeof(int)); 

     } 

    } 


    // Now we will fill the arrays with a dummy value 123. The 
    // boundaries (ghosts) are filled with 8, just to differentiate. 

    if(numProc == 1){ 

     //printf("----------------------------------------\n"); 
     //printf("Filling the serial array with values... \n"); 

     for (i = 0; i<localSize; i++){ 
      myArray[i] = 123; 
     } 

    } else if(numProc == 2) { 

     ////printf("------------------------------------------------\n"); 
     //printf("Filling array for two proc usage with values... \n"); 

     for (i = 0; i<localSize; i++){ 
      myArray[i] = 123; 
     } 

     // ghost. 
     myArray[localSize+1] = 8; 

    } else if(numProc > 2) { 

     if (myid == 0 || myid == numProc - 1){ 

      //printf("--------------------------------------------------\n"); 
      //printf("Filling boundary node arrays usage with values... \n"); 

      for (i = 0; i<localSize; i++){ 
       myArray[i] = 123; 
      } 

      // ghosts. 
      myArray[localSize+1] = 8; 

     } else { 

      //printf("--------------------------------------------------\n"); 
      //printf("Filling inner node arrays usage with values... \n"); 

      for (i = 0; i<localSize; i++){ 
       myArray[i] = 123; 
      } 

      // ghosts. 
      myArray[localSize+1] = 8; 
      myArray[0] = 8; 

     } 

    } 


    // Now let's communicate the ghosts with MPI_Isend()/MPI_Irecv(). 

    if(numProc == 1){ 

     //printf("Serial usage, no ghost to comunicate \n"); 

    } else if(numProc == 2) { 

     if (myid == 0){ 

      //printf("Sending ghost value from proc %d to %d\n", myid, myid + 1); 
      MPI_Isend(&myArray[localSize+1], 
         1, 
         MPI_INT, 
         1, 
         12345, 
         MPI_COMM_WORLD, 
         &request); 

     } else if (myid == 1) { 

      //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid); 
      MPI_Irecv(&myArray[localSize+1], 
         1, 
         MPI_INT, 
         0, 
         12345, 
         MPI_COMM_WORLD, 
         &request); 
     } 


    } else if(numProc > 2) { 

     if (myid == 0){ 

      rightProc = myid + 1; 

      if (myid == 0){ 

       //printf("-------------------------------\n"); 
       //printf("Communicating Boundary ghosts !\n"); 
       //printf("-------------------------------\n"); 

       //printf("Sending ghost value from proc %d to %d\n", myid, myid + 1); 
       MPI_Isend(&myArray[localSize+1], 
          1, 
          MPI_INT, 
          rightProc, 
          12345, 
          MPI_COMM_WORLD, 
          &request); 

      } else if (myid == rightProc) { 

       //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid); 
       MPI_Irecv(&myArray[localSize+1], 
          1, 
          MPI_INT, 
          0, 
          12345, 
          MPI_COMM_WORLD, 
          &request); 
      } 

     } else if (myid == numProc - 1) { 

      leftProc = myid - 1; 

      if (myid == numProc - 1){ 

       //printf("-------------------------------\n"); 
       //printf("Communicating Boundary ghosts !\n"); 
       //printf("-------------------------------\n"); 

       ////printf("Sending ghost value from proc %d to %d\n", myid, myid + 1); 
       MPI_Isend(&myArray[localSize+1], 
          1, 
          MPI_INT, 
          leftProc, 
          12345, 
          MPI_COMM_WORLD, 
          &request); 

      } else if (myid == leftProc) { 

       rightProc = myid + 1; 

       //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid); 
       MPI_Irecv(&myArray[localSize+1], 
          1, 
          MPI_INT, 
          rightProc, 
          12345, 
          MPI_COMM_WORLD, 
          &request); 
      } 

     } else { 

       //printf("-------------------------------\n"); 
       //printf("Communicating Inner ghosts baby\n"); 
       //printf("-------------------------------\n"); 

       leftProc = myid - 1; 
       rightProc = myid + 1; 

       // Communicate tail ghost. 
       if (myid == leftProc) { 
        MPI_Isend(&myArray[localSize+1], 
           1, 
           MPI_INT, 
           rightProc, 
           12345, 
           MPI_COMM_WORLD, 
           &request); 

       } else if (myid == rightProc){ 
        MPI_Irecv(&myArray[localSize+1], 
           1, 
           MPI_INT, 
           leftProc, 
           12345, 
           MPI_COMM_WORLD, 
           &request); 
       } 

       // Communicate head ghost. 
       if (myid == leftProc) { 
        MPI_Isend(&myArray[0], 
           1, 
           MPI_INT, 
           rightProc, 
           12345, 
           MPI_COMM_WORLD, 
           &request); 

       } else if (myid == rightProc){ 
        MPI_Irecv(&myArray[0], 
           1, 
           MPI_INT, 
           leftProc, 
           12345, 
           MPI_COMM_WORLD, 
           &request); 
       } 
     } 
    } 


    // Now I want to see if the ghosts are in place! 

    if (myid == 0){ 
     printf("The ghost value is: %d\n", myArray[localSize + 1]); 
    } else if (myid == numProc - 1){ 
     printf("The ghost value is: %d\n", myArray[0]); 
    } else { 
     printf("The head ghost is: %d\n", myArray[0]); 
     printf("The tail ghost is: %d\n", myArray[localSize + 1]); 
    } 


    MPI_Finalize(); 

    exit(0); 
} 

int WhichSize(int mpiId, int numProc, int tam){ 

    int resto; 
    int tamLocal; 

    // Base size: integer division of the domain among the processes. 
    tamLocal = tam/numProc; 

    // Remainder when the domain size is not evenly divisible. 
    resto = tam - tamLocal*numProc; 

    // Give one extra element to each of the first 'resto' ranks. 
    if (mpiId < resto) tamLocal = tamLocal + 1; 

    return tamLocal; 
} 

Thanks in advance for a cleaner solution, guys!

Answer


Halos can be implemented elegantly in MPI using Cartesian virtual topologies and the send-receive operation.

First of all, having so much rank-dependent logic in conditional operators makes the code hard to read and understand. It is way better when the code is symmetric, i.e. when all ranks execute the same code. Corner cases can be taken care of using the MPI_PROC_NULL null rank - a send to or a receive from that rank results in a no-op. It is therefore enough to do:

// Compute the rank of the left neighbour 
leftProc = myid - 1; 
if (leftProc < 0) leftProc = MPI_PROC_NULL; 
// Compute the rank of the right neighbour 
rightProc = myid + 1; 
if (rightProc >= numProc) rightProc = MPI_PROC_NULL; 

// Halo exchange in forward direction 
MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 0, // send last element to the right 
      &myArray[0], 1, MPI_INT, leftProc, 0,   // receive into left halo 
      MPI_COMM_WORLD); 
// Halo exchange in reverse direction 
MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 0,   // send first element to the left 
      &myArray[localSize+1], 1, MPI_INT, rightProc, 0, // receive into right halo 
      MPI_COMM_WORLD); 

That code works for any rank, even for those at both ends - there either the source or the destination is the null rank and no actual transfer occurs in the corresponding direction. It also works with any number of MPI processes, from one to many. It requires that all ranks allocate halos on both sides, including the ones that don't really need them (the two corner ranks). One can store useful things in those dummy halos, e.g. boundary values (when solving PDEs, for instance), or simply live with the memory waste, which is usually negligible.
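As a small illustration of that last point, here is a hedged sketch (the value 88 and its placement are assumptions for illustration, not code from the answer): with a fixed Dirichlet-type boundary, the two corner ranks can preload their outer halo cell once; since the exchanges with MPI_PROC_NULL never write into it, it keeps acting as the boundary value for the whole run.

// Hypothetical fixed boundary value (assumed for illustration). 
const int boundaryValue = 88; 

// Only the two corner ranks own a physical boundary. Their outer 
// halo cell is never overwritten by the MPI_PROC_NULL exchanges 
// above, so it can hold the boundary value permanently. 
if (myid == 0)           myArray[0] = boundaryValue; 
if (myid == numProc - 1) myArray[localSize+1] = boundaryValue; 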

In your code you use the non-blocking operations incorrectly. Those are tricky and require care. MPI_Sendrecv could and should be used instead. It performs the send and the receive at the same time and thus prevents deadlocks (as long as there is a matching receive for each send).
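If one nevertheless wants non-blocking operations (e.g. to overlap communication with computation), a minimal correct pattern could look like the sketch below; this is an assumed alternative, not the answer's recommendation. All four operations are posted first and completed together with MPI_Waitall before any halo value is read:

MPI_Request reqs[4]; 

// Post the receives and sends; the buffers must not be touched 
// until the operations have completed. 
MPI_Irecv(&myArray[0],           1, MPI_INT, leftProc,  0, MPI_COMM_WORLD, &reqs[0]); 
MPI_Irecv(&myArray[localSize+1], 1, MPI_INT, rightProc, 0, MPI_COMM_WORLD, &reqs[1]); 
MPI_Isend(&myArray[1],           1, MPI_INT, leftProc,  0, MPI_COMM_WORLD, &reqs[2]); 
MPI_Isend(&myArray[localSize],   1, MPI_INT, rightProc, 0, MPI_COMM_WORLD, &reqs[3]); 

// Complete all four operations; only now may the halo elements 
// myArray[0] and myArray[localSize+1] be read. 
MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE); 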

If the domain is periodic, then the rank computation logic becomes simply:

// Compute the rank of the left neighbour 
leftProc = (myid - 1 + numProc) % numProc; 
// Compute the rank of the right neighbour 
rightProc = (myid + 1) % numProc; 

Instead of doing the arithmetic, one could create a Cartesian virtual topology and then use MPI_Cart_shift to find the ranks of the two neighbours:

// Create a non-periodic 1-D Cartesian topology 
int dims[1] = { numProc }; 
int periods[1] = { 0 }; // 0 - non-periodic, 1 - periodic 
MPI_Comm cart_comm; 
MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &cart_comm); 

// Find the two neighbours 
MPI_Cart_shift(cart_comm, 0, 1, &leftProc, &rightProc); 

The code for the halo exchange remains the same, with the only difference that cart_comm should replace MPI_COMM_WORLD. MPI_Cart_shift automatically takes care of the corner cases and returns MPI_PROC_NULL where appropriate. The advantage of that method is that one can easily switch between a non-periodic and a periodic domain by simply flipping the values in the periods[] array.
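For instance, the forward exchange from above would then read as follows (a sketch assuming cart_comm was created as shown; only the communicator changes):

// Same halo exchange as before, but over the Cartesian communicator. 
MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 0, // send last element to the right 
      &myArray[0], 1, MPI_INT, leftProc, 0,   // receive into left halo 
      cart_comm, MPI_STATUS_IGNORE); 

Setting periods[0] = 1 before the call to MPI_Cart_create is then all it takes to make the same exchange code handle a periodic domain.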

The halos have to be kept up to date as often as the algorithm requires. With most iterative schemes, the update must happen at the beginning of each iteration. One could reduce the communication frequency by introducing multi-layer halos and using the values in the outer layers to compute the values in the inner ones.
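For example, here is a hedged sketch of a two-layer halo under the same 1-D layout (the halo depth and the wider allocation are assumptions, not part of the answer's code). Each rank keeps two ghost cells per side and exchanges two elements at a time, so with a 3-point stencil one exchange covers two iterations before the next update is needed:

#define HALO 2 // assumed halo depth 

// Layout: [0 .. HALO-1] left ghosts, [HALO .. HALO+localSize-1] 
// interior, [HALO+localSize .. 2*HALO+localSize-1] right ghosts. 
int *wide = (int*)malloc((localSize + 2*HALO) * sizeof(int)); 

// Exchange HALO elements per side instead of one. 
MPI_Sendrecv(&wide[localSize],        HALO, MPI_INT, rightProc, 0, 
      &wide[0],                HALO, MPI_INT, leftProc,  0, 
      MPI_COMM_WORLD, MPI_STATUS_IGNORE); 
MPI_Sendrecv(&wide[HALO],             HALO, MPI_INT, leftProc,  0, 
      &wide[HALO + localSize], HALO, MPI_INT, rightProc, 0, 
      MPI_COMM_WORLD, MPI_STATUS_IGNORE); 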

Finally, your main function could be reduced to (without the use of a Cartesian topology):

int main(int argc, char *argv[]){ 

    int i; 
    int localSize; 
    int numProc; 
    int myid; 

    int leftProc; 
    int rightProc; 

    int * myArray; 
    int fullDomainSize = 16; 

    MPI_Init(&argc, &argv); 
    MPI_Comm_size(MPI_COMM_WORLD, &numProc); 
    MPI_Comm_rank(MPI_COMM_WORLD, &myid); 

    // Compute neighbouring ranks 
    rightProc = myid + 1; 
    if (rightProc >= numProc) rightProc = MPI_PROC_NULL; 
    leftProc = myid - 1; 
    if (leftProc < 0) leftProc = MPI_PROC_NULL; 

    // Lets get each partition size. 
    localSize = WhichSize(myid, numProc, fullDomainSize); 

    // Allocate arrays. 
    myArray = (int*)malloc((localSize+ 2)*sizeof(int)); 

    // Now we will fill the arrays with a dummy value 123. The 
    // boundaries (ghosts) are filled with 8, just to differentiate. 

    //printf("--------------------------------------------------\n"); 
    //printf("Filling node arrays usage with values... \n"); 

    for (i = 1; i<localSize; i++){ 
     myArray[i] = 123; 
    } 

    // ghosts. 
    myArray[localSize+1] = 8; 
    myArray[0] = 8; 

    //printf("-------------------------------\n"); 
    //printf("Communicating Boundary ghosts !\n"); 
    //printf("-------------------------------\n"); 

    //printf("Sending ghost value to the right\n"); 
    MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 12345, 
       &myArray[0], 1, MPI_INT, leftProc, 12345, 
       MPI_COMM_WORLD); 

    //printf("Sending ghost value to the left\n"); 
    MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 12345, 
       &myArray[localSize+1], 1, MPI_INT, rightProc, 12345, 
       MPI_COMM_WORLD); 

    // Now I want to see if the ghosts are in place! 

    printf("[%d] The head ghost is: %d\n", myid, myArray[0]); 
    printf("[%d] The tail ghost is: %d\n", myid, myArray[localSize + 1]); 

    MPI_Finalize(); 

    return 0; 
} 

Thank you for your reply!