MPI Jacobi iterator (C++ version)

Original link: Two-hour introduction to MPI and parallel computing (5): Peer-to-peer mode (implementing Jacobi iteration in parallel) – Zhihu

Because the original article only provides Fortran code, and the code linked in its comments does not account for C++ using row-major storage, the corresponding C++ code is provided here.
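A minimal standalone sketch of the storage-order point (the array name and sizes here are illustrative, not taken from the program below): in C++ the elements of one row sit next to each other in memory, so a boundary row can be handed to MPI as one contiguous block, while the column-major Fortran original exchanges boundary columns instead.

#include <cassert>

int main()
{
    const int rows = 6, cols = 16;
    double A[rows][cols] = {0};
    // Consecutive elements of a row are adjacent, so &A[r][0] with a count of
    // cols describes exactly one contiguous row -- the buffer layout that the
    // MPI_Sendrecv calls in the program below rely on.
    assert(&A[2][cols - 1] + 1 == &A[3][0]); // end of row 2 touches start of row 3
    assert(&A[3][0] - &A[2][0] == cols);     // row starts are spaced cols doubles apart
    return 0;
}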

The 2D case corresponding to the original article

To match the original article, the program must be run with exactly mysize processes; that is, with mysize = 4, run

mpirun -n 4 ./jacobi

Otherwise, printf("Please use %d processors!\n", mysize); is executed and the program exits.

In addition, to make the MPI initialization step easier to see, I added the calcu_jacobi macro: if it is commented out, the Jacobi iteration is skipped and the initialized data is printed directly.
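For reference, one way to build and run the code below (the compiler wrapper name mpicxx is an assumption here; depending on the MPI installation it may be mpic++ or mpiCC instead):

mpicxx -O2 jacobi.cpp -o jacobi

mpirun -n 4 ./jacobi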

/**************************************************************************
 > File Name: jacobi.cpp
 > Author: Roy
 > Created Time: Tuesday, October 17, 2023 11:27:18 AM
 > Description:
 **************************************************************************/
#include "mpi.h"
#include <iostream>
#include <iomanip>
#include <stdio.h>

#define calcu_jacobi // Flag controlling whether the Jacobi iteration is performed
int main(int argc, char *argv[])
{
    int steps = 10; //Number of iterations
    const int Acolsize = 16; //Number of columns
    const int mysize = 4; //Number of processors
    const int Arowsize = mysize + 2; //2 additional rows for buffer
    double A[Arowsize][Acolsize] = {0};// processors' data matrix
    double B[Arowsize][Acolsize] = {0};// processors' buffer matrix
    int begin_row = 1, end_row = Arowsize - 2; // Indices of the first and last local data rows

    int myid = 0, numprocs = 0; // MPI rank and total number of processes (expected to equal mysize = 4)
    int up = 0, down = 0; //rank ID for up and down processors
    int tag1 = 3, tag2 = 4; //MPI send and recv tags
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    if (numprocs != mysize){
        printf("Please use %d processors!\n", mysize);
        MPI_Finalize();
        return 0;
    }
    printf("Process %d of %d is alive\n", myid, numprocs); // print MPI process information

    // assign values to A so that the gathered matrix is all 0 except for the boundaries (set to 8.0)
    for (int i = 0; i < Arowsize; i++){
        for (int j = 0; j < Acolsize; j++){
            A[i][j] = 0.0;
        }
        A[i][0] = 8.0;
        A[i][Acolsize-1] = 8.0;
    }

    // Assume rank 0 is the uppermost part and 3 is the lowest part of the gathered matrix
    // proc0 additional assignment
    if (myid == 0){
        for (int j = 0; j < Acolsize; j++){
            A[1][j] = 8.0;
        }
    }
    // proc3 additional assignment
    if (myid == 3){
        for (int j = 0; j < Acolsize; j++){
            A[Arowsize-2][j] = 8.0;
        }
    }
#ifdef calcu_jacobi
    // Calculate nearby ranks
    // 0 is the uppermost and 3 is the lowest
    if (myid > 0){
        up = myid - 1;
    }
    else
        up = MPI_PROC_NULL;
    if (myid < 3){
        down = myid + 1;
    }
    else
        down = MPI_PROC_NULL;

    // Fix the local row range once: rank 0 keeps its upper boundary row,
    // rank 3 keeps its lower boundary row
    if (myid == 0){
        begin_row = 2; // For rank 0, do not modify the upper boundary
    }
    if (myid == 3){
        end_row = Arowsize - 3; // For rank 3, do not modify the lower boundary
    }

    // Jacobi iterations
    for (int step = 0; step < steps; step++){
        // up-to-down exchange
        MPI_Sendrecv(
                &A[Arowsize - 2][0], // const void *sendbuf; lowest data row (excluding the buffer row)
                Acolsize, // int sendcount; number of elements sent
                MPI_DOUBLE, // MPI_Datatype sendtype;
                down, // int dest; rank of the receiver
                tag1, // int sendtag;
                //----------------------------------
                &A[0][0], // void *recvbuf; uppermost buffer row
                Acolsize, // int recvcount; number of elements received
                MPI_DOUBLE, // MPI_Datatype recvtype;
                up, // int source; rank of the sender
                tag1, // int recvtag;
                MPI_COMM_WORLD, // MPI_Comm comm;
                &status // MPI_Status *status;
                );
        
        // down-to-up exchange
        MPI_Sendrecv(
                &A[1][0], // const void *sendbuf; uppermost data row (excluding the buffer row)
                Acolsize, // int sendcount; number of elements sent
                MPI_DOUBLE, // MPI_Datatype sendtype;
                up, // int dest; rank of the receiver
                tag2, // int sendtag;
                //----------------------------------
                &A[Arowsize - 1][0], // void *recvbuf; lowest buffer row
                Acolsize, // int recvcount; number of elements received
                MPI_DOUBLE, // MPI_Datatype recvtype;
                down, // int source; rank of the sender
                tag2, // int recvtag;
                MPI_COMM_WORLD, // MPI_Comm comm;
                &status // MPI_Status *status;
                );
        // Compute B from A over the local data rows, excluding the boundary columns
        for (int i = begin_row; i <= end_row; i++){
            for (int j = 1; j <= Acolsize - 2; j++){
                B[i][j] = (A[i][j + 1] + A[i][j - 1] + A[i + 1][j] + A[i - 1][j]) * 0.25;
            }
        }

        // Update A from B
        for (int i = begin_row; i <= end_row; i++){
            for (int j = 1; j <= Acolsize - 2; j++){
                A[i][j] = B[i][j];
            }
        }
    }


#endif

    // Print result
    for (int row = 1; row <= Arowsize - 2; row++){
        std::cout << "proc (" << myid << "): ";
        for (int col = 0; col <= Acolsize - 1; col++){
            std::cout << std::setiosflags(std::ios_base::left)
                << std::setw(8) << std::setprecision(4) << A[row][col]
                << std::resetiosflags(std::ios_base::left);
        }
        std::cout << std::endl;
    }
    MPI_Finalize();
}
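One detail worth noting in the code above: the end ranks (0 and 3) set their missing neighbor to MPI_PROC_NULL, and an MPI send or receive addressed to MPI_PROC_NULL completes immediately without transferring data (a receive leaves its buffer untouched), so every rank can issue the same two MPI_Sendrecv calls with no special-case branches. A minimal standalone sketch of this behavior (the variable names here are illustrative, not taken from the program above):

/* Sketch: MPI_Sendrecv with MPI_PROC_NULL is a no-op on the null side */
#include "mpi.h"
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Ranks at the ends of the chain get MPI_PROC_NULL as their missing neighbor
    int up   = (rank > 0)        ? rank - 1 : MPI_PROC_NULL;
    int down = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;

    double send = rank;
    double recv = -1.0; // stays -1.0 on rank 0, whose source is MPI_PROC_NULL
    MPI_Status status;
    MPI_Sendrecv(&send, 1, MPI_DOUBLE, down, 0,
                 &recv, 1, MPI_DOUBLE, up, 0,
                 MPI_COMM_WORLD, &status);
    printf("rank %d of %d received %.1f from its up neighbor\n", rank, size, recv);
    MPI_Finalize();
    return 0;
}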







Extension: 3D Jacobi iterator

The setup is the same as in the code above; the only difference is that it computes the 3D case.
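As in the 2D case, the halo exchange relies on the C++ memory layout: in an array A[Aplanesize][Arowsize][Acolsize] each plane A[p] is one contiguous block of Arowsize*Acolsize doubles, which is what lets the MPI_Sendrecv calls below send an entire plane starting at &A[p][0][0]. A small standalone check of that assumption (the sizes and names here are illustrative):

#include <cassert>

int main()
{
    const int planes = 6, rows = 5, cols = 16;
    double A[planes][rows][cols] = {0};
    // The start addresses of consecutive planes are rows*cols doubles apart,
    // so &A[p][0][0] with a count of rows*cols covers exactly one plane.
    assert(&A[1][0][0] - &A[0][0][0] == rows * cols);
    // The last element of plane 0 is immediately followed by the first element of plane 1.
    assert(&A[0][rows - 1][cols - 1] + 1 == &A[1][0][0]);
    return 0;
}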

/**************************************************************************
 > File Name: 3Djacobi.cpp
 > Author: Roy
 > Created Time: Thursday, October 19, 2023 4:25:03 PM
 > Description: MPI Jacobi calculator for 3D matrix operations
 **************************************************************************/
#include "mpi.h"
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <cmath>
#define calcu_jacobi // Flag controlling whether the Jacobi iteration is performed
int main(int argc, char *argv[])
{
    int steps = 1; //Number of iterations
    // We want a 3D matrix of size P*R*C, more precisely [mysize*4][5][16], with flattened index (p*R + r)*C + c
    // Each process stores A[mysize + 2][5][16]: its mysize data planes plus 2 additional buffer planes,
    // so the 4 processes together hold the whole matrix
    const int Acolsize = 16; //Number of columns
    const int mysize = 4; //Number of processors
    const int Arowsize = 5; //Number of rows
    const int Aplanesize = mysize + 2; //2 additional planes for buffer
    double A[Aplanesize][Arowsize][Acolsize] = {0};// processors' data matrix
    double B[Aplanesize][Arowsize][Acolsize] = {0};// processors' buffer matrix
    int begin_p = 1, end_p = Aplanesize - 2; // Indices of the first and last local data planes

    int myid = 0, numprocs = 0; // MPI rank and total number of processes (expected to equal mysize = 4)
    int up = 0, down = 0; //rank ID for up and down processors
    int tag1 = 3, tag2 = 4; //MPI send and recv tags
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    if (numprocs != mysize){
        printf("Please use %d processors!\n", mysize);
        MPI_Finalize();
        return 0;
    }
    printf("Process %d of %d is alive\n", myid, numprocs); // print MPI process information

    // assign values to A so that the gathered matrix is all 0 except for the boundaries (set to 8.0)
    for (int i = 0; i < Aplanesize; i++){
        for (int j = 1; j < Arowsize - 1; j++){
            for (int k = 1; k < Acolsize - 1; k++){
                A[i][j][k] = 0.0;
            }
        }
        for (int k = 0; k < Acolsize; k++){
            A[i][0][k] = 8.0;
            A[i][Arowsize - 1][k] = 8.0;
        }
        for (int j = 0; j < Arowsize; j++){
            A[i][j][0] = 8.0;
            A[i][j][Acolsize - 1] = 8.0;
        }
    }
    }
   
    // Assume rank 0 is the uppermost plane and 3 is the lowest plane of the gathered matrix
    // proc0 additional assignment
    if (myid == 0){
        for (int j = 0; j < Arowsize; j++){
            for (int k = 0; k < Acolsize; k++){
                A[1][j][k] = 8.0;
            }
        }
    }
    // proc3 additional assignment
    if (myid == 3){
        for (int j = 0; j < Arowsize; j++){
            for (int k = 0; k < Acolsize; k++){
                A[Aplanesize - 2][j][k] = 8.0;
            }
        }
    }
#ifdef calcu_jacobi
    // Calculate nearby ranks
    // 0 is the uppermost and 3 is the lowest
    if (myid > 0){
        up = myid - 1;
    }
    else
        up = MPI_PROC_NULL;
    if (myid < 3){
        down = myid + 1;
    }
    else
        down = MPI_PROC_NULL;

    // Fix the local plane range once: rank 0 keeps its upper boundary plane,
    // rank 3 keeps its lower boundary plane
    if (myid == 0){
        begin_p = 2; // For rank 0, do not modify the upper boundary
    }
    if (myid == 3){
        end_p = Aplanesize - 3; // For rank 3, do not modify the lower boundary
    }

    // Jacobi iterations
    for (int step = 0; step < steps; step++){
        // up-to-down exchange
        MPI_Sendrecv(
                &A[Aplanesize - 2][0][0], // const void *sendbuf; lowest data plane (excluding the buffer plane)
                Arowsize * Acolsize, // int sendcount; number of elements sent
                MPI_DOUBLE, // MPI_Datatype sendtype;
                down, // int dest; rank of the receiver
                tag1, // int sendtag;
                //----------------------------------
                &A[0][0][0], // void *recvbuf; uppermost buffer plane
                Arowsize * Acolsize, // int recvcount; number of elements received
                MPI_DOUBLE, // MPI_Datatype recvtype;
                up, // int source; rank of the sender
                tag1, // int recvtag;
                MPI_COMM_WORLD, // MPI_Comm comm;
                &status // MPI_Status *status;
                );
        
        // down-to-up exchange
        MPI_Sendrecv(
                &A[1][0][0], // const void *sendbuf; uppermost data plane (excluding the buffer plane)
                Arowsize * Acolsize, // int sendcount; number of elements sent
                MPI_DOUBLE, // MPI_Datatype sendtype;
                up, // int dest; rank of the receiver
                tag2, // int sendtag;
                //----------------------------------
                &A[Aplanesize - 1][0][0], // void *recvbuf; lowest buffer plane
                Arowsize * Acolsize, // int recvcount; number of elements received
                MPI_DOUBLE, // MPI_Datatype recvtype;
                down, // int source; rank of the sender
                tag2, // int recvtag;
                MPI_COMM_WORLD, // MPI_Comm comm;
                &status // MPI_Status *status;
                );
        // Compute B from A over the local data planes, excluding the boundary elements
        for (int i = begin_p; i <= end_p; i++){
            for (int j = 1; j <= Arowsize - 2; j++){
                for (int k = 1; k <= Acolsize - 2; k++){
                    B[i][j][k] = (A[i][j][k + 1] + A[i][j][k - 1] + A[i][j + 1][k] + A[i][j - 1][k] + A[i + 1][j][k] + A[i - 1][j][k]) / 6.0;
                }
            }
        }

        // Update A from B
        for (int i = begin_p; i <= end_p; i++){
            for (int j = 1; j <= Arowsize - 2; j++){
                for (int k = 1; k <= Acolsize - 2; k++){
                    A[i][j][k] = B[i][j][k];
                }
            }
        }
    }


#endif

    // Print result (only from one rank to keep the output readable)
    int printid = 3;
    if (myid == printid)
    {
        for (int plane = 1; plane <= Aplanesize - 2; plane++){
            for (int row = 0; row <= Arowsize - 1; row++){
                printf("proc (%d) @ A(%d, %d, k) = ", myid, plane, row);
                for (int col = 0; col <= Acolsize - 1; col++){
                    std::cout << std::setiosflags(std::ios_base::left)
                        << std::setw(5) << std::ceil(A[plane][row][col] * 100.0) / 100.0 // round up to 2 decimal places
                        << std::resetiosflags(std::ios_base::left);
                }
                std::cout << std::endl;
            }
        }
    }
    MPI_Finalize();
}
