/* PURE MPI LAPLACE SOLVER - SEE THE REPORT FOR EXACT PROBLEM DEFINITION */

#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* problem parameters */
#define N       2500   /* number of grid points; current code is validated for square grids only, Nx = Ny = N */
#define ITER    200    /* total number of iterations to be performed */
#define MANAGER 0      /* rank of the manager process */

/* message tags for the MPI send and receive calls */
#define BEGIN   1      /* manager -> worker: initial work assignment */
#define DONE    2      /* worker -> manager: finished subdomain */
#define STH     3      /* halo row arriving from the south neighbor */
#define NTH     4      /* halo row arriving from the north neighbor */

/* function prototypes */
void  init(int nx, int ny, float *u);
void  save(int nx, int ny, float *u1, char *output);
float errorcheck(int start, int end, int nx, int ny, float *u);

int main(int argc, char *argv[])
{
    int myid, nprocs;
    MPI_Status status;

    int Nx = N;
    int Ny = N;

    /* variable declarations */
    static float u[2][N][N];   /* two time levels of the solution; static keeps the ~50 MB array off the stack */
    int min_rows, overflow;
    int worker, south, north;  /* worker rank and the ranks of its neighbors */
    int num_rows;              /* number of rows assigned to each worker */
    int destination, source;   /* for convenient message passing */
    int msg;
    int base;
    int i, j, k;
    int start, end;            /* first and last j indices of each worker's chunk - row-wise domain decomposition */
    float eps = 0.1;
    int count;
    double start_time, end_time;

    /* initialize the MPI environment */
    MPI_Init(&argc, &argv);

    /* current rank and total number of processes */
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    start_time = MPI_Wtime();

    /* Manager task: divides the data among the workers, then collects and
       collates the results. The manager performs no computation itself. */
    if (myid == MANAGER)
    {
        printf("Gridsize Nx x Ny = %d x %d; \t Max Iterations = %d\n", Nx, Ny, ITER);
        printf("Initializing the solution\n\n");
        init(Nx, Ny, &u[0][0][0]);

        /* only nprocs-1 processes perform actual computation; the manager
           just coordinates, so the rows are divided among nprocs-1 workers */
        min_rows = Ny / (nprocs - 1);
        overflow = Ny % (nprocs - 1);
        base = 0;

        for (i = 1; i <= nprocs - 1; i++)
        {
            /* the first `overflow` workers absorb one extra row each */
            if (i <= overflow)
                num_rows = min_rows + 1;
            else
                num_rows = min_rows;
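            /* Worked example of the split (illustrative numbers, not from the
               original source): with nprocs = 5 there are 4 workers, so for
               Ny = 2500 we get min_rows = 2500/4 = 625 and overflow = 0, and
               every worker receives 625 rows. If Ny were 2502, overflow = 2
               and workers 1-2 would receive 626 rows, workers 3-4 only 625. */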
            /* process 0 is the manager;
               processes 1, 2, 3, ..., nprocs-1 are the actual workers */
            if (i == 1)
                south = 0;          /* no south neighbor for the first worker */
            else
                south = i - 1;
            if (i == nprocs - 1)
                north = 0;          /* no north neighbor for the last worker */
            else
                north = i + 1;

            destination = i;
            worker = i;
            msg = BEGIN;

            /* send the work assignment to each worker */
            MPI_Send(&worker, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
            MPI_Send(&base, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
            MPI_Send(&num_rows, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
            MPI_Send(&south, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
            MPI_Send(&north, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
            MPI_Send(&u[0][base][0], num_rows*Nx, MPI_FLOAT, destination, msg, MPI_COMM_WORLD);

            printf("Sent to= %d; \t j_index= %d; \t num_rows= %d; \t south_neighbor= %d; \t north_neighbor= %d\n",
                   destination, base, num_rows, south, north);
            base += num_rows;
        }

        /* collect and collate the results */
        for (i = 1; i <= nprocs - 1; i++)
        {
            source = i;
            msg = DONE;
            MPI_Recv(&base, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
            MPI_Recv(&num_rows, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
            MPI_Recv(&u[0][base][0], num_rows*Nx, MPI_FLOAT, source, msg, MPI_COMM_WORLD, &status);
        }

        /* WRITE FINAL SOLUTION */
        // save(Nx, Ny, &u[0][0][0], "output.dat");
    }
    /**************** End of manager code *********************************/

    /* Worker code. The worker body was lost from this copy of the source;
       the block below is a reconstruction that mirrors the manager-side
       protocol (BEGIN receives, STH/NTH halo exchange, DONE sends back). */
    if (myid != MANAGER)
    {
        /* zero both time levels of the solution */
        for (k = 0; k < 2; k++)
            for (j = 0; j < Ny; j++)
                for (i = 0; i < Nx; i++)
                    u[k][j][i] = 0.0;

        /* receive the work assignment from the manager */
        source = MANAGER;
        msg = BEGIN;
        MPI_Recv(&worker, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
        MPI_Recv(&base, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
        MPI_Recv(&num_rows, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
        MPI_Recv(&south, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
        MPI_Recv(&north, 1, MPI_INT, source, msg, MPI_COMM_WORLD, &status);
        MPI_Recv(&u[0][base][0], num_rows*Nx, MPI_FLOAT, source, msg, MPI_COMM_WORLD, &status);

        /* copy the received slab into the second time level so the boundary
           values are present in both levels */
        for (j = base; j < base + num_rows; j++)
            for (i = 0; i < Nx; i++)
                u[1][j][i] = u[0][j][i];

        /* global boundary rows j = 0 and j = Ny-1 are never updated */
        start = (south == 0) ? base + 1 : base;
        end   = (north == 0) ? base + num_rows - 2 : base + num_rows - 1;

        k = 0;   /* index of the current time level */
        for (count = 0; count < ITER; count++)
        {
            /* exchange halo rows with the neighbors; MPI_Sendrecv pairs the
               two transfers so the chain of exchanges cannot deadlock */
            if (south != 0)
                MPI_Sendrecv(&u[k][base][0], Nx, MPI_FLOAT, south, NTH,
                             &u[k][base-1][0], Nx, MPI_FLOAT, south, STH,
                             MPI_COMM_WORLD, &status);
            if (north != 0)
                MPI_Sendrecv(&u[k][base+num_rows-1][0], Nx, MPI_FLOAT, north, STH,
                             &u[k][base+num_rows][0], Nx, MPI_FLOAT, north, NTH,
                             MPI_COMM_WORLD, &status);

            /* Jacobi sweep over the interior points of this worker's rows */
            for (j = start; j <= end; j++)
                for (i = 1; i <= Nx-2; i++)
                    u[1-k][j][i] = 0.25f * (u[k][j][i-1] + u[k][j][i+1]
                                          + u[k][j-1][i] + u[k][j+1][i]);
            k = 1 - k;   /* the freshly computed level becomes current */
        }

        /* report the local error against the exact solution; eps is the tolerance */
        printf("Worker %d: local error = %f (eps = %f)\n",
               myid, errorcheck(start, end, Nx, Ny, &u[k][0][0]), eps);

        /* send the finished subdomain back to the manager */
        destination = MANAGER;
        msg = DONE;
        MPI_Send(&base, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
        MPI_Send(&num_rows, 1, MPI_INT, destination, msg, MPI_COMM_WORLD);
        MPI_Send(&u[k][base][0], num_rows*Nx, MPI_FLOAT, destination, msg, MPI_COMM_WORLD);
    }
    /**************** End of worker code **********************************/

    end_time = MPI_Wtime();
    if (myid == MANAGER)
        printf("Elapsed time = %f seconds\n", end_time - start_time);

    MPI_Finalize();
    return 0;
}

/* Initialize the solution: boundary points take the exact solution
   sin(pi*x)*exp(-pi*y) and interior points start at zero (reconstructed;
   the original body of init was lost from this copy of the source). */
void init(int nx, int ny, float *u)
{
    int i, j;
    double hx = 1.0/(nx-1);
    double hy = 1.0/(ny-1);
    for (j = 0; j <= ny-1; j++)
        for (i = 0; i <= nx-1; i++)
        {
            if (i == 0 || i == nx-1 || j == 0 || j == ny-1)
                *(u + j*nx + i) = (sin(3.14*i*hx)) * exp(-3.14*j*hy);
            else
                *(u + j*nx + i) = 0.0;
        }
}

/* write the solution to disk as one "i j u(i,j)" triple per line */
void save(int nx, int ny, float *u1, char *output)
{
    int i, j;
    FILE *f_out = fopen(output, "w");
    for (i = nx-1; i >= 0; i--)
        for (j = 0; j <= ny-1; j++)
            fprintf(f_out, "%d %d %6.12f\n", i, j, *(u1 + j*nx + i));
    fclose(f_out);
}

float errorcheck(int start, int end, int nx, int ny, float *u)
{
    int i, j;
    float sum = 0.0;
    float exact;
    double hx, hy;
    hx = 1.0/(nx-1);
    hy = 1.0/(ny-1);
    for (j = start; j <= end; j++)
    {
        for (i = 1; i <= nx-2; i++)
        {
            exact = (sin(3.14*i*hx)) * exp(-3.14*j*hy);                          /* exact solution at the point */
            sum = sum + (exact - *(u + j*nx + i)) * (exact - *(u + j*nx + i));   /* accumulate the squared pointwise error */
        }
    }
    return sqrt(sum);
}
/* END OF PROGRAM */
/****************************************************************************************************************/
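/* Build-and-run sketch (assumed workflow, not part of the original source;
   the file name laplace_mpi.c is a placeholder):

       mpicc -O2 laplace_mpi.c -o laplace_mpi -lm
       mpirun -np 5 ./laplace_mpi

   The solver needs at least two ranks (one manager plus one worker); with
   5 ranks, each of the 4 workers owns 2500/4 = 625 rows of the grid. */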