[go: up one dir, main page]

0% found this document useful (0 votes)
709 views26 pages

10 MPI Programmes

Download as doc, pdf, or txt
Download as doc, pdf, or txt
Download as doc, pdf, or txt
You are on page 1/ 26

1

Structure of a MPI program

/* Skeleton of an MPI program: initialise, query rank/size, do work, finalise.
 * FIX: non-standard `void main` changed to `int main` returning 0. */
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, size;

    MPI_Init(&argc, &argv);               /* must precede all other MPI calls */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* this process's id */
    MPI_Comm_size(MPI_COMM_WORLD, &size); /* total number of processes */

    /* ... C code ... */

    MPI_Finalize();                       /* must be the last MPI call */
    return 0;
}

1. Parallel program to print Hello World using MPI.

#include <stdio.h> #include <mpi.h> int main (int argc, char **argv) { int nProcId, nProcNo; int nNameLen; char szMachineName[MPI_MAX_PROCESSOR_NAME]; MPI_Init (&argc, &argv); // Start up MPI MPI_Comm_size (MPI_COMM_WORLD,&nProcNo); // Find out number of processes MPI_Comm_rank (MPI_COMM_WORLD, &nProcId); // Find out process rank

MPI_Get_processor_name (szMachineName, &nNameLen); // Get machine name printf ("Hello World from process %d on %s\r\n", nProcId, szMachineName); if (nProcId == 0) printf ("Number of Processes: %d\r\n", nProcNo); MPI_Finalize (); // Shut down MPI return 0;} [09msc005@scse-hn ~]$mpicc hello.c [09msc005@scse-hn ~]$ mpirun -r ssh -n 72 ./a.out WARNING: Unable to read mpd.hosts or list of hosts isn't provided. MPI job will be run on the current machine only. Hello World from process 13 on scse-hn Hello World from process 27 on scse-hn Hello World from process 30 on scse-hn Hello World from process 20 on scse-hn Hello World from process 44 on scse-hn Hello World from process 47 on scse-hn Hello World from process 71 on scse-hn Hello World from process 56 on scse-hn Hello World from process 42 on scse-hn Hello World from process 31 on scse-hn Hello World from process 25 on scse-hn Hello World from process 38 on scse-hn

Hello World from process 41 on scse-hn Hello World from process 61 on scse-hn Hello World from process 7 on scse-hn Hello World from process 45 on scse-hn Hello World from process 10 on scse-hn Hello World from process 17 on scse-hn Hello World from process 48 on scse-hn Hello World from process 53 on scse-hn Hello World from process 12 on scse-hn Hello World from process 11 on scse-hn Hello World from process 50 on scse-hn Hello World from process 64 on scse-hn Hello World from process 58 on scse-hn Hello World from process 69 on scse-hn Hello World from process 16 on scse-hn Hello World from process 62 on scse-hn Hello World from process 54 on scse-hn Hello World from process 6 on scse-hn Hello World from process 1 on scse-hn Hello World from process 18 on scse-hn Hello World from process 28 on scse-hn Hello World from process 9 on scse-hn

Hello World from process 67 on scse-hn Hello World from process 70 on scse-hn Hello World from process 0 on scse-hn Number of Processes: 72 Hello World from process 34 on scse-hn Hello World from process 55 on scse-hn Hello World from process 65 on scse-hn Hello World from process 32 on scse-hn Hello World from process 68 on scse-hn Hello World from process 63 on scse-hn Hello World from process 49 on scse-hn Hello World from process 36 on scse-hn Hello World from process 4 on scse-hn Hello World from process 19 on scse-hn Hello World from process 14 on scse-hn Hello World from process 66 on scse-hn Hello World from process 15 on scse-hn Hello World from process 43 on scse-hn Hello World from process 37 on scse-hn Hello World from process 2 on scse-hn Hello World from process 21 on scse-hn Hello World from process 5 on scse-hn

Hello World from process 39 on scse-hn Hello World from process 3 on scse-hn Hello World from process 51 on scse-hn Hello World from process 52 on scse-hn Hello World from process 57 on scse-hn Hello World from process 26 on scse-hn Hello World from process 59 on scse-hn Hello World from process 33 on scse-hn Hello World from process 22 on scse-hn Hello World from process 46 on scse-hn Hello World from process 23 on scse-hn Hello World from process 24 on scse-hn Hello World from process 35 on scse-hn Hello World from process 60 on scse-hn Hello World from process 8 on scse-hn Hello World from process 29 on scse-hn Hello World from process 40 on scse-hn

2. Program to calculate the biggest and the smallest among three numbers, in two different processes running in parallel.

#include<stdio.h> #include<mpi.h> int main(int argc,char **argv) { int a=1,b=8,c=3; int p; int id; MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&p); MPI_Comm_rank(MPI_COMM_WORLD,&id); if(id==0){ if(a>b && a>c) printf("Largest among %d %d %d is =%d \n ",a,b,c,a); if(b>c && b>a) printf("%d is largest among %d %d %d\n",b,a,b,c); if(c>a && c>b ) printf("%d is largest\n",c);} if(id==1){ if(a<b && a<c)

printf("Smallest number= %d\n",a); if(b<a && b<c) printf("Smallest number= %d\n",b); if(c<a && c<b) printf("Smallest number=%d\n",c); } printf("Process %d has finished task\n",id); MPI_Finalize(); fflush(stdout); return 0; } [09msc005@scse-hn ~]$ mpicc big.c [09msc005@scse-hn ~]$ mpirun -r ssh -n 2 ./a.out WARNING: Unable to read mpd.hosts or list of hosts isn't provided. MPI job will be run on the current machine only. 8 is largest among 1 8 3 Process 0 has finished task Smallest number= 1 Process 1 has finished task

3. Sum of the first N natural numbers, with elapsed time; the use of private and shared data can be analyzed here.

#include<stdio.h> #include<mpi.h> int main(int argc,char **argv) { int i; int p,p2; int id; int global_sum; int sum; double start_time,finish_time; MPI_Init(&argc,&argv); MPI_Barrier(MPI_COMM_WORLD); start_time=MPI_Wtime(); MPI_Comm_size(MPI_COMM_WORLD,&p); MPI_Comm_rank(MPI_COMM_WORLD,&id); p2=p-1; if(id!=0) { sum=0; for(i=id;i<=100;i+=p2) sum+=i; }

else{sum=0;} printf("process %d is done sum = %d \n",id,sum); MPI_Reduce(&sum,&global_sum,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD); finish_time=MPI_Wtime(); MPI_Finalize(); fflush(stdout); if(id==0){ printf("The total sum = %d\n",global_sum); printf("Time elapsed= %.16f\n",finish_time-start_time); } return 0; } ****This programme can be modified to find the sum of the square of natural numbers.

[09msc005@scse-hn ~]$ mpicc sum.c [09msc005@scse-hn ~]$ mpirun -r ssh -n 72 ./a.out WARNING: Unable to read mpd.hosts or list of hosts isn't provided. MPI job will be run on the current machine only. process 1 is done sum = 73 process 3 is done sum = 77 process 7 is done sum = 85 process 4 is done sum = 79

10

process 0 is done sum = 0 process 2 is done sum = 75 process 5 is done sum = 81 process 11 is done sum = 93 process 26 is done sum = 123 process 15 is done sum = 101 process 32 is done sum = 32 process 16 is done sum = 103 process 64 is done sum = 64 process 24 is done sum = 119 process 29 is done sum = 129 process 28 is done sum = 127 process 13 is done sum = 97 process 61 is done sum = 61 process 14 is done sum = 99 process 60 is done sum = 60 process 20 is done sum = 111 process 41 is done sum = 41 process 40 is done sum = 40 process 17 is done sum = 105 process 36 is done sum = 36 process 31 is done sum = 31

11

process 48 is done sum = 48 process 52 is done sum = 52 process 12 is done sum = 95 process 25 is done sum = 121 process 23 is done sum = 117 process 47 is done sum = 47 process 49 is done sum = 49 process 6 is done sum = 83 process 56 is done sum = 56 process 30 is done sum = 30 process 62 is done sum = 62 process 27 is done sum = 125 process 43 is done sum = 43 process 10 is done sum = 91 process 9 is done sum = 89 process 39 is done sum = 39 process 21 is done sum = 113 process 35 is done sum = 35 process 58 is done sum = 58 process 59 is done sum = 59 process 37 is done sum = 37 process 42 is done sum = 42

12

process 68 is done sum = 68 process 63 is done sum = 63 process 18 is done sum = 107 process 71 is done sum = 71 process 46 is done sum = 46 process 44 is done sum = 44 process 51 is done sum = 51 process 34 is done sum = 34 process 8 is done sum = 87 process 19 is done sum = 109 process 66 is done sum = 66 process 50 is done sum = 50 process 55 is done sum = 55 process 67 is done sum = 67 process 45 is done sum = 45 process 57 is done sum = 57 process 53 is done sum = 53 process 38 is done sum = 38 process 22 is done sum = 115 process 33 is done sum = 33 process 69 is done sum = 69 process 65 is done sum = 65

13

process 54 is done sum = 54 process 70 is done sum = 70 The total sum = 5050 Time elapsed= 0.0065221786499023

4. Sum of odd and even numbers.

#include<stdio.h> #include<mpi.h> int main(int argc,char **argv) { int i; int p,p2; int id; int global_sum_odd,global_sum_even; int odd_sum,even_sum; double start_time,finish_time; MPI_Init(&argc,&argv); MPI_Barrier(MPI_COMM_WORLD); start_time=MPI_Wtime(); MPI_Comm_size(MPI_COMM_WORLD,&p); MPI_Comm_rank(MPI_COMM_WORLD,&id);

14

p2=p-1; if(id!=0){ odd_sum=0; even_sum=0; for(i=id;i<=100;i+=p2){ if(i%2!=0) odd_sum+=i; else even_sum+=i; } } else{odd_sum=0;even_sum=0;} printf("process %d is done odd sum = %d and even sum= %d \n",id,odd_sum,even_sum); MPI_Reduce(&odd_sum,&global_sum_odd,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD); MPI_Reduce(&even_sum,&global_sum_even,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD ); finish_time=MPI_Wtime(); MPI_Finalize(); fflush(stdout); if(id==0){ printf("The total sum of odd numbers = %d\n",global_sum_odd);

printf("The total sum of even numbers = %d\n",global_sum_even); printf("Time elapsed= %.16f\n",finish_time-start_time);

15

} return 0;} [09msc005@scse-hn ~]$ mpicc oddeven.c [09msc005@scse-hn ~]$ mpirun -r ssh -n 72 ./a.out WARNING: Unable to read mpd.hosts or list of hosts isn't provided. MPI job will be run on the current machine only. process 18 is done odd sum = 89 and even sum= 18 process 38 is done odd sum = 0 and even sum= 38 process 8 is done odd sum = 79 and even sum= 8 process 20 is done odd sum = 91 and even sum= 20 process 22 is done odd sum = 93 and even sum= 22 process 13 is done odd sum = 13 and even sum= 84 process 56 is done odd sum = 0 and even sum= 56 process 30 is done odd sum = 0 and even sum= 30 process 54 is done odd sum = 0 and even sum= 54 process 42 is done odd sum = 0 and even sum= 42 process 44 is done odd sum = 0 and even sum= 44 process 46 is done odd sum = 0 and even sum= 46 process 36 is done odd sum = 0 and even sum= 36 process 16 is done odd sum = 87 and even sum= 16 process 34 is done odd sum = 0 and even sum= 34 process 70 is done odd sum = 0 and even sum= 70

16

process 32 is done odd sum = 0 and even sum= 32 process 64 is done odd sum = 0 and even sum= 64 process 62 is done odd sum = 0 and even sum= 62 process 58 is done odd sum = 0 and even sum= 58 process 4 is done odd sum = 75 and even sum= 4 process 5 is done odd sum = 5 and even sum= 76 process 50 is done odd sum = 0 and even sum= 50 process 48 is done odd sum = 0 and even sum= 48 process 52 is done odd sum = 0 and even sum= 52 process 53 is done odd sum = 53 and even sum= 0 process 26 is done odd sum = 97 and even sum= 26 process 12 is done odd sum = 83 and even sum= 12 process 1 is done odd sum = 1 and even sum= 72 process 66 is done odd sum = 0 and even sum= 66 process 24 is done odd sum = 95 and even sum= 24 process 0 is done odd sum = 0 and even sum= 0 process 28 is done odd sum = 99 and even sum= 28 process 49 is done odd sum = 49 and even sum= 0 process 60 is done odd sum = 0 and even sum= 60 process 40 is done odd sum = 0 and even sum= 40 process 3 is done odd sum = 3 and even sum= 74 process 17 is done odd sum = 17 and even sum= 88

17

process 41 is done odd sum = 41 and even sum= 0 process 65 is done odd sum = 65 and even sum= 0 process 68 is done odd sum = 0 and even sum= 68 process 9 is done odd sum = 9 and even sum= 80 process 55 is done odd sum = 55 and even sum= 0 process 23 is done odd sum = 23 and even sum= 94 process 51 is done odd sum = 51 and even sum= 0 process 57 is done odd sum = 57 and even sum= 0 process 47 is done odd sum = 47 and even sum= 0 process 25 is done odd sum = 25 and even sum= 96 process 15 is done odd sum = 15 and even sum= 86 process 33 is done odd sum = 33 and even sum= 0 process 71 is done odd sum = 71 and even sum= 0 process 19 is done odd sum = 19 and even sum= 90 process 39 is done odd sum = 39 and even sum= 0 process 14 is done odd sum = 85 and even sum= 14 process 2 is done odd sum = 73 and even sum= 2 process 21 is done odd sum = 21 and even sum= 92 process 10 is done odd sum = 81 and even sum= 10 process 11 is done odd sum = 11 and even sum= 82 process 69 is done odd sum = 69 and even sum= 0 process 63 is done odd sum = 63 and even sum= 0

18

process 37 is done odd sum = 37 and even sum= 0 process 45 is done odd sum = 45 and even sum= 0 process 43 is done odd sum = 43 and even sum= 0 process 61 is done odd sum = 61 and even sum= 0 process 59 is done odd sum = 59 and even sum= 0 process 27 is done odd sum = 27 and even sum= 98 process 35 is done odd sum = 35 and even sum= 0 process 29 is done odd sum = 29 and even sum= 100 process 7 is done odd sum = 7 and even sum= 78 process 31 is done odd sum = 31 and even sum= 0 process 67 is done odd sum = 67 and even sum= 0 process 6 is done odd sum = 77 and even sum= 6 The total sum of odd numbers = 2500 The total sum of even numbers = 2550 Time elapsed= 0.0054359436035156

5. Find sum, average, biggest and smallest among array elements.

#include<stdio.h> #include<mpi.h> int main(int argc,char **argv) {

19

int a[100]={5,4,2,3,6,9,8,7,14,4,5,2,4,21}; int p; int id; int sum; int i,big,small; MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&p); MPI_Comm_rank(MPI_COMM_WORLD,&id); if(id==0){ big=a[0]; for(i=0;i<14;i++) { if(a[i]>big) big=a[i]; } printf("The biggest number is= %d\n",big); } if(id==1){ small=a[0]; for(i=0;i<14;i++) { if(a[i]<small)

20

small=a[i]; } printf("\n The smallest number is= %d\n",small); } if(id==2){ sum=0; for(i=0;i<14;i++) sum+=i; printf("\n The sum of the elements is= %d\n",sum); printf("\n The avg = %f",sum/14.00); } printf("Process %d has finished task\n",id); MPI_Finalize(); fflush(stdout); return 0; } [09msc005@scse-hn ~]$ mpicc arr.c [09msc005@scse-hn ~]$ mpirun -r ssh -n 3 ./a.out WARNING: Unable to read mpd.hosts or list of hosts isn't provided. MPI job will be run on the current machine only. The biggest number is= 21 Process 0 has finished task

21

The smallest number is= 2

The sum of the elements is= 91

The avg = 6.500000Process 2 has finished task Process 1 has finished task

6. Different loop-chunking strategies

Cyclic allocation: for(i=id;i<n;i+=p) // id is the process id & p is the total number of processes. Say n=20, p=5 {do operation within the loop} Iterations assigned: Process 0: 0,5,10,15 Process 1: 1,6,11,16 Process 2: 2,7,12,17 Process 3: 3,8,13,18 Process 4: 4,9,14,19

Block allocation: if(PID==0)

22

{ for(i=0;i<10;i++) // do operation } if(PID==1) { for(i=10;i<20;i++) // do operation } ... and so on until the maximum is reached. But this (hard-coding the bounds per process) is not an effective method.

7. Communication between two processes.

/* 7. Point-to-point communication: rank 1 sends 100 doubles (tag 55)
 * to rank 0, which receives into an oversized buffer and queries the
 * actual element count with MPI_Get_count.
 * FIXES: removed a stray '`' that preceded the declarations, added
 * <stdio.h> (printf was used without it), and changed the non-standard
 * `void main` to `int main` with a return value. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, i, count;
    double data[100], value[200];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        for (i = 0; i < 100; ++i)
            data[i] = i;
        MPI_Send(data, 100, MPI_DOUBLE, 0, 55, MPI_COMM_WORLD);
    } else {
        /* Receive buffer is larger than the message; MPI_Get_count
         * reports how many elements actually arrived. */
        MPI_Recv(value, 200, MPI_DOUBLE, MPI_ANY_SOURCE, 55, MPI_COMM_WORLD, &status);
        printf("P:%d Got data from processor %d \n", rank, status.MPI_SOURCE);
        MPI_Get_count(&status, MPI_DOUBLE, &count);
        printf("P:%d Got %d elements \n", rank, count);
        printf("P:%d value[5]=%lf\n", rank, value[5]);
    }
    MPI_Finalize();
    return 0;
}

---------------------------P: 0 Got data from processor 1 P: 0 Got 100 elements P: 0 value[5]=5.000000

8. Computation of Pi
/* 8. Computation of Pi by midpoint-rule integration of 4/(1+x^2) over
 * [0,1]; the n intervals are distributed cyclically over the ranks and
 * the partial sums are combined with MPI_Reduce on rank 0.
 * FIXES: added <stdio.h> (printf/scanf were used without it), flushed
 * the prompt so it appears before scanf blocks, treated a failed scanf
 * as "0 = quit", and removed the unused variables `rc` and `a`. */
#include "mpi.h"
#include <stdio.h>
#include <math.h>

int main(int argc, char *argv[])
{
    int done = 0, n, myid, numprocs, i;
    double PI25DT = 3.141592653589793238462643;   /* 25-digit reference value */
    double mypi, pi, h, sum, x;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    while (!done) {
        if (myid == 0) {
            printf("Enter the number of intervals: (0 quits) ");
            fflush(stdout);              /* show prompt before blocking read */
            if (scanf("%d", &n) != 1)
                n = 0;                   /* EOF or bad input: quit cleanly */
        }
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);   /* every rank needs n */
        if (n == 0)
            break;
        h = 1.0 / (double)n;             /* interval width */
        sum = 0.0;
        for (i = myid + 1; i <= n; i += numprocs) {     /* cyclic distribution */
            x = h * ((double)i - 0.5);   /* midpoint of interval i */
            sum += 4.0 / (1.0 + x * x);
        }
        mypi = h * sum;
        MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        if (myid == 0)
            printf("pi is approximately %.16f, Error is %.16f\n",
                   pi, fabs(pi - PI25DT));
    }
    MPI_Finalize();
    return 0;
}

9. Program to demonstrate MPI derived data types.


/* 9. MPI derived data types: a contiguous type of 3 ints is used to
 * ship a struct {x,y,z} from rank 3 to rank 1.
 * FIXES: `MPI_status` -> `MPI_Status` (the type name is capitalised;
 * the original would not compile), non-standard `void main` ->
 * `int main`, added <stdio.h> for printf, and the committed type is
 * freed before finalising.
 * NOTE(review): mapping 3 MPI_INT onto the struct assumes the three
 * int members are laid out without padding — true on common ABIs, but
 * MPI_Type_create_struct would be the fully portable choice. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank;
    MPI_Status status;
    struct {
        int x;
        int y;
        int z;
    } point;
    MPI_Datatype ptype;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Type_contiguous(3, MPI_INT, &ptype);   /* 3 consecutive ints */
    MPI_Type_commit(&ptype);
    if (rank == 3) {
        point.x = 15;
        point.y = 23;
        point.z = 6;
        MPI_Send(&point, 1, ptype, 1, 52, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&point, 1, ptype, 3, 52, MPI_COMM_WORLD, &status);
        printf("P:%d received coords are (%d,%d,%d) \n",
               rank, point.x, point.y, point.z);
    }
    MPI_Type_free(&ptype);
    MPI_Finalize();
    return 0;
}

---------------------------P:1 received coords are (15,23,6)

25

10. Vector data type example.


User completely specifies memory locations defining the "vector" C: int MPI_Type_vector (int count, int blocklength, int stride, MPI_Datatype oldtype, MPI_Datatype *newtype) Vector Datatype Example

count = 2 stride = 5 blocklength = 3

/* 10. MPI_Type_vector example: `coltype` describes one column of the
 * 4x8 double matrix (count=4 blocks of blocklength=1 with stride=8).
 * Rank 3 sends column 7 to rank 1, which receives it into column 2 —
 * matching the sample output (17, 107, 1007, 10007 = 10^(i+1)+7).
 * FIXES: `MPI_status` -> `MPI_Status`; removed the stray extra comma
 * in the MPI_Send/MPI_Recv argument lists (a syntax error); added
 * <stdio.h> and <math.h> (printf/pow were used without them); format
 * typo `%1f` -> `%lf`; `void main` -> `int main`; the committed type
 * is freed before finalising. */
#include <stdio.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, i, j;
    MPI_Status status;
    double x[4][8];
    MPI_Datatype coltype;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Type_vector(4, 1, 8, MPI_DOUBLE, &coltype);   /* one matrix column */
    MPI_Type_commit(&coltype);
    if (rank == 3) {
        for (i = 0; i < 4; ++i)
            for (j = 0; j < 8; ++j)
                x[i][j] = pow(10.0, i + 1) + j;   /* row i holds 10^(i+1)+j */
        MPI_Send(&x[0][7], 1, coltype, 1, 52, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&x[0][2], 1, coltype, 3, 52, MPI_COMM_WORLD, &status);
        for (i = 0; i < 4; ++i)
            printf("P:%d my x[%d][2]=%lf\n", rank, i, x[i][2]);
    }
    MPI_Type_free(&coltype);
    MPI_Finalize();
    return 0;
}

---------------------------P:1 my x[0][2]=17.000000 P:1 my x[1][2]=107.000000 P:1 my x[2][2]=1007.000000 P:1 my x[3][2]=10007.000000

You might also like