1 /******************************************************************************
2 * FILE: mpl.ex1.c
3 * DESCRIPTION:
4 * In this simple example, the master task initiates numtasks-1 worker
5 * tasks. It then distributes an equal portion of an array to each worker
6 * task. Each worker task receives its portion of the array and performs a
7 * simple value assignment to each of its elements. The value assigned to
8 * each element is simply that element's index in the array plus 1. Each
9 * worker task then sends its portion of the array back to the master
10 * task. As the master receives back each portion of the array, selected
11 * elements are displayed.
12 * AUTHOR: Blaise Barney
13 * LAST REVISED: 09/14/93 for latest API changes Blaise Barney
14 * LAST REVISED: 01/10/94 changed API to MPL Stacy Pendell
15 * CONVERTED TO MPI: 11/12/94 by Xianneng Shen
16 ****************************************************************************/
17
18 #include <stdio.h>
19 #include "mpi.h"
20 #define ARRAYSIZE 60000
21 #define MASTER 0 /* taskid of first process */
22
23 MPI_Status status;
24 main(int argc, char **argv)
25 {
26 int numtasks, /* total number of MPI process in partitiion */
27 numworkers, /* number of worker tasks */
28 taskid, /* task identifier */
29 dest, /* destination task id to send message */
30 index, /* index into the array */
31 i, /* loop variable */
32 arraymsg = 1, /* setting a message type */
33 indexmsg = 2, /* setting a message type */
34 source, /* origin task id of message */
35 chunksize; /* for partitioning the array */
36 float data[ARRAYSIZE], /* the intial array */
37 result[ARRAYSIZE]; /* for holding results of array operations */
38
39 /************************* initializations ***********************************
40 * Find out how many tasks are in this partition and what my task id is. Then
41 * define the number of worker tasks and the array partition size as chunksize.
42 * Note: For this example, the MP_PROCS environment variable should be set
43 * to an odd number...to insure even distribution of the array to numtasks-1
44 * worker tasks.
45 ******************************************************************************/
46 MPI_Init(&argc, &argv);
47 MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
48 MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
49 numworkers = numtasks-1;
50 chunksize = (ARRAYSIZE / numworkers);
51
52 /**************************** master task ************************************/
53 if (taskid == MASTER) {
54 printf("\n*********** Starting MPI Example 1 ************\n");
55 printf("MASTER: number of worker tasks will be= %d\n",numworkers);
56 fflush(stdout);
57
58 /* Initialize the array */
59 for(i=0; i<ARRAYSIZE; i++)
60 data[i] = 0.0;
61 index = 0;
62
63 /* Send each worker task its portion of the array */
64 for (dest=1; dest<= numworkers; dest++) {
65 printf("Sending to worker task= %d\n",dest);
66 fflush(stdout);
67 MPI_Send(&index, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
68 MPI_Send(&data[index], chunksize, MPI_FLOAT, dest, 0, MPI_COMM_WORLD);
69 index = index + chunksize;
70 }
71
72 /* Now wait to receive back the results from each worker task and print */
73 /* a few sample values */
74 for (i=1; i<= numworkers; i++) {
75 source = i;
76 MPI_Recv(&index, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);
77 MPI_Recv(&result[index], chunksize, MPI_FLOAT, source, 1, MPI_COMM_WORLD,
78 &status);
79
80 printf("---------------------------------------------------\n");
81 printf("MASTER: Sample results from worker task = %d\n",source);
82 printf(" result[%d]=%f\n", index, result[index]);
83 printf(" result[%d]=%f\n", index+100, result[index+100]);
84 printf(" result[%d]=%f\n\n", index+1000, result[index+1000]);
85 fflush(stdout);
86 }
87
88 printf("MASTER: All Done! \n");
89 }
90
91
92 /**************************** worker task ************************************/
93 if (taskid > MASTER) {
94 /* Receive my portion of array from the master task */
95 source = MASTER;
96 MPI_Recv(&index, 1, MPI_INT, source, 0, MPI_COMM_WORLD, &status);
97 MPI_Recv(&result[index], chunksize, MPI_FLOAT, source, 0,
98 MPI_COMM_WORLD, &status);
99 /* Do a simple value assignment to each of my array elements */
100 for(i=index; i < index + chunksize; i++)
101 result[i] = i + 1;
102
103 /* Send my results back to the master task */
104
105 MPI_Send(&index, 1, MPI_INT, MASTER, 1, MPI_COMM_WORLD);
106 MPI_Send(&result[index], chunksize, MPI_FLOAT, MASTER, 1, MPI_COMM_WORLD);
107
108 }
109 MPI_Finalize();
110 }
|