MOOSE - Multiscale Object Oriented Simulation Environment
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
proc2.cpp
Go to the documentation of this file.
#include <mpi.h>

#include <iostream>
#include <vector>
using namespace std;

#define WORKTAG 1
#define DIETAG 2
7 
8 
9 /* Local functions */
10 
11 const int numEntries = 10;
12 const int totCalls = 2 * 65536;
13 static vector< vector< double > > recvBuf;
14 static int clearPending( int numNodes, int myrank, MPI_Request *recvReq,
15  double& tot );
16 
17 static double* get_next_work_item( int numCalls )
18 {
19  static vector< double > ret( numEntries );
20  for ( int i = 0; i < numEntries; ++i )
21  ret[i] = i + numCalls;
22 
23  if ( numCalls >= totCalls )
24  return 0;
25  return &ret[0];
26 }
27 
28 static double doWork(double* work);
29 
30 int main(int argc, char **argv)
31 {
32  double tot = 0.0;
33  double tc = totCalls;
34  double ne = numEntries;
35  double expectedTot =
36  tc * ( ( ne * (ne - 1.0) )/2.0 ) +
37  ne * ( tc * (tc - 1.0) )/2.0;
38  int myrank;
39  int numNodes;
40 
41  /* Initialize MPI */
42 
43  MPI_Init(&argc, &argv);
44 
45  /* Find out my identity in the default communicator */
46 
47  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
48  MPI_Comm_size(MPI_COMM_WORLD, &numNodes);
49 
50  MPI_Request recvReq[ numNodes ];
51  MPI_Request sendReq[ numNodes ];
52  for ( int i = 0; i < numNodes; ++i )
53  recvReq[i] = sendReq[i] = 0;
54  vector< double > temp( numEntries, 0.0 );
55  recvBuf.resize( numNodes, temp );
56 
57  int numCallsPerNode = totCalls / numNodes;
58 
59  int begin = myrank * numCallsPerNode;
60  int end = begin + numCallsPerNode;
61 
62  int k = 0;
63  for ( int i = 0; i < numNodes; ++i ) {
64  if ( i != myrank ) {
65  MPI_Irecv(
66  &(recvBuf[i][0]), /* message buffer */
67  numEntries, /* buffer size */
68  MPI_DOUBLE, /* of type double real */
69  i, /* receive from specified node */
70  MPI_ANY_TAG, /* any type of message */
71  MPI_COMM_WORLD, /* default communicator */
72  &recvReq[k++]); /* info about the received message */
73  }
74  }
75 
76  for ( int i = 0; i < numCallsPerNode; ++i ) {
77  double* work = get_next_work_item( i + begin );
78  int targetNode = i % numNodes;
79  if ( targetNode == myrank ) {
80  tot += doWork( work );
81  } else { // Ship it out to work; and handle shipments that come in.
82  /*
83  MPI_Send(work, // message buffer
84  numEntries, // one data item
85  MPI_DOUBLE, // data item is an integer
86  targetNode, // Where to send to
87  WORKTAG, // user chosen message tag
88  MPI_COMM_WORLD // default communicator
89  );
90  */
91  MPI_Isend(work, // message buffer
92  numEntries, // one data item
93  MPI_DOUBLE, // data item is an integer
94  targetNode, // Where to send to
95  WORKTAG, // user chosen message tag
96  MPI_COMM_WORLD, // default communicator
97  &sendReq[targetNode] // Info about outgoing message
98  );
99  }
100  if ( targetNode == numNodes - 1 ) {
101  int numDone = 1;
102  while ( numDone < numNodes ) // Ensure we clear all once a cycle
103  numDone += clearPending( numNodes, myrank, recvReq, tot );
104  }
105  }
106  // One last send with the consolidated result. Irecvs should have
107  // been posted already.
108  vector< double > work( numEntries, 0.0 );
109  work[0] = tot;
110  for ( int i = 0; i < numNodes; ++i ) {
111  if ( i == myrank ) continue;
112  MPI_Send(&work[0], // message buffer
113  numEntries, // one data item
114  MPI_DOUBLE, // data item is an integer
115  i, // Where to send to
116  WORKTAG, // user chosen message tag
117  MPI_COMM_WORLD // default communicator
118  );
119  }
120 
121  int numDone = 1;
122  while ( numDone < numNodes ) // Ensure we clear all once a cycle
123  numDone += clearPending( numNodes, myrank, recvReq, tot );
124 
125  cout << myrank << ": Tot = " << tot <<
126  ", expected = " << expectedTot <<
127  ", subtot = " << work[0] << endl;
128 
129  /* Shut down MPI */
130 
131  MPI_Finalize();
132  return 0;
133 }
134 
135 int clearPending( int numNodes, int myrank, MPI_Request *recvReq, double& tot )
136 {
137  if ( numNodes == 1 )
138  return 0;
139  int doneIndices[ numNodes ];
140  int done = 0;
141  MPI_Status doneStatus[ numNodes ];
142  for ( int i = 0; i < numNodes; ++i ) {
143  doneIndices[i] = 0;
144  MPI_Status &ds = doneStatus[i];
145  ds.MPI_SOURCE = ds.MPI_TAG = ds.MPI_ERROR = ds._count = ds._cancelled = 0;
146  }
147 
148  int numDone = MPI_Testsome( numNodes - 1, recvReq, &done,
149  doneIndices, doneStatus );
150  // cout << "numDone = " << numDone << ", " << done << ", numNodes = " << numNodes << ", myrank = " << myrank << endl << flush;
151  if ( done == MPI_UNDEFINED )
152  return 0;
153  for ( unsigned int i = 0; i < done; ++i ) {
154  int recvNode = doneIndices[i];
155  if ( recvNode >= myrank )
156  recvNode += 1; // Skip myrank
157  double* work = &(recvBuf[recvNode][0]);
158  tot += doWork( work );
159  // Post the Recv again.
160  MPI_Irecv(
161  work, /* message buffer */
162  numEntries, /* buffer size */
163  MPI_DOUBLE, /* of type double real */
164  recvNode, /* receive from specified node */
165  MPI_ANY_TAG, /* any type of message */
166  MPI_COMM_WORLD, /* default communicator */
167  &recvReq[doneIndices[i]]); /* info about the received message */
168  }
169  return done;
170 }
171 
172 
173 static double
174 doWork(double* work)
175 {
176  double tot = 0;
177  for (int i =0; i < numEntries; ++i )
178  tot += work[i];
179 }
static vector< vector< double > > recvBuf
Definition: proc2.cpp:13
const int numEntries
Definition: proc2.cpp:11
const int totCalls
Definition: proc2.cpp:12
#define WORKTAG
Definition: proc2.cpp:5
static double doWork(double *work)
Definition: proc2.cpp:174
static int clearPending(int numNodes, int myrank, MPI_Request *recvReq, double &tot)
Definition: proc2.cpp:135
int main(int argc, char **argv)
Definition: proc2.cpp:30
static unsigned int numNodes
static double * get_next_work_item(int numCalls)
Definition: proc2.cpp:17