MOOSE - Multiscale Object Oriented Simulation Environment
proc5.cpp
#include <mpi.h>
#include <iostream> // needed for cout, endl and flush used below
#include <vector>
using namespace std;

#define WORKTAG 1
#define DIETAG 2

// Tests out the use of much larger recv buffers than send
// sizes.
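// MPI permits posting a receive whose count is larger than the message that
// eventually arrives; the sender's count determines how much of the buffer is
// actually filled, and MPI_Get_count on the completed status reports the true
// length. That is the behaviour this test exercises.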

/* Local functions */

const int numEntries = 10;
const int totCalls = 2 * 65536;
static vector< vector< double > > recvBuf;
static int clearPending( int numNodes, int myrank, MPI_Request *recvReq,
		double& tot );

static double* get_next_work_item( int numCalls )
{
	static vector< double > ret( numEntries );
	for ( int i = 0; i < numEntries; ++i )
		ret[i] = i + numCalls;

	if ( numCalls >= totCalls )
		return 0; // no more work items once totCalls is reached
	return &ret[0];
}

static double doWork(double* work);

int main(int argc, char **argv)
{
	double tot = 0.0;
	double tc = totCalls;
	double ne = numEntries;
	// Closed-form sum of every entry of every work item: item 'numCalls'
	// contributes (i + numCalls) for i = 0 .. numEntries-1.
	double expectedTot =
		tc * ( ( ne * (ne - 1.0) )/2.0 ) +
		ne * ( tc * (tc - 1.0) )/2.0;
	int myrank;
	int numNodes;

	/* Initialize MPI */

	MPI_Init(&argc, &argv);

	/* Find out my identity in the default communicator */

	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
	MPI_Comm_size(MPI_COMM_WORLD, &numNodes);

	MPI_Request recvReq[ numNodes ];
	MPI_Request sendReq[ numNodes ];
	for ( int i = 0; i < numNodes; ++i )
		recvReq[i] = sendReq[i] = MPI_REQUEST_NULL; // initialize to the null request handle
	vector< double > temp( numEntries * numNodes, 0.0 );
	recvBuf.resize( numNodes, temp );

	int numCallsPerNode = totCalls / numNodes;

	int begin = myrank * numCallsPerNode;
	int end = begin + numCallsPerNode;

	int k = 0;
	for ( int i = 0; i < numNodes; ++i ) {
		if ( i != myrank ) {
			MPI_Irecv(
				&(recvBuf[i][0]),      /* message buffer */
				numEntries * (1 + i ), /* buffer size */
				MPI_DOUBLE,            /* of type double real */
				i,                     /* receive from specified node */
				MPI_ANY_TAG,           /* any type of message */
				MPI_COMM_WORLD,        /* default communicator */
				&recvReq[k++]);        /* info about the received message */
		}
	}

	for ( int i = 0; i < numCallsPerNode; ++i ) {
		double* work = get_next_work_item( i + begin );
		int targetNode = i % numNodes;
		if ( targetNode == myrank ) {
			tot += doWork( work );
		} else { // Ship the item out as work, and handle shipments that come in.
			/*
			MPI_Send(work,            // message buffer
				numEntries,       // number of data items
				MPI_DOUBLE,       // data items are doubles
				targetNode,       // Where to send to
				WORKTAG,          // user chosen message tag
				MPI_COMM_WORLD    // default communicator
			);
			*/
			MPI_Isend(work,           // message buffer
				numEntries,       // number of data items
				MPI_DOUBLE,       // data items are doubles
				targetNode,       // Where to send to
				WORKTAG,          // user chosen message tag
				MPI_COMM_WORLD,   // default communicator
				&sendReq[targetNode] // Info about outgoing message
			);
		}
		if ( targetNode == numNodes - 1 ) {
			int numDone = 1;
			while ( numDone < numNodes ) // Ensure we clear all once a cycle
				numDone += clearPending( numNodes, myrank, recvReq, tot );
		}
	}
	// One last send with the consolidated result. Irecvs should have
	// been posted already.
	vector< double > work( numEntries, 0.0 );
	work[0] = tot;
	for ( int i = 0; i < numNodes; ++i ) {
		if ( i == myrank ) continue;
		MPI_Send(&work[0],        // message buffer
			numEntries,       // number of data items
			MPI_DOUBLE,       // data items are doubles
			i,                // Where to send to
			WORKTAG,          // user chosen message tag
			MPI_COMM_WORLD    // default communicator
		);
	}

	int numDone = 1;
	while ( numDone < numNodes ) // Ensure we clear all once a cycle
		numDone += clearPending( numNodes, myrank, recvReq, tot );

	cout << myrank << ": Tot = " << tot <<
		", expected = " << expectedTot <<
		", subtot = " << work[0] << endl;

	/* Shut down MPI */

	MPI_Finalize();
	return 0;
}

static int clearPending( int numNodes, int myrank, MPI_Request *recvReq, double& tot )
{
	if ( numNodes == 1 )
		return 0;
	int doneIndices[ numNodes ];
	int done = 0;
	MPI_Status doneStatus[ numNodes ];
	for ( int i = 0; i < numNodes; ++i ) {
		doneIndices[i] = 0;
		// Zero only the standard, portable status fields.
		MPI_Status &ds = doneStatus[i];
		ds.MPI_SOURCE = ds.MPI_TAG = ds.MPI_ERROR = 0;
	}

	// MPI_Testsome returns an error code; the count of completed requests
	// comes back through 'done'.
	MPI_Testsome( numNodes - 1, recvReq, &done,
		doneIndices, doneStatus );
	// cout << "done = " << done << ", numNodes = " << numNodes << ", myrank = " << myrank << endl << flush;
	if ( done == MPI_UNDEFINED )
		return 0;
	for ( int i = 0; i < done; ++i ) {
		int recvNode = doneIndices[i];
		if ( recvNode >= myrank )
			recvNode += 1; // Skip myrank: request k maps to node k, shifted past self
		double* work = &(recvBuf[recvNode][0]);
		tot += doWork( work );
		// Post the Recv again.
		MPI_Irecv(
			work,                      /* message buffer */
			numEntries * (1 + myrank), /* buffer size */
			MPI_DOUBLE,                /* of type double real */
			recvNode,                  /* receive from specified node */
			MPI_ANY_TAG,               /* any type of message */
			MPI_COMM_WORLD,            /* default communicator */
			&recvReq[doneIndices[i]]); /* info about the received message */
	}
	return done;
}


static double
doWork(double* work)
{
	double tot = 0;
	for ( int i = 0; i < numEntries; ++i )
		tot += work[i];
	return tot;
}
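The expectedTot expression near the top of main() is the closed-form sum of every entry of every work item that get_next_work_item() produces. The short standalone sketch below is not part of proc5.cpp; it simply compares that closed form against a brute-force accumulation, using a much smaller, hypothetical totCalls so it finishes instantly, and builds with any ordinary C++ compiler (no MPI needed).

#include <iostream>
using namespace std;

int main()
{
	const int numEntries = 10;   // same as proc5.cpp
	const int totCalls = 1024;   // reduced from 2 * 65536 purely for speed

	// Brute-force sum: work item 'c' holds the values (i + c) for
	// i = 0 .. numEntries-1, exactly as get_next_work_item() fills ret[].
	double brute = 0.0;
	for ( int c = 0; c < totCalls; ++c )
		for ( int i = 0; i < numEntries; ++i )
			brute += i + c;

	// Closed form used for expectedTot in proc5.cpp.
	double tc = totCalls;
	double ne = numEntries;
	double closedForm =
		tc * ( ( ne * (ne - 1.0) )/2.0 ) +
		ne * ( tc * (tc - 1.0) )/2.0;

	cout << "brute = " << brute << ", closed form = " << closedForm << endl;
	return ( brute == closedForm ) ? 0 : 1;
}

proc5.cpp itself would typically be compiled with an MPI wrapper such as mpicxx and launched under mpirun, after which each rank prints its accumulated total alongside expectedTot.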