MOOSE - Multiscale Object Oriented Simulation Environment
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros
PostMaster.cpp
Go to the documentation of this file.
1 /**********************************************************************
2 ** This program is part of 'MOOSE', the
3 ** Messaging Object Oriented Simulation Environment.
4 ** Copyright (C) 2003-2013 Upinder S. Bhalla. and NCBS
5 ** It is made available under the terms of the
6 ** GNU Lesser General Public License version 2.1
7 ** See the file COPYING.LIB for the full notice.
8 **********************************************************************/
9 
10 #include "header.h"
11 #include "PostMaster.h"
12 #include "../shell/Shell.h"
13 
14 const unsigned int TgtInfo::headerSize =
15  1 + ( sizeof( TgtInfo ) - 1 )/sizeof( double );
16 
17 const unsigned int PostMaster::reserveBufSize = 1048576;
18 const unsigned int PostMaster::setRecvBufSize = 1048576;
19 const int PostMaster::MSGTAG = 1;
20 const int PostMaster::SETTAG = 2;
21 const int PostMaster::GETTAG = 3;
22 const int PostMaster::RETURNTAG = 4;
23 const int PostMaster::CONTROLTAG = 5;
24 const int PostMaster::DIETAG = 100;
26  :
27  recvBufSize_( reserveBufSize ),
28  setSendBuf_( setRecvBufSize, 0 ),
29  setRecvBuf_( setRecvBufSize, 0 ),
30  sendBuf_( Shell::numNodes() ),
31  recvBuf_( Shell::numNodes() ),
32  sendSize_( Shell::numNodes(), 0 ),
33  getHandlerBuf_( TgtInfo::headerSize, 0 ),
34  doneIndices_( Shell::numNodes(), 0 ),
35  isSetSent_( 1 ), // Flag. Have any pending 'set' gone?
36  isSetRecv_( 0 ), // Flag. Has some data come in?
37  setSendSize_( 0 ),
38  numRecvDone_( 0 )
39 {
40  for ( unsigned int i = 0; i < Shell::numNodes(); ++i ) {
41  sendBuf_[i].resize( reserveBufSize, 0 );
42  }
43 #ifdef USE_MPI
44  MPI_Barrier( MPI_COMM_WORLD );
45  // Post recv for set calls
46  MPI_Irecv( &setRecvBuf_[0], setRecvBufSize, MPI_DOUBLE,
47  MPI_ANY_SOURCE,
48  SETTAG, MPI_COMM_WORLD,
49  &setRecvReq_
50  );
51  // Post recv for get calls.
52  MPI_Irecv( &getHandlerBuf_[0], TgtInfo::headerSize, MPI_DOUBLE,
53  MPI_ANY_SOURCE,
54  GETTAG, MPI_COMM_WORLD,
55  &getHandlerReq_
56  );
57  recvReq_.resize( Shell::numNodes() );
58  sendReq_.resize( Shell::numNodes() );
59  unsigned int k = 0;
60  for ( unsigned int i = 0; i < Shell::numNodes(); ++i ) {
61  // Set up the Recv already for later sends. This might be a problem
62  // for some polling-based implementations, but let's try for now.
63  MPI_Status temp;
64  temp.MPI_SOURCE = temp.MPI_TAG = temp.MPI_ERROR = 0;
65  doneStatus_.resize( Shell::numNodes(), temp );
66  if ( i != Shell::myNode() ) {
67  recvBuf_[i].resize( recvBufSize_, 0 );
68  MPI_Irecv( &recvBuf_[i][0], recvBufSize_, MPI_DOUBLE,
69  i, MSGTAG, MPI_COMM_WORLD,
70  &recvReq_[k++]
71  // Need to be careful about contiguous indexing for
72  // the MPI_request array.
73  );
74  }
75  }
76 #endif
77 }
78 
80 // Moose class stuff.
83 {
85  // Field Definitions
88  "numNodes",
89  "Returns number of nodes that simulation runs on.",
91  );
93  "myNode",
94  "Returns index of current node.",
96  );
98  "bufferSize",
99  "Size of the send a receive buffers for each node.",
102  );
104  // MsgDest Definitions
106  static DestFinfo process( "process",
107  "Handles process call",
109  static DestFinfo reinit( "reinit",
110  "Handles reinit call",
112 
114  // SharedFinfo Definitions
116  static Finfo* procShared[] = {
117  &process, &reinit
118  };
119  static SharedFinfo proc( "proc",
120  "Shared message for process and reinit",
121  procShared, sizeof( procShared ) / sizeof( const Finfo* )
122  );
123 
124  static Finfo* postMasterFinfos[] = {
125  &numNodes, // ReadOnlyValue
126  &myNode, // ReadOnlyValue
127  &bufferSize, // ReadOnlyValue
128  &proc // SharedFinfo
129  };
130 
131  static Dinfo< PostMaster > dinfo;
132  static Cinfo postMasterCinfo (
133  "PostMaster",
135  postMasterFinfos,
136  sizeof( postMasterFinfos ) / sizeof ( Finfo* ),
137  &dinfo
138  );
139 
140  return &postMasterCinfo;
141 }
142 
148 {
149 #ifdef USE_MPI
150  static vector< MPI_Status > status( Shell::numNodes() );
151  MPI_Waitall( Shell::numNodes() -1, &sendReq_[0], &status[0] );
152  for (unsigned int i = 0; i < Shell::numNodes(); ++i ) {
153  sendSize_[i] = 0;
154  }
155 #endif
156 }
157 
158 //
164 void PostMaster::reinit( const Eref& e, ProcPtr p )
165 {
166 #ifdef USE_MPI
167  // MPI_Barrier( MPI_COMM_WORLD );
168  unsigned int reqIndex = 0;
169  for ( unsigned int i = 0; i < Shell::numNodes(); ++i )
170  {
171  if ( i == Shell::myNode() ) continue;
172  // MPI_scatter would have been better but it doesn't allow
173  // one to post larger recvs than the actual data sent.
174  MPI_Isend(
175  &sendBuf_[i][0], sendSize_[i], MPI_DOUBLE,
176  i, // Where to send to.
177  MSGTAG, MPI_COMM_WORLD,
178  &sendReq_[ reqIndex++ ]
179  );
180  clearPending(); // Try to interleave communications.
181  }
182  while ( numRecvDone_ < Shell::numNodes() -1 )
183  clearPending();
184  finalizeSends();
185  MPI_Barrier( MPI_COMM_WORLD );
186  numRecvDone_ = 0;
187 #endif
188 }
189 
190 void PostMaster::process( const Eref& e, ProcPtr p )
191 {
192 #ifdef USE_MPI
193  unsigned int reqIndex = 0;
194  for ( unsigned int i = 0; i < Shell::numNodes(); ++i )
195  {
196  if ( i == Shell::myNode() ) continue;
197  // MPI_scatter would have been better but it doesn't allow
198  // one to post larger recvs than the actual data sent.
199  MPI_Isend(
200  &sendBuf_[i][0], sendSize_[i], MPI_DOUBLE,
201  i, // Where to send to.
202  MSGTAG, MPI_COMM_WORLD,
203  &sendReq_[ reqIndex++ ]
204  );
205  clearPending(); // Try to interleave communications.
206  }
207  while ( numRecvDone_ < Shell::numNodes() -1 )
208  clearPending();
209  finalizeSends();
210  MPI_Barrier( MPI_COMM_WORLD );
211  numRecvDone_ = 0;
212 #endif
213 }
214 
216 {
217  if ( Shell::numNodes() == 1 )
218  return;
221 }
222 
224  const Eref& e, const OpFunc* op, int requestingNode )
225 {
226 #ifdef USE_MPI
227  static double getReturnBuf[reserveBufSize];
228  op->opBuffer( e, &getReturnBuf[0] ); // stuff return value into buf.
229  // Send out the data. Blocking. Don't want any other gets till done
230  int size = getReturnBuf[0];
231  MPI_Send(
232  &getReturnBuf[1], size, MPI_DOUBLE,
233  requestingNode, // Where to send to.
234  RETURNTAG, MPI_COMM_WORLD
235  );
236 #endif // USE_MPI
237 }
238 
249 int innerGetVec( const Eref& e, const OpFunc* op,
250  double* getReturnBuf )
251 {
252  static double buf[PostMaster::reserveBufSize];
253  // Would like to use eref iterator here.
254  Element* elm = e.element();
255  unsigned int start = elm->localDataStart();
256  int k = 1; // first entry is for numOnNode;
257  if ( elm->hasFields() ) {
258  DataId di = e.dataIndex();
259  unsigned int numField = elm->numField( di - start );
260  getReturnBuf[0] = numField;
261  for ( unsigned int j = 0; j < numField; ++j ) {
262  Eref er( elm, di, j );
263  // stuff return value into buf.
264  op->opBuffer( er, buf );
265  unsigned int size = buf[0];
266  memcpy( &getReturnBuf[k], &buf[1], size * sizeof( double ) );
267  k += size;
268  }
269  } else {
270  unsigned int end = start + elm->numLocalData();
271  getReturnBuf[0] = elm->numLocalData();
272  for ( unsigned int i = start; i < end; ++i ) {
273  Eref er( elm, i, 0 );
274  // stuff return value into buf.
275  op->opBuffer( er, buf );
276  unsigned int size = buf[0];
277  memcpy( &getReturnBuf[k], &buf[1], size * sizeof( double ) );
278  k += size;
279  }
280  }
281  return k;
282 }
283 
285  const Eref& e, const OpFunc* op, int requestingNode )
286 {
287 #ifdef USE_MPI
288  static double getReturnBuf[reserveBufSize];
289  int k = innerGetVec( e, op, getReturnBuf );
290  // Send out the data. Blocking. Don't want any other gets till done
291  MPI_Send(
292  &getReturnBuf[0], k, MPI_DOUBLE,
293  requestingNode, // Where to send to.
294  RETURNTAG, MPI_COMM_WORLD
295  );
296 #endif // USE_MPI
297 }
298 
300 {
301 #ifdef USE_MPI
302  // isSetSent_ is checked before doing another x-node set operation
303  // in dispatchSetBuf.
304  if ( !isSetSent_ ) {
305  MPI_Test( &setSendReq_, &isSetSent_, &setSendStatus_ );
306  assert ( isSetSent_ != MPI_UNDEFINED );
307  }
308 
309  MPI_Test( &setRecvReq_, &isSetRecv_, &setRecvStatus_ );
310  if ( isSetRecv_ && isSetRecv_ != MPI_UNDEFINED )
311  {
312  isSetRecv_ = 0;
313  int requestingNode = setRecvStatus_.MPI_SOURCE;
314  int count = 0;
315  MPI_Get_count( &setRecvStatus_, MPI_DOUBLE, &count );
316  // Immediately post another Recv. Needed because we may call
317  // the clearPendingSetGet() function recursively. So copy
318  // data to another buffer first.
319  vector< double > temp( setRecvBuf_.begin(),
320  setRecvBuf_.begin() + count );
321  MPI_Irecv( &setRecvBuf_[0], setRecvBufSize, MPI_DOUBLE,
322  MPI_ANY_SOURCE,
323  SETTAG, MPI_COMM_WORLD,
324  &setRecvReq_
325  );
326 
327  double* buf = &temp[0];
328  // Handle arrived Set call
329  const TgtInfo* tgt = reinterpret_cast< const TgtInfo * >( buf );
330  const Eref& e = tgt->eref();
331  const OpFunc *op = OpFunc::lookop( tgt->bindIndex() );
332  assert( op );
333  if ( tgt->dataSize() == MooseSetHop ) {
334  op->opBuffer( e, buf + TgtInfo::headerSize );
335  } else if ( tgt->dataSize() == MooseSetVecHop ) {
336  op->opVecBuffer( e, buf + TgtInfo::headerSize );
337  } else if ( tgt->dataSize() == MooseGetHop ) {
338  handleRemoteGet( e, op, requestingNode );
339  } else if ( tgt->dataSize() == MooseGetVecHop ) {
340  handleRemoteGetVec( e, op, requestingNode );
341  }
342  }
343 #endif // USE_MPI
344 }
345 
346 /*
347 // Handles incoming 'get' request and posts stuff back to requestor.
348 void PostMaster::clearPendingGet()
349 {
350  static double getReturnBuf[reserveBufSize];
351  static MPI_Status getReturnStatus;
352  int getRequestArrived = 0;
353 #ifdef USE_MPI
354  MPI_Test( &getHandlerReq_, &getRequestArrived, &getReturnStatus );
355  if ( getRequestArrived && getRequestArrived != MPI_UNDEFINED )
356  {
357  int requestingNode = getReturnStatus.MPI_SOURCE;
358  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &getHandlerBuf_[0] );
359  const Eref& e = tgt->fullEref();
360  const OpFunc *op = OpFunc::lookop( tgt->bindIndex() );
361  assert( op );
362  op->opBuffer( e, &getReturnBuf[0] ); // stuff return value into buf.
363 
364  // Refresh the handler for incoming get requests.
365  MPI_Irecv( &getHandlerBuf_[0], TgtInfo::headerSize, MPI_DOUBLE,
366  MPI_ANY_SOURCE,
367  GETTAG, MPI_COMM_WORLD,
368  &getHandlerReq_
369  );
370 
371  // Send out the data. Blocking. Don't want any other gets till done
372  int size = getReturnBuf[0];
373  MPI_Send(
374  &getReturnBuf[1], size, MPI_DOUBLE,
375  requestingNode, // Where to send to.
376  RETURNTAG, MPI_COMM_WORLD
377  );
378  }
379 #endif // USE_MPI
380 }
381 */
382 
384 {
385 #ifdef USE_MPI
386  int done = 0;
387  bool report = false; // for debugging
388  MPI_Testsome( Shell::numNodes() -1, &recvReq_[0], &done,
389  &doneIndices_[0], &doneStatus_[0] );
390  if ( done == MPI_UNDEFINED )
391  return;
392  for ( int i = 0; i < done; ++i ) {
393  int doneIndex = doneIndices_[i];
394  unsigned int recvNode = doneIndex;
395  if ( recvNode >= Shell::myNode() )
396  recvNode += 1; // Skip myNode
397  int recvSize = 0;
398  MPI_Get_count( &doneStatus_[i], MPI_DOUBLE, &recvSize );
399  int j = 0;
400  assert( recvSize <= static_cast< int >( recvBufSize_ ) );
401  double* buf = &recvBuf_[ recvNode ][0];
402  if ( report ) {
403  for ( int j = 0; j < recvSize; j += 4 ) {
404  TgtInfo* tgt = reinterpret_cast< TgtInfo * >(buf + j);
405  cout << j / 4 << ": " << tgt->eref().dataIndex() << ", " <<
406  buf[j+3] << endl;
407  }
408  }
409  while ( j < recvSize ) {
410  const TgtInfo* tgt = reinterpret_cast< const TgtInfo * >( buf );
411  const Eref& e = tgt->eref();
412  const Finfo *f =
413  e.element()->cinfo()->getSrcFinfo( tgt->bindIndex() );
414  buf += TgtInfo::headerSize;
415  const SrcFinfo* sf = dynamic_cast< const SrcFinfo* >( f );
416  assert( sf );
417  sf->sendBuffer( e, buf );
418  buf += tgt->dataSize();
419  j += TgtInfo::headerSize + tgt->dataSize();
420  assert( buf - &recvBuf_[recvNode][0] == j );
421  }
422  // Post the next Irecv.
423  unsigned int k = recvNode;
424  if ( recvNode > Shell::myNode() )
425  k--;
426  MPI_Irecv( &recvBuf_[recvNode][0],
427  recvBufSize_, MPI_DOUBLE,
428  recvNode,
429  MSGTAG, MPI_COMM_WORLD,
430  &recvReq_[ k ]
431  // Ensure we have contiguous entries in recvReq_
432  );
433  }
434  numRecvDone_ += done;
435 #endif
436 }
437 
439 // Data transfer and fillup operations.
441 
442 double* PostMaster::addToSendBuf( const Eref& e, unsigned int bindIndex,
443  unsigned int size )
444 {
445  unsigned int node = e.fieldIndex(); // nasty evil wicked hack
446  unsigned int end = sendSize_[node];
447  if ( end + TgtInfo::headerSize + size > recvBufSize_ ) {
448  // Here we need to activate the fallback second send which will
449  // deal with the big block. Also various routines for tracking
450  // send size so we don't get too big or small.
451  cerr << "Error: PostMaster::addToSendBuf on node " <<
452  Shell::myNode() <<
453  ": Data size (" << size << ") goes past end of buffer\n";
454  assert( 0 );
455  }
456  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &sendBuf_[node][end] );
457  tgt->set( e.objId(), bindIndex, size );
458  end += TgtInfo::headerSize;
459  sendSize_[node] = end + size;
460  return &sendBuf_[node][end];
461 }
462 
463 double* PostMaster::addToSetBuf( const Eref& e, unsigned int opIndex,
464  unsigned int size, unsigned int hopType )
465 {
466  if ( TgtInfo::headerSize + size > setRecvBufSize ) {
467  // Here we need to activate the fallback second send which will
468  // deal with the big block. Also various routines for tracking
469  // send size so we don't get too big or small.
470  cerr << "Error: PostMaster::addToSetBuf on node " <<
471  Shell::myNode() <<
472  ": Data size (" << size << ") goes past end of buffer\n";
473  assert( 0 );
474  }
475  while ( isSetSent_ == 0 ) { // Can't add a set while old set is pending
476  clearPending();
477  }
478  isSetSent_ = 0;
479  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &setSendBuf_[0] );
480  // tgt->set( e.id(), e.dataIndex(), opIndex, e.fieldIndex() );
481  tgt->set( e.objId(), opIndex, hopType );
482  unsigned int end = TgtInfo::headerSize;
483  setSendSize_ = end + size;
484  return &setSendBuf_[end];
485 }
486 
488 {
489  assert ( e.element()->isGlobal() || e.getNode() != Shell::myNode() );
490 #ifdef USE_MPI
491  if ( e.element()->isGlobal() ) {
492  for ( unsigned int i = 0; i < Shell::numNodes(); ++i ) {
493  if ( i != Shell::myNode() ) {
494  // A bcast would be marginally more efficient, but would need
495  // us to inform all target nodes to expect one. So just do
496  // multiple sends.
497  MPI_Isend(
498  &setSendBuf_[0], setSendSize_, MPI_DOUBLE,
499  i, // Where to send to.
500  SETTAG, MPI_COMM_WORLD,
501  &setSendReq_
502  // Need to monitor all the sends to make sure they all complete
503  // before permitting another 'set'
504  );
505  }
506  }
507  } else {
508  MPI_Isend(
509  &setSendBuf_[0], setSendSize_, MPI_DOUBLE,
510  e.getNode(), // Where to send to.
511  SETTAG, MPI_COMM_WORLD,
512  &setSendReq_
513  );
514  }
515 #endif
516 }
517 
519 // that come into the current node.
520 double* PostMaster::remoteGet( const Eref& e, unsigned int bindIndex )
521 {
522  static double getRecvBuf[setRecvBufSize];
523 #ifdef USE_MPI
524  static double getSendBuf[TgtInfo::headerSize];
525  static MPI_Request getSendReq;
526  static MPI_Request getRecvReq;
527  static MPI_Status getSendStatus;
528 
529  while ( isSetSent_ == 0 ) {
530  // Can't request a 'get' while old set is
531  // pending, lest the 'get' depend on the 'set'.
532  clearPending();
533  }
534  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &getSendBuf[0] );
535  tgt->set( e.objId(), bindIndex, MooseGetHop );
536  assert ( !e.element()->isGlobal() && e.getNode() != Shell::myNode() );
537  // Post receive for return value.
538  MPI_Irecv( &getRecvBuf[0],
539  setRecvBufSize, MPI_DOUBLE,
540  e.getNode(),
541  RETURNTAG, MPI_COMM_WORLD,
542  &getRecvReq
543  );
544  // Now post send to request the data
545  MPI_Isend(
546  &getSendBuf[0], TgtInfo::headerSize, MPI_DOUBLE,
547  e.getNode(), // Where to send to.
548  SETTAG, MPI_COMM_WORLD,
549  &getSendReq
550  );
551  int complete = 0;
552  // Poll till the value comes back. We don't bother to
553  // check what happened with the send.
554  // While polling be sure to handle any other requests to avoid deadlock
555  while ( !complete ) {
556  MPI_Test( &getRecvReq, &complete, &getSendStatus );
557  assert ( complete != MPI_UNDEFINED );
558  clearPending();
559  }
560 #endif
561  return &getRecvBuf[0];
562 }
563 
565 //requests that come into the current node.
566 // Here we request data only from the one node that holds the data,
567 // since all field data is on a single DataEntry.
568 void PostMaster::remoteFieldGetVec( const Eref& e, unsigned int bindIndex,
569  vector< double >& getRecvBuf )
570 {
571 #ifdef USE_MPI
572  static double getSendBuf[TgtInfo::headerSize];
573  static MPI_Request getSendReq;
574  static MPI_Request getRecvReq;
575  static MPI_Status doneStatus;
576 #endif
577  unsigned int targetNode = e.getNode();
578  assert( targetNode != Shell::myNode() );
579  getRecvBuf.clear();
580  getRecvBuf.resize( reserveBufSize );
581 
582 #ifdef USE_MPI
583  while ( isSetSent_ == 0 ) {
584  // Can't request a 'get' while old set is
585  // pending, lest the 'get' depend on the 'set'.
586  clearPending();
587  }
588  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &getSendBuf[0] );
589  tgt->set( e.objId(), bindIndex, MooseGetVecHop );
590  assert ( !e.element()->isGlobal() );
591 
592  // Post receive for return value.
593  MPI_Irecv( &getRecvBuf[0],
594  reserveBufSize, MPI_DOUBLE,
595  targetNode,
596  RETURNTAG, MPI_COMM_WORLD,
597  &getRecvReq
598  );
599  // Now post send to request the data
600  MPI_Isend(
601  &getSendBuf[0], TgtInfo::headerSize, MPI_DOUBLE,
602  targetNode, // Where to send to.
603  SETTAG, MPI_COMM_WORLD,
604  &getSendReq
605  );
606  // Poll till the value comes back. We don't bother to
607  // check what happened with the send.
608  // While polling be sure to handle any other requests to avoid deadlock
609  int done = 0;
610  while( !done ) {
611  MPI_Test( &getRecvReq, &done, &doneStatus );
612  assert ( done != MPI_UNDEFINED );
613  clearPending();
614  }
615  // Now we have the data back.
616 #endif
617 }
618 
620 //requests that come into the current node.
621 // getRecvBuf and size are already sized at numNodes.
622 // But getRecvBuf individual entries need to be sized.
623 void PostMaster::remoteGetVec( const Eref& e, unsigned int bindIndex,
624  vector< vector< double > >& getRecvBuf,
625  vector< unsigned int >& numOnNode )
626 {
627 #ifdef USE_MPI
628  static double getSendBuf[TgtInfo::headerSize];
629  static vector< MPI_Request > getSendReq( Shell::numNodes() );
630  static vector< MPI_Request > getRecvReq( Shell::numNodes() );
631  static vector< MPI_Status > doneStatus( Shell::numNodes() );
632 #endif
633  static vector< double > temp( reserveBufSize, 0 );
634  numOnNode.clear();
635  numOnNode.resize( Shell::numNodes(), 0 );
636  getRecvBuf.clear();
637  getRecvBuf.resize( Shell::numNodes(), temp );
638 
639 #ifdef USE_MPI
640  while ( isSetSent_ == 0 ) {
641  // Can't request a 'get' while old set is
642  // pending, lest the 'get' depend on the 'set'.
643  clearPending();
644  }
645  TgtInfo* tgt = reinterpret_cast< TgtInfo* >( &getSendBuf[0] );
646  tgt->set( e.objId(), bindIndex, MooseGetVecHop );
647  assert ( !e.element()->isGlobal() );
648 
649  unsigned int k = 0;
650  for ( unsigned int i = 0; i < Shell::numNodes(); ++i ) {
651  if ( i != Shell::myNode() ) {
652  // Post receive for return value.
653  MPI_Irecv( &getRecvBuf[i][0],
654  reserveBufSize, MPI_DOUBLE,
655  i,
656  RETURNTAG, MPI_COMM_WORLD,
657  &getRecvReq[k]
658  );
659  // Now post send to request the data
660  MPI_Isend(
661  &getSendBuf[0], TgtInfo::headerSize, MPI_DOUBLE,
662  i, // Where to send to.
663  SETTAG, MPI_COMM_WORLD,
664  &getSendReq[k]
665  );
666  k++;
667  }
668  }
669  // Poll till the value comes back. We don't bother to
670  // check what happened with the send.
671  // While polling be sure to handle any other requests to avoid deadlock
672  int done = 0;
673  unsigned int received = 0;
674  vector< int > getDoneIndices( Shell::numNodes(), 0 );
675  while( received < Shell::numNodes() - 1 ) {
676  MPI_Testsome( Shell::numNodes() -1, &getRecvReq[0], &done,
677  &getDoneIndices[0], &doneStatus[0] );
678  if ( done == MPI_UNDEFINED )
679  continue;
680  received += done;
681  for ( int i = 0; i < done; ++i ) {
682  int doneIndex = getDoneIndices[i];
683  unsigned int recvNode = doneIndex;
684  if ( recvNode >= Shell::myNode() )
685  recvNode += 1; // Skip myNode
686  /*
687  int recvSize = 0;
688  MPI_Get_count( &doneStatus[doneIndex],
689  MPI_DOUBLE, &recvSize );
690  size[recvNode] = recvSize;
691  */
692  numOnNode[recvNode] = getRecvBuf[recvNode][0];
693  }
694  clearPending();
695  }
696  // Now we have the whole mess back.
697 #endif
698 }
699 
701 // Fields
703 
704 unsigned int PostMaster::getNumNodes() const
705 {
706  return Shell::numNodes();
707 }
708 
709 unsigned int PostMaster::getMyNode() const
710 {
711  return Shell::myNode();
712 }
713 
714 unsigned int PostMaster::getBufferSize() const
715 {
716  if ( sendBuf_.size() == 0 )
717  return 0;
718 
719  return sendBuf_[0].size();
720 }
721 
722 void PostMaster::setBufferSize( unsigned int size )
723 {
724  for ( unsigned int i =0; i < sendBuf_.size(); ++i )
725  sendBuf_[i].resize( size );
726 }
const unsigned char MooseGetVecHop
Definition: OpFuncBase.cpp:16
static const int DIETAG
Definition: PostMaster.h:154
static const OpFunc * lookop(unsigned int opIndex)
Definition: OpFuncBase.cpp:42
vector< double > setRecvBuf_
Definition: PostMaster.h:160
static const int SETTAG
Definition: PostMaster.h:150
void handleRemoteGet(const Eref &e, const OpFunc *op, int requestingNode)
Handles 'get' calls from another node, to an object on mynode.
Definition: PostMaster.cpp:223
Eref eref() const
Definition: PostMaster.h:79
void dispatchSetBuf(const Eref &e)
Sends off contents of Set buffer.
Definition: PostMaster.cpp:487
static double op(double x)
unsigned int getNumNodes() const
Definition: PostMaster.cpp:704
unsigned int bindIndex() const
Definition: PostMaster.h:93
unsigned int getNode() const
Definition: Eref.cpp:52
Definition: Dinfo.h:60
Definition: EpFunc.h:64
double * addToSendBuf(const Eref &e, unsigned int bindIndex, unsigned int size)
Returns pointer to Send buffer for filling in arguments.
Definition: PostMaster.cpp:442
unsigned int dataIndex() const
Definition: Eref.h:50
unsigned int dataSize() const
Definition: PostMaster.h:89
void remoteGetVec(const Eref &e, unsigned int bindIndex, vector< vector< double > > &getRecvBuf, vector< unsigned int > &size)
This is a blocking call. However, it must still handle other.
Definition: PostMaster.cpp:623
virtual void sendBuffer(const Eref &e, double *buf) const =0
vector< vector< double > > recvBuf_
Definition: PostMaster.h:162
static const unsigned int headerSize
Definition: PostMaster.h:97
static const int GETTAG
Definition: PostMaster.h:151
Element * element() const
Definition: Eref.h:42
unsigned int fieldIndex() const
Definition: Eref.h:61
static const int CONTROLTAG
Definition: PostMaster.h:153
unsigned int getBufferSize() const
Definition: PostMaster.cpp:714
int isSetSent_
Definition: PostMaster.h:176
int setSendSize_
Definition: PostMaster.h:178
virtual void opBuffer(const Eref &e, double *buf) const =0
Executes the OpFunc by converting args.
vector< unsigned int > sendSize_
Definition: PostMaster.h:163
void reinit(const Eref &e, ProcPtr p)
Definition: PostMaster.cpp:164
virtual void opVecBuffer(const Eref &e, double *buf) const
Executes the OpFunc for all data by converting a vector of args.
Definition: OpFuncBase.h:61
const unsigned char MooseGetHop
Definition: OpFuncBase.cpp:15
void process(const Eref &e, ProcPtr p)
Definition: PostMaster.cpp:190
vector< double > setSendBuf_
Definition: PostMaster.h:159
unsigned int recvBufSize_
Definition: PostMaster.h:157
double * remoteGet(const Eref &e, unsigned int bindIndex)
Blocking call to get a value from a remote node.
Definition: PostMaster.cpp:520
static const int MSGTAG
Definition: PostMaster.h:149
static unsigned int myNode
vector< int > doneIndices_
Definition: PostMaster.h:175
void clearPendingSetGet()
Clears arrived set and get calls.
Definition: PostMaster.cpp:299
int innerGetVec(const Eref &e, const OpFunc *op, double *getReturnBuf)
Definition: PostMaster.cpp:249
void setBufferSize(unsigned int size)
Definition: PostMaster.cpp:722
virtual unsigned int numField(unsigned int rawIndex) const =0
Returns number of field entries for specified data.
virtual bool hasFields() const =0
ObjId objId() const
Definition: Eref.cpp:57
Definition: Eref.h:26
unsigned int getMyNode() const
Definition: PostMaster.cpp:709
Finfo * getSrcFinfo(unsigned int i) const
Definition: Cinfo.cpp:406
const Cinfo * cinfo() const
Definition: Element.cpp:66
const unsigned char MooseSetVecHop
Definition: OpFuncBase.cpp:14
const unsigned char MooseSetHop
Definition: OpFuncBase.cpp:13
virtual bool isGlobal() const =0
True if there is a copy of every dataEntry on all nodes.
virtual unsigned int localDataStart() const =0
Returns index of first data entry on this node.
vector< vector< double > > sendBuf_
Definition: PostMaster.h:161
virtual unsigned int numLocalData() const =0
Returns number of local data entries on this node.
void clearPendingRecv()
Clears arrived messages.
Definition: PostMaster.cpp:383
unsigned int numRecvDone_
Definition: PostMaster.h:179
void clearPending()
All arrived messages and set calls are handled and cleared.
Definition: PostMaster.cpp:215
vector< vector< T > > resize(vector< vector< T > >table, unsigned int n, T init)
static unsigned int numNodes
void set(ObjId id, unsigned int bindIndex, unsigned int size)
Definition: PostMaster.h:83
void remoteFieldGetVec(const Eref &e, unsigned int bindIndex, vector< double > &getRecvBuf)
This is a blocking call. However, it must still handle other.
Definition: PostMaster.cpp:568
void handleRemoteGetVec(const Eref &e, const OpFunc *op, int requestingNode)
Definition: PostMaster.cpp:284
void finalizeSends()
Checks that all sends have gone out.
Definition: PostMaster.cpp:147
static unsigned int myNode()
double * addToSetBuf(const Eref &e, unsigned int opIndex, unsigned int size, unsigned int hopType)
Returns pointer to Set buffer for filling in arguments.
Definition: PostMaster.cpp:463
static const Cinfo * initCinfo()
Definition: PostMaster.cpp:82
static const Cinfo * initCinfo()
Definition: Neutral.cpp:16
static unsigned int numNodes()
vector< double > getHandlerBuf_
Definition: PostMaster.h:164
int isSetRecv_
Definition: PostMaster.h:177
unsigned int DataId
Definition: header.h:47
static const unsigned int reserveBufSize
Definition: PostMaster.h:147
Definition: Cinfo.h:18
static const int RETURNTAG
Definition: PostMaster.h:152
Definition: Shell.h:43
static const unsigned int setRecvBufSize
Definition: PostMaster.h:148
Definition: Finfo.h:12