fei_NodeCommMgr.cpp
/*--------------------------------------------------------------------*/
/*    Copyright 2005 Sandia Corporation.                              */
/*    Under the terms of Contract DE-AC04-94AL85000, there is a       */
/*    non-exclusive license for use of this work by or on behalf      */
/*    of the U.S. Government.  Export of this program may require     */
/*    a license from the United States Government.                    */
/*--------------------------------------------------------------------*/

#include <fei_macros.hpp>

#include <fei_mpi.h>

#include <fei_defs.h>

#include <fei_TemplateUtils.hpp>
#include <fei_mpiTraits.hpp>
#include <fei_CommUtils.hpp>
#include <fei_NodeDescriptor.hpp>
#include <fei_NodeCommMgr.hpp>
#include <SNL_FEI_Structure.hpp>

#include <fei_NodeDatabase.hpp>

#undef fei_file
#define fei_file "fei_NodeCommMgr.cpp"
#include <fei_ErrMacros.hpp>
//------Constructor-------------------------------------------------------------
NodeCommMgr::NodeCommMgr(MPI_Comm comm, const SNL_FEI_Structure& problemStructure, int sharedNodeOwnership)
 : sharedNodes_(NULL),
   sharedNodesAllocated_(false),
   sharedNodeOwnership_(sharedNodeOwnership),
   localNodeIDs(),
   remoteNodeIDs(),
   sharedNodeIDs(),
   sharedNodeSubdomains(),
   trivialSubdomainList(1),
   sharingProcs_(),
   sharedNodeNumbers(),
   remoteOwnerProcs_(),
   remoteSharingProcs_(),
   nodesPerOwnerProc_(),
   nodesPerSharingProc_(),
   comm_(comm),
   numProcs_(1),
   localProc_(0),
   maxFields_(0),
   maxBlocks_(0),
   maxSubdomains_(0),
   initCompleteCalled_(false),
   probStruc(problemStructure)
{
  numProcs_ = fei::numProcs(comm_);
  localProc_ = fei::localProc(comm_);
  trivialSubdomainList[0] = localProc_;
}

//-----Destructor---------------------------------------------------------------
NodeCommMgr::~NodeCommMgr()
{
  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    delete sharingProcs_[i];
  }

  delete [] sharedNodes_;
  sharedNodesAllocated_ = false;
}

//------------------------------------------------------------------------------
int NodeCommMgr::getSharedNodeIndex(GlobalID nodeID)
{
  return( fei::binarySearch(nodeID, &sharedNodeIDs[0], sharedNodeIDs.size()) );
}

//------------------------------------------------------------------------------
int NodeCommMgr::getSharedNodeNumSubdomains(GlobalID nodeID)
{
  int index = getSharedNodeIndex(nodeID);

  //If the node isn't one of our shared nodes, return 1, signifying that
  //the node is in 1 subdomain: the local subdomain.
  if (index < 0) return(1);

  //Since the node is one of our shared nodes, it has an entry in our
  //sharedNodeSubdomains table indicating how many subdomains it
  //appears in, so return that count.
  return(sharedNodeSubdomains[index].size());
}

//------------------------------------------------------------------------------
std::vector<int>* NodeCommMgr::getSharedNodeSubdomainList(GlobalID nodeID)
{
  int index = getSharedNodeIndex(nodeID);

  //If the node isn't one of our shared nodes, return the trivial list,
  //which holds only the local subdomain.
  if (index < 0) return( &trivialSubdomainList );

  //Since the node is one of our shared nodes, it has an entry in our
  //sharedNodeSubdomains table.
  return( &(sharedNodeSubdomains[index]) );
}
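
//Note: trivialSubdomainList is the length-1 list {localProc_} set up in the
//constructor, so a non-shared node is reported as lying in exactly one
//subdomain: the local one.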

//------------------------------------------------------------------------------
int NodeCommMgr::informLocal(const NodeDescriptor& node) {
//
//NodeCommMgr is being informed that 'node' is present in the local
//active node list.
//
//This means that either:
// 1. it is a locally-owned shared node, or
// 2. it isn't even a shared node (it's a purely local node), in which
//    case we'll do nothing.
//

  if (numProcs_ == 1) return(0);

  GlobalID nodeID = node.getGlobalNodeID();

  int sharedIndex = getSharedNodeIndex(nodeID);

  //if this node isn't a shared node, then simply return.
  if (sharedIndex < 0) return(0);

  //Since this node is present as a shared node, put nodeID in the
  //localNodeIDs list if it isn't already there.

  int index = fei::sortedListInsert(nodeID, localNodeIDs);

  //if index is -2, the localNodeIDs array had an allocation failure.
  if (index == -2) return(-2);

  return(0);
}

//------------------------------------------------------------------------------
int NodeCommMgr::getGlobalMaxFieldsBlocks(int& maxFields, int& maxBlocks)
{
  std::vector<int> localMax(2, 0), globalMax(2, 0);

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    int numFlds = sharedNodes_[i]->getNumFields();
    if (numFlds > localMax[0]) localMax[0] = numFlds;

    int numBlks = sharedNodes_[i]->getNumBlocks();
    if (numBlks > localMax[1]) localMax[1] = numBlks;
  }

  int err = fei::GlobalMax(comm_, localMax, globalMax);
  if (err != 0) return(err);

  maxFields = globalMax[0];
  maxBlocks = globalMax[1];

  return(0);
}

//------------------------------------------------------------------------------
int NodeCommMgr::getGlobalMaxFieldsBlocksSubdomains()
{
  std::vector<int> localMax(3, 0), globalMax(3, 0);

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    int numFlds = sharedNodes_[i]->getNumFields();
    if (numFlds > localMax[0]) localMax[0] = numFlds;

    int numBlks = sharedNodes_[i]->getNumBlocks();
    if (numBlks > localMax[1]) localMax[1] = numBlks;

    int numShrd = sharingProcs_[i]->size();
    if (numShrd > localMax[2]) localMax[2] = numShrd;
  }

  int err = fei::GlobalMax(comm_, localMax, globalMax);
  if (err != 0) return(err);

  maxFields_ = globalMax[0];
  maxBlocks_ = globalMax[1];
  maxSubdomains_ = globalMax[2];

  return(0);
}

//------------------------------------------------------------------------------
std::vector<int>& NodeCommMgr::getSendProcs()
{
  return( remoteSharingProcs_ );
}

//------------------------------------------------------------------------------
std::vector<int>& NodeCommMgr::getRecvProcs()
{
  return( remoteOwnerProcs_ );
}

//------------------------------------------------------------------------------
int NodeCommMgr::getSendMessageLength(int destProc, int& messageLength)
{
  std::vector<int>::iterator
    rs_iter = std::lower_bound(remoteSharingProcs_.begin(),
                               remoteSharingProcs_.end(), destProc);
  if (rs_iter == remoteSharingProcs_.end() || destProc != *rs_iter) {
    ERReturn(-1);
  }

  int idx = rs_iter - remoteSharingProcs_.begin();

  int len = 7+maxFields_*2 + maxBlocks_ + maxSubdomains_;
  messageLength = nodesPerSharingProc_[idx] * (len+1);
  return(0);
}
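
//Worked example (hypothetical sizes): with maxFields_=2, maxBlocks_=1 and
//maxSubdomains_=2, len = 7 + 2*2 + 1 + 2 = 14, so a proc that shares 3 of
//our nodes gets a message of 3*(14+1) = 45 ints: 3 nodeIDs followed by up
//to 14 ints of packed data per node.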

//------------------------------------------------------------------------------
int NodeCommMgr::getSendMessage(int destProc, std::vector<int>& message)
{
  std::vector<int>::iterator
    rs_iter = std::lower_bound(remoteSharingProcs_.begin(),
                               remoteSharingProcs_.end(), destProc);
  if (rs_iter == remoteSharingProcs_.end() || destProc != *rs_iter) {
    ERReturn(-1);
  }

  int idx = rs_iter - remoteSharingProcs_.begin();
  int len = 0;
  CHK_ERR( getSendMessageLength(destProc, len) );
  message.resize(len);

  packLocalNodesAndData(&message[0], destProc,
                        nodesPerSharingProc_[idx], len);
  return(0);
}

//------------------------------------------------------------------------------
int NodeCommMgr::processRecvMessage(int srcProc, std::vector<int>& message)
{
  int idx = fei::binarySearch(srcProc, &remoteOwnerProcs_[0],
                              remoteOwnerProcs_.size());
  int numNodes = nodesPerOwnerProc_[idx];
  int* msgPtr = &message[0];
  int offset = 0;

  for(int j=0; j<numNodes; j++) {
    int nIndex = fei::binarySearch(msgPtr[j], &sharedNodeIDs[0], sharedNodeIDs.size());
    if (nIndex < 0) return(-1);
    NodeDescriptor* node = sharedNodes_[nIndex];

    int nodeNum = msgPtr[numNodes+offset++];
    int numFields = msgPtr[numNodes+offset++];
    int numBlocks = msgPtr[numNodes+offset++];
    int numSubdomains = msgPtr[numNodes+offset++];

    node->setNodeNumber(nodeNum);
    node->setNumNodalDOF( msgPtr[numNodes+offset++]);
    node->setBlkEqnNumber(msgPtr[numNodes+offset++]);

    for(int fld=0; fld<numFields; fld++) {
      int fieldID = msgPtr[numNodes+offset++];
      int eqnNum = msgPtr[numNodes+offset++];
      node->addField(fieldID);
      node->setFieldEqnNumber(fieldID, eqnNum);
    }

    for(int blk=0; blk<numBlocks; blk++) {
      int blk_idx = probStruc.getIndexOfBlock(msgPtr[numNodes+offset++]);
      //if blk_idx < 0 it means the incoming blockID doesn't exist on this proc
      if (blk_idx >= 0) {
        node->addBlockIndex(blk_idx);
      }
    }

    sharedNodeSubdomains[nIndex].resize(numSubdomains);
    for(int sd=0; sd<numSubdomains; sd++) {
      (sharedNodeSubdomains[nIndex])[sd] = msgPtr[numNodes+offset++];
    }
  }

  return(0);
}
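
//The incoming message mirrors the layout produced by packLocalNodesAndData:
//the first numNodes entries are nodeIDs, and the rest is a packed stream of
//per-node records (nodeNumber, numFields, numBlocks, numSubdomains,
//numNodalDOF, blkEqnNumber, (fieldID,eqn) pairs, blockIDs, subdomain list),
//which is why every access above is indexed as msgPtr[numNodes+offset++].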

//------------------------------------------------------------------------------
int NodeCommMgr::exchangeEqnInfo()
{
  //
  //This function will perform the communication necessary to:
  //
  //   1. For each locally owned node, send to each remote sharing proc:
  //      the node's nodeNumber, whether or not it appears in local elements,
  //      fieldID(s), the field-size(s), the first global equation numbers
  //      for those fields, and the processor-subdomains that contain the node.
  //   2. For each remotely owned node, receive the above information from
  //      the owners of these nodes.
  //
  //This is a collective function. All procs must enter it before any can
  //exit it.
  //
  //Most of this function is #ifdef'd according to whether FEI_SER is
  //defined.

#ifndef FEI_SER
  if (numProcs_ == 1) return(0);

  //each proc will find out a max. number of fields, blocks
  //and subdomains to expect per node.

  CHK_ERR( getGlobalMaxFieldsBlocksSubdomains() );

  CHK_ERR( fei::exchange(comm_, this) );

  setNodeNumbersArray();

#endif //#ifndef FEI_SER

  return(0);
}

//------------------------------------------------------------------------------
void NodeCommMgr::packLocalNodesAndData(int* data,
                                        int proc, int numNodes, int len)
{
//This function packs up nodeIDs, as well as the list containing, for
//each node, the following:
//   node-number
//   numFields
//   numBlocks
//   numSubdomains
//   num-nodal-dof
//   blk-eqn-number
//   'numFields' pairs of (fieldID,eqnNumber)
//   subdomain list, of length 'numSubdomains'
//
//Incoming parameter len is:
//   numNodes * (1 + 7 + maxFields*2 + maxBlocks + maxSubdomains),
//where maxFields is the maximum number of fields associated with any node,
//maxBlocks is the maximum number of blocks associated with any node, and
//maxSubdomains is the maximum number of subdomains containing any node.
//'data' is of length len.
//
//The above data will all be packed into the 'data' list, with the nodeIDs
//occupying the first numNodes positions, followed by the rest of the data.
//
  int nodeCounter = 0;
  int offset = 0;

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    if (sharedNodes_[i]->getOwnerProc() != localProc_) continue;

    NodeDescriptor* node = sharedNodes_[i];

    //is this local node associated with processor 'proc'?

    std::vector<int>& sProcs = *(sharingProcs_[i]);
    int index = fei::binarySearch(proc, &sProcs[0], sProcs.size());

    //if not, skip to the next iteration...
    if (index < 0) continue;

    if (nodeCounter >= numNodes) {
      fei::console_out() << "NodeCommMgr::packLocalNodesAndData: ERROR,"
           << " nodeCounter >= numNodes." << FEI_ENDL;
    }

    data[nodeCounter++] = (int)(node->getGlobalNodeID());

    int nodeNum = node->getNodeNumber();
    int numFields = node->getNumFields();
    int numBlocks = node->getNumBlocks();
    const int* fieldIDsPtr = node->getFieldIDList();
    const int* fieldEqnNums = node->getFieldEqnNumbers();
    int blkEqnNumber = node->getBlkEqnNumber();

    const std::vector<unsigned>& nodeBlocks = node->getBlockIndexList();
    std::vector<int>& subdomains = sharedNodeSubdomains[i];

    data[numNodes+offset++] = nodeNum;
    data[numNodes+offset++] = numFields;
    data[numNodes+offset++] = numBlocks;
    data[numNodes+offset++] = subdomains.size();
    data[numNodes+offset++] = node->getNumNodalDOF();
    data[numNodes+offset++] = blkEqnNumber;

    for(int j=0; j<numFields; j++) {
      data[numNodes+offset++] = fieldIDsPtr[j];

      if (offset >= len) {
        fei::console_out() << "NodeCommMgr::packLocalNodesAndData: ERROR,"
             << " offset >= len." << FEI_ENDL;
      }

      data[numNodes+offset++] = fieldEqnNums[j];
    }

    for(int kk=0; kk<numBlocks; kk++) {
      GlobalID blkID = probStruc.getBlockID(nodeBlocks[kk]);
      data[numNodes+offset++] = blkID;
    }

    for(unsigned k=0; k<subdomains.size(); k++) {
      data[numNodes+offset++] = subdomains[k];
    }
  }
}
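
//Illustrative record (hypothetical node): a node with 2 fields, 1 block and
//2 subdomains contributes, after the leading nodeID block, the ints
//  [nodeNum, 2, 1, 2, numNodalDOF, blkEqnNumber,
//   fieldID0, eqn0, fieldID1, eqn1, blockID0, subdomain0, subdomain1]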

//------------------------------------------------------------------------------
void NodeCommMgr::packRemoteNodesAndData(GlobalID* data,
                                         int proc, int numNodes, int len)
{
//
//This function packs up the nodeIDs owned by proc, as well as the list
//containing, for each node, the following:
//   residesLocally (0 or 1) indicating whether it appears in the local
//       processor's element domain.
//   numFields
//   numBlocks
//   numNodalDOF
//   'numFields' entries of fieldID
//   'numBlocks' entries of blockID
//
//Incoming parameter len is numNodes * (4 + maxFields + maxBlocks),
//where maxFields is the maximum number of fields associated with any node,
//and maxBlocks is the maximum number of blocks associated with any node.
//'data' is of length numNodes + len.
//
//The above data will all be put in the 'data' list, with the nodeIDs
//occupying the first numNodes positions, followed by the rest of the data.
//
  int nodeCounter = 0;
  int offset = 0;

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    NodeDescriptor* node = sharedNodes_[i];

    int thisProc = node->getOwnerProc();
    if (thisProc != proc) continue;

    if (nodeCounter >= numNodes) {
      fei::console_out() << localProc_ << ": NodeCommMgr::packRemoteNodesAndData: ERROR,"
           << " nodeCounter >= numNodes: " << numNodes << FEI_ENDL;
    }

    data[nodeCounter++] = node->getGlobalNodeID();

    int numFields = node->getNumFields();
    int numBlocks = node->getNumBlocks();
    const int* fieldIDsPtr = node->getFieldIDList();

    const std::vector<unsigned>& nodeBlocks = node->getBlockIndexList();
    int lindex = fei::binarySearch(sharedNodeIDs[i], &localNodeIDs[0], localNodeIDs.size());

    data[numNodes+offset++] = (lindex >= 0) ? 1 : 0;
    data[numNodes+offset++] = (GlobalID)numFields;
    data[numNodes+offset++] = (GlobalID)numBlocks;
    data[numNodes+offset++] = (GlobalID)node->getNumNodalDOF();

    for(int j=0; j<numFields; j++) {
      if (offset >= len) {
        fei::console_out() << "NodeCommMgr::packRemoteNodesAndData: ERROR,"
             << " offset >= len." << FEI_ENDL;
      }

      data[numNodes+offset++] = (GlobalID)fieldIDsPtr[j];
    }

    for(int k=0; k<numBlocks; k++) {
      if (offset >= len) {
        fei::console_out() << "NodeCommMgr::packRemoteNodesAndData: ERROR,"
             << " offset >= len." << FEI_ENDL;
      }

      data[numNodes+offset++] = probStruc.getBlockID(nodeBlocks[k]);
    }
  }
}
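
//Note: unlike packLocalNodesAndData, this buffer is typed GlobalID, since it
//must hold nodeIDs and blockIDs; the small int counts are cast to GlobalID
//so everything can travel in the one buffer.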

//------------------------------------------------------------------------------
int NodeCommMgr::createProcList(std::vector<int>& itemsPerProc,
                                std::vector<int>& procs)
{
//
//This function looks through the itemsPerProc list and counts how many
//positions in this list are greater than 0. Then it creates a list of
//the indices of those positions. i.e., itemsPerProc is a list of how many
//items are to be sent to or recvd from each proc. When itemsPerProc is
//greater than 0 for a proc, that proc is put in the 'procs' list.
//
  int numProcs = 0;
  int len = itemsPerProc.size();

  for(int i=0; i<len; i++) {
    if (itemsPerProc[i] > 0) numProcs++;
  }

  procs.resize(numProcs);

  int offset = 0;

  for(int i=0; i<len; i++) {
    if (itemsPerProc[i] > 0) procs[offset++] = i;
  }
  return(0);
}
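
//Example (hypothetical input): itemsPerProc = {0, 3, 0, 2} yields
//procs = {1, 3}; only procs with a nonzero item-count are kept.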

//------------------------------------------------------------------------------
int NodeCommMgr::getSharedNodeIndex_num(int nodeNumber)
{
  for(unsigned i=0; i<sharedNodeNumbers.size(); ++i) {
    if (sharedNodeNumbers[i] == nodeNumber) return(i);
  }

  return(-1);
}

//------------------------------------------------------------------------------
int NodeCommMgr::addSharedNodes( const GlobalID* nodeIDs,
                                 int numNodes,
                                 const int* const* procs,
                                 const int* numProcs )
{
  //
  //Store the incoming nodeIDs and proc-numbers in the sharedNodeIDs array and
  //sharingProcs_ table.
  //

  try {

    for(int i=0; i<numNodes; i++) {
      int insertPoint = -1;
      int index = fei::binarySearch(nodeIDs[i], sharedNodeIDs, insertPoint);
      if (index < 0) {
        sharingProcs_.insert(sharingProcs_.begin()+insertPoint, new std::vector<int>);

        sharedNodeIDs.insert(sharedNodeIDs.begin()+insertPoint, nodeIDs[i]);

        index = insertPoint;
      }

      int err = storeNodeProcs(index, sharingProcs_, procs[i], numProcs[i]);
      if (err != 0) return(err);
    }

  }
  catch(std::runtime_error& exc) {
    fei::console_out() << exc.what() << FEI_ENDL;
    ERReturn(-1);
  }

  return(0);
}
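
//Usage sketch (hypothetical data; commMgr is an assumed NodeCommMgr
//instance): to record that node 42 is shared by procs 0 and 2:
//  GlobalID ids[1] = {42};
//  int shProcs[2] = {0, 2};
//  const int* procTable[1] = {shProcs};
//  int counts[1] = {2};
//  commMgr.addSharedNodes(ids, 1, procTable, counts);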

//------------------------------------------------------------------------------
int NodeCommMgr::allocateNodeDescriptorPtrs(NodeDatabase& nodeDB)
{
  //This function is called when all shared nodes have been added. We now
  //allocate a list of pointer-to-NodeDescriptor, of length
  //sharedNodeIDs.size(), and fill that list with NodeDescriptor-pointers
  //from the node-database.

  if (sharedNodeIDs.size() == 0) return(0);

  if (sharedNodes_ != NULL) delete [] sharedNodes_;
  sharedNodes_ = new NodeDescriptor*[sharedNodeIDs.size()];
  if (sharedNodes_ == NULL) return(-1);

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    NodeDescriptor* node = NULL;
    int err = nodeDB.getNodeWithID(sharedNodeIDs[i], node);
    if (err != 0) return(-1);

    sharedNodes_[i] = node;
  }

  sharedNodesAllocated_ = true;
  return(0);
}

//------------------------------------------------------------------------------
int NodeCommMgr::initComplete(NodeDatabase& nodeDB, bool safetyCheck)
{
//
//This function is called when initialization is complete (i.e., when
//all sharedNodes have been added, allocateNodeDescriptorPtrs() has been
//called, and informLocal() has been called for all nodes that appear in
//the local finite-element structure).
//
//The task of this function is to assign owner-procs to nodes.
//
//If 'safetyCheck' is true, a global consistency check of the shared node info
//will be performed before the communication is attempted.
//
//return value is 0 if successful, non-zero if an error was encountered
//
  int err = allocateNodeDescriptorPtrs(nodeDB);
  if (err != 0) return(err);

  //Run through the shared nodes, and for each one, assign its owner to be
  //the lowest-numbered sharing processor, which may or may not be localProc_.

  for(unsigned ii=0; ii<sharedNodeIDs.size(); ii++) {
    std::vector<int>& shProcs = *(sharingProcs_[ii]);

    //first, insert localProc_ in this node's list of sharing procs, since the
    //FEI's initSharedNodes function doesn't mandate that the local processor be
    //included in the list of sharing processors. (i.e., localProc_ may not be
    //in this list yet...)
    std::vector<int>::iterator sh_iter =
      std::lower_bound(shProcs.begin(), shProcs.end(), localProc_);
    if (sh_iter == shProcs.end() || localProc_ != *sh_iter) {
      shProcs.insert(sh_iter, localProc_);
    }

    int proc = shProcs[0];

    sharedNodes_[ii]->setOwnerProc(proc);
  }

  //One of the tasks of this object is to gather information on the number
  //of subdomains each shared node appears in. So one thing we'll do here is
  //size and zero the array that will hold that information.
  sharedNodeSubdomains.resize(sharedNodeIDs.size());

  for(unsigned i=0; i<sharedNodeSubdomains.size(); ++i) {
    sharedNodeSubdomains[i].resize(0);
  }

  //now add the local processor to the sharedNodeSubdomains for each node that
  //appears in our localNodeIDs list.
  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    int index = fei::binarySearch(sharedNodeIDs[i], &localNodeIDs[0], localNodeIDs.size());
    if (index >= 0) {
      sharedNodeSubdomains[i].push_back(localProc_);
    }
  }

  if (sharedNodeOwnership_ != STRICTLY_LOW_PROC) {
    err = adjustSharedOwnership();
    if (err != 0) return(err);
  }

  err = createProcLists();

  if (safetyCheck) {
    err = checkSharedNodeInfo();
    if (err != 0) return(-1);
  }

  CHK_ERR( exchangeEqnInfo() );

  initCompleteCalled_ = true;

  return(0);
}

//------------------------------------------------------------------------------
#undef _feiFunc_
#define _feiFunc_ "NodeCommMgr::checkSharedNodeInfo"
int NodeCommMgr::checkSharedNodeInfo()
{
  //This function's task is to "audit" the shared-node info. I.e., to make sure
  //that the info is globally symmetric (e.g., if the local processor thinks it
  //shares a node with another processor, does that other processor also think
  //it shares a node with the local proc?).
  //If this function finds that the shared-node info is consistent/correct, then
  //the return-value is 0. If the shared-node info is found to be wrong, then
  //one or more messages will be written to stderr, and the return-value is -1.
  //
  //This is a collective function, which is relatively expensive. It does a
  //few global reductions...
  //

  if (numProcs_==1) return(0);

  //Make sure that the processors we think are "remote owner"
  //procs, think we are a "remote sharing" proc, and vice-versa.

  std::vector<int> globalOwnerProcs, globalSharingProcs;
  std::vector<int> recvOwnerLengths, recvSharingLengths;

  std::vector<int> globalNodesPerOwnerProcs, globalNodesPerSharingProcs;
  std::vector<int> recvNodesPerOwnerLengths, recvNodesPerSharingLengths;

  //First, gather up each processor's list of remote procs and nodes-per-proc
  //onto all other processors...

  CHK_ERR( fei::Allgatherv(comm_, remoteOwnerProcs_,
                           recvOwnerLengths, globalOwnerProcs) );

  CHK_ERR( fei::Allgatherv(comm_, nodesPerOwnerProc_,
                           recvNodesPerOwnerLengths,
                           globalNodesPerOwnerProcs) );

  CHK_ERR( fei::Allgatherv(comm_, remoteSharingProcs_,
                           recvSharingLengths, globalSharingProcs) );

  CHK_ERR( fei::Allgatherv(comm_, nodesPerSharingProc_,
                           recvNodesPerSharingLengths,
                           globalNodesPerSharingProcs) );

  //Now check the consistency of the global "owners" data against the local
  //"sharing" data.
  int err = checkCommArrays( "owners",
                             globalOwnerProcs, globalNodesPerOwnerProcs,
                             recvOwnerLengths,
                             nodesPerSharingProc_, remoteSharingProcs_);

  //Now check the consistency of the global "sharing" data against the local
  //"owners" data.
  err += checkCommArrays( "sharing",
                          globalSharingProcs, globalNodesPerSharingProcs,
                          recvSharingLengths,
                          nodesPerOwnerProc_, remoteOwnerProcs_);

  int globalErr = 0;

  CHK_ERR( fei::GlobalSum(comm_, err, globalErr) );

  return(globalErr);
}
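
//The symmetry being verified: if proc A lists proc B as a remote owner of n
//of A's shared nodes, then proc B must list proc A as a remote sharing proc
//for exactly n nodes, and vice versa.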

//------------------------------------------------------------------------------
int NodeCommMgr::checkCommArrays(const char* whichCheck,
                                 std::vector<int>& globalRemoteProcs,
                                 std::vector<int>& globalNodesPerRemoteProc,
                                 std::vector<int>& globalRemoteProcLengths,
                                 std::vector<int>& nodesPerRemoteProc,
                                 std::vector<int>& remoteProcs)
{
  int offset = 0;

  for(int i=0; i<numProcs_; i++) {
    int length = globalRemoteProcLengths[i];

    if (i==localProc_) { offset += length; continue; }

    for(int j=0; j<length; j++) {
      if (globalRemoteProcs[offset+j] == localProc_) {
        //proc i says that we (localProc_) own nodes that it shares.
        int numShared = globalNodesPerRemoteProc[offset+j];

        int index = fei::binarySearch(i, &remoteProcs[0], remoteProcs.size());
        if (index < 0) {
          //we don't think proc i shares any nodes that we own.
          fei::console_out() << "FEI NodeCommMgr::checkSharedNodeInfo "<<whichCheck
               << " ERROR. Local proc (" << localProc_
               << ") doesn't share nodes with proc " << i << " but proc " << i
               << " thinks it shares nodes with proc " << localProc_ << FEI_ENDL;
          return(-1);
        }

        //We think that we own nodesPerRemoteProc[index] nodes that proc i
        //shares.
        int numWeThinkWeShare = nodesPerRemoteProc[index];
        if (numWeThinkWeShare != numShared) {
          fei::console_out() << "FEI NodeCommMgr::checkSharedNodeInfo "<<whichCheck
               << " ERROR. Local proc (" << localProc_ << ") thinks it shares "
               << numWeThinkWeShare << " nodes with proc " << i << ", but proc "
               << i << " thinks it shares " << numShared << " nodes with proc "
               << localProc_ << "." << FEI_ENDL;
          return(-1);
        }
      }
    }

    offset += length;
  }

  return(0);
}

//------------------------------------------------------------------------------
int NodeCommMgr::adjustSharedOwnership()
{
  //For each shared node that has not been identified as local, assign its
  //owner to be the next lowest-numbered sharing proc. (Each list of sharing
  //procs is sorted by processor-number, so we just assign the owner to be the
  //next one in the list.)
  //
  //If a node is not local, and localProc_ is the lowest sharing proc, then we
  //also need to flag that node as remote and tell the other processors that we
  //don't own it.
  //
  remoteNodeIDs.resize(0);
  int err;
  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    GlobalID nodeID = sharedNodeIDs[i];

    std::vector<int>& shProcs = *(sharingProcs_[i]);

    if (fei::binarySearch(nodeID, &localNodeIDs[0], localNodeIDs.size()) >= 0) continue;

    int proc = shProcs[0];

    if (proc == localProc_) {
      sharedNodes_[i]->setOwnerProc(shProcs[1]);
      err = fei::sortedListInsert(nodeID, remoteNodeIDs);
      if (err == -2) return(err);
    }
  }

  //Now we need to let the other processors know that the remote nodes
  //aren't owned by us. This is going to require some communication. We'll
  //gather the nodeIDs onto all processors, after which each processor
  //will reset the owner proc for those nodes. (Later, as an optimization,
  //this could be done without all-to-all communication.)

  std::vector<GlobalID> allRemoteNodeIDs;
  std::vector<int> numPerProc;

  err = fei::Allgatherv(comm_, remoteNodeIDs, numPerProc, allRemoteNodeIDs);
  if (err != 0) return(-1);

  //Now we need to run through the global list of 'special' nodes, and for the
  //ones that appear in our localNodeIDs list (i.e., nodes we could validly
  //own), add them to a new list that will be once again globally gathered.
  //That new list will then be used by each processor in setting the nodes'
  //real owners.

  //we'll keep the 'new' list in remoteNodeIDs.
  remoteNodeIDs.resize(0);

  int offset = 0;
  for(unsigned i=0; i<numPerProc.size(); i++) {
    for(int j=0; j<numPerProc[i]; j++) {

      //skip the nodes that we sent; we already know we don't own those.
      if ((int)i==localProc_) {offset++; continue;}

      GlobalID nodeID = allRemoteNodeIDs[offset++];
      int index = getSharedNodeIndex(nodeID);

      //if it's not even one of our shared nodes, then continue.
      if (index < 0) continue;

      if (fei::binarySearch(nodeID, &localNodeIDs[0], localNodeIDs.size()) >= 0) {
        remoteNodeIDs.push_back(nodeID);
      }
    }
  }

  //now re-gather the remoteNodeIDs list to all processors. This time, we should
  //only receive nodeIDs from processors that can be valid owners, i.e.,
  //processors that have those nodes in at least one local element.
  err = fei::Allgatherv(comm_, remoteNodeIDs, numPerProc, allRemoteNodeIDs);
  if (err != 0) return(-1);

  //Now we run the 'allRemoteNodeIDs' list one last time, setting the owner-proc
  //for each node. We run the list from back to front so that if multiple
  //processors are possible owners, the lowest-numbered one is the last one
  //whose assignment gets set.
  offset = allRemoteNodeIDs.size()-1;
  for(int i=(int)numPerProc.size()-1; i>=0; i--) {
    for(int j=0; j<numPerProc[i]; j++) {
      GlobalID nodeID = allRemoteNodeIDs[offset--];
      int index = getSharedNodeIndex(nodeID);

      if (index < 0) continue;

      sharedNodes_[index]->setOwnerProc(i);
    }
  }

  return(0);
}
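
//Summary of the two Allgatherv rounds above: round 1 broadcasts the nodes
//each proc declines to own; round 2 re-broadcasts each such node from every
//proc that holds it locally (a valid owner candidate). Walking the final
//list in reverse means the lowest-ranked candidate's setOwnerProc call is
//applied last, and therefore wins.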

//------------------------------------------------------------------------------
void NodeCommMgr::setNodeNumbersArray()
{
  sharedNodeNumbers.resize(sharedNodeIDs.size());

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    sharedNodeNumbers[i] = sharedNodes_[i]->getNodeNumber();
  }
}

//------------------------------------------------------------------------------
int NodeCommMgr::createProcLists()
{
  std::vector<int> localNodesPerProc(numProcs_, 0);
  std::vector<int> remoteNodesPerProc(numProcs_, 0);

  //first, figure out how many locally-owned nodes each remote processor is
  //associated with, and how many remotely-owned nodes we'll be recv'ing info
  //about from each remote processor.

  for(unsigned i=0; i<sharedNodeIDs.size(); i++) {
    int proc = sharedNodes_[i]->getOwnerProc();

    if (proc != localProc_) {
      remoteNodesPerProc[proc]++;
    }
    else {
      std::vector<int>& shProcs = *(sharingProcs_[i]);
      for(unsigned j=0; j<shProcs.size(); j++) {
        int sproc = shProcs[j];

        if (sproc != localProc_) {
          localNodesPerProc[sproc]++;
        }
      }
    }
  }

  //now create condensed lists of remote owner procs, and
  //remote sharing procs.
  int err = createProcList(remoteNodesPerProc, remoteOwnerProcs_);
  if (err != 0) return(err);

  err = createProcList(localNodesPerProc, remoteSharingProcs_);
  if (err != 0) return(err);

  nodesPerOwnerProc_.resize(remoteOwnerProcs_.size());
  nodesPerSharingProc_.resize(remoteSharingProcs_.size());

  int offset = 0;
  for(int i=0; i<numProcs_; i++) {
    if (remoteNodesPerProc[i] > 0)
      nodesPerOwnerProc_[offset++] = remoteNodesPerProc[i];
  }

  offset = 0;
  for(int i=0; i<numProcs_; i++) {
    if (localNodesPerProc[i] > 0)
      nodesPerSharingProc_[offset++] = localNodesPerProc[i];
  }

  return(0);
}
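
//Example (hypothetical, 4 procs): if remoteNodesPerProc = {0, 2, 0, 5},
//then remoteOwnerProcs_ = {1, 3} and nodesPerOwnerProc_ = {2, 5}; the same
//condensation is applied to localNodesPerProc for the sharing-proc lists.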

//------------------------------------------------------------------------------
int NodeCommMgr::exchangeSharedRemoteFieldsBlks()
{
  //first each proc will find out a max. number of fields and blocks to expect
  //per node.

  //most of this function is #ifdef'd according to whether FEI_SER is
  //defined.
#ifndef FEI_SER
  int maxFields, maxBlocks;
  int err = getGlobalMaxFieldsBlocks(maxFields, maxBlocks);
  if (err) return(-1);

  //now we can allocate lists to recv into and launch the irecvs.
  //from each processor, we'll recv a list of length:
  //     num-nodes*(4 + maxFields + maxBlocks)

  int len = 4 + maxFields + maxBlocks;

  GlobalID** recvData = NULL;
  MPI_Request* recvDataReqs = NULL;

  unsigned i, numRecvProcs = remoteSharingProcs_.size();

  if (numRecvProcs > 0) {
    recvData = new GlobalID*[numRecvProcs];
    recvDataReqs = new MPI_Request[numRecvProcs];
  }

  int dataTag = 19904;

  int numRcvStarted = 0;
  for(i=0; i<remoteSharingProcs_.size(); i++) {
    int numRecvNodes = nodesPerSharingProc_[i];
    recvData[i] = new GlobalID[numRecvNodes*(len+1)];
    MPI_Irecv(recvData[i], numRecvNodes*(len+1),
              fei::mpiTraits<GlobalID>::mpi_type(),
              remoteSharingProcs_[i], dataTag, comm_, &recvDataReqs[i]);
    numRcvStarted++;
  }

  //next, send all outgoing messages.

  fei::Barrier(comm_);

  for(i=0; i<remoteOwnerProcs_.size(); i++) {
    int numSendNodes = nodesPerOwnerProc_[i];

    std::vector<GlobalID> sendData(numSendNodes*(len+1), 0);

    packRemoteNodesAndData(&sendData[0], remoteOwnerProcs_[i],
                           numSendNodes, numSendNodes*len);

    MPI_Send(&sendData[0], sendData.size(),
             fei::mpiTraits<GlobalID>::mpi_type(),
             remoteOwnerProcs_[i], dataTag, comm_);
  }

  //finally, complete the irecvs and put away the node field info.
  int numCompleted = 0;
  for(i=0; i<remoteSharingProcs_.size(); i++) {
    MPI_Status status;
    int index = i;
    MPI_Wait(&recvDataReqs[index], &status);
    numCompleted++;
    int remoteProc = status.MPI_SOURCE;

    int offset = 0;
    int numNodes = nodesPerSharingProc_[index];

    for(int j=0; j<numNodes; j++) {
      int nIndex = fei::binarySearch(recvData[index][j], &sharedNodeIDs[0], sharedNodeIDs.size());
      if (nIndex < 0) {
        fei::console_out() << "NodeCommMgr::exchangeSharedRemote...: error, unknown nodeID "
             << (int)recvData[index][j] << ", " << j
             << "th node recvd from proc "
             << remoteSharingProcs_[index]
             << ". Probably a communication mismatch; we expected "
             << numNodes
             << " nodes from that proc, but recvd fewer than that." << FEI_ENDL;
        std::abort();
      }

      int residesRemotely = (int)recvData[index][numNodes+offset++];

      if (residesRemotely) {
        std::vector<int>& snSubd = sharedNodeSubdomains[nIndex];
        std::vector<int>::iterator sn_iter =
          std::lower_bound(snSubd.begin(), snSubd.end(), remoteProc);
        if (sn_iter == snSubd.end() || remoteProc != *sn_iter) {
          snSubd.insert(sn_iter, remoteProc);
        }
      }
      int numFields = (int)recvData[index][numNodes+offset++];
      int numBlocks = (int)recvData[index][numNodes+offset++];
      sharedNodes_[nIndex]->
        setNumNodalDOF((int)recvData[index][numNodes+offset++]);

      for(int fld=0; fld<numFields; fld++) {
        int fieldID = (int)recvData[index][numNodes+offset++];

        sharedNodes_[nIndex]->addField(fieldID);
      }

      for(int blk=0; blk<numBlocks; blk++) {
        int blk_idx = probStruc.getIndexOfBlock(recvData[index][numNodes+offset++]);
        //if blk_idx < 0 it means the incoming blockID doesn't exist on this proc
        if (blk_idx >= 0) {
          sharedNodes_[nIndex]->addBlockIndex(blk_idx);
        }
      }
    }
  }

  if (numRcvStarted != numCompleted) {
    fei::console_out() << "NodeCommMgr::exchangeSharedRemote...: recv-send mismatch;"
         << " numRcvStarted: " << numRcvStarted << ", numCompleted: "
         << numCompleted << FEI_ENDL;
    std::abort();
  }

  for(i=0; i<numRecvProcs; i++) {
    delete [] recvData[i];
  }

  delete [] recvData;
  delete [] recvDataReqs;

#endif //#ifndef FEI_SER

  return(0);
}
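
//Communication pattern used above: one MPI_Irecv is posted per remote
//sharing proc, then (after a barrier) a blocking MPI_Send goes to each
//remote owner proc, and finally each recv is completed with MPI_Wait and
//unpacked. The (len+1) sizing leaves room for the leading block of numNodes
//nodeIDs in each message.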

//------------------------------------------------------------------------------
int NodeCommMgr::storeNodeProcs(int index,
                                std::vector<std::vector<int>*>& procTable,
                                const int* procs, int numProcs)
{
//Private NodeCommMgr function.
//
//This function stores 'procs' in row 'index' of procTable, maintaining sorted
//order in that row.
//
  std::vector<int>& row_index = *(procTable[index]);
  for(int i=0; i<numProcs; i++) {
    std::vector<int>::iterator r_iter =
      std::lower_bound(row_index.begin(), row_index.end(), procs[i]);
    if (r_iter == row_index.end() || procs[i] != *r_iter) {
      row_index.insert(r_iter, procs[i]);
    }
  }

  return(0);
}
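
//Example (hypothetical): if row 'index' currently holds {1, 4} and
//procs = {4, 0, 2}, the row becomes {0, 1, 2, 4}: duplicates are skipped
//and sorted order is preserved.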