fei_EqnCommMgr.cpp
1 /*--------------------------------------------------------------------*/
2 /* Copyright 2005 Sandia Corporation. */
3 /* Under the terms of Contract DE-AC04-94AL85000, there is a */
4 /* non-exclusive license for use of this work by or on behalf */
5 /* of the U.S. Government. Export of this program may require */
6 /* a license from the United States Government. */
7 /*--------------------------------------------------------------------*/
8 
9 #include <fei_macros.hpp>
10 
11 #include <fei_defs.h>
12 
13 #include <fei_EqnCommMgr.hpp>
14 
15 #include <fei_CommUtils.hpp>
16 
17 #include <fei_ProcEqns.hpp>
18 #include <fei_EqnBuffer.hpp>
19 #include <fei_TemplateUtils.hpp>
20 
21 #include <algorithm>
22 
23 #undef fei_file
24 #define fei_file "EqnCommMgr.cpp"
25 #include <fei_ErrMacros.hpp>
26 
27 //-----Constructor--------------------------------------------------------------
28 EqnCommMgr::EqnCommMgr(MPI_Comm comm, bool accumulate)
29  : accumulate_(accumulate),
30  localProc_(0),
31  recvProcEqns_(NULL),
32  exchangeIndicesCalled_(false),
33  recvEqns_(NULL),
34  solnValues_(),
35  sendProcEqns_(NULL),
36  sendEqns_(NULL),
37  sendEqnSoln_(),
38  essBCEqns_(NULL),
39  comm_(comm)
40 {
41  localProc_ = fei::localProc(comm);
42  recvEqns_ = new EqnBuffer();
43  sendEqns_ = new EqnBuffer();
44  recvProcEqns_ = new ProcEqns();
45  sendProcEqns_ = new ProcEqns();
46 
47  essBCEqns_ = new EqnBuffer();
48 }
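
// A rough usage sketch, inferred from the functions defined in this file (the
// variable names are placeholders, and the exact call sequence used by the FEI
// implementation classes may differ):
//
//   EqnCommMgr eqnComm(comm);                            // comm: MPI communicator
//   eqnComm.addRemoteIndices(eqn, owner, indices, n);    // structure of remotely-owned eqns
//   eqnComm.exchangeIndices();                           // swap structure with owning procs
//   eqnComm.addRemoteEqn(eqn, owner, coefs, indices, n); // coefficient contributions
//   eqnComm.exchangeEqns();                              // swap coefficients and RHS data
//   eqnComm.addSolnValues(eqnNumbers, values, n);        // owner stores its solution values
//   eqnComm.exchangeSoln();                              // owners return soln to contributors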
49 
50 //-----CopyConstructor--------------------------------------------------------------
51 EqnCommMgr::EqnCommMgr(const EqnCommMgr& src)
52  : accumulate_(src.accumulate_),
53  localProc_(0),
54  recvProcEqns_(NULL),
55  exchangeIndicesCalled_(false),
56  recvEqns_(NULL),
57  solnValues_(),
58  sendProcEqns_(NULL),
59  sendEqns_(NULL),
60  sendEqnSoln_(),
61  essBCEqns_(NULL),
62  comm_(src.comm_)
63 {
64  *this = src;
65 }
66 
67 //-----Destructor---------------------------------------------------------------
68 EqnCommMgr::~EqnCommMgr()
69 {
70  delete recvEqns_;
71  delete sendEqns_;
72  delete recvProcEqns_;
73  delete sendProcEqns_;
74 
75  delete essBCEqns_;
76 }
77 
78 //------------------------------------------------------------------------------
79 EqnCommMgr& EqnCommMgr::operator=(const EqnCommMgr& src)
80 {
81  delete recvProcEqns_;
82  recvProcEqns_ = src.recvProcEqns_->deepCopy();
83 
84  exchangeIndicesCalled_ = src.exchangeIndicesCalled_;
85  accumulate_ = src.accumulate_;
86 
87  delete recvEqns_;
88  recvEqns_ = src.recvEqns_->deepCopy();
89 
90  int len = src.solnValues_.size();
91 
92  solnValues_.resize(len);
93 
94  for(int i=0; i<len; i++) {
95  solnValues_[i] = src.solnValues_[i];
96  }
97 
98  delete sendProcEqns_;
99  sendProcEqns_ = src.sendProcEqns_->deepCopy();
100 
101  delete sendEqns_;
102  sendEqns_ = src.sendEqns_->deepCopy();
103 
104  len = src.sendEqnSoln_.size();
105  sendEqnSoln_.resize(len);
106 
107  for(int i=0; i<len; i++) {
108  sendEqnSoln_[i] = src.sendEqnSoln_[i];
109  }
110 
111  delete essBCEqns_;
112  essBCEqns_ = src.essBCEqns_->deepCopy();
113 
114  return(*this);
115 }
116 
117 //------------------------------------------------------------------------------
118 EqnCommMgr* EqnCommMgr::deepCopy()
119 {
120 //
121 //This function is like a copy-constructor: it produces not just a
122 //structural copy of the current object, it copies EVERYTHING, including
123 //data contents.
124 //
125  EqnCommMgr* dest = new EqnCommMgr(comm_);
126  *dest = *this;
127  return(dest);
128 }
129 
130 //------------------------------------------------------------------------------
131 void EqnCommMgr::addLocalEqn(int eqnNumber, int srcProc) {
132 //
133 //This function adds the eqnNumber, srcProc pair to the recvProcEqns_
134 //object, which does the right thing as far as only putting it in lists that
135 //it isn't already in, preserving order, etc.
136 //
137  if (srcProc == localProc_) {
138  fei::console_out() << "EqnCommMgr::addRecvEqn: ERROR, srcProc == localProc_, "
139  << "which is a recipe for a deadlock." << FEI_ENDL;
140  std::abort();
141  }
142 
143  recvProcEqns_->addEqn(eqnNumber, srcProc);
144 }
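
// (addLocalEqn is the receiving-side counterpart of addRemoteIndices further
// below: it records that 'srcProc' will be contributing to an equation that
// this processor owns.)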
145 
146 //------------------------------------------------------------------------------
147 void EqnCommMgr::addSolnValues(int* eqnNumbers, double* values, int num)
148 {
149  if (!exchangeIndicesCalled_) {
150  fei::console_out() << "EqnCommMgr::addSolnValues: ERROR, you may not call this until"
151  " after exchangeIndices has been called." << FEI_ENDL;
152  std::abort();
153  }
154 
155  std::vector<int>& recvEqnNumbers = recvEqns_->eqnNumbers();
156 
157  double* solnValuesPtr = &solnValues_[0];
158  for(int i=0; i<num; i++) {
159  int index = fei::binarySearch(eqnNumbers[i], recvEqnNumbers);
160 
161  if (index < 0) continue;
162 
163  solnValuesPtr[index] = values[i];
164  }
165 }
166 
167 //------------------------------------------------------------------------------
168 int EqnCommMgr::exchangeIndices(std::ostream* dbgOut) {
169 //
170 //This function performs the communication necessary to exchange remote
171 //contributions to the matrix structure (indices only, not coefficients)
172 //among all participating processors.
173 //
174 //Most of this function is #ifdef'd according to whether FEI_SER is
175 //defined...
176 //
177 #ifndef FEI_SER
178 
179  int numSendEqns = sendEqns_->getNumEqns();
180  fei::CSVec** sendEqnsPtr = numSendEqns>0 ?&(sendEqns_->eqns()[0]) : NULL;
181  std::vector<int>& sendEqnNumbers = sendEqns_->eqnNumbers();
182  std::vector<int> sendEqnLengths(numSendEqns);
183  for(int i=0; i<numSendEqns; ++i) {
184  sendEqnLengths[i] = sendEqnsPtr[i]->size();
185  }
186 
187  if (sendEqnNumbers.size() > 0) {
188  sendProcEqns_->setProcEqnLengths(&sendEqnNumbers[0],
189  &sendEqnLengths[0],
190  numSendEqns);
191  }
192 
193  CHK_ERR( mirrorProcEqns(*sendProcEqns_, *recvProcEqns_) );
194  CHK_ERR( mirrorProcEqnLengths(*sendProcEqns_, *recvProcEqns_) );
195 
198 
199  size_t numRecvProcs = recvProcEqns_->getNumProcs();
200  size_t numSendProcs = sendProcEqns_->getNumProcs();
201  std::vector<int>& sendProcs = sendProcEqns_->procsPtr();
202 
203  //while we're here, let's allocate the array into which we will (later)
204  //recv the soln values for the remote equations we've contributed to.
205  sendEqnSoln_.resize(numSendEqns);
206 
207  //First we'll figure out the expected receive-lengths and the expected
208  //send lengths.
209 
210  std::vector<int> recvProcTotalLengths(numRecvProcs);
211  std::vector<int> sendProcTotalLengths(numSendProcs);
212 
213  std::vector<int>& recvProcs = recvProcEqns_->procsPtr();
214  std::vector<int>& eqnsPerRecvProc = recvProcEqns_->eqnsPerProcPtr();
215  std::vector<std::vector<int>*>& recvProcEqnLengths =
216  recvProcEqns_->procEqnLengthsPtr();
217  std::vector<std::vector<int>*>& recvProcEqnNumbers =
218  recvProcEqns_->procEqnNumbersPtr();
219 
220  for(unsigned i=0; i<numRecvProcs; i++) {
221 
222  //first we need the total of eqn-lengths for this recv-proc
223  int totalLength = 0;
224  for(int j=0; j<eqnsPerRecvProc[i]; j++) {
225  totalLength += (*(recvProcEqnLengths[i]))[j];
226  }
227  recvProcTotalLengths[i] = totalLength;
228  }
229 
230  std::vector<int>& eqnsPerSendProc = sendProcEqns_->eqnsPerProcPtr();
231  std::vector<std::vector<int>*>& sendProcEqnNumbers =
232  sendProcEqns_->procEqnNumbersPtr();
233  std::vector<std::vector<int>*>& sendProcLengths =
234  sendProcEqns_->procEqnLengthsPtr();
235 
236  for(unsigned i=0; i<numSendProcs; i++) {
237  int totalLength = 0;
238  for(int j=0; j<eqnsPerSendProc[i]; j++) {
239  totalLength += (*(sendProcLengths[i]))[j];
240  }
241  sendProcTotalLengths[i] = totalLength;
242  }
243 
244  //Before we get too carried away here, let's make sure that the messages we
245  //expect to receive from each other processor are the same length as the
246  //other processor plans to send. (Many times I've had crashes in MPI_Wait
247  //below, which are always mysterious to figure out. They usually result
248  //from mis-matched send/recv lengths.)
249 
250  CHK_ERR( consistencyCheck("exchangeIndices", recvProcs, recvProcTotalLengths,
251  sendProcs, sendProcTotalLengths) );
252 
253  int** recvProcEqnIndices = NULL;
254 
255  if (numRecvProcs > 0) {
256  recvProcEqnIndices = new int*[numRecvProcs];
257  }
258 
259  MPI_Request* indRequests = NULL;
260 
261  if (numRecvProcs > 0) {
262  indRequests = new MPI_Request[numRecvProcs];
263  }
264 
265  int numRecvsStarted = 0;
266  int indTag = 9904;
267 
268  //now, let's start the recvs for the incoming indices.
269  for(unsigned i=0; i<numRecvProcs; i++) {
270 
271  int totalLength = recvProcTotalLengths[i];
272 
273  recvProcEqnIndices[i] = new int[totalLength];
274 
275  //launch the recvs for the indices now.
276  if (MPI_Irecv(recvProcEqnIndices[i], totalLength, MPI_INT,
277  recvProcs[i], indTag, comm_, &indRequests[i]) != MPI_SUCCESS) {
278  ERReturn(-1);
279  }
280 
281  numRecvsStarted++;
282  }
283 
284  //now we need to build the lists of outgoing indices and send those.
285  std::vector<int> indices;
286 
287  for(unsigned i=0; i<numSendProcs; i++) {
288  int totalLength = sendProcTotalLengths[i];
289  int j;
290 
291  indices.resize(totalLength);
292  int* indicesPtr = &indices[0];
293 
294  int offset = 0;
295 
296  for(j=0; j<eqnsPerSendProc[i]; j++) {
297  int eqnLoc = sendEqns_->getEqnIndex((*(sendProcEqnNumbers[i]))[j]);
298  std::vector<int>& sendIndices = sendEqns_->eqns()[eqnLoc]->indices();
299  int* sendIndicesPtr = &sendIndices[0];
300 
301  for(int k=0; k<(*(sendProcLengths[i]))[j]; k++) {
302  indicesPtr[offset++] = sendIndicesPtr[k];
303  }
304  }
305 
306  if (MPI_Send(indicesPtr, totalLength, MPI_INT, sendProcs[i],
307  indTag, comm_) != MPI_SUCCESS) ERReturn(-1)
308  }
309 
310  //and finally, we're ready to complete the irecvs for the indices and put
311  //them away.
312  int numCompleted = 0;
313  for(unsigned i=0; i<numRecvProcs; i++) {
314  MPI_Status status;
315  int index = i;
316  MPI_Wait(&indRequests[i], &status);
317  numCompleted++;
318 
319  int offset = 0;
320  for(int j=0; j<eqnsPerRecvProc[index]; j++) {
321  int eqn = (*(recvProcEqnNumbers[index]))[j];
322  int* indxs = &(recvProcEqnIndices[index][offset]);
323  int len = (*(recvProcEqnLengths[index]))[j];
324 
325  recvEqns_->addIndices(eqn, indxs, len);
326 
327  offset += len;
328  }
329 
330  delete [] recvProcEqnIndices[index];
331  }
332 
333  delete [] recvProcEqnIndices;
334  delete [] indRequests;
335 
336  if (numRecvsStarted != numCompleted) {
337  fei::console_out() << "EqnCommMgr::exchangeIndices: recv-send mismatch; "
338  << "numRecvsStarted: " << numRecvsStarted << ", numCompleted: "
339  << numCompleted << FEI_ENDL;
340  std::abort();
341  }
342 
343  //allocate the solnValue_ list, which is of size numRecvEqns.
344  int numRecvEqns = recvEqns_->getNumEqns();
345  solnValues_.resize(numRecvEqns);
346 
347  exchangeIndicesCalled_ = true;
348 
349  if (dbgOut != NULL) {
350  FEI_OSTREAM& os = *dbgOut;
351  os << "#ereb exchangeIndices, sendEqns_:"<<FEI_ENDL;
352 // os << *sendEqns_<<FEI_ENDL;
353  }
354 
355 #else
356  (void)dbgOut;
357 #endif // #ifndef FEI_SER
358 
359  return(0);
360 }
361 
362 //------------------------------------------------------------------------------
363 int EqnCommMgr::consistencyCheck(const char* caller,
364  std::vector<int>& recvProcs,
365  std::vector<int>& recvProcTotalLengths,
366  std::vector<int>& sendProcs,
367  std::vector<int>& sendProcTotalLengths)
368 {
369  int err = 0;
370  //First we'll gather each processors send-lengths onto all other processors.
371  std::vector<int> globalProcSendLengths, globalSendProcs;
372  std::vector<int> gatherSizes;
373 
374  CHK_ERR( fei::Allgatherv(comm_, sendProcTotalLengths,
375  gatherSizes, globalProcSendLengths) );
376 
377  CHK_ERR( fei::Allgatherv(comm_, sendProcs,
378  gatherSizes, globalSendProcs) );
379 
380  //Now check the consistency of the global send-lengths against local
381  //receive-lengths.
382  int offset = 0;
383  for(unsigned i=0; i<gatherSizes.size(); i++) {
384  int size = gatherSizes[i];
385  if ((int)i==localProc_) {offset += size; continue; }
386 
387  //now we're ready to stride through processor i's sendProcs. Only do this
388  //if processor i is one of our recvProcs.
389  std::vector<int>::iterator rp_iter =
390  std::lower_bound(recvProcs.begin(), recvProcs.end(), (int)i);
391  int rpIndex = -1;
392  if (rp_iter != recvProcs.end() && (int)i == *rp_iter) {
393  rpIndex = (int)(rp_iter - recvProcs.begin());
394  }
395 
396  if (rpIndex < 0) {
397  // proc i is not one of our recvProcs. Let's make sure
398  //that we're not one of proc i's sendProcs.
399  for(int j=0; j<size; j++) {
400  if (globalSendProcs[offset+j] == localProc_) {
401  fei::console_out() << "EqnCommMgr::"<<caller<<" ERROR: proc " << localProc_
402  << " is not expecting to receive from proc " << i << " but proc "
403  << i << " is expecting to send to proc " << localProc_ << FEI_ENDL;
404  err = -1;
405  }
406  }
407  if (err == -1) break;
408 
409  //if err != -1, simply jump to the next for(i... iteration.
410  offset += size;
411  continue;
412  }
413 
414  for(int j=0; j<size; j++) {
415  if (globalSendProcs[offset+j] == localProc_) {
416  int sendLength = globalProcSendLengths[offset+j];
417  int recvLength = recvProcTotalLengths[rpIndex];
418  if (sendLength != recvLength) {
419  fei::console_out() << "EqnCommMgr::"<<caller<<" ERROR: proc " << localProc_
420  << " is expecting to receive " << recvLength << " indices from "
421  << "proc " << i << " but proc " << i << " is expecting to send "
422  << sendLength << " indices to proc " << localProc_ << FEI_ENDL;
423  err = -1;
424  }
425  }
426  }
427 
428  offset += size;
429  }
430 
431  int globalErr = 0;
432  CHK_ERR( fei::GlobalSum(comm_, err, globalErr) );
433 
434  return(globalErr);
435 }
436 
437 //------------------------------------------------------------------------------
438 int EqnCommMgr::exchangeEqns(std::ostream* dbgOut)
439 {
440  //
441  //This function performs the communication necessary to exchange remote
442  //equations (both indices and coefficients) among all participating processors.
443  //
444 
445  recvEqns_->resetCoefs();
446 
447  if (dbgOut != NULL) {
448  FEI_OSTREAM& os = *dbgOut;
449  os << "#ereb exchangeEqns begin, sendEqns_:"<<FEI_ENDL;
450 // os << *sendEqns_<<FEI_ENDL;
451  }
452 
453  CHK_ERR( exchangeEqnBuffers(comm_, sendProcEqns_, sendEqns_,
454  recvProcEqns_, recvEqns_, accumulate_) );
455 
456  if (dbgOut != NULL) {
457  FEI_OSTREAM& os = *dbgOut;
458  os << "#ereb exchangeEqns end, sendEqns_:"<<FEI_ENDL;
459 // os << *sendEqns_<<FEI_ENDL;
460  }
461 
462  return(0);
463 }
464 
465 //------------------------------------------------------------------------------
466 int EqnCommMgr::exchangeEqnBuffers(MPI_Comm comm, ProcEqns* sendProcEqns,
467  EqnBuffer* sendEqns, ProcEqns* recvProcEqns,
468  EqnBuffer* recvEqns, bool accumulate)
469 {
470 //
471 //This function performs the communication necessary to exchange remote
472 //equations (both indices and coefficients) among all participating processors.
473 //
474 //Most of this function is #ifdef'd according to whether FEI_SER is
475 //defined.
476 #ifdef FEI_SER
477  (void)comm;
478  (void)sendProcEqns;
479  (void)sendEqns;
480  (void)recvProcEqns;
481  (void)recvEqns;
482  (void)accumulate;
483 #else
484  int indTag = 9113, coefTag = 9114;
485 
486  size_t numRecvProcs = recvProcEqns->getNumProcs();
487  size_t numSendProcs = sendProcEqns->getNumProcs();
488  if ((numRecvProcs == 0) && (numSendProcs == 0)) return(0);
489 
490  //we assume that sendProcEqns and recvProcEqns are fully populated with
491  //equation-numbers and their lengths, and that this data is consistent with
492  //the data in sendEqns...
493 
494  MPI_Request* indRequests = NULL;
495  MPI_Request* coefRequests = NULL;
496  int** recvProcEqnIndices = NULL;
497  double** recvProcEqnCoefs = NULL;
498 
499  if (numRecvProcs > 0) {
500  indRequests = new MPI_Request[numRecvProcs];
501  coefRequests = new MPI_Request[numRecvProcs];
502  recvProcEqnIndices = new int*[numRecvProcs];
503  recvProcEqnCoefs = new double*[numRecvProcs];
504  }
505 
506  int numRHSs = sendEqns->getNumRHSs();
507 
508  //now, let's allocate the space for the incoming equations.
509  //each row of recvProcEqnIndices will be of length
510  //sum-of-recvProcEqnLengths[i], and each row of recvProcEqnCoefs will be
511  //of length sum-of-recvProcEqnLengths[i] + numRHSs*eqnsPerRecvProc[i].
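  //
  //For example (hypothetical numbers): if a remote proc sends us 3 equations
  //with lengths {4, 2, 5} and numRHSs is 2, then recvProcEqnIndices[i] holds
  //4+2+5 = 11 ints (plus padding), and recvProcEqnCoefs[i] holds
  //11 + 2*3 = 17 doubles.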
512 
513  std::vector<int>& recvProcs = recvProcEqns->procsPtr();
514  std::vector<int>& eqnsPerRecvProc = recvProcEqns->eqnsPerProcPtr();
515  std::vector<std::vector<int>*>& recvProcEqnNumbers =
516  recvProcEqns->procEqnNumbersPtr();
517  std::vector<std::vector<int>*>& recvProcEqnLengths =
518  recvProcEqns->procEqnLengthsPtr();
519 
520  int padding = 2;
521  for(unsigned i=0; i<numRecvProcs; i++) {
522  int totalLength = 0;
523 
524  for(int j=0; j<eqnsPerRecvProc[i]; j++) {
525  totalLength += (*(recvProcEqnLengths[i]))[j];
526  }
527 
528  //in case we're only exchanging rhs coefs (i.e., there are no
529  //column-indices), let's make the length totalLength+padding so we can
530  //send the new*Data_ indicators.
531  recvProcEqnIndices[i] = new int[totalLength+padding];
532 
533  int coefLength = totalLength + numRHSs*eqnsPerRecvProc[i];
534 
535  recvProcEqnCoefs[i] = new double[coefLength];
536  //let's go ahead and launch the recvs for the indices and coefs now.
537  MPI_Irecv(recvProcEqnIndices[i], totalLength+padding, MPI_INT,
538  recvProcs[i], indTag, comm, &indRequests[i]);
539 
540  MPI_Irecv(recvProcEqnCoefs[i], coefLength, MPI_DOUBLE,
541  recvProcs[i], coefTag, comm, &coefRequests[i]);
542  }
543 
544  //ok, now we need to build the lists of outgoing indices and coefs, and
545  //send those.
546  std::vector<int>& sendProcs = sendProcEqns->procsPtr();
547  std::vector<int>& eqnsPerSendProc = sendProcEqns->eqnsPerProcPtr();
548  std::vector<std::vector<int>*>& sendProcEqnNumbers =
549  sendProcEqns->procEqnNumbersPtr();
550  std::vector<std::vector<int>*>& sendProcEqnLengths =
551  sendProcEqns->procEqnLengthsPtr();
552 
553  std::vector<std::vector<double>*>& sendRHS = *(sendEqns->rhsCoefsPtr());
554 
555  for(unsigned i=0; i<numSendProcs; i++) {
556  int totalLength = 0;
557  int* sendProcEqnNumbers_i = &(*(sendProcEqnNumbers[i]))[0];
558  int* sendProcEqnLengths_i = &(*(sendProcEqnLengths[i]))[0];
559  int j;
560  for(j=0; j<eqnsPerSendProc[i]; j++)
561  totalLength += sendProcEqnLengths_i[j];
562 
563  //As with the recv code above, let's make the indices length
564  //be totalLength+padding...
565  std::vector<int> indices(totalLength+padding);
566  int* indicesPtr = &indices[0];
567  int coefLength = totalLength + numRHSs*eqnsPerSendProc[i];
568 
569  std::vector<double> coefs(coefLength);
570  double* coefsPtr = &coefs[0];
571 
572  int offset = 0;
573 
574  //first pack up the coefs and indices
575  for(j=0; j<eqnsPerSendProc[i]; j++) {
576  int eqnLoc = sendEqns->getEqnIndex(sendProcEqnNumbers_i[j]);
577  int* sendIndices = &(sendEqns->eqns()[eqnLoc]->indices())[0];
578  double* sendCoefs= &(sendEqns->eqns()[eqnLoc]->coefs())[0];
579 
580  for(int k=0; k<sendProcEqnLengths_i[j]; k++) {
581  indicesPtr[offset] = sendIndices[k];
582  coefsPtr[offset++] = sendCoefs[k];
583  }
584  }
585 
586  //now append the new*Data_ indicators to the end of the indices array
587  indicesPtr[offset] = sendEqns->newCoefData_;
588  indicesPtr[offset+1] = sendEqns->newRHSData_;
589 
590  //now append the RHS coefs to the end of the coefs array
591  for(j=0; j<eqnsPerSendProc[i]; j++) {
592  int eqnLoc = sendEqns->getEqnIndex(sendProcEqnNumbers_i[j]);
593 
594  for(int k=0; k<numRHSs; k++) {
595  coefsPtr[offset++] = (*(sendRHS[eqnLoc]))[k];
596  }
597  }
598 
599  MPI_Send(&indices[0], (int)indices.size(), MPI_INT, sendProcs[i],
600  indTag, comm);
601  MPI_Send(&coefs[0], coefLength, MPI_DOUBLE, sendProcs[i],
602  coefTag, comm);
603  }
604 
605  //and finally, we're ready to complete the irecvs for the indices and coefs,
606  //and put them away.
607  for(unsigned i=0; i<numRecvProcs; i++) {
608  MPI_Status status;
609  int index = i;
610  MPI_Wait(&indRequests[index], &status);
611  MPI_Wait(&coefRequests[index], &status);
612 
613  int j, offset = 0;
614  for(j=0; j<eqnsPerRecvProc[index]; j++) {
615  int eqn = (*(recvProcEqnNumbers[index]))[j];
616  int* indices = &(recvProcEqnIndices[index][offset]);
617  double* coefs = &(recvProcEqnCoefs[index][offset]);
618  int len = (*(recvProcEqnLengths[index]))[j];
619 
620  recvEqns->addEqn(eqn, coefs, indices, len, accumulate);
621 
622  offset += len;
623  }
624 
625  recvEqns->newCoefData_ += recvProcEqnIndices[index][offset];
626  recvEqns->newRHSData_ += recvProcEqnIndices[index][offset+1];
627  delete [] recvProcEqnIndices[index];
628  }
629 
630  //now unpack the RHS entries
631 
632  recvEqns->setNumRHSs(numRHSs);
633 
634  for(unsigned i=0; i<numRecvProcs; i++) {
635  int j, offset = 0;
636  for(j=0; j<eqnsPerRecvProc[i]; j++) {
637  offset += (*(recvProcEqnLengths[i]))[j];
638  }
639 
640  for(j=0; j<eqnsPerRecvProc[i]; j++) {
641  int eqn = (*(recvProcEqnNumbers[i]))[j];
642 
643  for(int k=0; k<numRHSs; k++) {
644  CHK_ERR( recvEqns->addRHS(eqn, k, recvProcEqnCoefs[i][offset++],
645  accumulate));
646  }
647  }
648 
649  delete [] recvProcEqnCoefs[i];
650  }
651 
652  delete [] recvProcEqnIndices;
653  delete [] recvProcEqnCoefs;
654  delete [] indRequests;
655  delete [] coefRequests;
656 
657 #endif //#ifdef FEI_SER
658 
659  return(0);
660 }
661 
662 //------------------------------------------------------------------------------
663 void EqnCommMgr::exchangeSoln()
664 {
665  //Most of this function is #ifdef'd according to whether FEI_SER
666  //is defined...
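  //
  //Note on direction: the message flow here is the reverse of exchangeEqns.
  //We send entries of solnValues_ to the procs in recvProcEqns_ (the procs
  //that contributed to equations we own), and we receive, from the procs in
  //sendProcEqns_, the solution values for the remote equations we contributed
  //to; those arrive in sendEqnSoln_.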
667 #ifndef FEI_SER
668 
669  int solnTag = 19906;
670 
671  MPI_Request* solnRequests = NULL;
672  double** solnBuffer = NULL;
673 
674  size_t numSendProcs = sendProcEqns_->getNumProcs();
675  std::vector<int>& sendProcs = sendProcEqns_->procsPtr();
676  std::vector<int>& eqnsPerSendProc = sendProcEqns_->eqnsPerProcPtr();
677 
678  if (numSendProcs > 0) {
679  solnRequests = new MPI_Request[numSendProcs];
680  solnBuffer = new double*[numSendProcs];
681  }
682 
683  MPI_Comm comm = comm_;
684 
685  //let's launch the recv's for the incoming solution values.
686  for(unsigned i=0; i<numSendProcs; i++) {
687  solnBuffer[i] = new double[eqnsPerSendProc[i]];
688 
689  MPI_Irecv(solnBuffer[i], eqnsPerSendProc[i], MPI_DOUBLE, sendProcs[i],
690  solnTag, comm, &solnRequests[i]);
691  }
692 
693  size_t numRecvProcs = recvProcEqns_->getNumProcs();
694  std::vector<int>& recvProcs = recvProcEqns_->procsPtr();
695  std::vector<int>& eqnsPerRecvProc = recvProcEqns_->eqnsPerProcPtr();
696  std::vector<std::vector<int>*>& recvProcEqnNumbers =
697  recvProcEqns_->procEqnNumbersPtr();
698 
699  //now let's send the outgoing solutions.
700  for(unsigned i=0; i<numRecvProcs; i++) {
701  double* solnBuff = new double[eqnsPerRecvProc[i]];
702 
703  for(int j=0; j<eqnsPerRecvProc[i]; j++) {
704  int eqnNumber = (*(recvProcEqnNumbers[i]))[j];
705 
706  int index = recvEqns_->getEqnIndex(eqnNumber);
707  solnBuff[j] = solnValues_[index];
708  }
709 
710  MPI_Send(solnBuff, eqnsPerRecvProc[i], MPI_DOUBLE, recvProcs[i],
711  solnTag, comm);
712 
713  delete [] solnBuff;
714  }
715 
716  std::vector<std::vector<int>*>& sendProcEqnNumbers =
717  sendProcEqns_->procEqnNumbersPtr();
718 
719  //make sure the sendEqnSoln_ array is big enough...
720  sendEqnSoln_.resize(sendEqns_->getNumEqns());
721 
722  //ok, complete the above recvs and store the soln values.
723  for(unsigned i=0; i<numSendProcs; i++) {
724  int index;
725  MPI_Status status;
726  MPI_Waitany((int)numSendProcs, solnRequests, &index, &status);
727 
728  for(int j=0; j<eqnsPerSendProc[index]; j++) {
729  int eqnNumber = (*(sendProcEqnNumbers[index]))[j];
730  int ind = sendEqns_->getEqnIndex(eqnNumber);
731 
732  sendEqnSoln_[ind] = solnBuffer[index][j];
733  }
734 
735  delete [] solnBuffer[index];
736  }
737 
738  delete [] solnRequests;
739  delete [] solnBuffer;
740 #endif //#ifndef FEI_SER
741 }
742 
743 //------------------------------------------------------------------------------
744 int EqnCommMgr::mirrorProcEqns(ProcEqns& inProcEqns, ProcEqns& outProcEqns)
745 {
746  //Beginning assumption: we (the local processor) have a populated ProcEqns
747  //object (inProcEqns) which contains information pairing certain equations
748  //with certain remote processors. These can be equations that we will be
749  //receiving in an all-to-all exchange, or equations that we will be sending in
750  //an all-to-all exchange. In either case, the "mirror" of that information is
751  //needed before the exchange can be performed. i.e., if we know which eqns
752  //we'll be sending, then the mirror info concerns the eqns we'll be recv'ing,
753  //and vice-versa if we already know which eqns we'll be recv'ing.
754  //
755  //This function is to obtain that mirror info, and return it in outProcEqns.
756  //
757  //Given a populated ProcEqns object, we want to populate the mirror ProcEqns
758  //object.
759  //
760  //First figure out how many procs belong in outProcEqns.
761  //Then receive, from each of those procs, the list of associated equations.
762  //Then we'll have the info necessary to populate the 'outProcEqns' object.
763 
764  //Most of this function is #ifdef'd according to whether FEI_SER
765  //is defined.
766 #ifdef FEI_SER
767  (void)inProcEqns;
768  (void)outProcEqns;
769 #else
770  int numProcs = fei::numProcs(comm_);
771 
772  if (numProcs < 2) return(0);
773 
774  std::vector<int> buf(numProcs*2, 0);
775 
776  std::vector<int>& inProcs = inProcEqns.procsPtr();
777  std::vector<int> outProcs;
778  fei::mirrorProcs(comm_, inProcs, outProcs);
779 
780  std::vector<int>& eqnsPerInProc = inProcEqns.eqnsPerProcPtr();
781 
782  size_t numOutProcs = outProcs.size();
783 
784  std::vector<int> recvbuf(numOutProcs, 0);
785 
786  //now send a length (the contents of buf[i]) to each "in-proc"
787  //(length of the list of equation data that is to follow).
788  MPI_Request* requests = new MPI_Request[numOutProcs];
789  MPI_Status* statuses = new MPI_Status[numOutProcs];
790  int firsttag = 1014;
791  int offset = 0;
792  for(size_t i=0; i<numOutProcs; ++i) {
793  if (MPI_Irecv(&(recvbuf[i]), 1, MPI_INT, outProcs[i], firsttag,
794  comm_, &requests[offset++]) != MPI_SUCCESS) ERReturn(-1);
795  }
796 
797  size_t numInProcs = inProcs.size();
798  for(size_t i=0; i<numInProcs; ++i) {
799  if (MPI_Send(&(eqnsPerInProc[i]), 1, MPI_INT, inProcs[i], firsttag,
800  comm_) != MPI_SUCCESS) ERReturn(-1);
801  }
802 
803  MPI_Waitall((int)numOutProcs, requests, statuses);
804 
805  delete [] requests;
806  delete [] statuses;
807 
808  std::vector<int> lengths(numOutProcs);
809 
810  offset = 0;
811  for(unsigned i=0; i<numOutProcs; ++i) {
812  if (recvbuf[i] > 0) {
813  lengths[offset++] = recvbuf[i];
814  }
815  }
816 
817  //now we need to create 'numOutProcs' lists, into which we'll receive the
818  //equation-numbers that those procs send to us.
819  std::vector<std::vector<int>*>* outEqns = NULL;
820  if (numOutProcs > 0) {
821  outEqns = new std::vector<std::vector<int>*>(numOutProcs);
822  }
823 
824  for(unsigned i=0; i<numOutProcs; i++) {
825  (*outEqns)[i] = new std::vector<int>(lengths[i]);
826  }
827 
828  //finally we're ready to exchange lists of equations.
829 
830  CHK_ERR( fei::exchangeData(comm_, inProcs,
831  inProcEqns.procEqnNumbersPtr(),
832  outProcs, true, *outEqns) );
833 
834  //now we've completed all the communication, so we're ready to put the data
835  //we received into the outProcEqns object.
836  for(unsigned i=0; i<numOutProcs; i++) {
837  std::vector<int>* eqnArray = (*outEqns)[i];
838  int* eqns = &(*eqnArray)[0];
839  size_t len = eqnArray->size();
840  for(unsigned j=0; j<len; j++) {
841  outProcEqns.addEqn(eqns[j], outProcs[i]);
842  }
843  delete eqnArray;
844  }
845 
846  delete outEqns;
847 
848 #endif //#ifdef FEI_SER
849 
850  return(0);
851 }
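
// A usage sketch of the mirror functions (paralleling how gatherSharedBCs and
// exchangeRemEssBCs below use them; 'sendInfo' and 'recvInfo' are hypothetical
// names):
//
//   ProcEqns sendInfo, recvInfo;
//   sendInfo.addEqn(eqnNumber, eqnLength, destProc);       // eqns we will send
//   CHK_ERR( mirrorProcEqns(sendInfo, recvInfo) );         // learn who sends to us
//   CHK_ERR( mirrorProcEqnLengths(sendInfo, recvInfo) );   // and the eqn lengths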
852 
853 //------------------------------------------------------------------------------
854 int EqnCommMgr::mirrorProcEqnLengths(ProcEqns& inProcEqns,
855  ProcEqns& outProcEqns)
856 {
857  //Support function to set up information needed for exchanging equation
858  //info among processors.
859  //This function plays a similar role to that of the above 'mirrorProcEqns'
860  //function, but exchanges the length information rather than the
861  //equation-number information. THIS FUNCTION ASSUMES that both inProcEqns and
862  //outProcEqns are already populated with eqn-number information.
863 
864  //Most of this function is #ifdef'd according to whether FEI_SER
865  //is defined.
866 #ifdef FEI_SER
867  (void)inProcEqns;
868  (void)outProcEqns;
869 #else
870  if (fei::numProcs(comm_) == 1) return(0);
871 
872  CHK_ERR( fei::exchangeData( comm_, inProcEqns.procsPtr(),
873  inProcEqns.procEqnLengthsPtr(),
874  outProcEqns.procsPtr(),
875  true,
876  outProcEqns.procEqnLengthsPtr() ) );
877 #endif //#ifdef FEI_SER
878 
879  return(0);
880 }
881 
882 //------------------------------------------------------------------------------
883 int EqnCommMgr::addRemoteEqn(int eqnNumber, int destProc,
884  const double* coefs, const int* indices, int num) {
885  (void)destProc;
886  sendEqns_->newCoefData_ = 1;
887 
888  return(sendEqns_->addEqn(eqnNumber, coefs, indices, num, accumulate_));
889 }
890 
891 //------------------------------------------------------------------------------
892 int EqnCommMgr::addRemoteEqn(int eqnNumber, const double* coefs,
893  const int* indices, int num)
894 {
895  sendEqns_->newCoefData_ = 1;
896 
897  return(sendEqns_->addEqn(eqnNumber, coefs, indices, num, accumulate_));
898 }
899 
900 //------------------------------------------------------------------------------
901 void EqnCommMgr::setNumRHSs(int numRHSs) {
902  sendEqns_->setNumRHSs(numRHSs);
903  recvEqns_->setNumRHSs(numRHSs);
904 }
905 
906 //------------------------------------------------------------------------------
907 int EqnCommMgr::addRemoteRHS(int eqnNumber, int destProc, int rhsIndex,
908  double value)
909 {
910  (void)destProc;
911  sendEqns_->newRHSData_ = 1;
912  return(sendEqns_->addRHS(eqnNumber, rhsIndex, value));
913 }
914 
915 //------------------------------------------------------------------------------
916 int EqnCommMgr::addRemoteRHS(int eqnNumber, int rhsIndex, double value)
917 {
918  sendEqns_->newRHSData_ = 1;
919  return(sendEqns_->addRHS(eqnNumber, rhsIndex, value));
920 }
921 
922 //------------------------------------------------------------------------------
923 void EqnCommMgr::addRemoteIndices(int eqnNumber, int destProc,
924  int* indices, int num)
925 {
926  if (destProc < 0) {
927  fei::console_out() << "fei: EqnCommMgr::addRemoteIndices ERROR, destProc < 0" << FEI_ENDL;
928  std::abort();
929  }
930 
931  sendEqns_->addIndices(eqnNumber, indices, num);
932 
933  sendProcEqns_->addEqn(eqnNumber, destProc);
934 }
935 
936 //------------------------------------------------------------------------------
937 void EqnCommMgr::resetCoefs() {
938  recvEqns_->resetCoefs();
939  sendEqns_->resetCoefs();
940  essBCEqns_->resetCoefs();
941 
942  sendEqns_->newCoefData_ = 0;
943  sendEqns_->newRHSData_ = 0;
944  recvEqns_->newCoefData_ = 0;
945  recvEqns_->newRHSData_ = 0;
946 
947  int numRecvEqns = recvEqns_->getNumEqns();
948  for(int i=0; i<numRecvEqns; i++) {
949  solnValues_[i] = 0.0;
950  }
951 }
952 
953 //----------------------------------------------------------------------------
954 int EqnCommMgr::gatherSharedBCs(EqnBuffer& bcEqns)
955 {
956  //Gather boundary-condition equations from all sharing processors to the
957  //owning processors.
958  //
959  //On entry to this function, bcEqns contains all boundary-condition equations
960  //that were specified by the finite-element application on this processor.
961  //On exit, bcEqns will also include any boundary-condition equations that were
962  //specified by the finite-element application on processors that share nodes
963  //that are owned by this processor.
964  //
965  //We'll set up two new equation buffers: sendBCs and recvBCs. From the input
966  //bcEqns, we'll put each eqn that is in sendEqns_ into sendBCs because these
967  //are the equations that are shared and remotely owned. We'll then do a
968  //gather where all processors send their shared-bc eqns to the owner of those
969  //equations. The result of this gather will be in recvBCs, which we will then
970  //merge into bcEqns.
971 
972  if (fei::numProcs(comm_) == 1) return(0);
973 
974  int i;
975  EqnBuffer sendBCs, recvBCs;
976  ProcEqns sendBCProcEqns, recvBCProcEqns;
977 
978  //now loop through the equations in bcEqns, checking whether they're in
979  //our sendEqns_ object.
980  std::vector<int>& bcEqnNumbers = bcEqns.eqnNumbers();
981  int numBCeqns = bcEqnNumbers.size();
982 
983  std::vector<int>& sendEqnNumbers = sendEqns_->eqnNumbers();
984  std::vector<int>& sendProcs = sendProcEqns_->procsPtr();
985  std::vector<std::vector<int>*>& sendProcEqnNumbers =
986  sendProcEqns_->procEqnNumbersPtr();
987  size_t numSendProcs = sendProcs.size();
988 
989  for(i=0; i<numBCeqns; i++) {
990  int eqn = bcEqnNumbers[i];
991 
992  int index = fei::binarySearch(eqn, sendEqnNumbers);
993  if (index<0) continue;
994 
995  std::vector<double>& coefs = bcEqns.eqns()[i]->coefs();
996  std::vector<int>& indices = bcEqns.eqns()[i]->indices();
997  CHK_ERR( sendBCs.addEqn(eqn, &coefs[0], &indices[0],
998  indices.size(), false) );
999 
1000  for(unsigned p=0; p<numSendProcs; p++) {
1001  if (std::binary_search(sendProcEqnNumbers[p]->begin(),
1002  sendProcEqnNumbers[p]->end(), eqn)) {
1003 
1004  sendBCProcEqns.addEqn(eqn, indices.size(), sendProcs[p]);
1005  }
1006  }
1007  }
1008 
1009  //now set up the required mirror info, then perform the exchange among procs.
1010  CHK_ERR( mirrorProcEqns(sendBCProcEqns, recvBCProcEqns) );
1011  CHK_ERR( mirrorProcEqnLengths(sendBCProcEqns, recvBCProcEqns) );
1012  CHK_ERR( exchangeEqnBuffers(comm_, &sendBCProcEqns,
1013  &sendBCs, &recvBCProcEqns, &recvBCs, false) );
1014 
1015  //finally, merge the recvBCs into the bcEqns buffer.
1016  CHK_ERR( bcEqns.addEqns(recvBCs, false) );
1017 
1018  return(0);
1019 }
1020 
1021 //------------------------------------------------------------------------------
1022 int EqnCommMgr::exchangeRemEssBCs(int* essEqns, int numEssEqns,double* essAlpha,
1023  double* essGamma, MPI_Comm comm,
1024  std::ostream* dbgOut)
1025 {
1026  delete essBCEqns_;
1027  essBCEqns_ = new EqnBuffer();
1028 
1029  EqnBuffer* sendEssEqns = new EqnBuffer();
1030  ProcEqns* essSendProcEqns = new ProcEqns();
1031 
1032  std::vector<fei::CSVec*>& _sendEqns = sendEqns_->eqns();
1033  std::vector<int>& _sendEqnNumbers = sendEqns_->eqnNumbers();
1034  int _numSendEqns = sendEqns_->getNumEqns();
1035 
1036  //check to see if any of the essEqns are in the _sendEqns indices.
1037 //the ones that are will need to be sent to other processors.
1038 
1039  if (dbgOut != NULL) {
1040  FEI_OSTREAM& os = *dbgOut;
1041  os << "#ereb: num-remote-rows: " << _numSendEqns
1042  << ", numEssEqns: " << numEssEqns << FEI_ENDL;
1043 // for(int ii=0; ii<numEssEqns; ++ii) {
1044 // os << "#ereb, essEqns["<<ii<<"]: "<<essEqns[ii]<<FEI_ENDL;
1045 // }
1046  os << "#ereb sendEqns_:"<<FEI_ENDL;
1047  os << *sendEqns_<<FEI_ENDL;
1048  }
1049 
1050  bool accumulate = false;
1051 
1052  std::vector<int> offsets(numEssEqns);
1053  int* offsetsPtr = numEssEqns>0 ? &offsets[0] : NULL;
1054 
1055  for(int j=0; j<_numSendEqns; j++) {
1056 
1057  std::vector<int>& indices = _sendEqns[j]->indices();
1058 
1059  fei::binarySearch(numEssEqns, essEqns, offsetsPtr,
1060  &indices[0], indices.size());
1061 
1062  int sendEqn_j = _sendEqnNumbers[j];
1063 
1064  int proc = getSendProcNumber(sendEqn_j);
1065 
1066  const int* sendEqnsPtr_j = &indices[0];
1067 
1068  if (dbgOut != NULL) {
1069  FEI_OSTREAM& os = *dbgOut;
1070  os << "#ereb sendeqns["<<j<<"].length: "
1071  <<_sendEqns[j]->size()<<", numEssEqns: " << numEssEqns<<FEI_ENDL;
1072  }
1073 
1074  for(int i=0; i<numEssEqns; i++) {
1075 
1076  int index = offsetsPtr[i];
1077 
1078  if (index < 0) continue;
1079 
1080  essSendProcEqns->addEqn(sendEqn_j, proc);
1081 
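  //(presumably essGamma[i]/essAlpha[i] is the prescribed value of the
  //essential BC, i.e. the solution of alpha*u = gamma)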
1082  double coef = essGamma[i]/essAlpha[i];
1083  int essEqns_i = essEqns[i];
1084 
1085  int* essEqns_i_ptr = &essEqns_i;
1086 
1087  sendEssEqns->addEqn(sendEqn_j, &coef,
1088  essEqns_i_ptr, 1, accumulate);
1089 
1090  for(size_t k=0; k<_sendEqns[j]->size(); k++) {
1091 
1092  int row = sendEqnsPtr_j[k];
1093 
1094  essBCEqns_->addEqn(row, &coef, essEqns_i_ptr, 1, accumulate);
1095  }
1096  }
1097  }
1098 
1099  if (dbgOut != NULL) {
1100  FEI_OSTREAM& os = *dbgOut;
1101  os << "#ereb sendEssEqns:"<<FEI_ENDL;
1102  os << *sendEssEqns<<FEI_ENDL;
1103  }
1104 
1105  ProcEqns* essRecvProcEqns = new ProcEqns();
1106 
1107  CHK_ERR( mirrorProcEqns(*essSendProcEqns, *essRecvProcEqns) );
1108 
1109  std::vector<int>& eqnNumbers = sendEssEqns->eqnNumbers();
1110  std::vector<fei::CSVec*>& sendEssEqnsVec = sendEssEqns->eqns();
1111  std::vector<int> eqnLengths(eqnNumbers.size());
1112  for(size_t i=0; i<eqnNumbers.size(); ++i) {
1113  eqnLengths[i] = sendEssEqnsVec[i]->size();
1114  }
1115 
1116  if (eqnNumbers.size() > 0) {
1117  essSendProcEqns->setProcEqnLengths(&eqnNumbers[0], &eqnLengths[0],
1118  eqnNumbers.size());
1119  }
1120 
1121  CHK_ERR( mirrorProcEqnLengths(*essSendProcEqns, *essRecvProcEqns) );
1122 
1123 
1124  CHK_ERR( exchangeEqnBuffers(comm, essSendProcEqns, sendEssEqns,
1125  essRecvProcEqns, essBCEqns_, accumulate) );
1126 
1127  if (dbgOut != NULL) {
1128  FEI_OSTREAM& os = *dbgOut;
1129  os << "essBCEqns:"<<FEI_ENDL;
1130  os << *essBCEqns_ << FEI_ENDL;
1131  }
1132 
1133  delete sendEssEqns;
1134  delete essSendProcEqns;
1135  delete essRecvProcEqns;
1136 
1137  return(0);
1138 }
1139 
1140 //------------------------------------------------------------------------------
1141 int EqnCommMgr::exchangePtToBlkInfo(snl_fei::PointBlockMap& blkEqnMapper)
1142 {
1143  std::set<int> sendIndices;
1144  std::vector<fei::CSVec*>& sendeqns = sendEqns_->eqns();
1145  for(size_t i=0; i<sendeqns.size(); ++i) {
1146  std::vector<int>& indices = sendeqns[i]->indices();
1147  int len = indices.size();
1148  if (len < 1) continue;
1149  int* indicesPtr = &indices[0];
1150  for(int j=0; j<len; ++j) {
1151  sendIndices.insert(indicesPtr[j]);
1152  }
1153  }
1154 
1155  std::set<int> recvIndices;
1156  std::vector<fei::CSVec*>& recveqns = recvEqns_->eqns();
1157  for(size_t i=0; i<recveqns.size(); ++i) {
1158  std::vector<int>& indices = recveqns[i]->indices();
1159  int len = indices.size();
1160  if (len < 1) continue;
1161  int* indicesPtr = &indices[0];
1162  for(int j=0; j<len; ++j) {
1163  recvIndices.insert(indicesPtr[j]);
1164  }
1165  }
1166 
1167  std::map<int,int>* ptEqns = blkEqnMapper.getPtEqns();
1168  size_t numPtEqns = ptEqns->size();
1169 
1170  std::map<int,int>::const_iterator
1171  pteq = ptEqns->begin(),
1172  pteq_end = ptEqns->end();
1173 
1174  std::vector<int> ptBlkInfo(numPtEqns*3);
1175  int* ptBlkInfoPtr = &ptBlkInfo[0];
1176 
1177  int offset = 0;
1178  for(; pteq!=pteq_end; ++pteq) {
1179  int ptEqn = (*pteq).first;
1180  if (sendIndices.find(ptEqn) == sendIndices.end()) continue;
1181 
1182  int blkEqn = blkEqnMapper.eqnToBlkEqn(ptEqn);
1183  int blkSize = blkEqnMapper.getBlkEqnSize(blkEqn);
1184 
1185  ptBlkInfoPtr[offset++] = ptEqn;
1186  ptBlkInfoPtr[offset++] = blkEqn;
1187  ptBlkInfoPtr[offset++] = blkSize;
1188  }
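  //(The packed layout is three ints per shared point-equation:
  // [ptEqn, blkEqn, blkSize, ptEqn, blkEqn, blkSize, ...].)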
1189 
1190  ptBlkInfo.resize(offset);
1191 
1192  size_t numRecvProcs = recvProcEqns_->getNumProcs();
1193  std::vector<int>& recvProcs = recvProcEqns_->procsPtr();
1194  size_t numSendProcs = sendProcEqns_->getNumProcs();
1195  std::vector<int>& sendProcs = sendProcEqns_->procsPtr();
1196 
1197  std::vector<std::vector<int>* > recvData(numRecvProcs);
1198  for(unsigned i=0; i<numRecvProcs; ++i) {
1199  recvData[i] = new std::vector<int>;
1200  }
1201 
1202  std::vector<std::vector<int>* > sendData(numSendProcs);
1203  for(unsigned i=0; i<numSendProcs; ++i) {
1204  sendData[i] = &ptBlkInfo;
1205  }
1206 
1207  CHK_ERR( fei::exchangeData(comm_, sendProcs, sendData,
1208  recvProcs, false, recvData) );
1209 
1210  for(unsigned i=0; i<numRecvProcs; ++i) {
1211  size_t len = recvData[i]->size()/3;
1212  int* dataPtr = &(*(recvData[i]))[0];
1213 
1214  offset = 0;
1215  for(unsigned eq=0; eq<len; ++eq) {
1216  int ptEqn = dataPtr[offset++];
1217 
1218  if (recvIndices.find(ptEqn) == recvIndices.end()) {
1219  offset += 2; continue;
1220  }
1221 
1222  int blkEqn = dataPtr[offset++];
1223  int blkSize = dataPtr[offset++];
1224 
1225  blkEqnMapper.setEqn(ptEqn, blkEqn, blkSize);
1226  }
1227  delete recvData[i];
1228  }
1229 
1230  return(0);
1231 }
1232 
1233 //------------------------------------------------------------------------------
1234 int EqnCommMgr::addRemoteEqns(fei::CSRMat& mat, bool onlyIndices)
1235 {
1236  std::vector<int>& rowNumbers = mat.getGraph().rowNumbers;
1237  std::vector<int>& rowOffsets = mat.getGraph().rowOffsets;
1238  std::vector<int>& pckColInds = mat.getGraph().packedColumnIndices;
1239  std::vector<double>& pckCoefs = mat.getPackedCoefs();
1240 
1241  for(size_t i=0; i<rowNumbers.size(); ++i) {
1242  int row = rowNumbers[i];
1243  int offset = rowOffsets[i];
1244  int rowlen = rowOffsets[i+1]-offset;
1245  int* indices = &pckColInds[offset];
1246  double* coefs = &pckCoefs[offset];
1247 
1248  int proc = getSendProcNumber(row);
1249  if (proc == localProc_ || proc < 0) continue;
1250 
1251  if (onlyIndices) {
1252  addRemoteIndices(row, proc, indices, rowlen);
1253  }
1254  else {
1255  CHK_ERR( addRemoteEqn(row, proc, coefs, indices, rowlen) );
1256  }
1257  }
1258 
1259  return(0);
1260 }
1261 
1262 //------------------------------------------------------------------------------
1263 int EqnCommMgr::addRemoteRHS(fei::CSVec& vec, int rhsIndex)
1264 {
1265  std::vector<int>& indices = vec.indices();
1266  std::vector<double>& coefs = vec.coefs();
1267 
1268  for(size_t i=0; i<indices.size(); i++) {
1269  int proc = getSendProcNumber(indices[i]);
1270 
1271  if (proc == localProc_ || proc < 0) continue;
1272 
1273  CHK_ERR( addRemoteRHS(indices[i], proc, rhsIndex, coefs[i]) );
1274  }
1275 
1276  return(0);
1277 }
1278 
1279 //------------------------------------------------------------------------------
1280 int EqnCommMgr::getSendProcNumber(int eqn)
1281 {
1282  size_t numSendProcs = sendProcEqns_->getNumProcs();
1283  std::vector<int>& sendProcs = sendProcEqns_->procsPtr();
1284  std::vector<std::vector<int>*>& sendProcEqnNumbers =
1285  sendProcEqns_->procEqnNumbersPtr();
1286 
1287  for(unsigned i=0; i<numSendProcs; i++) {
1288  if (std::binary_search(sendProcEqnNumbers[i]->begin(),
1289  sendProcEqnNumbers[i]->end(), eqn)) {
1290 
1291  return(sendProcs[i]);
1292  }
1293  }
1294 
1295  return(-1);
1296 }
1297 