fei_Vector_core.cpp
/*--------------------------------------------------------------------*/
/*    Copyright 2005 Sandia Corporation.                              */
/*    Under the terms of Contract DE-AC04-94AL85000, there is a       */
/*    non-exclusive license for use of this work by or on behalf      */
/*    of the U.S. Government.  Export of this program may require     */
/*    a license from the United States Government.                    */
/*--------------------------------------------------------------------*/

#include "fei_fstream.hpp"

#include "fei_Vector_core.hpp"
#include "fei_VectorSpace.hpp"
#include "fei_CSVec.hpp"
#include "snl_fei_RecordCollection.hpp"
#include "fei_TemplateUtils.hpp"
#include "fei_impl_utils.hpp"

#include <fstream>
#include <sstream>
#undef fei_file
#define fei_file "fei_Vector_core.cpp"

#include "fei_ErrMacros.hpp"

fei::Vector_core::Vector_core(fei::SharedPtr<fei::VectorSpace> vecSpace,
                              int numLocalEqns)
  : eqnComm_(),
    vecSpace_(vecSpace),
    comm_(vecSpace->getCommunicator()),
    firstLocalOffset_(0),
    lastLocalOffset_(0),
    numLocal_(0),
    work_indices_(),
    work_indices2_(),
    haveFEVector_(false),
    remotelyOwnedProcs_(),
    remotelyOwned_(),
    sendProcs_(),
    recvProcs_(),
    recv_sizes_(),
    recv_chars_(),
    send_chars_(),
    sendRecvProcsNeedUpdated_(true),
    overlapAlreadySet_(false),
    dbgprefix_("Vcore: ")
{
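  //eqnComm_ holds the global equation partitioning: getGlobalOffsets()[p]
  //is the first equation owned by proc p, so this proc owns the contiguous
  //range [offsets[localProc], offsets[localProc+1]-1].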
  eqnComm_.reset(new fei::EqnComm(comm_,numLocalEqns));

  const std::vector<int>& offsets = eqnComm_->getGlobalOffsets();
  firstLocalOffset_ = offsets[fei::localProc(comm_)];
  lastLocalOffset_ = offsets[fei::localProc(comm_)+1] - 1;
  numLocal_ = lastLocalOffset_ - firstLocalOffset_ + 1;

  if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
    FEI_OSTREAM& os = *output_stream_;
    os<<dbgprefix_<<" ctor firstLocal="<<firstLocalOffset_<<", lastLocal="
      <<lastLocalOffset_<<FEI_ENDL;
  }
}
60 
62 {
63  for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
64  delete remotelyOwned_[i];
65  }
66 }
67 
68 void fei::Vector_core::setOverlap(int numRemoteEqns,
69  const int* remoteEqns)
70 {
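  //Two modes: if a list of remote equations is given, register exactly
  //those; otherwise (the default) derive the overlap from the
  //VectorSpace's shared-and-owned indices, keeping only the ones owned
  //by other procs.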
  if (numRemoteEqns == 0 && remoteEqns == NULL) {
    if (overlapAlreadySet_) return;
  }

  if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
    FEI_OSTREAM& os = *output_stream_;
    os << dbgprefix_<<"setOverlap"<<FEI_ENDL;
  }

  int local_proc = fei::localProc(comm_);

  if (numRemoteEqns != 0 && remoteEqns != NULL) {
    for(int i=0; i<numRemoteEqns; ++i) {
      int proc = eqnComm_->getOwnerProc(remoteEqns[i]);
      if (proc == local_proc) continue;
      fei::CSVec* remoteVec = getRemotelyOwned(proc);
      fei::add_entry(*remoteVec, remoteEqns[i], 0.0);
    }
  }
  else {
    std::vector<int> eqns;
    vecSpace_->getIndices_SharedAndOwned(eqns);

    for(size_t i=0; i<eqns.size(); ++i) {
      int proc = eqnComm_->getOwnerProc(eqns[i]);
      if (proc == local_proc) continue;
      fei::CSVec* remoteVec = getRemotelyOwned(proc);

      fei::add_entry(*remoteVec, eqns[i], 0.0);
    }
  }

  overlapAlreadySet_ = true;
  sendRecvProcsNeedUpdated_ = true;
}

int fei::Vector_core::scatterToOverlap()
{
  if (fei::numProcs(comm_) == 1 || haveFEVector()) {
    return(0);
  }

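  //Scatter: each owning proc sends its owned coefficients out to the
  //procs that keep overlapping copies. Since only the receivers know
  //which indices they want, this is a three-phase handshake: exchange
  //sizes, exchange index lists, then exchange coefficients.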
#ifndef FEI_SER
  if (!overlapAlreadySet_) {
    setOverlap();
  }

  //...and now the overlap is whatever is in our remotelyOwned_ vectors.

  //first find out which procs we'll be receiving from.
  std::vector<int> recvProcs;
  for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
    if (remotelyOwnedProcs_[i] == fei::localProc(comm_)) continue;
    if (remotelyOwned_[i]->size() == 0) continue;

    recvProcs.push_back(remotelyOwnedProcs_[i]);
  }

  //find out the send-procs.
  std::vector<int> sendProcs;
  fei::mirrorProcs(comm_, recvProcs, sendProcs);

  //declare arrays to send from, and corresponding sizes
  std::vector<std::vector<int> > send_ints(sendProcs.size());
  std::vector<std::vector<double> > send_doubles(sendProcs.size());
  std::vector<int> send_sizes(sendProcs.size());

  std::vector<MPI_Request> mpiReqs(sendProcs.size()+recvProcs.size());
  std::vector<MPI_Status> mpiStatuses(sendProcs.size()+recvProcs.size());
  int tag1 = 11111;
  int tag2 = 11112;

  //first, the procs we're going to send to have to let us know
  //how much data we're supposed to send. So we have to receive
  //sizes and then indices from the "send"-procs.
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    MPI_Irecv(&send_sizes[i], 1, MPI_INT, sendProcs[i],
              tag1, comm_, &mpiReqs[i]);
  }

  //now we'll send the sizes of our remotely-owned data to the
  //procs that we will be receiving the data from, and also the
  //indices that we want to receive.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];

    fei::CSVec* remoteVec = getRemotelyOwned(proc);
    int size = remoteVec->size();
    MPI_Send(&size, 1, MPI_INT, proc, tag1, comm_);
  }

  MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

  //now resize our send_ints and send_doubles arrays, and post the recvs
  //for indices that we're supposed to pack.
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];
    int size = send_sizes[i];
    send_ints[i].resize(size);
    MPI_Irecv(&(send_ints[i][0]), size, MPI_INT, proc, tag1,
              comm_, &mpiReqs[i]);
    send_doubles[i].resize(size);
  }

  //now send the indices that we want to receive data for.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];
    fei::CSVec* remoteVec = getRemotelyOwned(proc);
    int size = remoteVec->size();
    int* indices = &(remoteVec->indices())[0];
    MPI_Send(indices, size, MPI_INT, proc, tag1, comm_);
  }

  MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

  //now post our recvs.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    int proc = recvProcs[i];
    fei::CSVec* remoteVec = getRemotelyOwned(proc);
    int size = remoteVec->size();
    double* coefs = &(remoteVec->coefs())[0];
    MPI_Irecv(coefs, size, MPI_DOUBLE, proc, tag2, comm_, &mpiReqs[i]);
  }

  //now pack and send the coefs that the other procs need from us.
  for(unsigned i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];

    int num = send_sizes[i];
    int err = copyOutOfUnderlyingVector(num, &(send_ints[i][0]),
                                        &(send_doubles[i][0]), 0);
    if (err != 0) {
      FEI_COUT << "fei::Vector_core::scatterToOverlap ERROR getting data to send."<<FEI_ENDL;
      return(err);
    }

    MPI_Send(&(send_doubles[i][0]), num, MPI_DOUBLE, proc, tag2, comm_);
  }

  MPI_Waitall(recvProcs.size(), &mpiReqs[0], &mpiStatuses[0]);

#endif //#ifndef FEI_SER

  return(0);
}

int fei::Vector_core::copyOut(int numValues,
                              const int* indices,
                              double* values,
                              int vectorIndex) const
{
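  //Indices in the locally-owned range are read straight from the
  //underlying vector. Any other index must already be present in the
  //remotelyOwned_ data (i.e., scatterToOverlap must have fetched it);
  //otherwise it is an error.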
  for(int i=0; i<numValues; ++i) {
    int ind = indices[i];

    int local = ind - firstLocalOffset_;
    if (local < 0 || local >= numLocal_) {
      if (ind < 0) {
        continue;
      }

      int proc = eqnComm_->getOwnerProc(ind);
      const fei::CSVec* remoteVec = getRemotelyOwned(proc);

      int insertPoint = -1;
      int idx = fei::binarySearch(ind, remoteVec->indices(), insertPoint);
      if (idx < 0) {
        fei::console_out() << "fei::Vector_core::copyOut: proc " << fei::localProc(comm_)
          << ", index " << ind << " not in remotelyOwned_ vec object for proc "
          <<proc<<FEI_ENDL;
        ERReturn(-1);
      }
      else {
        values[i] = remoteVec->coefs()[idx];
      }
    }
    else {
      CHK_ERR( copyOutOfUnderlyingVector(1, &ind, &(values[i]), vectorIndex) );
    }
  }

  return(0);
}

int fei::Vector_core::giveToVector(int numValues,
                                   const int* indices,
                                   const double* values,
                                   bool sumInto,
                                   int vectorIndex)
{
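  //Locally-owned entries go directly into the underlying vector;
  //off-proc entries are cached in remotelyOwned_ CSVec objects until the
  //next gatherFromOverlap(). prev_proc/prev_vec cache the last lookup,
  //since consecutive indices usually belong to the same owner.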
  int prev_proc = -1;
  fei::CSVec* prev_vec = NULL;
  for(int i=0; i<numValues; ++i) {
    int ind = indices[i];
    double val = values[i];

    if (ind < 0) {
//      throw std::runtime_error("negative index not allowed");
      //preservation of existing behavior: certain Sierra scenarios
      //involve passing negative indices for positions that should be
      //ignored... so we'll continue rather than throwing.
      continue;
    }
    int local = ind - firstLocalOffset_;
    if (local < 0 || local >= numLocal_) {
      int proc = eqnComm_->getOwnerProc(ind);
      if (output_level_ >= fei::BRIEF_LOGS && output_stream_ != NULL) {
        FEI_OSTREAM& os = *output_stream_;
        os << dbgprefix_<<"giveToVector remote["<<proc<<"]("
           <<ind<<","<<val<<")"<<FEI_ENDL;
      }
      fei::CSVec* remoteVec = prev_vec;
      if (proc != prev_proc) {
        remoteVec = getRemotelyOwned(proc);
        prev_vec = remoteVec;
        prev_proc = proc;
      }

      if (sumInto) {
        fei::add_entry( *remoteVec, ind, val);
      }
      else {
        fei::put_entry( *remoteVec, ind, val);
      }
    }
    else {
      int err = giveToUnderlyingVector(1, &ind, &val, sumInto, vectorIndex);
      if (err != 0) {
        fei::console_out() << "giveToVector sumIn ERROR, ind: " << ind
          << ", val: " << val << FEI_ENDL;
        ERReturn(-1);
      }
    }
  }

  return(0);
}

int fei::Vector_core::assembleFieldData(int fieldID,
                                        int idType,
                                        int numIDs,
                                        const int* IDs,
                                        const double* data,
                                        bool sumInto,
                                        int vectorIndex)
{
  if (vecSpace_.get() == NULL) ERReturn(-1);

  int fieldSize = vecSpace_->getFieldSize(fieldID);

  work_indices_.resize(numIDs*fieldSize);
  int* indicesPtr = &work_indices_[0];

  CHK_ERR( vecSpace_->getGlobalIndices(numIDs, IDs, idType, fieldID,
                                       indicesPtr) );

  CHK_ERR( giveToVector(numIDs*fieldSize, indicesPtr, data, sumInto, vectorIndex) );

  return(0);
}

int fei::Vector_core::assembleFieldDataLocalIDs(int fieldID,
                                                int idType,
                                                int numIDs,
                                                const int* localIDs,
                                                const double* data,
                                                bool sumInto,
                                                int vectorIndex)
{
  if (vecSpace_.get() == NULL) ERReturn(-1);

  int fieldSize = vecSpace_->getFieldSize(fieldID);

  work_indices_.resize(numIDs*fieldSize);
  int* indicesPtr = &work_indices_[0];

  CHK_ERR( vecSpace_->getGlobalIndicesLocalIDs(numIDs, localIDs, idType, fieldID,
                                               indicesPtr) );

  CHK_ERR( giveToVector(numIDs*fieldSize, indicesPtr, data, sumInto, vectorIndex) );

  return(0);
}

void fei::Vector_core::pack_send_buffers(const std::vector<int>& sendProcs,
                                         const std::vector<fei::CSVec*>& remotelyOwned,
                                         std::vector<std::vector<char> >& send_chars,
                                         bool resize_buffer,
                                         bool zeroRemotelyOwnedAfterPacking)
{
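  //Called from two places: setCommSizes() packs with resize_buffer=true
  //to establish buffer sizes, while gatherFromOverlap() packs into the
  //already-sized buffers and zeroes the remotely-owned coefficients so
  //they aren't sent again on the next gather.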
  for(size_t i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];
    fei::CSVec* remoteVec = getRemotelyOwned(proc);
    fei::impl_utils::pack_indices_coefs(remoteVec->indices(),
                                        remoteVec->coefs(), send_chars[i], resize_buffer);

    if (zeroRemotelyOwnedAfterPacking) {
      fei::set_values(*remoteVec, 0.0);
    }
  }
}

void fei::Vector_core::setCommSizes()
{
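  //Establish the send/recv proc lists and per-proc buffer sizes once;
  //gatherFromOverlap() reuses them until sendRecvProcsNeedUpdated_ is
  //set again (e.g. by setOverlap()).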
#ifndef FEI_SER
  sendProcs_.clear();
  //first create the list of procs we'll be sending to.
  for(unsigned i=0; i<remotelyOwned_.size(); ++i) {
    if (remotelyOwnedProcs_[i] == fei::localProc(comm_)) continue;
    if (remotelyOwned_[i]->size() == 0) continue;

    sendProcs_.push_back(remotelyOwnedProcs_[i]);
  }

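  //merge in any send-procs the VectorSpace knows about, keeping
  //sendProcs_ sorted and free of duplicates.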
  std::vector<int> tmpSendProcs;
  vecSpace_->getSendProcs(tmpSendProcs);
  for(size_t i=0; i<tmpSendProcs.size(); ++i) {
    bool found = false;
    for(size_t j=0; j<sendProcs_.size(); ++j) {
      if (sendProcs_[j] == tmpSendProcs[i]) {
        found = true;
        break;
      }
      if (sendProcs_[j] > tmpSendProcs[i]) {
        sendProcs_.insert(sendProcs_.begin()+j, tmpSendProcs[i]);
        found = true;
        break;
      }
    }
    if (!found) sendProcs_.push_back(tmpSendProcs[i]);
  }

  recvProcs_.clear();
  fei::mirrorProcs(comm_, sendProcs_, recvProcs_);

  std::vector<MPI_Request> mpiReqs;
  int tag1 = 11111;

  send_chars_.resize(sendProcs_.size());
  recv_chars_.resize(recvProcs_.size());

  bool resize_buffer = true;
  bool zero_remotely_owned_after_packing = false;
  pack_send_buffers(sendProcs_, remotelyOwned_, send_chars_,
                    resize_buffer, zero_remotely_owned_after_packing);

  recv_sizes_.resize(recvProcs_.size());
  mpiReqs.resize(recvProcs_.size());

  //post the recvs for the sizes.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int proc = recvProcs_[i];
    MPI_Irecv(&recv_sizes_[i], 1, MPI_INT, proc,
              tag1, comm_, &mpiReqs[i]);
  }

  //send the sizes of data we'll be sending.
  for(unsigned i=0; i<sendProcs_.size(); ++i) {
    int proc = sendProcs_[i];
    int size = send_chars_[i].size();
    MPI_Send(&size, 1, MPI_INT, proc, tag1, comm_);
  }

  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int index;
    MPI_Status status;
    MPI_Waitany(mpiReqs.size(), &mpiReqs[0], &index, &status);

    recv_chars_[index].resize(recv_sizes_[index]);
  }

  sendRecvProcsNeedUpdated_ = false;
#endif
}

int fei::Vector_core::gatherFromOverlap(bool accumulate)
{
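  //Gather: push all cached off-proc contributions back to their owning
  //procs, where they are either summed into (accumulate=true) or written
  //over the owned entries.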
  if (fei::numProcs(comm_) == 1 || haveFEVector()) {
    return(0);
  }

#ifndef FEI_SER
  std::vector<MPI_Request> mpiReqs;
  int tag1 = 11111;

  if (sendRecvProcsNeedUpdated_) {
    setCommSizes();
  }

  mpiReqs.resize(recvProcs_.size());

  //now post the recvs for the data.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    MPI_Irecv(&(recv_chars_[i][0]), recv_sizes_[i], MPI_CHAR, recvProcs_[i],
              tag1, comm_, &mpiReqs[i]);
  }

  bool resize_buffer = false;
  bool zero_remotely_owned_after_packing = true;
  pack_send_buffers(sendProcs_, remotelyOwned_, send_chars_,
                    resize_buffer, zero_remotely_owned_after_packing);

  //now send the outgoing data.
  for(size_t i=0; i<sendProcs_.size(); ++i) {
    int proc = sendProcs_[i];

    int size = send_chars_[i].size();
    MPI_Send(&(send_chars_[i][0]), size, MPI_CHAR, proc, tag1, comm_);
  }

  int numRecvProcs = recvProcs_.size();
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    int index;
    MPI_Status status;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
  }

  std::vector<int> indices;
  std::vector<double> coefs;
  //now store the data we've received.
  for(size_t i=0; i<recvProcs_.size(); ++i) {
    fei::impl_utils::unpack_indices_coefs(recv_chars_[i], indices, coefs);
    int num = indices.size();
    if (num == 0) continue;
    int err = giveToUnderlyingVector(num, &(indices[0]),
                                     &(coefs[0]), accumulate, 0);
    if (err != 0) {
//      FEI_COUT << "fei::Vector_core::gatherFromOverlap ERROR storing recvd data" << FEI_ENDL;
      return(err);
    }
  }

#endif //#ifndef FEI_SER

  return(0);
}

int fei::Vector_core::copyOutFieldData(int fieldID,
                                       int idType,
                                       int numIDs,
                                       const int* IDs,
                                       double* data,
                                       int vectorIndex)
{
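  //Two paths: the FE-vector case resolves each ID to a node-number and
  //dof-offset through the RecordCollection and reads via copyOut_FE;
  //otherwise the (ID,field) pairs map to global indices and copyOut
  //handles the local/remote distinction.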
  if (vecSpace_.get() == NULL) ERReturn(-1);

  int fieldSize = vecSpace_->getFieldSize(fieldID);

  if (haveFEVector_) {
    snl_fei::RecordCollection* collection = NULL;
    CHK_ERR( vecSpace_->getRecordCollection(idType, collection) );
    int nodeNumber;
    int dofOffset;
    int foffset;
    std::vector<int>& eqnNums = vecSpace_->getEqnNumbers();
    int* vspcEqnPtr = eqnNums.size() > 0 ? &eqnNums[0] : NULL;

    int offset = 0;
    for(int i=0; i<numIDs; ++i) {
      fei::Record<int>* node = collection->getRecordWithID(IDs[i]);
      if (node == NULL) {
        ERReturn(-1);
      }

      nodeNumber = node->getNumber();
      int* eqnNumbers = vspcEqnPtr+node->getOffsetIntoEqnNumbers();
      int err = node->getFieldMask()->getFieldEqnOffset(fieldID, foffset);
      if (err != 0) {
        offset += fieldSize;
        continue;
      }
      dofOffset = eqnNumbers[foffset] - eqnNumbers[0];
      for(int j=0; j<fieldSize; ++j) {
        CHK_ERR( copyOut_FE(nodeNumber, dofOffset+j, data[offset++]));
      }
    }
  }
  else {
    work_indices_.resize(numIDs*fieldSize*2);
    int* indicesPtr = &work_indices_[0];

    CHK_ERR( vecSpace_->getGlobalIndices(numIDs, IDs, idType,
                                         fieldID, indicesPtr) );

    CHK_ERR( copyOut(numIDs*fieldSize, indicesPtr, data) );
  }

  return(0);
}

int fei::Vector_core::writeToFile(const char* filename,
                                  bool matrixMarketFormat)
{
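  //Procs take turns: the barrier'd round-robin lets proc 0 create the
  //file (and write the header), then each subsequent proc appends its
  //locally-owned entries in rank order.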
  int numProcs = fei::numProcs(comm_);
  int localProc = fei::localProc(comm_);

  double coef;

  static char mmbanner[] = "%%MatrixMarket matrix array real general";

  for(int p=0; p<numProcs; ++p) {
    fei::Barrier(comm_);
    if (p != localProc) continue;

    FEI_OFSTREAM* outFile = NULL;
    if (p==0) {
      outFile = new FEI_OFSTREAM(filename, IOS_OUT);
      FEI_OFSTREAM& ofref = *outFile;
      if (matrixMarketFormat) {
        ofref << mmbanner << FEI_ENDL;
        ofref << eqnComm_->getGlobalOffsets()[numProcs] << " 1" << FEI_ENDL;
      }
      else {
        ofref << eqnComm_->getGlobalOffsets()[numProcs] << FEI_ENDL;
      }
    }
    else outFile = new FEI_OFSTREAM(filename, IOS_APP);

    FEI_OFSTREAM& ofref = *outFile;
    ofref.setf(IOS_SCIENTIFIC, IOS_FLOATFIELD);
    ofref.precision(13);

    for(int i=firstLocalOffset_; i<=lastLocalOffset_; ++i) {
      CHK_ERR( copyOut(1, &i, &coef) );
      if (matrixMarketFormat) {
        ofref << " " << coef << FEI_ENDL;
      }
      else {
        ofref << i << " " << coef << FEI_ENDL;
      }
    }

    delete outFile;
  }

  return(0);
}

int fei::Vector_core::writeToStream(FEI_OSTREAM& ostrm,
                                    bool matrixMarketFormat)
{
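  //Same round-robin as writeToFile, but each proc also prints the
  //off-proc entries it holds: those owned by lower-ranked procs before
  //its own range, and those owned by higher-ranked procs after it.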
  int numProcs = fei::numProcs(comm_);
  int local_proc = fei::localProc(comm_);

  double coef;

  static char mmbanner[] = "%%MatrixMarket matrix array real general";

  IOS_FMTFLAGS oldf = ostrm.setf(IOS_SCIENTIFIC, IOS_FLOATFIELD);
  ostrm.precision(13);

  for(int proc=0; proc<numProcs; ++proc) {
    fei::Barrier(comm_);
    if (proc != local_proc) continue;

    if (proc==0) {
      if (matrixMarketFormat) {
        ostrm << mmbanner << FEI_ENDL;
        ostrm << eqnComm_->getGlobalOffsets()[numProcs] << " 1" << FEI_ENDL;
      }
      else {
        ostrm << eqnComm_->getGlobalOffsets()[numProcs] << FEI_ENDL;
      }
    }

    for(size_t p=0; p<remotelyOwned_.size(); ++p) {
      if (remotelyOwnedProcs_[p] > local_proc) continue;
      for(size_t ii=0; ii<remotelyOwned_[p]->size(); ++ii) {
        if (matrixMarketFormat) {
          ostrm << " " << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
        }
        else {
          ostrm << " " << remotelyOwned_[p]->indices()[ii] << " "
                << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
        }
      }
    }

    for(int i=firstLocalOffset_; i<=lastLocalOffset_; ++i) {
      CHK_ERR( copyOut(1, &i, &coef) );
      if (matrixMarketFormat) {
        ostrm << " " << coef << FEI_ENDL;
      }
      else {
        ostrm << " " << i << " " << coef << FEI_ENDL;
      }
    }

    for(size_t p=0; p<remotelyOwned_.size(); ++p) {
      if (remotelyOwnedProcs_[p] < local_proc) continue;
      for(size_t ii=0; ii<remotelyOwned_[p]->size(); ++ii) {
        if (matrixMarketFormat) {
          ostrm << " " << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
        }
        else {
          ostrm << " " << remotelyOwned_[p]->indices()[ii] << " "
                << remotelyOwned_[p]->coefs()[ii] << FEI_ENDL;
        }
      }
    }
  }

  ostrm.setf(oldf, IOS_FLOATFIELD);

  return(0);
}
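
/* Typical assembly flow (a hedged sketch; the element loop and the names
 * fieldID/idType/nodeIDs/elemData/solnData are illustrative, not part of
 * this file, and 'vec' stands for a concrete fei::Vector implementation
 * built on this class):
 *
 *   for (...each local element...) {
 *     //contributes element data, possibly for off-proc equations
 *     vec.assembleFieldData(fieldID, idType, numNodes, nodeIDs,
 *                           elemData);          //sumInto=true by default
 *   }
 *   vec.gatherFromOverlap();  //move cached off-proc sums to the owners
 *   vec.scatterToOverlap();   //fetch owned values back into shared copies
 *   vec.copyOutFieldData(fieldID, idType, numNodes, nodeIDs, solnData);
 */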