EpetraExt_MMHelpers.cpp
1 //@HEADER
2 // ***********************************************************************
3 //
4 // EpetraExt: Epetra Extended - Linear Algebra Services Package
5 // Copyright (2011) Sandia Corporation
6 //
7 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
8 // the U.S. Government retains certain rights in this software.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact Michael A. Heroux (maherou@sandia.gov)
38 //
39 // ***********************************************************************
40 //@HEADER
41 
42 #include <EpetraExt_ConfigDefs.h>
43 #include <EpetraExt_MMHelpers.h>
44 #include <Epetra_Comm.h>
45 #include <Epetra_CrsMatrix.h>
46 #include <Epetra_Import.h>
47 #include <Epetra_Export.h>
48 #include <Epetra_Distributor.h>
49 #include <Epetra_HashTable.h>
50 #include <Epetra_Util.h>
51 #include <Epetra_Import_Util.h>
52 #include <Epetra_GIDTypeSerialDenseVector.h>
53 
54 #include <Teuchos_TimeMonitor.hpp>
55 #include <limits>
56 
57 
58 #ifdef HAVE_MPI
59 #include "Epetra_MpiComm.h"
60 #include "Epetra_MpiDistributor.h"
61 #endif
62 #define MIN(x,y) ((x)<(y)?(x):(y))
63 #define MIN3(x,y,z) ((x)<(y)?(MIN(x,z)):(MIN(y,z)))
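// Illustration (editorial, not part of the original file): MIN3 evaluates to the
// smallest of its three arguments, e.g. MIN3(4,7,2) expands to MIN(4,2) == 2.
// It drives the three-way streaming merge of the sorted export lists in
// LightweightCrsMatrix::MakeExportLists() near the end of this file.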
64 
65 namespace EpetraExt {
66 
67 //------------------------------------
68 // DEBUGGING ROUTINES
69 void debug_print_distor(const char * label, const Epetra_Distributor * Distor, const Epetra_Comm & Comm) {
70 #ifdef HAVE_MPI
71  const Epetra_MpiDistributor * MDistor = dynamic_cast<const Epetra_MpiDistributor*>(Distor);
72  printf("[%d] %s\n",Comm.MyPID(),label);
73  printf("[%d] NumSends = %d NumRecvs = %d\n",Comm.MyPID(),MDistor->NumSends(),MDistor->NumReceives());
74  printf("[%d] ProcsTo = ",Comm.MyPID());
75  for(int ii=0; ii<MDistor->NumSends(); ii++)
76  printf("%d ",MDistor->ProcsTo()[ii]);
77  printf("\n[%d] ProcsFrom = ",Comm.MyPID());
78  for(int ii=0; ii<MDistor->NumReceives(); ii++)
79  printf("%d ",MDistor->ProcsFrom()[ii]);
80  printf("\n");
81  fflush(stdout);
82 #endif
83 }
84 
85 //------------------------------------
86 // DEBUGGING ROUTINES
87 void debug_compare_import(const Epetra_Import * Import1,const Epetra_Import * Import2) {
88  if(!Import1 && !Import2) return;
89  const Epetra_Comm & Comm = (Import1)? Import1->SourceMap().Comm() : Import2->SourceMap().Comm();
90  bool flag=true;
91  int PID=Comm.MyPID();
92  if( (!Import1 && Import2) || (Import1 && !Import2) ) {printf("[%d] DCI: One Import exists, the other does not\n",PID);return;}
93  if(!Import1->SourceMap().SameAs(Import2->SourceMap())) {printf("[%d] DCI: SourceMaps don't match\n",PID);return;}
94  if(!Import1->TargetMap().SameAs(Import2->TargetMap())) {printf("[%d] DCI: TargetMaps don't match\n",PID);return;}
95 
96  if(Import1->NumSameIDs() != Import2->NumSameIDs()) {printf("[%d] DCI NumSameIDs() mismatch %d vs. %d\n",PID,Import1->NumSameIDs(),Import2->NumSameIDs());flag=false;}
97 
98  if(Import1->NumPermuteIDs() != Import2->NumPermuteIDs()) {printf("[%d] DCI NumPermuteIDs() mismatch %d vs. %d\n",PID,Import1->NumPermuteIDs(),Import2->NumPermuteIDs()); flag=false;}
99 
100  if(Import1->NumExportIDs() != Import2->NumExportIDs()) {printf("[%d] DCI NumExportIDs() mismatch %d vs. %d\n",PID,Import1->NumExportIDs(),Import2->NumExportIDs()); flag=false;}
101 
102  if(Import1->NumRemoteIDs() != Import2->NumRemoteIDs()) {printf("[%d] DCI NumRemoteIDs() mismatch %d vs. %d\n",PID,Import1->NumRemoteIDs(),Import2->NumRemoteIDs()); flag=false;}
103 
104  if(Import1->NumSend() != Import2->NumSend()) {printf("[%d] DCI NumSend() mismatch %d vs. %d\n",PID,Import1->NumSend(),Import2->NumSend()); flag=false;}
105 
106  if(Import1->NumRecv() != Import2->NumRecv()) {printf("[%d] DCI NumRecv() mismatch %d vs. %d\n",PID,Import1->NumRecv(),Import2->NumRecv()); flag=false;}
107 
108 
109  if(flag) printf("[%d] DCI Importers compare OK\n",PID);
110  fflush(stdout);
111  Import1->SourceMap().Comm().Barrier();
112  Import1->SourceMap().Comm().Barrier();
113  Import1->SourceMap().Comm().Barrier();
114  if(!flag) exit(1);
115 }
116 
117 
118 
119 //------------------------------------
120 CrsMatrixStruct::CrsMatrixStruct()
121  : numRows(0), numEntriesPerRow(NULL), indices(NULL), values(NULL),
122  remote(NULL), numRemote(0), importColMap(NULL), rowMap(NULL), colMap(NULL),
123  domainMap(NULL), importMatrix(NULL), origMatrix(NULL)
124 {
125 }
126 
127 CrsMatrixStruct::~CrsMatrixStruct()
128 {
129  deleteContents();
130 }
131 
132 void CrsMatrixStruct::deleteContents()
133 {
134  numRows = 0;
135  delete [] numEntriesPerRow; numEntriesPerRow = NULL;
136  delete [] indices; indices = NULL;
137  delete [] values; values = NULL;
138  delete [] remote; remote = NULL;
139  delete importColMap; importColMap = NULL;
140  numRemote = 0;
141  delete importMatrix; importMatrix=0;
142  // origMatrix is not owned by me, so don't delete
143  origMatrix=0;
144  targetMapToOrigRow.resize(0);
145  targetMapToImportRow.resize(0);
146 }
147 
148 int dumpCrsMatrixStruct(const CrsMatrixStruct& M)
149 {
150  std::cout << "proc " << M.rowMap->Comm().MyPID()<<std::endl;
151  std::cout << "numRows: " << M.numRows<<std::endl;
152  for(int i=0; i<M.numRows; ++i) {
153  for(int j=0; j<M.numEntriesPerRow[i]; ++j) {
154  if (M.remote[i]) {
155  std::cout << " *"<<M.rowMap->GID64(i)<<" "
156  <<M.importColMap->GID64(M.indices[i][j])<<" "<<M.values[i][j]<<std::endl;
157  }
158  else {
159  std::cout << " "<<M.rowMap->GID64(i)<<" "
160  <<M.colMap->GID64(M.indices[i][j])<<" "<<M.values[i][j]<<std::endl;
161  }
162  }
163  }
164  return(0);
165 }
166 
167 CrsWrapper_Epetra_CrsMatrix::CrsWrapper_Epetra_CrsMatrix(Epetra_CrsMatrix& epetracrsmatrix)
168  : ecrsmat_(epetracrsmatrix)
169 {
170 }
171 
172 CrsWrapper_Epetra_CrsMatrix::~CrsWrapper_Epetra_CrsMatrix()
173 {
174 }
175 
176 const Epetra_Map&
177 CrsWrapper_Epetra_CrsMatrix::RowMap() const
178 {
179  return ecrsmat_.RowMap();
180 }
181 
182 bool CrsWrapper_Epetra_CrsMatrix::Filled()
183 {
184  return ecrsmat_.Filled();
185 }
186 
187 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
188 int
189 CrsWrapper_Epetra_CrsMatrix::InsertGlobalValues(int GlobalRow, int NumEntries, double* Values, int* Indices)
190 {
191  return ecrsmat_.InsertGlobalValues(GlobalRow, NumEntries, Values, Indices);
192 }
193 
194 int
195 CrsWrapper_Epetra_CrsMatrix::SumIntoGlobalValues(int GlobalRow, int NumEntries, double* Values, int* Indices)
196 {
197  return ecrsmat_.SumIntoGlobalValues(GlobalRow, NumEntries, Values, Indices);
198 }
199 #endif
200 
201 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
202 int
203 CrsWrapper_Epetra_CrsMatrix::InsertGlobalValues(long long GlobalRow, int NumEntries, double* Values, long long* Indices)
204 {
205  return ecrsmat_.InsertGlobalValues(GlobalRow, NumEntries, Values, Indices);
206 }
207 
208 int
209 CrsWrapper_Epetra_CrsMatrix::SumIntoGlobalValues(long long GlobalRow, int NumEntries, double* Values, long long* Indices)
210 {
211  return ecrsmat_.SumIntoGlobalValues(GlobalRow, NumEntries, Values, Indices);
212 }
213 #endif
214 
215 
216 //------------------------------------
217 
218 template<typename int_type>
219 CrsWrapper_GraphBuilder<int_type>::CrsWrapper_GraphBuilder(const Epetra_Map& emap)
220  : graph_(),
221  rowmap_(emap),
222  max_row_length_(0)
223 {
224  int num_rows = emap.NumMyElements();
225  int_type* rows = 0;
226  emap.MyGlobalElementsPtr(rows);
227 
228  for(int i=0; i<num_rows; ++i) {
229  graph_[rows[i]] = new std::set<int_type>;
230  }
231 }
232 
233 template<typename int_type>
234 CrsWrapper_GraphBuilder<int_type>::~CrsWrapper_GraphBuilder()
235 {
236  typename std::map<int_type,std::set<int_type>*>::iterator
237  iter = graph_.begin(), iter_end = graph_.end();
238  for(; iter!=iter_end; ++iter) {
239  delete iter->second;
240  }
241 
242  graph_.clear();
243 }
244 
245 template<typename int_type>
246 bool CrsWrapper_GraphBuilder<int_type>::Filled()
247 {
248  return false;
249 }
250 
251 template<typename int_type>
252 int
253 CrsWrapper_GraphBuilder<int_type>::InsertGlobalValues(int_type GlobalRow, int NumEntries, double* /* Values */, int_type* Indices)
254 {
255  typename std::map<int_type,std::set<int_type>*>::iterator
256  iter = graph_.find(GlobalRow);
257 
258  if (iter == graph_.end()) return(-1);
259 
260  std::set<int_type>& cols = *(iter->second);
261 
262  for(int i=0; i<NumEntries; ++i) {
263  cols.insert(Indices[i]);
264  }
265 
266  int row_length = cols.size();
267  if (row_length > max_row_length_) max_row_length_ = row_length;
268 
269  return(0);
270 }
271 
272 template<typename int_type>
273 int
274 CrsWrapper_GraphBuilder<int_type>::SumIntoGlobalValues(int_type GlobalRow, int NumEntries, double* Values, int_type* Indices)
275 {
276  return InsertGlobalValues(GlobalRow, NumEntries, Values, Indices);
277 }
278 
279 template<typename int_type>
280 std::map<int_type,std::set<int_type>*>&
281 CrsWrapper_GraphBuilder<int_type>::get_graph()
282 {
283  return graph_;
284 }
285 
286 template<typename int_type>
287 void insert_matrix_locations(CrsWrapper_GraphBuilder<int_type>& graphbuilder,
288  Epetra_CrsMatrix& C)
289 {
290  int max_row_length = graphbuilder.get_max_row_length();
291  if (max_row_length < 1) return;
292 
293  std::vector<int_type> indices(max_row_length);
294  int_type* indices_ptr = &indices[0];
295  std::vector<double> zeros(max_row_length, 0.0);
296  double* zeros_ptr = &zeros[0];
297 
298  std::map<int_type,std::set<int_type>*>& graph = graphbuilder.get_graph();
299 
300  typename std::map<int_type,std::set<int_type>*>::iterator
301  iter = graph.begin(), iter_end = graph.end();
302 
303  for(; iter!=iter_end; ++iter) {
304  int_type row = iter->first;
305  std::set<int_type>& cols = *(iter->second);
306  int num_entries = cols.size();
307 
308  typename std::set<int_type>::iterator
309  col_iter = cols.begin(), col_end = cols.end();
310  for(int j=0; col_iter!=col_end; ++col_iter, ++j) {
311  indices_ptr[j] = *col_iter;
312  }
313 
314  C.InsertGlobalValues(row, num_entries, zeros_ptr, indices_ptr);
315  }
316 }
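// Usage sketch (editorial and hypothetical, not part of the original file): the
// intended two-pass pattern is to run a kernel's symbolic phase against a
// CrsWrapper_GraphBuilder to collect the nonzero structure, then stamp that
// structure (with zero values) into a real Epetra_CrsMatrix before the numeric pass:
//
//   CrsWrapper_GraphBuilder<int> builder(C.RowMap());  // symbolic-pass target
//   // ... kernel calls builder.InsertGlobalValues(row, n, vals, inds) ...
//   insert_matrix_locations(builder, C);               // allocate C's structure
//   // ... the numeric pass can now use C.SumIntoGlobalValues ...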
317 
318 template<typename int_type>
319 void Tpack_outgoing_rows(const Epetra_CrsMatrix& mtx,
320  const std::vector<int_type>& proc_col_ranges,
321  std::vector<int_type>& send_rows,
322  std::vector<int>& rows_per_send_proc)
323 {
324  const Epetra_Map& rowmap = mtx.RowMap();
325  int numrows = mtx.NumMyRows();
326  const Epetra_CrsGraph& graph = mtx.Graph();
327  int rowlen = 0;
328  int* col_indices = NULL;
329  int_type* Tcol_indices = NULL;
330  int num_col_ranges = proc_col_ranges.size()/2;
331  rows_per_send_proc.resize(num_col_ranges);
332  send_rows.clear();
333  for(int nc=0; nc<num_col_ranges; ++nc) {
334  int_type first_col = proc_col_ranges[nc*2];
335  int_type last_col = proc_col_ranges[nc*2+1];
336  int num_send_rows = 0;
337  for(int i=0; i<numrows; ++i) {
338  int_type grow = (int_type) rowmap.GID64(i);
339  if (mtx.Filled()) {
340  const Epetra_Map& colmap = mtx.ColMap();
341  graph.ExtractMyRowView(i, rowlen, col_indices);
342  for(int j=0; j<rowlen; ++j) {
343  int_type col = (int_type) colmap.GID64(col_indices[j]);
344  if (first_col <= col && last_col >= col) {
345  ++num_send_rows;
346  send_rows.push_back(grow);
347  break;
348  }
349  }
350  }
351  else {
352  graph.ExtractGlobalRowView(grow, rowlen, Tcol_indices);
353  for(int j=0; j<rowlen; ++j) {
354  if (first_col <= Tcol_indices[j] && last_col >= Tcol_indices[j]) {
355  ++num_send_rows;
356  send_rows.push_back(grow);
357  break;
358  }
359  }
360  }
361  }
362  rows_per_send_proc[nc] = num_send_rows;
363  }
364 }
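// Note (editorial, not part of the original file): proc_col_ranges is a flat list
// of inclusive [first_col, last_col] GID pairs, one pair per destination processor.
// A row is packed for destination nc as soon as any of its column GIDs falls inside
// that processor's range, so rows_per_send_proc[nc] counts rows, not matrix entries.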
365 
366 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
367 void pack_outgoing_rows(const Epetra_CrsMatrix& mtx,
368  const std::vector<int>& proc_col_ranges,
369  std::vector<int>& send_rows,
370  std::vector<int>& rows_per_send_proc)
371 {
372  if(mtx.RowMatrixRowMap().GlobalIndicesInt()) {
373  Tpack_outgoing_rows<int>(mtx, proc_col_ranges, send_rows, rows_per_send_proc);
374  }
375  else {
376  throw "EpetraExt::pack_outgoing_rows: Global indices not int";
377  }
378 }
379 #endif
380 
381 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
382 void pack_outgoing_rows(const Epetra_CrsMatrix& mtx,
383  const std::vector<long long>& proc_col_ranges,
384  std::vector<long long>& send_rows,
385  std::vector<int>& rows_per_send_proc)
386 {
387  if(mtx.RowMatrixRowMap().GlobalIndicesLongLong()) {
388  Tpack_outgoing_rows<long long>(mtx, proc_col_ranges, send_rows, rows_per_send_proc);
389  }
390  else {
391  throw "EpetraExt::pack_outgoing_rows: Global indices not long long";
392  }
393 }
394 #endif
395 
396 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
397 template<>
398 std::pair<int,int> get_col_range<int>(const Epetra_Map& emap)
399  {
400  if(emap.GlobalIndicesInt())
401  return std::make_pair(emap.MinMyGID(),emap.MaxMyGID());
402  else
403  throw "EpetraExt::get_col_range<int>: Unknown Global Indices for Epetra_Map";
404 }
405 #endif
406 
407 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
408 template<>
409 std::pair<long long,long long> get_col_range<long long>(const Epetra_Map& emap)
410 {
411  if(emap.GlobalIndicesLongLong())
412  return std::make_pair(emap.MinMyGID64(),emap.MaxMyGID64());
413  else
414  throw "EpetraExt::get_col_range<long long>: Unknown Global Indices for Epetra_Map";
415 }
416 #endif
417 
418 template<typename int_type>
419 std::pair<int_type,int_type> Tget_col_range(const Epetra_CrsMatrix& mtx)
420 {
421  std::pair<int_type,int_type> col_range;
422  if (mtx.Filled()) {
423  col_range = get_col_range<int_type>(mtx.ColMap());
424  }
425  else {
426  const Epetra_Map& row_map = mtx.RowMap();
427  col_range.first = row_map.MaxMyGID64();
428  col_range.second = row_map.MinMyGID64();
429  int rowlen = 0;
430  int_type* col_indices = NULL;
431  const Epetra_CrsGraph& graph = mtx.Graph();
432  for(int i=0; i<row_map.NumMyElements(); ++i) {
433  graph.ExtractGlobalRowView((int_type) row_map.GID64(i), rowlen, col_indices);
434  for(int j=0; j<rowlen; ++j) {
435  if (col_indices[j] < col_range.first) col_range.first = col_indices[j];
436  if (col_indices[j] > col_range.second) col_range.second = col_indices[j];
437  }
438  }
439  }
440 
441  return col_range;
442 }
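// Note (editorial, not part of the original file): in the not-Filled() branch above,
// the range is deliberately seeded inverted (first = MaxMyGID64, second = MinMyGID64)
// so that a processor whose rows contain no entries reports an empty range
// (first > second) rather than a fabricated one.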
443 
444 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
445 template<>
446 std::pair<int,int> get_col_range<int>(const Epetra_CrsMatrix& mtx)
447 {
448  if(mtx.RowMatrixRowMap().GlobalIndicesInt())
449  return Tget_col_range<int>(mtx);
450  else
451  throw "EpetraExt::get_col_range<int>: Unknown Global Indices for Epetra_CrsMatrix";
452 }
453 #endif
454 
455 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
456 template<>
457 std::pair<long long,long long> get_col_range<long long>(const Epetra_CrsMatrix& mtx)
458 {
459  if(mtx.RowMatrixRowMap().GlobalIndicesLongLong())
460  return Tget_col_range<long long>(mtx);
461  else
462  throw "EpetraExt::get_col_range<long long>: Unknown Global Indices for Epetra_CrsMatrix";
463 }
464 #endif
465 
466 
467 /**********************************************************************************************/
468 /**********************************************************************************************/
469 /**********************************************************************************************/
470 #ifdef HAVE_MPI
471 template <typename MyType>
472 void boundary_exchange(const Epetra_MpiComm Comm, MPI_Datatype DataType,
473  int NumSends, const int * SendProcs, const int * SendSizes, MyType* SendBuffer,
474  int NumRecvs, const int * RecvProcs, const int * RecvSizes, MyType* RecvBuffer,int SizeOfPacket,int msg_tag)
475 {
476 
477  MPI_Comm comm = Comm.Comm();
478  std::vector<MPI_Request> requests(NumRecvs);
479  std::vector<MPI_Status> status(NumRecvs);
480 
481  int i,num_waits=0,MyPID=Comm.MyPID();
482  int start, self_recv_len=-1,self_recv_start=-1, self_send_start=-1;
483 
484  // Default send/recv size if the Sizes arrays are NULL.
485  int mysendsize=1, myrecvsize=1;
486 
487  // Post Recvs
488  start=0;
489  for(i=0; i<NumRecvs; i++){
490  if(RecvSizes) myrecvsize=RecvSizes[i]*SizeOfPacket;
491  if(RecvProcs[i] != MyPID) {
492  MPI_Irecv(RecvBuffer + start, myrecvsize, DataType, RecvProcs[i], msg_tag, comm, &requests[num_waits]);
493  num_waits++;
494  }
495  else {
496  self_recv_len = myrecvsize;
497  self_recv_start=start;
498  }
499  start+=myrecvsize;
500  }
501 
502  // Do sends
503  start=0;
504  for(i=0; i<NumSends; i++){
505  if(SendSizes) mysendsize=SendSizes[i]*SizeOfPacket;
506  if(SendProcs[i] != MyPID)
507  MPI_Send(SendBuffer + start, mysendsize,DataType,SendProcs[i],msg_tag,comm);
508  else
509  self_send_start=start;
510  start+=mysendsize;
511  }
512 
513  // Self-copy (if needed)
514  if(self_recv_len != -1)
515  memcpy(RecvBuffer+self_recv_start,SendBuffer+self_send_start,self_recv_len*sizeof(MyType)); // self_recv_len already includes the SizeOfPacket factor
516 
517  // Wait
518  if(NumRecvs > 0)
519  MPI_Waitall(num_waits, &requests[0],&status[0]);
520 }
521 #endif
522 
523 
524 #ifdef HAVE_MPI
525 template <typename MyType>
526 void boundary_exchange_varsize(const Epetra_MpiComm Comm, MPI_Datatype DataType,
527  int NumSends, const int * SendProcs, const int * SendSizes, MyType* SendBuffer,
528  int NumRecvs, const int * RecvProcs, int * RecvSizes, MyType*& RecvBuffer,int SizeOfPacket,int msg_tag)
529 {
530 
531  int i,rbuffersize=0;
532 
533  // Do a first round of boundary exchange with the SendBuffer sizes
534  boundary_exchange<int>(Comm,MPI_INT,NumSends,SendProcs,(int*)0,const_cast<int*>(SendSizes),NumRecvs,RecvProcs,(int*)0,RecvSizes,1,msg_tag);
535 
536  // Allocate the RecvBuffer
537  for(i=0; i<NumRecvs; i++) rbuffersize+=RecvSizes[i]*SizeOfPacket;
538  RecvBuffer = new MyType[rbuffersize];
539 
540  // Do a second round of boundary exchange to trade the actual values
541  boundary_exchange<MyType>(Comm,DataType,NumSends,SendProcs,SendSizes,SendBuffer,NumRecvs,RecvProcs,RecvSizes,RecvBuffer,SizeOfPacket,msg_tag+100);
542 }
543 #endif
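// Note (editorial, not part of the original file): boundary_exchange_varsize is a
// two-round protocol. Round one ships each message's length via boundary_exchange<int>,
// using the SendSizes array itself as the payload; the receiver sums RecvSizes to
// allocate RecvBuffer, and round two ships the actual data on msg_tag+100 so the two
// rounds cannot be confused. The caller owns the new[]'d RecvBuffer and must delete[] it.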
544 
545 
546 //=========================================================================
547 //=========================================================================
548 //=========================================================================
549 LightweightMapData::LightweightMapData():
550  Epetra_Data(),
551  IndexBase_(0),
552 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
553  LIDHash_int_(0),
554 #endif
555 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
556  LIDHash_LL_(0),
557 #endif
558  CopyMap_(0)
559 {
560 }
561 //=========================================================================
562 LightweightMapData::~LightweightMapData() {
563 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
564  delete LIDHash_int_;
565 #endif
566 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
567  delete LIDHash_LL_;
568 #endif
569  delete CopyMap_;
570 }
571 
572 //=========================================================================
573 LightweightMap::LightweightMap():Data_(0),IsLongLong(false),IsInt(false){;}
574 
575 //=========================================================================
576 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
577 void LightweightMap::Construct_int(int /* numGlobalElements */, int numMyElements, const int * myGlobalElements, long long /* indexBase */, bool GenerateHash)
578 {
579  Data_ = new LightweightMapData();
580  Data_->MyGlobalElements_int_.resize(numMyElements);
581 
582  // Build the hash table
583  if(GenerateHash) Data_->LIDHash_int_ = new Epetra_HashTable<int>(numMyElements + 1 );
584  for(int i=0; i < numMyElements; ++i ) {
585  Data_->MyGlobalElements_int_[i] = myGlobalElements[i];
586  if(GenerateHash) Data_->LIDHash_int_->Add(myGlobalElements[i], i);
587  }
588  IsLongLong = false;
589  IsInt = true;
590 }
591 #endif
592 
593 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
594 void LightweightMap::Construct_LL(long long /* numGlobalElements */, int numMyElements, const long long * myGlobalElements, long long /* indexBase */, bool GenerateHash)
595 {
596  Data_ = new LightweightMapData();
597  Data_->MyGlobalElements_LL_.resize(numMyElements);
598 
599  // Build the hash table
600  if(GenerateHash) Data_->LIDHash_LL_ = new Epetra_HashTable<long long>(numMyElements + 1 );
601  for(int i=0; i < numMyElements; ++i ) {
602  Data_->MyGlobalElements_LL_[i] = myGlobalElements[i];
603  if(GenerateHash) Data_->LIDHash_LL_->Add(myGlobalElements[i], i);
604  }
605  IsLongLong = true;
606  IsInt = false;
607 }
608 #endif
609 
610 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
611 LightweightMap::LightweightMap(int numGlobalElements,int numMyElements, const int * myGlobalElements, int indexBase, bool GenerateHash)
612 {
613  Construct_int(numGlobalElements, numMyElements, myGlobalElements, indexBase, GenerateHash);
614 }
615 #endif
616 
617 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
618 LightweightMap::LightweightMap(long long numGlobalElements,int numMyElements, const long long * myGlobalElements, int indexBase, bool GenerateHash)
619 {
620  Construct_LL(numGlobalElements, numMyElements, myGlobalElements, indexBase, GenerateHash);
621 }
622 
623 LightweightMap::LightweightMap(long long numGlobalElements,int numMyElements, const long long * myGlobalElements, long long indexBase, bool GenerateHash)
624 {
625  Construct_LL(numGlobalElements, numMyElements, myGlobalElements, indexBase, GenerateHash);
626 }
627 #endif
628 
629 //=========================================================================
630 LightweightMap::LightweightMap(const Epetra_Map & Map)
631 {
632  Data_ = new LightweightMapData();
633  Data_->CopyMap_=new Epetra_Map(Map);
634  IsLongLong = Map.GlobalIndicesLongLong();
635  IsInt = Map.GlobalIndicesInt();
636 }
637 
638 //=========================================================================
639 LightweightMap::LightweightMap(const LightweightMap& map)
640  : Data_(map.Data_)
641 {
642  Data_->IncrementReferenceCount();
643  IsLongLong = map.IsLongLong;
644  IsInt = map.IsInt;
645 }
646 
647 //=========================================================================
648 LightweightMap::~LightweightMap() {
649  CleanupData();
650 }
651 
652 //=========================================================================
653 LightweightMap & LightweightMap::operator=(const LightweightMap & map)
654 {
655  if((this != &map) && (Data_ != map.Data_)) {
656  CleanupData();
657  Data_ = map.Data_;
658  Data_->IncrementReferenceCount();
659  }
660  IsLongLong = map.IsLongLong;
661  IsInt = map.IsInt;
662  return(*this);
663 }
664 
665 //=========================================================================
666 void LightweightMap::CleanupData(){
667  if(Data_){
668  Data_->DecrementReferenceCount();
669  if(Data_->ReferenceCount() == 0) {
670  delete Data_;
671  }
672  }
673 }
674 
675 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
676 void LightweightMap::MyGlobalElementsPtr(int *& MyGlobalElementList) const {
677  MyGlobalElementList = Data_->MyGlobalElements_int_.size() ? &Data_->MyGlobalElements_int_.front() : 0;
678 }
679 #endif
680 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
681 void LightweightMap::MyGlobalElementsPtr(long long *& MyGlobalElementList) const {
682  MyGlobalElementList = Data_->MyGlobalElements_LL_.size() ? &Data_->MyGlobalElements_LL_.front() : 0;
683 }
684 #endif
685 
686 //=========================================================================
687 int LightweightMap::NumMyElements() const {
688  if(Data_->CopyMap_) return Data_->CopyMap_->NumMyElements();
689 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
690  if(IsInt)
691  return Data_->MyGlobalElements_int_.size();
692  else
693 #endif
694 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
695  if(IsLongLong)
696  return Data_->MyGlobalElements_LL_.size();
697  else
698 #endif
699  throw "EpetraExt::LightweightMap::NumMyElements: Global indices unknown";
700 }
701 
702 //=========================================================================
703 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
704 int LightweightMap::LID(int gid) const {
705  if(Data_->CopyMap_) return Data_->CopyMap_->LID(gid);
706  if(IsInt)
707  return Data_->LIDHash_int_->Get(gid);
708  else if(IsLongLong)
709  throw "EpetraExt::LightweightMap::LID: Int version called for long long map";
710  else
711  throw "EpetraExt::LightweightMap::LID: unknown GID type";
712 }
713 #endif
714 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
715 int LightweightMap::LID(long long gid) const {
716 
717  if(Data_->CopyMap_) return Data_->CopyMap_->LID(gid);
718  if(IsLongLong)
719  return Data_->LIDHash_LL_->Get(gid);
720  else if(IsInt)
721  throw "EpetraExt::LightweightMap::LID: Long long version called for int map";
722  else
723  throw "EpetraExt::LightweightMap::LID: unknown GID type";
724 }
725 #endif
726 
727 //=========================================================================
728 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
729 int LightweightMap::GID(int lid) const {
730  if(Data_->CopyMap_) return Data_->CopyMap_->GID(lid);
731  if(lid < 0 || lid >= (int)Data_->MyGlobalElements_int_.size()) return -1;
732  return Data_->MyGlobalElements_int_[lid];
733 }
734 #endif
735 long long LightweightMap::GID64(int lid) const {
736  if(Data_->CopyMap_) return Data_->CopyMap_->GID64(lid);
737 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
738  if(IsInt) {
739  if(lid < 0 || lid >= (int)Data_->MyGlobalElements_int_.size()) return -1;
740  return Data_->MyGlobalElements_int_[lid];
741  }
742  else
743 #endif
744 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
745  if(IsLongLong) {
746  if(lid < 0 || lid >= (int)Data_->MyGlobalElements_LL_.size()) return -1;
747  return Data_->MyGlobalElements_LL_[lid];
748  }
749  else
750 #endif
751  throw "EpetraExt::LightweightMap::GID64: Global indices unknown.";
752 }
753 
754 //=========================================================================
755 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
756 int* LightweightMap::MyGlobalElements() const {
757  if(Data_->CopyMap_) return Data_->CopyMap_->MyGlobalElements();
758  else if(Data_->MyGlobalElements_int_.size()>0) return const_cast<int*>(&Data_->MyGlobalElements_int_[0]);
759  else return 0;
760 }
761 #endif
762 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
763 long long* LightweightMap::MyGlobalElements64() const {
764  if(Data_->CopyMap_) return Data_->CopyMap_->MyGlobalElements64();
765  else if(Data_->MyGlobalElements_LL_.size()>0) return const_cast<long long*>(&Data_->MyGlobalElements_LL_[0]);
766  else return 0;
767 }
768 #endif
769 
770 //=========================================================================
771 int LightweightMap::MinLID() const {
772  if(Data_->CopyMap_) return Data_->CopyMap_->MinLID();
773  else return 0;
774 }
775 
776 //=========================================================================
777 int LightweightMap::MaxLID() const {
778  if(Data_->CopyMap_) return Data_->CopyMap_->MaxLID();
779  else {
780 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
781  if(IsInt)
782  return Data_->MyGlobalElements_int_.size() - 1;
783  else
784 #endif
785 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
786  if(IsLongLong)
787  return Data_->MyGlobalElements_LL_.size() - 1;
788  else
789 #endif
790  throw "EpetraExt::LightweightMap::MaxLID: Global indices unknown";
791  }
792 }
793 
794 
795 //=========================================================================
796 //=========================================================================
797 //=========================================================================
798 RemoteOnlyImport::RemoteOnlyImport(const Epetra_Import & Importer, LightweightMap & RemoteOnlyTargetMap)
799 {
800  int i;
801 
802  // Build an "Importer" that only takes the remote parts of the Importer.
803  SourceMap_=&Importer.SourceMap();
804  TargetMap_=&RemoteOnlyTargetMap;
805 
806  // Pull data from the Importer
807  NumSend_ = Importer.NumSend();
808  NumRemoteIDs_ = Importer.NumRemoteIDs();
809  NumExportIDs_ = Importer.NumExportIDs();
810  Distor_ = &Importer.Distributor();
811  int * OldRemoteLIDs = Importer.RemoteLIDs();
812  int * OldExportLIDs = Importer.ExportLIDs();
813  int * OldExportPIDs = Importer.ExportPIDs();
814 
815  // Sanity Check
816  if(NumRemoteIDs_ != RemoteOnlyTargetMap.NumMyElements())
817  throw std::runtime_error("RemoteOnlyImport: Importer doesn't match RemoteOnlyTargetMap for number of remotes.");
818 
819  // Copy the ExportIDs_, since they don't change
820  ExportLIDs_ = new int[NumExportIDs_];
821  ExportPIDs_ = new int[NumExportIDs_];
822  for(i=0; i<NumExportIDs_; i++) {
823  ExportLIDs_[i] = OldExportLIDs[i];
824  ExportPIDs_[i] = OldExportPIDs[i];
825  }
826 
827  // The RemoteIDs, on the other hand, do change. So let's do this right.
828  // Note: We might be able to bypass the LID call by just indexing off the Same and Permute GIDs, but at the moment this
829  // is fast enough not to worry about it.
830  RemoteLIDs_ = new int[NumRemoteIDs_];
831  if(TargetMap_->GlobalIndicesInt()) {
832  for(i=0; i<NumRemoteIDs_; i++)
833  RemoteLIDs_[i] = TargetMap_->LID( (int) Importer.TargetMap().GID64(OldRemoteLIDs[i]));
834  }
835  else if(TargetMap_->GlobalIndicesLongLong()) {
836  for(i=0; i<NumRemoteIDs_; i++)
837  RemoteLIDs_[i] = TargetMap_->LID(Importer.TargetMap().GID64(OldRemoteLIDs[i]));
838  }
839  else
840  throw std::runtime_error("RemoteOnlyImport: TargetMap_ index type unknown.");
841 
842  // Now we make sure these guys are in sorted order. AztecOO, ML and all that jazz.
843  for(i=0; i<NumRemoteIDs_-1; i++)
844  if(RemoteLIDs_[i] > RemoteLIDs_[i+1])
845  throw std::runtime_error("RemoteOnlyImport: Importer and RemoteOnlyTargetMap order don't match.");
846 }
847 
848 //=========================================================================
849 RemoteOnlyImport::~RemoteOnlyImport()
850 {
851  delete [] ExportLIDs_;
852  delete [] ExportPIDs_;
853  delete [] RemoteLIDs_;
854  // Don't delete the Distributor, SourceMap_ or TargetMap_ - those were shallow copies
855 }
856 
857 //=========================================================================
858 //=========================================================================
859 //=========================================================================
860 
861 template <class GO>
862 void MakeColMapAndReindexSort(int& NumRemoteColGIDs, GO*& RemoteColindices,
863  std::vector<int>& RemotePermuteIDs, std::vector<int>& RemoteOwningPIDs,bool SortGhostsAssociatedWithEachProcessor_);
864 
865 template <>
866 void MakeColMapAndReindexSort<int>(int& NumRemoteColGIDs, int*& RemoteColindices,
867  std::vector<int>& RemotePermuteIDs, std::vector<int>& RemoteOwningPIDs,bool SortGhostsAssociatedWithEachProcessor_)
868 {
869  // Sort External column indices so that all columns coming from a given remote processor are contiguous
870  int NLists = 2;
871  int* SortLists[3]; // this array is allocated on the stack, and so we won't need to delete it.
872  if(NumRemoteColGIDs > 0) {
873  SortLists[0] = RemoteColindices;
874  SortLists[1] = &RemotePermuteIDs[0];
875  Epetra_Util::Sort(true, NumRemoteColGIDs, &RemoteOwningPIDs[0], 0, 0, NLists, SortLists);
876  }
877 
878 
879  //bool SortGhostsAssociatedWithEachProcessor_ = false;
880  if (SortGhostsAssociatedWithEachProcessor_) {
881  // Sort external column indices so that columns from a given remote processor are not only contiguous
882  // but also in ascending order. NOTE: I don't know if the number of externals associated
883  // with a given remote processor is known at this point ... so I count them here.
884 
885  NLists=1;
886  int StartCurrent, StartNext;
887  StartCurrent = 0; StartNext = 1;
888  while ( StartNext < NumRemoteColGIDs ) {
889  if (RemoteOwningPIDs[StartNext]==RemoteOwningPIDs[StartNext-1]) StartNext++;
890  else {
891  SortLists[0] = &RemotePermuteIDs[StartCurrent];
892  Epetra_Util::Sort(true,StartNext-StartCurrent, &(RemoteColindices[StartCurrent]),0,0,NLists,SortLists);
893  StartCurrent = StartNext; StartNext++;
894  }
895  }
896  SortLists[0] = &RemotePermuteIDs[StartCurrent];
897  Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColindices[StartCurrent]), 0, 0, NLists, SortLists);
898  }
899 }
900 
901 template <>
902 void MakeColMapAndReindexSort<long long>(int& NumRemoteColGIDs, long long*& RemoteColindices,
903  std::vector<int>& RemotePermuteIDs, std::vector<int>& RemoteOwningPIDs,bool SortGhostsAssociatedWithEachProcessor_)
904 {
905  // Sort External column indices so that all columns coming from a given remote processor are contiguous
906  if(NumRemoteColGIDs > 0) {
907  long long* SortLists_LL[1] = {RemoteColindices};
908  int* SortLists_int[1] = {&RemotePermuteIDs[0]};
909  Epetra_Util::Sort(true, NumRemoteColGIDs, &RemoteOwningPIDs[0], 0, 0, 1, SortLists_int, 1, SortLists_LL);
910  }
911 
912  // bool SortGhostsAssociatedWithEachProcessor_ = false;
913  if (SortGhostsAssociatedWithEachProcessor_) {
914  // Sort external column indices so that columns from a given remote processor are not only contiguous
915  // but also in ascending order. NOTE: I don't know if the number of externals associated
916  // with a given remote processor is known at this point ... so I count them here.
917 
918  int NLists=1;
919  int StartCurrent, StartNext;
920  StartCurrent = 0; StartNext = 1;
921  while ( StartNext < NumRemoteColGIDs ) {
922  if (RemoteOwningPIDs[StartNext]==RemoteOwningPIDs[StartNext-1]) StartNext++;
923  else {
924  int* SortLists[1] = {&RemotePermuteIDs[StartCurrent]};
925  Epetra_Util::Sort(true,StartNext-StartCurrent, &(RemoteColindices[StartCurrent]),0,0,NLists,SortLists, 0, 0);
926  StartCurrent = StartNext; StartNext++;
927  }
928  }
929  int* SortLists[1] = {&RemotePermuteIDs[StartCurrent]};
930  Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColindices[StartCurrent]), 0, 0, NLists, SortLists, 0, 0);
931  }
932 }
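// Note (editorial, not part of the original file): both specializations above do the
// same job -- sort the remote column GIDs by owning PID, carrying the permute array
// along as an Epetra_Util::Sort companion, then optionally sort each PID's contiguous
// block by GID. They differ only in whether the GID array rides in the int or the
// long long companion slots of Epetra_Util::Sort.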
933 
934 template <class GO>
935 int LightweightCrsMatrix::MakeColMapAndReindex(std::vector<int> owningPIDs, std::vector<GO> Gcolind,bool SortGhosts,const char * label)
936 {
937 #ifdef ENABLE_MMM_TIMINGS
938  std::string tpref;
939  if(label) tpref = std::string(label);
940  using Teuchos::TimeMonitor;
941  Teuchos::RCP<Teuchos::TimeMonitor> MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS-3.1"))));
942 #else
943  (void)label;
944 #endif
945 
946  // Scan all column indices and sort into two groups:
947  // Local: those whose GID matches a GID of the domain map on this processor and
948  // Remote: All others.
949  int numDomainElements = DomainMap_.NumMyElements();
950  std::vector<bool> LocalGIDs(numDomainElements,false);
951 
952  bool DoSizes = !DomainMap_.ConstantElementSize(); // If not constant element size, then error
953  if(DoSizes) EPETRA_CHK_ERR(-1);
954 
955  // In principle it is good to have RemoteGIDs and RemoteGIDList be as long as the number of remote GIDs
956  // on this processor, but this would require two passes through the column IDs, so we make it the max of 100
957  // and the number of block rows.
958  int numMyBlockRows;
959  if(use_lw) numMyBlockRows = RowMapLW_->NumMyElements();
960  else numMyBlockRows = RowMapEP_->NumMyElements();
961 
962  int hashsize = numMyBlockRows; if (hashsize < 100) hashsize = 100;
963  Epetra_HashTable<GO> RemoteGIDs(hashsize);
964  std::vector<GO> RemoteGIDList; RemoteGIDList.reserve(hashsize);
965  std::vector<int> RemoteOwningPIDs; RemoteOwningPIDs.reserve(hashsize);
966 
967  // In order to do the map reindexing inexpensively, we clobber the GIDs during this pass. For *local* GIDs we clobber them
968  // with their LID in the domainMap. For *remote* GIDs, we clobber them with (numDomainElements+NumRemoteColGIDs) before the increment of
969  // the remote count. These numberings never collide, because every local LID is strictly less than numDomainElements.
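// Worked example (editorial, not part of the original file): with numDomainElements == 4,
// a row whose column GIDs are {10, 42, 10, 99}, where 10 and 42 are local with domain
// LIDs 2 and 0 and 99 is the first remote GID encountered, ends up with colind_ entries
// {2, 0, 2, 4}: locals hold their domain-map LID (< 4), remotes hold numDomainElements
// plus the running remote count (>= 4).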
970  int NumLocalColGIDs = 0;
971  int NumRemoteColGIDs = 0;
972  for(int i = 0; i < numMyBlockRows; i++) {
973  for(int j = rowptr_[i]; j < rowptr_[i+1]; j++) {
974  GO GID = Gcolind[j];
975  // Check if GID matches a row GID
976  int LID = DomainMap_.LID(GID);
977  if(LID != -1) {
978  bool alreadyFound = LocalGIDs[LID];
979  if (!alreadyFound) {
980  LocalGIDs[LID] = true; // There is a column in the graph associated with this domain map GID
981  NumLocalColGIDs++;
982  }
983  colind_[j] = LID;
984  }
985  else {
986  GO hash_value=RemoteGIDs.Get(GID);
987  if(hash_value == -1) { // This means its a new remote GID
988  int PID = owningPIDs[j];
989  if(PID==-1) printf("[%d] ERROR: Remote PID should not be -1\n",DomainMap_.Comm().MyPID());
990  colind_[j] = numDomainElements + NumRemoteColGIDs;
991  RemoteGIDs.Add(GID, NumRemoteColGIDs);
992  RemoteGIDList.push_back(GID);
993  RemoteOwningPIDs.push_back(PID);
994  NumRemoteColGIDs++;
995  }
996  else
997  colind_[j] = numDomainElements + hash_value;
998  }
999  }
1000  }
1001 
1002  // Possible short-circuit: If all domain map GIDs are present as column indices, then set ColMap=domainMap and quit
1003  if (DomainMap_.Comm().NumProc()==1) {
1004  if (NumRemoteColGIDs!=0) {
1005  throw "Some column IDs are not in domainMap. If matrix is rectangular, you must pass in domainMap to FillComplete";
1006  // Sanity test: when there is only one processor, there can be no remote GIDs
1007  }
1008  if (NumLocalColGIDs==numDomainElements) {
1009  ColMap_ = DomainMap_;
1010 
1011  // In this case, we just use the domainMap's indices, which is, not coincidentally, what we clobbered colind_ with up above anyway.
1012  // No further reindexing is needed.
1013  return(0);
1014  }
1015  }
1016 
1017  // Now build integer array containing column GIDs
1018  // Build back end, containing remote GIDs, first
1019  int numMyBlockCols = NumLocalColGIDs + NumRemoteColGIDs;
1020  typename Epetra_GIDTypeSerialDenseVector<GO>::impl Colindices;
1021  if(numMyBlockCols > 0)
1022  Colindices.Size(numMyBlockCols);
1023  GO* RemoteColindices = Colindices.Values() + NumLocalColGIDs; // Points to back end of Colindices
1024 
1025  for(int i = 0; i < NumRemoteColGIDs; i++)
1026  RemoteColindices[i] = RemoteGIDList[i];
1027 
1028  // Build permute array for *remote* reindexing.
1029  std::vector<int> RemotePermuteIDs(NumRemoteColGIDs);
1030  for(int i=0; i<NumRemoteColGIDs; i++) RemotePermuteIDs[i]=i;
1031 
1032  MakeColMapAndReindexSort<GO>(NumRemoteColGIDs, RemoteColindices, RemotePermuteIDs, RemoteOwningPIDs,SortGhosts);
1033 
1034  // Reverse the permutation to get the information we actually care about
1035  std::vector<int> ReverseRemotePermuteIDs(NumRemoteColGIDs);
1036  for(int i=0; i<NumRemoteColGIDs; i++) ReverseRemotePermuteIDs[RemotePermuteIDs[i]]=i;
1037 
1038  // Build permute array for *local* reindexing.
1039  bool use_local_permute=false;
1040  std::vector<int> LocalPermuteIDs(numDomainElements);
1041 
1042  // Now fill front end. Two cases:
1043  // (1) If the number of Local column GIDs is the same as the number of Local domain GIDs, we
1044  // can simply read the domain GIDs into the front part of Colindices, otherwise
1045  // (2) We step through the GIDs of the domainMap, checking to see if each domain GID is a column GID.
1046  // We want to do this to maintain a consistent ordering of GIDs between the columns and the domain.
1047 
1048  if(NumLocalColGIDs == DomainMap_.NumMyElements()) {
1049  DomainMap_.MyGlobalElements(Colindices.Values()); // Load Global Indices into first numMyBlockCols elements column GID list
1050  }
1051  else {
1052  GO* MyGlobalElements = 0;
1053  DomainMap_.MyGlobalElementsPtr(MyGlobalElements);
1054  int NumLocalAgain = 0;
1055  use_local_permute = true;
1056  for(int i = 0; i < numDomainElements; i++) {
1057  if(LocalGIDs[i]) {
1058  LocalPermuteIDs[i] = NumLocalAgain;
1059  Colindices[NumLocalAgain++] = MyGlobalElements[i];
1060  }
1061  }
1062  assert(NumLocalAgain==NumLocalColGIDs); // Sanity test
1063  }
1064 
1065 
1066  // Copy the remote PID list correctly
1067  ColMapOwningPIDs_.resize(numMyBlockCols);
1068  ColMapOwningPIDs_.assign(numMyBlockCols,DomainMap_.Comm().MyPID());
1069  for(int i=0;i<NumRemoteColGIDs;i++)
1070  ColMapOwningPIDs_[NumLocalColGIDs+i] = RemoteOwningPIDs[i];
1071 
1072 #ifdef ENABLE_MMM_TIMINGS
1073  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS-3.2"))));
1074 #endif
1075 
1076  // Make Column map with same element sizes as Domain map
1077  LightweightMap temp((GO) -1, numMyBlockCols, Colindices.Values(), (GO) DomainMap_.IndexBase64());
1078  ColMap_ = temp;
1079 
1080 
1081 #ifdef ENABLE_MMM_TIMINGS
1082  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS-3.3"))));
1083 #endif
1084 
1085  // Low-cost reindex of the matrix
1086  for(int i=0; i<numMyBlockRows; i++){
1087  for(int j=rowptr_[i]; j<rowptr_[i+1]; j++){
1088  int ID=colind_[j];
1089  if(ID < numDomainElements){
1090  if(use_local_permute) colind_[j] = LocalPermuteIDs[colind_[j]];
1091  // In the case where use_local_permute==false, we just copy the DomainMap's ordering, which it so happens
1092  // is what we put in colind_ to begin with.
1093  }
1094  else
1095  colind_[j] = NumLocalColGIDs + ReverseRemotePermuteIDs[colind_[j]-numDomainElements];
1096  }
1097  }
1098 
1099  assert((size_t)ColMap_.NumMyElements() == ColMapOwningPIDs_.size());
1100 
1101  return(0);
1102 }
1103 
1104 
1105 // Unused, file-local function that doesn't need to exist.
1106 #if 0
1107 //=========================================================================
1108 // Template params are <PID,GID>
1109 static inline bool lessthan12(std::pair<int,int> i, std::pair<int,int> j){
1110  return ((i.first<j.first) || (i.first==j.first && i.second <j.second));
1111 }
1112 #endif // 0
1113 
1114 
1115 template<typename ImportType, typename int_type>
1116 int LightweightCrsMatrix::PackAndPrepareReverseComm(const Epetra_CrsMatrix & SourceMatrix, ImportType & RowImporter,
1117  std::vector<int> &ReverseSendSizes, std::vector<int_type> &ReverseSendBuffer) {
1118 #ifdef HAVE_MPI
1119  // Buffer pairs are in (PID,GID) order
1120  int i,j,k;
1121  const Epetra_Import *MyImporter= SourceMatrix.Importer();
1122  if(MyImporter == 0) return -1;
1123  const Epetra_MpiComm * MpiComm = dynamic_cast<const Epetra_MpiComm*>(&SourceMatrix.Comm());
1124  int MyPID = MpiComm->MyPID();
1125 
1126  // Things related to messages I am sending in forward mode (RowImporter)
1127  int NumExportIDs = RowImporter.NumExportIDs();
1128  int* ExportLIDs = RowImporter.ExportLIDs();
1129  int* ExportPIDs = RowImporter.ExportPIDs();
1130 
1131  // Things related to messages I am sending in reverse mode (MyImporter)
1132  Epetra_Distributor& Distor = MyImporter->Distributor();
1133  const Epetra_MpiDistributor * MDistor = dynamic_cast<Epetra_MpiDistributor*>(&Distor);
1134  int NumRecvs = MDistor->NumReceives();
1135  int* RemoteLIDs = MyImporter->RemoteLIDs();
1136  const int * ProcsFrom = MDistor->ProcsFrom();
1137  const int * LengthsFrom = MDistor->LengthsFrom();
1138 
1139 
1140  // Get the owning PIDs in a special way: RemotePIDOrder[RemoteLIDs[j]] records the position i (in
1141  // ProcsFrom ordering) of the processor that owns RemoteLIDs[j].
1142  std::vector<int> RemotePIDOrder(SourceMatrix.NumMyCols(),-1);
1143 
1144  // Now, for each remote ID, record which processor (in ProcsFrom ordering) owns it.
1145  for(i=0,j=0;i<NumRecvs;i++){
1146  for(k=0;k<LengthsFrom[i];k++){
1147  int pid=ProcsFrom[i];
1148  if(pid!=MyPID) RemotePIDOrder[RemoteLIDs[j]]=i;
1149  j++;
1150  }
1151  }
1152 
1153  // Step One: Start tacking the (GID,PID) pairs on the std sets
1154  std::vector<std::set<std::pair<int,int_type> > > ReversePGIDs(NumRecvs);
1155  int *rowptr, *colind;
1156  double *vals;
1157  EPETRA_CHK_ERR(SourceMatrix.ExtractCrsDataPointers(rowptr,colind,vals));
1158 
1159 
1160  // Loop over each exported row and add to the temp list
1161  for(i=0; i < NumExportIDs; i++) {
1162  int lid = ExportLIDs[i];
1163  int exp_pid = ExportPIDs[i];
1164  for(j=rowptr[lid]; j<rowptr[lid+1]; j++){
1165  int pid_order = RemotePIDOrder[colind[j]];
1166  if(pid_order!=-1) {
1167  int_type gid = (int_type) SourceMatrix.GCID64(colind[j]);
1168  // This GID is getting shipped off somewhere
1169  ReversePGIDs[pid_order].insert(std::pair<int,int_type>(exp_pid,gid));
1170  }
1171  }
1172  }
1173 
1174  // Step 2: Count sizes (one too large to avoid std::vector errors)
1175  ReverseSendSizes.resize(NumRecvs+1);
1176  int totalsize=0;
1177  for(i=0; i<NumRecvs; i++) {
1178  ReverseSendSizes[i] = 2*ReversePGIDs[i].size();
1179  totalsize += ReverseSendSizes[i];
1180  }
1181 
1182  // Step 3: Alloc and fill the send buffer (one too large to avoid std::vector errors)
1183  ReverseSendBuffer.resize(totalsize+1);
1184  for(i=0, j=0; i<NumRecvs; i++) {
1185  for(typename std::set<std::pair<int,int_type> >::iterator it=ReversePGIDs[i].begin(); it!=ReversePGIDs[i].end(); it++) {
1186  ReverseSendBuffer[j] = it->first;
1187  ReverseSendBuffer[j+1] = it->second;
1188  j+=2;
1189  }
1190  }
1191 #endif
1192 
1193  return 0;
1194 }
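// Note (editorial, not part of the original file): the reverse-communication payload is
// a flat buffer of (PID, GID) pairs, which is why each ReverseSendSizes entry is twice
// the per-destination set size. The extra +1 slot in each resize only guards against
// taking the address of element 0 of an empty std::vector.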
1195 
1196 //=========================================================================
1197 
1198 template<typename int_type>
1199 void build_type3_exports_sort(std::vector<int_type>& ExportGID3, std::vector<int> &ExportPID3, int total_length3);
1200 
1201 template<>
1202 void build_type3_exports_sort<int>(std::vector<int>& ExportGID3, std::vector<int> &ExportPID3, int total_length3)
1203 {
1204  int * companion = &ExportGID3[0];
1205  Epetra_Util::Sort(true,total_length3,&ExportPID3[0],0,0,1,&companion,0,0);
1206 }
1207 
1208 template<>
1209 void build_type3_exports_sort<long long>(std::vector<long long>& ExportGID3, std::vector<int> &ExportPID3, int total_length3)
1210 {
1211  long long * companion = &ExportGID3[0];
1212  Epetra_Util::Sort(true,total_length3,&ExportPID3[0],0,0,0,0,1,&companion);
1213 }
1214 
1215 template<typename int_type>
1216 int build_type3_exports(int MyPID,int Nrecv, Epetra_BlockMap &DomainMap, std::vector<int> &ReverseRecvSizes, const int_type *ReverseRecvBuffer, std::vector<int> &ExportLID3, std::vector<int> &ExportPID3){
1217  int i,j;
1218 
1219  // Estimate total length of procs_to for Type 3
1220  int total_length3=0;
1221  for(i=0; i<Nrecv; i++)
1222  total_length3+=ReverseRecvSizes[i]/2;
1223  if(total_length3==0) return 0;
1224 
1225  std::vector<int_type> ExportGID3(total_length3);
1226  ExportLID3.resize(total_length3);
1227  ExportPID3.resize(total_length3);
1228 
1229  // Build a sorted colmap-style list for Type3 (removing any self-sends)
1230  for(i=0,j=0; i<2*total_length3; i+=2) {
1231  if(ReverseRecvBuffer[i] != MyPID){
1232  ExportPID3[j]=ReverseRecvBuffer[i];
1233  ExportGID3[j]=ReverseRecvBuffer[i+1];
1234  j++;
1235  }
1236  }
1237  total_length3=j;
1238 
1239  if(total_length3==0) return 0;
1240 
1241  // Sort (ala Epetra_CrsGraph)
1242  build_type3_exports_sort<int_type>(ExportGID3, ExportPID3, total_length3);
1243  int StartCurrent, StartNext;
1244  StartCurrent = 0; StartNext = 1;
1245  while ( StartNext < total_length3 ) {
1246  if(ExportPID3[StartNext] == ExportPID3[StartNext-1]) StartNext++;
1247  else {
1248  Epetra_Util::Sort(true,StartNext-StartCurrent, &(ExportGID3[StartCurrent]),0,0,0,0, 0, 0);
1249  StartCurrent = StartNext; StartNext++;
1250  }
1251  }
1252  Epetra_Util::Sort(true,StartNext-StartCurrent, &(ExportGID3[StartCurrent]),0,0,0,0, 0, 0);
1253 
1254 
1255  /* printf("[%d] Type 3 Sorted= ",MyComm.MyPID());
1256  for(i=0; i<total_length3; i++)
1257  printf("(--,%2d,%2d) ",ExportGID3[i],ExportPID3[i]);
1258  printf("\n");
1259  fflush(stdout);*/
1260 
1261 
1262  // Uniq & resize
1263  for(i=1,j=1; i<total_length3; i++){
1264  if(ExportPID3[i]!=ExportPID3[i-1] || ExportGID3[i]!=ExportGID3[i-1]){
1265  ExportPID3[j] = ExportPID3[i];
1266  ExportGID3[j] = ExportGID3[i];
1267  j++;
1268  }
1269  }
1270  ExportPID3.resize(j);
1271  ExportLID3.resize(j);
1272  total_length3=j;
1273 
1274  /* printf("[%d] Type 3 UNIQ = ",MyComm.MyPID());
1275  for(i=0; i<total_length3; i++)
1276  printf("(--,%2d,%2d) ",ExportGID3[i],ExportPID3[i]);
1277  printf("\n");
1278  fflush(stdout);*/
1279 
1280 
1281 
1282  // Now index down to LIDs
1283  for(i=0; i<total_length3; i++) {
1284  ExportLID3[i]=DomainMap.LID(ExportGID3[i]);
1285  if(ExportLID3[i] < 0) throw std::runtime_error("LightweightCrsMatrix:MakeExportLists invalid LID");
1286  }
1287 
1288  /* printf("[%d] Type 3 FINAL = ",MyComm.MyPID());
1289  for(i=0; i<total_length3; i++)
1290  printf("(%2d,%2d,%2d) ",ExportLID3[i],ExportGID3[i],ExportPID3[i]);
1291  printf("\n");
1292  fflush(stdout);*/
1293 
1294  return total_length3;
1295 }
1296 
1297 //=========================================================================
1298 template<typename ImportType, typename int_type>
1299 int build_type2_exports(const Epetra_CrsMatrix & SourceMatrix, ImportType & MyImporter, std::vector<int> &ExportLID2, std::vector<int> &ExportPID2){
1300  int total_length2=0;
1301 #ifdef HAVE_MPI
1302  int i,j;
1303  const Epetra_Import *SourceImporter= SourceMatrix.Importer();
1304 
1305  int *rowptr, *colind;
1306  double *vals;
1307  EPETRA_CHK_ERR(SourceMatrix.ExtractCrsDataPointers(rowptr,colind,vals));
1308 
1309  // Things related to messages I am sending in forward mode (MyImporter)
1310  int NumExportIDs = MyImporter.NumExportIDs();
1311  const int* ExportLIDs = MyImporter.ExportLIDs();
1312  const int* ExportPIDs = MyImporter.ExportPIDs();
1313  if(NumExportIDs==0) return 0;
1314 
1315  Epetra_Distributor& Distor = MyImporter.Distributor();
1316  const Epetra_MpiDistributor * MDistor = dynamic_cast<Epetra_MpiDistributor*>(&Distor);
1317 
1318  // Assume I own all the cols, then flag any cols I don't own
1319  // This allows us to avoid LID calls later...
1320  std::vector<bool> IsOwned(SourceMatrix.NumMyCols(),true);
1321  if(SourceImporter) {
1322  const int * RemoteLIDs = SourceImporter->RemoteLIDs();
1323  // Now flag the cols I don't own
1324  for(i=0; i<SourceImporter->NumRemoteIDs(); i++)
1325  IsOwned[RemoteLIDs[i]]=false;
1326  }
1327 
1328  // Status vector
1329  std::vector<int> SentTo(SourceMatrix.NumMyCols(),-1);
1330 
1331  // Initial allocation: NumUnknownsSent * max row size (guaranteed to be too large)
1332  for(i=0,total_length2=0; i<MDistor->NumSends(); i++) total_length2+= MDistor->LengthsTo()[i] * SourceMatrix.MaxNumEntries();
1333  std::vector<int_type> ExportGID2(total_length2);
1334 
1335  ExportLID2.resize(total_length2);
1336  ExportPID2.resize(total_length2);
1337 
1338  int current=0, last_start=0, last_pid=ExportPIDs[0];
1339  for(i=0; i<NumExportIDs; i++){
1340  // For each row I have to send via MyImporter...
1341  int row=ExportLIDs[i];
1342  int pid=ExportPIDs[i];
1343 
1344  if(i!=0 && pid>last_pid) {
1345  // We have a new PID, so lets finish up the current one
1346  if(current!=last_start){
1347  int *lids = &ExportLID2[last_start];
1348  Epetra_Util::Sort(true,current-last_start,&ExportGID2[last_start],0,0,1,&lids,0,0);
1349  // Note: we don't need to sort the ExportPIDs since they're the same since last_start
1350  }
1351  // Reset the list
1352  last_pid=pid;
1353  last_start=current;
1354  }
1355  else if(pid < last_pid) {
1356  throw std::runtime_error("build_type2_exports: ExportPIDs are not sorted!");
1357  }
1358 
1359  for(j=rowptr[row]; j<rowptr[row+1]; j++) {
1360  // For each column in that row...
1361  int col=colind[j];
1362  if(IsOwned[col] && SentTo[col]!=pid){
1363  // We haven't added this guy to the list yet.
1364  if(current>= total_length2) throw std::runtime_error("build_type2_exports: More export ids than I thought!");
1365  SentTo[col] = pid;
1366  ExportGID2[current] = (int_type) SourceMatrix.GCID64(col);
1367  ExportLID2[current] = SourceMatrix.DomainMap().LID(ExportGID2[current]);
1368  ExportPID2[current] = pid;
1369  current++;
1370  }
1371  }
1372  }//end main loop
1373 
1374  // Final Sort
1375  int *lids = ExportLID2.size() > (std::size_t) last_start ? &ExportLID2[last_start] : 0;
1376  Epetra_Util::Sort(true,current-last_start,ExportGID2.size() > (std::size_t) last_start ? &ExportGID2[last_start] : 0,0,0,1,&lids,0,0);
1377  // Note: we don't need to sort the ExportPIDs since they're the same since last_start
1378 
1379  total_length2=current;
1380  ExportLID2.resize(total_length2);
1381  ExportPID2.resize(total_length2);
1382 #endif
1383  return total_length2;
1384 }
1385 
1386 //=========================================================================
1387 template<typename int_type>
1388 void build_type1_exports_sort(std::vector<int> &ExportLID1, std::vector<int> &ExportPID1, std::vector<int_type>& ExportGID1, int total_length1);
1389 
1390 template<>
1391 void build_type1_exports_sort<int>(std::vector<int> &ExportLID1, std::vector<int> &ExportPID1, std::vector<int>& ExportGID1, int total_length1){
1392  int * companion[2] = {&ExportLID1[0],&ExportGID1[0]};
1393  Epetra_Util::Sort(true,total_length1,&ExportPID1[0],0,0,2,&companion[0],0,0);
1394 }
1395 
1396 template<>
1397 void build_type1_exports_sort<long long>(std::vector<int> &ExportLID1, std::vector<int> &ExportPID1, std::vector<long long>& ExportGID1, int total_length1){
1398  int * companion = &ExportLID1[0];
1399  long long * companion64 = &ExportGID1[0];
1400  Epetra_Util::Sort(true,total_length1,&ExportPID1[0],0,0,1,&companion,1,&companion64);
1401 }
1402 
1403 template<typename int_type>
1404 int build_type1_exports(const Epetra_Import * Importer1, std::vector<int> &ExportLID1, std::vector<int> &ExportPID1){
1405  int i, total_length1=0;
1406  if(!Importer1) return 0;
1407  total_length1 = Importer1->NumSend();
1408  if(total_length1==0) return 0;
1409 
1410  std::vector<int_type> ExportGID1(total_length1);
1411  ExportLID1.resize(total_length1);
1412  ExportPID1.resize(total_length1);
1413  const int * ExportLID1Base = Importer1->ExportLIDs();
1414  const int * ExportPID1Base = Importer1->ExportPIDs();
1415 
1416  for(i=0; i<total_length1; i++){
1417  ExportLID1[i] = ExportLID1Base[i];
1418  ExportPID1[i] = ExportPID1Base[i];
1419  ExportGID1[i] = (int_type) Importer1->SourceMap().GID64(ExportLID1Base[i]);
1420  }
1421 
1422  // Sort (ala Epetra_CrsGraph)
1423  build_type1_exports_sort<int_type>(ExportLID1, ExportPID1, ExportGID1, total_length1);
1424 
1425  int StartCurrent, StartNext;
1426  StartCurrent = 0; StartNext = 1;
1427  while ( StartNext < total_length1 ) {
1428  if(ExportPID1[StartNext] == ExportPID1[StartNext-1]) StartNext++;
1429  else {
1430  int *new_companion = &ExportLID1[StartCurrent];
1431  Epetra_Util::Sort(true,StartNext-StartCurrent, &(ExportGID1[StartCurrent]),0,0,1,&new_companion, 0, 0);
1432  StartCurrent = StartNext; StartNext++;
1433  }
1434  }
1435  int *new_companion = &ExportLID1[StartCurrent];
1436  Epetra_Util::Sort(true,StartNext-StartCurrent, &(ExportGID1[StartCurrent]),0,0,1,&new_companion, 0, 0);
1437  return total_length1;
1438 }
1439 
1440 //=========================================================================
1441 template<typename ImportType, typename int_type>
1442 int LightweightCrsMatrix::MakeExportLists(const Epetra_CrsMatrix & SourceMatrix, ImportType & Importer2,
1443  std::vector<int> &ReverseRecvSizes, const int_type *ReverseRecvBuffer,
1444  std::vector<int> & ExportPIDs, std::vector<int> & ExportLIDs) {
1445 #ifdef HAVE_MPI
1446  int MyPID = SourceMatrix.Comm().MyPID();
1447 
1448  // This could (legitimately) be zero, in which case we don't have any ReverseRecvs either.
1449  const Epetra_Import *Importer1= SourceMatrix.Importer();
1450 
1451  // So for each entry in the DomainMap, I have to answer the question: Do I need to send this to anybody? And if so, to whom?
1452  //
1453  // This information comes from three sources:
1454  // 1) IDs in my DomainMap that are in someone else's ColMap (e.g. SourceMatrix.Importer()).
1455  // 2) IDs that I own that I sent to another proc in the Forward communication round (e.g. RowImporter).
1456  // 3) IDs that someone else sent on during the Forward round (and they told me about during the reverse round).
1457  //
1458  // Any of these could legitimately be null.
1459  Epetra_MpiDistributor * Distor1 = (Importer1)?(dynamic_cast<Epetra_MpiDistributor*>(&Importer1->Distributor())):0;
1460 
1461  int Nsend1 = (Distor1)?(Distor1->NumSends()):0; // Also the number of messages we'll need to parse through in build_type3_exports
1462 
1463  std::vector<int> ExportPID3;
1464  std::vector<int> ExportLID3;
1465 
1466  std::vector<int> ExportPID2;
1467  std::vector<int> ExportLID2;
1468 
1469  std::vector<int> ExportPID1;
1470  std::vector<int> ExportLID1;
1471 
1472  // Build (sorted) ExportLID / ExportGID list for Type
1473  int Len1=build_type1_exports<int_type>(Importer1, ExportLID1, ExportPID1);
1474  int Len2=build_type2_exports<ImportType, int_type>(SourceMatrix, Importer2, ExportLID2, ExportPID2);
1475  int Len3=build_type3_exports(MyPID,Nsend1,DomainMap_,ReverseRecvSizes, ReverseRecvBuffer, ExportLID3, ExportPID3);
1476 
1477  // Since everything should now be sorted correctly, we can do a streaming merge of the three Export lists...
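// Note (editorial, not part of the original file): each of the three lists is sorted by
// (PID, GID), so each pass of the merge loop below takes MIN3 of the three head PIDs,
// then the smallest head GID among the lists tied at that PID, emits one
// (ExportLID, ExportPID) pair, and advances every list whose head matches -- which is
// how an ID appearing on more than one list is emitted exactly once.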
1478 #ifdef HAVE_EPETRAEXT_DEBUG
1479  {
1480  int i;
1481  // Now we sanity check the 1 & 2 lists
1482  bool test_passed=true;
1483  for(i=1; i<Len1; i++) {
1484  if(ExportPID1[i] < ExportPID1[i-1] || (ExportPID1[i] == ExportPID1[i-1] && DomainMap_.GID(ExportLID1[i]) < DomainMap_.GID(ExportLID1[i-1])))
1485  test_passed=false;
1486  }
1487  SourceMatrix.Comm().Barrier();
1488  if(!test_passed) {
1489  printf("[%d] Type1 ERRORS = ",SourceMatrix.Comm().MyPID());
1490  for(int i=0; i<Len1; i++)
1491  printf("(%2d,%2d,%2d) ",ExportLID1[i],DomainMap_.GID(ExportLID1[i]),ExportPID1[i]);
1492  printf("\n");
1493  fflush(stdout);
1494  throw std::runtime_error("Importer1 fails the sanity test");
1495  }
1496 
1497  for(i=1; i<Len2; i++) {
1498  if(ExportPID2[i] < ExportPID2[i-1] || (ExportPID2[i] == ExportPID2[i-1] && DomainMap_.GID(ExportLID2[i]) < DomainMap_.GID(ExportLID2[i-1])))
1499  test_passed=false;
1500  }
1501 
1502  SourceMatrix.Comm().Barrier();
1503  if(!test_passed) {
1504  printf("[%d] Type2 ERRORS = ",SourceMatrix.Comm().MyPID());
1505  for(int i=0; i<Len2; i++)
1506  printf("(%2d,%2d,%2d) ",ExportLID2[i],DomainMap_.GID(ExportLID2[i]),ExportPID2[i]);
1507  printf("\n");
1508  fflush(stdout);
1509  throw std::runtime_error("Importer2 fails the sanity test");
1510  }
1511 
1512  for(i=1; i<Len3; i++) {
1513  if(ExportPID3[i] < ExportPID3[i-1] || (ExportPID3[i] == ExportPID3[i-1] && DomainMap_.GID(ExportLID3[i]) < DomainMap_.GID(ExportLID3[i-1])))
1514  test_passed=false;
1515  }
1516 
1517  SourceMatrix.Comm().Barrier();
1518  if(!test_passed) {
1519  printf("[%d] Type3 ERRORS = ",SourceMatrix.Comm().MyPID());
1520  for(int i=0; i<Len3; i++)
1521  printf("(%2d,%2d,%2d) ",ExportLID3[i],DomainMap_.GID(ExportLID3[i]),ExportPID3[i]);
1522  printf("\n");
1523  fflush(stdout);
1524  throw std::runtime_error("Importer3 fails the sanity test");
1525  }
1526 
1527 
1528  }
1529 #endif
1530 
1531  if(Importer1 && !Importer1->SourceMap().SameAs(DomainMap_))
1532  throw std::runtime_error("ERROR: Map Mismatch Importer1");
1533 
1534  if(!Importer2.SourceMap().SameAs(SourceMatrix.RowMap()))
1535  throw std::runtime_error("ERROR: Map Mismatch Importer2");
1536 
1537  int_type InfGID = std::numeric_limits<int_type>::max();
1538  int InfPID = INT_MAX;
1539 
1540  int i1=0, i2=0, i3=0, current=0;
1541 
1542  int MyLen=Len1+Len2+Len3;
1543  ExportLIDs.resize(MyLen);
1544  ExportPIDs.resize(MyLen);
1545 
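  // Illustration (hypothetical lists): the loop below repeatedly takes the smallest
  // (PID,GID) pair across the three sorted lists, using (InfPID,InfGID) as a
  // sentinel for exhausted lists and added_entry to drop duplicates, e.g.:
  //   list1 = (1,3) (2,5)   list2 = (1,3) (2,9)   list3 = (2,5) (4,1)
  //   merged (PID,GID) = (1,3) (2,5) (2,9) (4,1)   [duplicates emitted once]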
1546  while(i1 < Len1 || i2 < Len2 || i3 < Len3){
1547  int PID1 = (i1<Len1)?(ExportPID1[i1]):InfPID;
1548  int PID2 = (i2<Len2)?(ExportPID2[i2]):InfPID;
1549  int PID3 = (i3<Len3)?(ExportPID3[i3]):InfPID;
1550 
1551  int_type GID1 = (i1<Len1)?((int_type) DomainMap_.GID64(ExportLID1[i1])):InfGID;
1552  int_type GID2 = (i2<Len2)?((int_type) DomainMap_.GID64(ExportLID2[i2])):InfGID;
1553  int_type GID3 = (i3<Len3)?((int_type) DomainMap_.GID64(ExportLID3[i3])):InfGID;
1554 
1555  int MIN_PID = MIN3(PID1,PID2,PID3);
1556  int_type MIN_GID = MIN3( ((PID1==MIN_PID)?GID1:InfGID), ((PID2==MIN_PID)?GID2:InfGID), ((PID3==MIN_PID)?GID3:InfGID));
1557  bool added_entry=false;
1558 
1559  // Case 1: Add off list 1
1560  if(PID1 == MIN_PID && GID1 == MIN_GID){
1561  ExportLIDs[current] = ExportLID1[i1];
1562  ExportPIDs[current] = ExportPID1[i1];
1563  current++;
1564  i1++;
1565  added_entry=true;
1566  }
1567 
1568  // Case 2: Add off list 2
1569  if(PID2 == MIN_PID && GID2 == MIN_GID){
1570  if(!added_entry) {
1571  ExportLIDs[current] = ExportLID2[i2];
1572  ExportPIDs[current] = ExportPID2[i2];
1573  current++;
1574  added_entry=true;
1575  }
1576  i2++;
1577  }
1578 
1579  // Case 3: Add off list 3
1580  if(PID3 == MIN_PID && GID3 == MIN_GID){
1581  if(!added_entry) {
1582  ExportLIDs[current] = ExportLID3[i3];
1583  ExportPIDs[current] = ExportPID3[i3];
1584  current++;
1585  }
1586  i3++;
1587  }
1588  }// end while
1589  if(current!=MyLen) {
1590  ExportLIDs.resize(current);
1591  ExportPIDs.resize(current);
1592  }
1593 #endif
1594  return 0;
1595 }
1596 
1597 //=========================================================================
1598 template<typename ImportType, typename int_type>
1599 void LightweightCrsMatrix::Construct(const Epetra_CrsMatrix & SourceMatrix, ImportType & RowImporter,bool SortGhosts,const char * label)
1600 {
1601  // Do we need to use long long for GCIDs?
1602 
1603 #ifdef ENABLE_MMM_TIMINGS
1604  std::string tpref;
1605  if(label) tpref = std::string(label);
1606  using Teuchos::TimeMonitor;
1607  Teuchos::RCP<Teuchos::TimeMonitor> MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1"))));
1608 #else
1609  (void)label;
1610 #endif
1611 
1612  // Fused constructor, import & FillComplete
1613  int rv=0;
1614  int N;
1615  if(use_lw) N = RowMapLW_->NumMyElements();
1616  else N = RowMapEP_->NumMyElements();
1617 
1618  int MyPID = SourceMatrix.Comm().MyPID();
1619 
1620 #ifdef HAVE_MPI
1621  std::vector<int> ReverseSendSizes;
1622  std::vector<int_type> ReverseSendBuffer;
1623  std::vector<int> ReverseRecvSizes;
1624  int_type * ReverseRecvBuffer=0;
1625 #endif
1626 
1627  bool communication_needed = RowImporter.SourceMap().DistributedGlobal();
1628 
1629  // The basic algorithm here is:
1630  // 1) Call Distor.Do to handle the import.
1631  // 2) Copy all the Imported and Copy/Permuted data into the raw Epetra_CrsMatrix / Epetra_CrsGraphData pointers, still using GIDs.
1632  // 3) Call an optimized version of MakeColMap that avoids the Directory lookups (since the importer knows who owns all the gids) AND
1633  // reindexes to LIDs.
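  // Illustration (hypothetical 2-row result): steps 2 and 3 leave the matrix in raw
  // CSR form, first with global column indices and then reindexed to local ones:
  //   rowptr_ = {0,2,5}                row i spans [rowptr_[i], rowptr_[i+1])
  //   colind_ = {10,42, 7,10,99}       GIDs after step 2, LIDs after step 3
  //   vals_   = {1.0,2.0, 3.0,4.0,5.0}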
1634 
1635  // Sanity Check
1636  if (!SourceMatrix.RowMap().SameAs(RowImporter.SourceMap()))
1637  throw "LightweightCrsMatrix: Fused copy constructor requires Importer.SourceMap() to match SourceMatrix.RowMap()";
1638 
1639  // Get information from the Importer
1640  int NumSameIDs = RowImporter.NumSameIDs();
1641  int NumPermuteIDs = RowImporter.NumPermuteIDs();
1642  int NumRemoteIDs = RowImporter.NumRemoteIDs();
1643  int NumExportIDs = RowImporter.NumExportIDs();
1644  int* ExportLIDs = RowImporter.ExportLIDs();
1645  int* RemoteLIDs = RowImporter.RemoteLIDs();
1646  int* PermuteToLIDs = RowImporter.PermuteToLIDs();
1647  int* PermuteFromLIDs = RowImporter.PermuteFromLIDs();
1648 
1649 #ifdef HAVE_MPI
1650  Epetra_Distributor& Distor = RowImporter.Distributor();
1651  const Epetra_MpiComm * MpiComm = dynamic_cast<const Epetra_MpiComm*>(&SourceMatrix.Comm());
1652  const Epetra_MpiDistributor * MDistor = dynamic_cast<Epetra_MpiDistributor*>(&Distor);
1653 #endif
1654 
1655  // Allocate memory
1656  rowptr_.resize(N+1);
1657 
1658 
1659  // Owning PIDs
1660  std::vector<int> SourcePids;
1661  std::vector<int> TargetPids;
1662 
1663 
1664  /***************************************************/
1665  /***** 1) From Epetra_DistObject::DoTransfer() *****/
1666  /***************************************************/
1667  // rv=SourceMatrix.CheckSizes(SourceMatrix);
1668 
1669  // NTS (note to self): add the CheckSizes logic here.
1670  if(rv) throw "LightweightCrsMatrix: Fused copy constructor failed in CheckSizes()";
1671 
1672  // Buffers & Other Relevant Info
1673  char* Exports_ = 0;
1674  char* Imports_ = 0;
1675  int LenImports_ =0;
1676  int LenExports_ = 0;
1677  int *Sizes_ = 0;
1678 
1679  int SizeOfPacket;
1680  bool VarSizes = false;
1681  if( NumExportIDs > 0) {
1682  Sizes_ = new int[NumExportIDs];
1683  }
1684 
1685 
1686  // Get the owning PIDs
1687  const Epetra_Import *MyImporter= SourceMatrix.Importer();
1688  if(MyImporter) Epetra_Util::GetPids(*MyImporter,SourcePids,false);
1689  else {
1690  SourcePids.resize(SourceMatrix.ColMap().NumMyElements());
1691  SourcePids.assign(SourceMatrix.ColMap().NumMyElements(),SourceMatrix.Comm().MyPID());
1692  }
1693 
1694 #ifdef ENABLE_MMM_TIMINGS
1695  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1.1 Forward Pack"))));
1696 #endif
1697 
1698  // Pack & Prepare w/ owning PIDs
1699  rv=Epetra_Import_Util::PackAndPrepareWithOwningPIDs(SourceMatrix, // PackAndPrepare with reverse comm is needed for Tpetra. cbl
1700  NumExportIDs,ExportLIDs,
1701  LenExports_,Exports_,SizeOfPacket,
1702  Sizes_,VarSizes,SourcePids);
1703  if(rv) throw "LightweightCrsMatrix: Fused copy constructor failed in PackAndPrepare()";
1704 
1705 
1706 #ifdef ENABLE_MMM_TIMINGS
1707  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1.2 Reverse"))));
1708 #endif
1709 
1710  if (communication_needed) {
1711 #ifdef HAVE_MPI
1712  // Do the exchange of remote data
1713  const int * ExportPIDs = RowImporter.ExportPIDs();
1714 
1715  // Use the fact that the export procs are sorted to avoid building a hash table.
1716  // NOTE: The +1's on the message size lists are to avoid std::vector problems if a proc has no sends or recvs.
1717  std::vector<int> SendSizes(MDistor->NumSends()+1,0);
1718  for(int i=0, curr_pid=0; i<NumExportIDs; i++) {
1719  if(i>0 && ExportPIDs[i] > ExportPIDs[i-1]) curr_pid++;
1720  SendSizes[curr_pid] +=Sizes_[i];
1721 
1722  // sanity check
1723  if(i>0 && ExportPIDs[i] < ExportPIDs[i-1]) throw "ExportPIDs not sorted";
1724  }
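  // Illustration (hypothetical data): since ExportPIDs is sorted, per-destination
  // byte counts fall out of one linear pass. With ExportPIDs = {2,2,5} and
  // Sizes_ = {10,20,30}, the loop above yields SendSizes = {30,30}, bumping
  // curr_pid exactly when the PID changes (the extra +1 slot stays 0).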
1725 
1726 #ifdef ENABLE_MMM_TIMINGS
1727  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1.2 Forward Send"))));
1728 #endif
1729 
1730  std::vector<int> RecvSizes(MDistor->NumReceives()+1);
1731  int msg_tag=MpiComm->GetMpiTag();
1732  boundary_exchange_varsize<char>(*MpiComm,MPI_CHAR,MDistor->NumSends(),MDistor->ProcsTo(),SendSizes.size() ? &SendSizes[0] : 0,Exports_,
1733  MDistor->NumReceives(),MDistor->ProcsFrom(),RecvSizes.size() ? &RecvSizes[0] : 0,Imports_,SizeOfPacket,msg_tag); // cbl
1734 
1735  // If the source matrix doesn't have an importer, then nobody sent data belonging to me in the forward round.
1736  if(SourceMatrix.Importer()) {
1737  Epetra_Import* SourceImporter=const_cast<Epetra_Import*>(SourceMatrix.Importer());
1738  const Epetra_MpiDistributor * MyDistor = dynamic_cast<Epetra_MpiDistributor*>(&SourceImporter->Distributor());
1739 
1740 
1741 #ifdef ENABLE_MMM_TIMINGS
1742  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1.3 Reverse Pack"))));
1743 #endif
1744 
1745  // Setup the reverse communication
1746  // Note: Buffer pairs are in (PID,GID) order
1747  PackAndPrepareReverseComm<ImportType, int_type>(SourceMatrix,RowImporter,ReverseSendSizes,ReverseSendBuffer); // cbl
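  // Illustration (hypothetical values): the flattened buffer is read as consecutive
  // (PID,GID) pairs, e.g. {2,101, 2,119, 5,42} says "proc 2 ultimately needs GIDs
  // 101 and 119, and proc 5 needs GID 42".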
1748 
1749 #ifdef ENABLE_MMM_TIMINGS
1750  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-1.4 Reverse Send"))));
1751 #endif
1752 
1753  // Do the reverse communication
1754  // NOTE: Make the vector one too large to avoid std::vector errors
1755  ReverseRecvSizes.resize(MyDistor->NumSends()+1);
1756  const int msg_tag2 = MpiComm->GetMpiTag ();
1757  MPI_Datatype data_type = sizeof(int_type) == 4 ? MPI_INT : MPI_LONG_LONG;
1758  boundary_exchange_varsize<int_type> (*MpiComm, data_type, MyDistor->NumReceives (), // cbl
1759  MyDistor->ProcsFrom (),
1760  ReverseSendSizes.size() ? &ReverseSendSizes[0] : 0,
1761  ReverseSendBuffer.size() ? &ReverseSendBuffer[0] : 0,
1762  MyDistor->NumSends (), MyDistor->ProcsTo (),
1763  ReverseRecvSizes.size() ? &ReverseRecvSizes[0] : 0,
1764  ReverseRecvBuffer, 1, msg_tag2);
1765  }
1766 #endif
1767  }
1768 
1769  if(rv) throw "LightweightCrsMatrix: Fused copy constructor failed in Distor.Do";
1770 
1771  /*********************************************************************/
1772  /**** 2) Copy all of the Same/Permute/Remote data into CSR_arrays ****/
1773  /*********************************************************************/
1774 #ifdef ENABLE_MMM_TIMINGS
1775  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-2"))));
1776 #endif
1777 
1778  // Count nnz
1779  int mynnz = Epetra_Import_Util::UnpackWithOwningPIDsCount(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_);
1780  // Presume the rowptr_ is the right size
1781  // Allocate colind_ & vals_ arrays
1782  colind_.resize(mynnz);
1783 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1784  if(sizeof(int_type) == sizeof(long long))
1785  colind_LL_.resize(mynnz);
1786 #endif
1787  vals_.resize(mynnz);
1788 
1789  // Reset the Source PIDs (now with -1 rule)
1790  if(MyImporter) Epetra_Util::GetPids(*MyImporter,SourcePids,true);
1791  else {
1792  SourcePids.assign(SourceMatrix.ColMap().NumMyElements(),-1);
1793  }
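  // Illustration (hypothetical PIDs): under the minus-one rule, locally owned
  // columns are tagged -1 instead of MyPID. SourcePids = {-1,-1,3,7} marks the
  // first two ColMap entries as local and the last two as owned by procs 3 and 7,
  // letting the column-map construction below tell local from remote columns.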
1794 
1795  // Unpack into arrays
1796  int * myrowptr = rowptr_.size() ? & rowptr_[0] : 0;
1797  double * myvals = vals_.size() ? & vals_[0] : 0;
1798 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1799  if(sizeof(int_type) == sizeof(int)) {
1800  int * mycolind = colind_.size() ? & colind_[0] : 0;
1801  Epetra_Import_Util::UnpackAndCombineIntoCrsArrays(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_,N,mynnz,MyPID,myrowptr,mycolind,myvals,SourcePids,TargetPids);
1802  }
1803  else
1804 #endif
1805 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1806  if(sizeof(int_type) == sizeof(long long)) {
1807  long long * mycolind = colind_LL_.size() ? & colind_LL_[0] : 0;
1808  Epetra_Import_Util::UnpackAndCombineIntoCrsArrays(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_,N,mynnz,MyPID,myrowptr,mycolind,myvals,SourcePids,TargetPids);
1809  }
1810  else
1811 #endif
1812  throw "EpetraExt::LightweightCrsMatrix::Construct: sizeof(int_type) error.";
1813 
1814  /**************************************************************/
1815  /**** 3) Call Optimized MakeColMap w/ no Directory Lookups ****/
1816  /**************************************************************/
1817 #ifdef ENABLE_MMM_TIMINGS
1818  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-3"))));
1819 #endif
1820 
1821  //Call an optimized version of MakeColMap that avoids the Directory lookups (since the importer knows who owns all the gids).
1822  MakeColMapAndReindex<int_type>(TargetPids,getcolind<int_type>(),SortGhosts);
1823 
1824  /********************************************/
1825  /**** 4) Make Export Lists for Import ****/
1826  /********************************************/
1827 #ifdef HAVE_MPI
1828  MakeExportLists<ImportType, int_type>(SourceMatrix,RowImporter,ReverseRecvSizes,ReverseRecvBuffer,ExportPIDs_,ExportLIDs_);
1829 #endif
1830 
1831  /********************************************/
1832  /**** 5) Sort the entries ****/
1833  /********************************************/
1834  // NOTE: If we have no entries, taking &v[0] on an empty std::vector will make the STL die in debug mode, hence the size checks below.
1835 #ifdef ENABLE_MMM_TIMINGS
1836  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-4"))));
1837 #endif
1838  if(N>0) Epetra_Util::SortCrsEntries(N, &rowptr_[0], colind_.size() ? &colind_[0] : 0, vals_.size() ? &vals_[0] : 0);
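  // Illustration (one hypothetical row): SortCrsEntries orders the column indices
  // within each row and keeps the values paired with them:
  //   before: colind = {7,2,5}, vals = {1.0,2.0,3.0}
  //   after:  colind = {2,5,7}, vals = {2.0,3.0,1.0}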
1839 
1840  /********************************************/
1841  /**** 6) Cleanup ****/
1842  /********************************************/
1843 #ifdef ENABLE_MMM_TIMINGS
1844  MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS C-5"))));
1845 #endif
1846 
1847 #ifdef HAVE_MPI
1848  delete [] ReverseRecvBuffer;
1849 #endif
1850 
1851  delete [] Exports_;
1852  delete [] Imports_;
1853  delete [] Sizes_;
1854 
1855  }// end fused copy constructor
1856 
1857 
1858 
1859 
1860 //=========================================================================
1861 LightweightCrsMatrix::LightweightCrsMatrix(const Epetra_CrsMatrix & SourceMatrix, RemoteOnlyImport & RowImporter, bool SortGhosts,const char * label):
1862  use_lw(true),
1863  RowMapLW_(0),
1864  RowMapEP_(0),
1865  DomainMap_(SourceMatrix.DomainMap())
1866 {
1867 #ifdef ENABLE_MMM_TIMINGS
1868  std::string tpref;
1869  if(label) tpref = std::string(label);
1870  using Teuchos::TimeMonitor;
1871  Teuchos::RCP<Teuchos::TimeMonitor> MM = Teuchos::rcp(new TimeMonitor(*TimeMonitor::getNewTimer(tpref + std::string("EpetraExt: LWCRS Total"))));
1872 #endif
1873 
1874  RowMapLW_= new LightweightMap(RowImporter.TargetMap());
1875 
1876 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1877  if(SourceMatrix.RowMatrixRowMap().GlobalIndicesInt()) {
1878  Construct<RemoteOnlyImport, int>(SourceMatrix,RowImporter,SortGhosts,label);
1879  }
1880  else
1881 #endif
1882 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1883  if(SourceMatrix.RowMatrixRowMap().GlobalIndicesLongLong()) {
1884  Construct<RemoteOnlyImport, long long>(SourceMatrix,RowImporter,SortGhosts,label);
1885  }
1886  else
1887 #endif
1888  throw "EpetraExt::LightweightCrsMatrix: ERROR, GlobalIndices type unknown.";
1889 
1890 }
1891 
1892 
1893 //=========================================================================
1894 LightweightCrsMatrix::LightweightCrsMatrix(const Epetra_CrsMatrix & SourceMatrix, Epetra_Import & RowImporter):
1895  use_lw(false),
1896  RowMapLW_(0),
1897  RowMapEP_(0),
1898  DomainMap_(SourceMatrix.DomainMap())
1899 {
1900  RowMapEP_= new Epetra_BlockMap(RowImporter.TargetMap());
1901 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1902  if(SourceMatrix.RowMatrixRowMap().GlobalIndicesInt()) {
1903  Construct<Epetra_Import, int>(SourceMatrix,RowImporter);
1904  }
1905  else
1906 #endif
1907 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1908  if(SourceMatrix.RowMatrixRowMap().GlobalIndicesLongLong()) {
1909  Construct<Epetra_Import, long long>(SourceMatrix,RowImporter);
1910  }
1911  else
1912 #endif
1913  throw "EpetraExt::LightweightCrsMatrix: ERROR, GlobalIndices type unknown.";
1914 }
1915 
1916 
1917 LightweightCrsMatrix::~LightweightCrsMatrix() {
1918  delete RowMapLW_;
1919  delete RowMapEP_;
1920 }
1921 
1922 
1923 
1924 //=========================================================================
1925 // Prints MMM-style statistics on communication done with an Import or Export object
1926 template <class TransferType>
1927 void TPrintMultiplicationStatistics(TransferType* Transfer, const std::string &label) {
1928  if(!Transfer) return;
1929 #ifdef HAVE_MPI
1930  const Epetra_MpiDistributor & Distor = dynamic_cast<Epetra_MpiDistributor&>(Transfer->Distributor());
1931  const Epetra_Comm & Comm = Transfer->SourceMap().Comm();
1932 
1933  int rows_send = Transfer->NumExportIDs();
1934  int rows_recv = Transfer->NumRemoteIDs();
1935 
1936  int round1_send = Transfer->NumExportIDs() * sizeof(int);
1937  int round1_recv = Transfer->NumRemoteIDs() * sizeof(int);
1938  int num_send_neighbors = Distor.NumSends();
1939  int num_recv_neighbors = Distor.NumReceives();
1940  int round2_send, round2_recv;
1941  Distor.GetLastDoStatistics(round2_send,round2_recv);
1942 
1943  int myPID = Comm.MyPID();
1944  int NumProcs = Comm.NumProc();
1945 
1946  // Processor by processor statistics
1947  // printf("[%d] %s Statistics: neigh[s/r]=%d/%d rows[s/r]=%d/%d r1bytes[s/r]=%d/%d r2bytes[s/r]=%d/%d\n",
1948  // myPID, label.c_str(),num_send_neighbors,num_recv_neighbors,rows_send,rows_recv,round1_send,round1_recv,round2_send,round2_recv);
1949 
1950  // Global statistics
1951  int lstats[8] = {num_send_neighbors,num_recv_neighbors,rows_send,rows_recv,round1_send,round1_recv,round2_send,round2_recv};
1952  int gstats_min[8], gstats_max[8];
1953 
1954  double lstats_avg[8], gstats_avg[8];
1955  for(int i=0; i<8; i++)
1956  lstats_avg[i] = ((double)lstats[i])/NumProcs;
1957 
1958  Comm.MinAll(lstats,gstats_min,8);
1959  Comm.MaxAll(lstats,gstats_max,8);
1960  Comm.SumAll(lstats_avg,gstats_avg,8);
1961 
1962  if(!myPID) {
1963  printf("%s Send Statistics[min/avg/max]: neigh=%d/%4.1f/%d rows=%d/%4.1f/%d round1=%d/%4.1f/%d round2=%d/%4.1f/%d\n", label.c_str(),
1964  (int)gstats_min[0],gstats_avg[0],(int)gstats_max[0], (int)gstats_min[2],gstats_avg[2],(int)gstats_max[2],
1965  (int)gstats_min[4],gstats_avg[4],(int)gstats_max[4], (int)gstats_min[6],gstats_avg[6],(int)gstats_max[6]);
1966  printf("%s Recv Statistics[min/avg/max]: neigh=%d/%4.1f/%d rows=%d/%4.1f/%d round1=%d/%4.1f/%d round2=%d/%4.1f/%d\n", label.c_str(),
1967  (int)gstats_min[1],gstats_avg[1],(int)gstats_max[1], (int)gstats_min[3],gstats_avg[3],(int)gstats_max[3],
1968  (int)gstats_min[5],gstats_avg[5],(int)gstats_max[5], (int)gstats_min[7],gstats_avg[7],(int)gstats_max[7]);
1969  }
1970 
1971 #endif
1972 }
1973 
1974 
1975 void printMultiplicationStatistics(Epetra_Import* Transfer, const std::string &label) {
1976  TPrintMultiplicationStatistics(Transfer,label);
1977 }
1978 
1979 void printMultiplicationStatistics(Epetra_Export* Transfer, const std::string &label) {
1980  TPrintMultiplicationStatistics(Transfer,label);
1981 }
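// Usage sketch (hypothetical transfer object, label, and numbers): every rank
// contributes its local counts and rank 0 prints two min/avg/max summary lines, e.g.
//   printMultiplicationStatistics(MyImport, "MMM I&X");
// might print something like
//   MMM I&X Send Statistics[min/avg/max]: neigh=1/ 1.5/2 rows=10/12.0/14 round1=40/48.0/56 round2=800/960.0/1120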
1982 
1983 
1984 
1985 
1986 }//namespace EpetraExt
1987 
1988 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1990 #endif
1991 
1992 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1994 #endif