Epetra_CrsMatrix.cpp
1 /*
2 //@HEADER
3 // ************************************************************************
4 //
5 // Epetra: Linear Algebra Services Package
6 // Copyright 2011 Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact Michael A. Heroux (maherou@sandia.gov)
39 //
40 // ************************************************************************
41 //@HEADER
42 */
43 
44 #include "Epetra_ConfigDefs.h"
45 #include "Epetra_CrsMatrix.h"
46 #include "Epetra_Map.h"
47 #include "Epetra_Import.h"
48 #include "Epetra_Export.h"
49 #include "Epetra_Vector.h"
50 #include "Epetra_MultiVector.h"
51 #include "Epetra_Comm.h"
52 #include "Epetra_SerialComm.h"
53 #include "Epetra_Distributor.h"
54 #include "Epetra_OffsetIndex.h"
55 #include "Epetra_BLAS_wrappers.h"
56 
58 #include "Epetra_CrsGraphData.h"
59 #include "Epetra_HashTable.h"
60 #include "Epetra_Util.h"
61 #include "Epetra_Import_Util.h"
62 #include "Epetra_IntVector.h"
63 
64 #include <cstdlib>
65 #include <typeinfo>
66 
67 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
68 # include "Teuchos_VerboseObject.hpp"
69 bool Epetra_CrsMatrixTraceDumpMultiply = false;
70 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
71 
72 #ifdef HAVE_EPETRA_TEUCHOS
73 // Define this macro to see some timers for some of these functions
74 # define EPETRA_CRSMATRIX_TEUCHOS_TIMERS
75 #endif
76 
77 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
78 # include "Teuchos_TimeMonitor.hpp"
79 #endif
80 
81 #if defined(Epetra_ENABLE_MKL_SPARSE) && defined(Epetra_ENABLE_CASK)
82 #error Error: Epetra_ENABLE_MKL_SPARSE and Epetra_ENABLE_CASK both defined.
83 #endif
84 
85 #ifdef Epetra_ENABLE_MKL_SPARSE
86 #include "mkl_spblas.h"
87 #endif
88 
89 //==============================================================================
90 Epetra_CrsMatrix::Epetra_CrsMatrix(Epetra_DataAccess CV, const Epetra_Map& rowMap, const int* NumEntriesPerRow, bool StaticProfile)
91  : Epetra_DistObject(rowMap, "Epetra::CrsMatrix"),
92  Epetra_CompObject(),
93  Epetra_BLAS(),
94  Graph_(CV, rowMap, NumEntriesPerRow, StaticProfile),
95  Allocated_(false),
96  StaticGraph_(false),
97  UseTranspose_(false),
98  constructedWithFilledGraph_(false),
99  matrixFillCompleteCalled_(false),
100  StorageOptimized_(false),
101  Values_(0),
102  Values_alloc_lengths_(0),
103  All_Values_(0),
104  NormInf_(0.0),
105  NormOne_(0.0),
106  NormFrob_(0.0),
107  NumMyRows_(rowMap.NumMyPoints()),
108  ImportVector_(0),
109  ExportVector_(0),
110  CV_(CV),
111  squareFillCompleteCalled_(false)
112 {
113  InitializeDefaults();
114  Allocate();
115 }
116 
117 //==============================================================================
118 Epetra_CrsMatrix::Epetra_CrsMatrix(Epetra_DataAccess CV, const Epetra_Map& rowMap, int NumEntriesPerRow, bool StaticProfile)
119  : Epetra_DistObject(rowMap, "Epetra::CrsMatrix"),
120  Epetra_CompObject(),
121  Epetra_BLAS(),
122  Graph_(CV, rowMap, NumEntriesPerRow, StaticProfile),
123  Allocated_(false),
124  StaticGraph_(false),
125  UseTranspose_(false),
126  constructedWithFilledGraph_(false),
127  matrixFillCompleteCalled_(false),
128  StorageOptimized_(false),
129  Values_(0),
130  Values_alloc_lengths_(0),
131  All_Values_(0),
132  NormInf_(0.0),
133  NormOne_(0.0),
134  NormFrob_(0.0),
135  NumMyRows_(rowMap.NumMyPoints()),
136  ImportVector_(0),
137  ExportVector_(0),
138  CV_(CV),
139  squareFillCompleteCalled_(false)
140 {
141  InitializeDefaults();
142  Allocate();
143 }
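//==============================================================================
// Illustrative usage sketch (not part of the Epetra source): how the Copy-mode
// constructor above is typically paired with InsertGlobalValues and
// FillComplete to assemble a tridiagonal matrix. The helper name
// example_AssembleTridiag and its arguments are assumptions made for the sketch.
static int example_AssembleTridiag(const Epetra_Comm& comm, int numGlobalRows)
{
  Epetra_Map rowMap(numGlobalRows, 0, comm);     // contiguous row distribution
  Epetra_CrsMatrix A(Copy, rowMap, 3);           // at most 3 nonzeros per row

  for (int lrow = 0; lrow < rowMap.NumMyElements(); ++lrow) {
    int grow = rowMap.GID(lrow);
    double vals[3];
    int    cols[3];
    int    n = 0;
    if (grow > 0)                 { vals[n] = -1.0; cols[n++] = grow - 1; }
    vals[n] = 2.0; cols[n++] = grow;               // diagonal entry
    if (grow < numGlobalRows - 1) { vals[n] = -1.0; cols[n++] = grow + 1; }
    EPETRA_CHK_ERR(A.InsertGlobalValues(grow, n, vals, cols));
  }
  EPETRA_CHK_ERR(A.FillComplete());                // build col map, sort, merge
  return 0;
}
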
144 //==============================================================================
145 Epetra_CrsMatrix::Epetra_CrsMatrix(Epetra_DataAccess CV, const Epetra_Map& rowMap,
146  const Epetra_Map& colMap, const int* NumEntriesPerRow, bool StaticProfile)
147  : Epetra_DistObject(rowMap, "Epetra::CrsMatrix"),
148  Epetra_CompObject(),
149  Epetra_BLAS(),
150  Graph_(CV, rowMap, colMap, NumEntriesPerRow, StaticProfile),
151  Allocated_(false),
152  StaticGraph_(false),
153  UseTranspose_(false),
154  constructedWithFilledGraph_(false),
155  matrixFillCompleteCalled_(false),
156  StorageOptimized_(false),
157  Values_(0),
158  Values_alloc_lengths_(0),
159  All_Values_(0),
160  NormInf_(0.0),
161  NormOne_(0.0),
162  NormFrob_(0.0),
163  NumMyRows_(rowMap.NumMyPoints()),
164  ImportVector_(0),
165  ExportVector_(0),
166  CV_(CV),
167  squareFillCompleteCalled_(false)
168 {
169  InitializeDefaults();
170  Allocate();
171 }
172 
173 //==============================================================================
174 Epetra_CrsMatrix::Epetra_CrsMatrix(Epetra_DataAccess CV, const Epetra_Map& rowMap,
175  const Epetra_Map& colMap, int NumEntriesPerRow, bool StaticProfile)
176  : Epetra_DistObject(rowMap, "Epetra::CrsMatrix"),
177  Epetra_CompObject(),
178  Epetra_BLAS(),
179  Graph_(CV, rowMap, colMap, NumEntriesPerRow, StaticProfile),
180  Allocated_(false),
181  StaticGraph_(false),
182  UseTranspose_(false),
183  constructedWithFilledGraph_(false),
184  matrixFillCompleteCalled_(false),
185  StorageOptimized_(false),
186  Values_(0),
187  Values_alloc_lengths_(0),
188  All_Values_(0),
189  NormInf_(0.0),
190  NormOne_(0.0),
191  NormFrob_(0.0),
192  NumMyRows_(rowMap.NumMyPoints()),
193  ImportVector_(0),
194  ExportVector_(0),
195  CV_(CV),
196  squareFillCompleteCalled_(false)
197 {
198  if(!rowMap.GlobalIndicesTypeMatch(colMap))
199  throw ReportError("Epetra_CrsMatrix::Epetra_CrsMatrix: cannot be called with different indices types for rowMap and colMap", -1);
200 
201  InitializeDefaults();
202  Allocate();
203 }
204 //==============================================================================
205 Epetra_CrsMatrix::Epetra_CrsMatrix(Epetra_DataAccess CV, const Epetra_CrsGraph& graph)
206  : Epetra_DistObject(graph.Map(), "Epetra::CrsMatrix"),
207  Epetra_CompObject(),
208  Epetra_BLAS(),
209  Graph_(graph),
210  Allocated_(false),
211  StaticGraph_(true),
212  UseTranspose_(false),
213  constructedWithFilledGraph_(false),
214  matrixFillCompleteCalled_(false),
215  StorageOptimized_(false),
216  Values_(0),
217  Values_alloc_lengths_(0),
218  All_Values_(0),
219  NormInf_(0.0),
220  NormOne_(0.0),
221  NormFrob_(0.0),
222  NumMyRows_(graph.NumMyRows()),
223  ImportVector_(0),
224  ExportVector_(0),
225  CV_(CV),
226  squareFillCompleteCalled_(false)
227 {
228  constructedWithFilledGraph_ = graph.Filled();
229  InitializeDefaults();
230  Allocate();
231 }
232 
233 //==============================================================================
234 Epetra_CrsMatrix::Epetra_CrsMatrix(const Epetra_CrsMatrix& Matrix)
235  : Epetra_DistObject(Matrix),
236  Epetra_CompObject(Matrix),
237  Epetra_BLAS(),
238  Graph_(Matrix.Graph()),
239  Allocated_(false),
240  StaticGraph_(true),
241  UseTranspose_(Matrix.UseTranspose_),
242  constructedWithFilledGraph_(false),
243  matrixFillCompleteCalled_(false),
244  StorageOptimized_(false),
245  Values_(0),
246  Values_alloc_lengths_(0),
247  All_Values_(0),
248  NormInf_(0.0),
249  NormOne_(0.0),
250  NormFrob_(0.0),
251  NumMyRows_(Matrix.NumMyRows()),
252  ImportVector_(0),
253  ExportVector_(0),
254  CV_(Copy),
255  squareFillCompleteCalled_(false)
256 {
257  InitializeDefaults();
258  operator=(Matrix);
259 }
260 
261 //==============================================================================
262 Epetra_CrsMatrix& Epetra_CrsMatrix::operator=(const Epetra_CrsMatrix& src)
263 {
264  if (this == &src) {
265  return( *this );
266  }
267 
268  if (!src.Filled()) throw ReportError("Copying an Epetra_CrsMatrix requires source matrix to have Filled()==true", -1);
269 
270  Graph_ = src.Graph_; // Copy graph
271 
272  DeleteMemory();
273 
274  StaticGraph_ = true;
278  Values_ = 0;
279  Values_alloc_lengths_ = 0;
280  All_Values_ = 0;
281  NormInf_ = -1.0;
282  NormOne_ = -1.0;
283  NormFrob_ = -1.0;
284  NumMyRows_ = src.NumMyRows_;
285  ImportVector_ = 0;
286  ExportVector_ = 0;
287 
288  CV_ = Copy;
289 
291  if (src.StorageOptimized()) { // Special copy for case where storage is optimized
292 
293  int numMyNonzeros = Graph().NumMyEntries();
294  if (numMyNonzeros>0) All_Values_ = new double[numMyNonzeros];
295  double * srcValues = src.All_Values();
296  for (int i=0; i<numMyNonzeros; ++i) All_Values_[i] = srcValues[i];
297  Allocated_ = true;
298 #ifdef Epetra_ENABLE_CASK
299  if( src.cask != NULL ) {
300  cask = cask_handler_copy(src.cask);
301  }
302 #endif
303  }
304  else { // copy for non-optimized storage
305 
306  Allocate();
307  for (int i=0; i<NumMyRows_; i++) {
308  int NumEntries = src.NumMyEntries(i);
309  double * const srcValues = src.Values(i);
310  double * targValues = Values(i);
311  for (int j=0; j< NumEntries; j++) targValues[j] = srcValues[j];
312  }
313  }
314 
315  return( *this );
316 }
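//==============================================================================
// Illustrative sketch (not part of the Epetra source): the assignment operator
// above requires the source matrix to be Filled(), copies its graph, and marks
// the target as having a static graph; the copied values are then independent
// of the source. The helper name example_CopyMatrix is an assumption.
static void example_CopyMatrix(const Epetra_CrsMatrix& A_filled)
{
  Epetra_CrsMatrix B(A_filled);      // copy constructor, which calls operator=
  B.Scale(2.0);                      // modifying B's values leaves A_filled intact
}
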
317 
318 //==============================================================================
319 void Epetra_CrsMatrix::InitializeDefaults() { // Initialize all attributes that have trivial default values
320 
321  UseTranspose_ = false;
322  Values_ = 0;
323  Values_alloc_lengths_ = 0;
324  All_Values_ = 0;
325  NormInf_ = -1.0;
326  NormOne_ = -1.0;
327  NormFrob_ = -1.0;
328  ImportVector_ = 0;
329  ExportVector_ = 0;
330 
331  return;
332 }
333 
334 //==============================================================================
335 int Epetra_CrsMatrix::Allocate() {
336 
337  int i, j;
338 
339  // Allocate Values array
340  Values_ = NumMyRows_ > 0 ? new double*[NumMyRows_] : NULL;
341  Values_alloc_lengths_ = NumMyRows_ > 0 ? new int[NumMyRows_] : NULL;
342  if (NumMyRows_ > 0) {
343  for(j=0; j<NumMyRows_; ++j) Values_alloc_lengths_[j] = 0;
344  }
345 
346  // Allocate and initialize entries if we are copying data
347  if (CV_==Copy) {
348  if (Graph().StaticProfile() || Graph().StorageOptimized()) {
349  int numMyNonzeros = Graph().NumMyEntries();
350  if (numMyNonzeros>0) All_Values_ = new double[numMyNonzeros];
351  if(Graph().StorageOptimized()){
352  StorageOptimized_ = true;
353  }
354  }
355  double * all_values = All_Values_;
356  for (i=0; i<NumMyRows_; i++) {
357  int NumAllocatedEntries = Graph().NumAllocatedMyIndices(i);
358 
359  if (NumAllocatedEntries > 0) {
360  if (Graph().StaticProfile() || Graph().StorageOptimized()) {
361  Values_[i] = all_values;
362  all_values += NumAllocatedEntries;
363  }
364  else {
365  Values_[i] = new double[NumAllocatedEntries];
366  Values_alloc_lengths_[i] = NumAllocatedEntries;
367  }
368  }
369  else
370  Values_[i] = 0;
371 
372  for(j=0; j< NumAllocatedEntries; j++)
373  Values_[i][j] = 0.0; // Fill values with zero
374  }
375  }
376  else {
377  for (i=0; i<NumMyRows_; i++) {
378  Values_[i] = 0;
379  }
380  }
381  SetAllocated(true);
382 #ifdef Epetra_ENABLE_CASK
383  cask=NULL;
384 #endif
385  return(0);
386 }
387 //==============================================================================
388 Epetra_CrsMatrix::~Epetra_CrsMatrix()
389 {
390  DeleteMemory();
391 }
392 
393 //==============================================================================
394 void Epetra_CrsMatrix::DeleteMemory()
395 {
396  int i;
397 
398  if (CV_==Copy) {
399  if (All_Values_!=0)
400  delete [] All_Values_;
401  else if (Values_!=0)
402  for (i=0; i<NumMyRows_; i++)
403  if (Graph().NumAllocatedMyIndices(i) >0)
404  delete [] Values_[i];
405  }
406 
407  if (ImportVector_!=0)
408  delete ImportVector_;
409  ImportVector_=0;
410 
411  if (ExportVector_!=0)
412  delete ExportVector_;
413  ExportVector_=0;
414 
415  delete [] Values_;
416  Values_ = 0;
417 
418  delete [] Values_alloc_lengths_;
419  Values_alloc_lengths_ = 0;
420 
421  NumMyRows_ = 0;
422 
423 #ifdef Epetra_ENABLE_CASK
424  if( StorageOptimized_ )
425  {
426  if( cask != NULL )
427  cask_handler_destroy(cask);
428 
429  cask = NULL;
430  }
431 #endif
432 
433  Allocated_ = false;
434 }
435 
436 //==============================================================================
437 int Epetra_CrsMatrix::ReplaceRowMap(const Epetra_BlockMap& newmap)
438 {
439  int err = Graph_.ReplaceRowMap(newmap);
440  if (err == 0) {
441  //update export vector.
442 
443  if (ExportVector_ != 0) {
444  delete ExportVector_;
445  ExportVector_= 0;
446  }
447 
449  }
450  return(err);
451 }
452 
453 //==============================================================================
454 int Epetra_CrsMatrix::ReplaceColMap(const Epetra_BlockMap& newmap)
455 {
456  int err = Graph_.ReplaceColMap(newmap);
457  if (err == 0) {
458  //update import vector.
459 
460  if (ImportVector_ != 0) {
461  delete ImportVector_;
462  ImportVector_= 0;
463  }
464 
466  }
467  return(err);
468 }
469 
470 //==============================================================================
471 int Epetra_CrsMatrix::ReplaceDomainMapAndImporter(const Epetra_Map& NewDomainMap, const Epetra_Import * NewImporter) {
472  return Graph_.ReplaceDomainMapAndImporter(NewDomainMap,NewImporter);
473 }
474 
475 //==============================================================================
476 int Epetra_CrsMatrix::RemoveEmptyProcessesInPlace(const Epetra_BlockMap * NewMap) {
477  // Epetra_DistObject things
478  if(NewMap) {
479  Map_ = *NewMap;
480  Comm_ = &NewMap->Comm();
481  }
482 
483  return Graph_.RemoveEmptyProcessesInPlace(NewMap);
484 }
485 
486 //==============================================================================
487 int Epetra_CrsMatrix::PutScalar(double ScalarConstant)
488 {
489  if (StorageOptimized()) {
490  int length = NumMyNonzeros();
491  for (int i=0; i<length; ++i) All_Values_[i] = ScalarConstant;
492  }
493  else {
494  for(int i=0; i<NumMyRows_; i++) {
495  int NumEntries = Graph().NumMyIndices(i);
496  double * targValues = Values(i);
497  for(int j=0; j< NumEntries; j++)
498  targValues[j] = ScalarConstant;
499  }
500  }
501  return(0);
502 }
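//==============================================================================
// Illustrative sketch (not part of the Epetra source): PutScalar is commonly
// used to zero a filled matrix before re-assembling values on an unchanged
// sparsity pattern with SumIntoGlobalValues. example_Reassemble and its
// arguments are assumptions made for the sketch.
static int example_Reassemble(Epetra_CrsMatrix& A, int globalRow,
                              int numEntries, const double* vals, const int* cols)
{
  EPETRA_CHK_ERR(A.PutScalar(0.0));   // keep the structure, zero all values
  EPETRA_CHK_ERR(A.SumIntoGlobalValues(globalRow, numEntries, vals, cols));
  return 0;
}
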
503 //==============================================================================
504 int Epetra_CrsMatrix::Scale(double ScalarConstant)
505 {
506  if (StorageOptimized()) {
507  int length = NumMyNonzeros();
508  for (int i=0; i<length; ++i) All_Values_[i] *= ScalarConstant;
509  }
510  else {
511  for(int i=0; i<NumMyRows_; i++) {
512  int NumEntries = Graph().NumMyIndices(i);
513  double * targValues = Values(i);
514  for(int j=0; j< NumEntries; j++)
515  targValues[j] *= ScalarConstant;
516  }
517  }
518  return(0);
519 }
520 
521 //==========================================================================
522 template<typename int_type>
523 int Epetra_CrsMatrix::TInsertGlobalValues(int_type Row, int NumEntries,
524  const double* values,
525  const int_type* Indices)
526 {
527  if(IndicesAreLocal())
528  EPETRA_CHK_ERR(-2); // Cannot insert global values into local graph
529  if(IndicesAreContiguous())
530  EPETRA_CHK_ERR(-3); // Indices cannot be individually deleted and newed
532  int locRow = Graph_.LRID(Row); // Find local row number for this global row index
533 
534  EPETRA_CHK_ERR( InsertValues(locRow, NumEntries, values, Indices) );
535 
536  return(0);
537 }
538 
539 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
540 int Epetra_CrsMatrix::InsertGlobalValues(int Row, int NumEntries,
541  const double* values,
542  const int* Indices)
543 {
544  if(RowMap().GlobalIndicesInt())
545  return TInsertGlobalValues<int>(Row, NumEntries, values, Indices);
546  else
547  throw ReportError("Epetra_CrsMatrix::InsertGlobalValues int version called for a matrix that is not int.", -1);
548 }
549 #endif
550 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
551 int Epetra_CrsMatrix::InsertGlobalValues(long long Row, int NumEntries,
552  const double* values,
553  const long long* Indices)
554 {
555  if(RowMap().GlobalIndicesLongLong())
556  return TInsertGlobalValues<long long>(Row, NumEntries, values, Indices);
557  else
558  throw ReportError("Epetra_CrsMatrix::InsertGlobalValues long long version called for a matrix that is not long long.", -1);
559 }
560 #endif
561 
562 //==========================================================================
563 template<typename int_type>
564 int Epetra_CrsMatrix::TInsertGlobalValues(int_type Row, int NumEntries,
565  double* values,
566  int_type* Indices)
567 {
568  if(IndicesAreLocal())
569  EPETRA_CHK_ERR(-2); // Cannot insert global values into local graph
570  if(IndicesAreContiguous())
571  EPETRA_CHK_ERR(-3); // Indices cannot be individually deleted and newed
573  int locRow = Graph_.LRID(Row); // Find local row number for this global row index
574 
575  EPETRA_CHK_ERR( InsertValues(locRow, NumEntries, values, Indices) );
576 
577  return(0);
578 }
579 
580 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
581 int Epetra_CrsMatrix::InsertGlobalValues(int Row, int NumEntries,
582  double* values,
583  int* Indices)
584 {
585  if(RowMap().GlobalIndicesInt())
586  return TInsertGlobalValues<int>(Row, NumEntries, values, Indices);
587  else
588  throw ReportError("Epetra_CrsMatrix::InsertGlobalValues int version called for a matrix that is not int.", -1);
589 }
590 #endif
591 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
592 int Epetra_CrsMatrix::InsertGlobalValues(long long Row, int NumEntries,
593  double* values,
594  long long* Indices)
595 {
596  if(RowMap().GlobalIndicesLongLong()) {
597  return TInsertGlobalValues<long long>(Row, NumEntries, values, Indices);
598  }
599  else
600  throw ReportError("Epetra_CrsMatrix::InsertGlobalValues long long version called for a matrix that is not long long.", -1);
601 }
602 #endif
603 //==========================================================================
604 int Epetra_CrsMatrix::InsertMyValues(int Row, int NumEntries,
605  const double* values,
606  const int* Indices)
607 {
608  if(IndicesAreGlobal())
609  EPETRA_CHK_ERR(-2); // Cannot insert global values into filled graph
610  if(IndicesAreContiguous() && CV_==Copy)
611  EPETRA_CHK_ERR(-3); // Indices cannot be individually deleted and new
613 
614 #if !defined(EPETRA_NO_32BIT_GLOBAL_INDICES) || !defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
615  EPETRA_CHK_ERR( InsertValues(Row, NumEntries, values, Indices) );
616 #else
617  throw ReportError("Epetra_CrsMatrix::InsertMyValues: Failure because neither 32 bit nor 64 bit indices insertable.", -1);
618 #endif
619 
620  return(0);
621 
622 }
623 
624 //==========================================================================
625 int Epetra_CrsMatrix::InsertMyValues(int Row, int NumEntries,
626  double* values,
627  int* Indices)
628 {
629  if(IndicesAreGlobal())
630  EPETRA_CHK_ERR(-2); // Cannot insert global values into filled graph
631  if(IndicesAreContiguous() && CV_==Copy)
632  EPETRA_CHK_ERR(-3); // Indices cannot be individually deleted and new
634 
635 #if !defined(EPETRA_NO_32BIT_GLOBAL_INDICES) || !defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
636  EPETRA_CHK_ERR( InsertValues(Row, NumEntries, values, Indices) );
637 #else
638  throw ReportError("Epetra_CrsMatrix::InsertMyValues: Failure because neither 32 bit nor 64 bit indices insertable.", -1);
639 #endif
640 
641  return(0);
642 
643 }
644 
645 //==========================================================================
646 template<typename int_type>
647 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
648  const double* values,
649  const int_type* Indices)
650 {
651  if(CV_ == View){
652  //cannot allow View mode with const pointers
653  EPETRA_CHK_ERR(-4);
654  }
655  else{
656  //to avoid code duplication I am using a cheap tactic of removing the constness of
657  //values and Indices. Since this is only called in copy mode the passed in variables
658  //will not be modified.
659  return(InsertValues(Row, NumEntries, const_cast<double*>(values), const_cast<int_type*>(Indices)));
660  }
661  return 0;
662 }
663 
664 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
665 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
666  const double* values,
667  const int* Indices)
668 {
669  if(RowMap().GlobalIndicesInt() || (RowMap().GlobalIndicesLongLong() && IndicesAreLocal()))
670  return InsertValues<int>(Row, NumEntries, values, Indices);
671  else
672  throw ReportError("Epetra_CrsMatrix::InsertValues int version called for a matrix that is not int.", -1);
673 }
674 #endif
675 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
676 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
677  const double* values,
678  const long long* Indices)
679 {
680  if(RowMap().GlobalIndicesLongLong())
681  return InsertValues<long long>(Row, NumEntries, values, Indices);
682  else
683  throw ReportError("Epetra_CrsMatrix::InsertValues long long version called for a matrix that is not long long.", -1);
684 }
685 #endif
686 //==========================================================================
687 template<typename int_type>
688 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
689  double* values,
690  int_type* Indices)
691 {
692  int j;
693  int ierr = 0;
694 
695  if(Row < 0 || Row >= NumMyRows_)
696  EPETRA_CHK_ERR(-1); // Not in Row range
697 
698  if(CV_ == View) {
699  //test indices in static graph
700  if(StaticGraph()) {
701  int testNumEntries;
702  int* testIndices;
703  int testRow = Row;
704  if(IndicesAreGlobal())
705  testRow = Graph_.LRID( Row );
706  EPETRA_CHK_ERR(Graph_.ExtractMyRowView(testRow, testNumEntries, testIndices));
707 
708  bool match = true;
709  if(NumEntries != testNumEntries)
710  match = false;
711  for(int i = 0; i < NumEntries; ++i)
712  match = match && (Indices[i]==testIndices[i]);
713 
714  if(!match)
715  ierr = -3;
716  }
717 
718  if(Values_[Row] != 0)
719  ierr = 2; // This row has been defined already. Issue warning.
720  Values_[Row] = values;
721  }
722  else {
723  if(StaticGraph())
724  EPETRA_CHK_ERR(-2); // If the matrix graph is fully constructed, we cannot insert new values
725 
726  int tmpNumEntries = NumEntries;
727 
728  if(Graph_.HaveColMap()) { //must insert only valid indices, values
729  const double* tmpValues = values;
730  values = new double[NumEntries];
731  int loc = 0;
732  if(IndicesAreLocal()) {
733  for(int i = 0; i < NumEntries; ++i)
734  if(Graph_.ColMap().MyLID(static_cast<int>(Indices[i])))
735  values[loc++] = tmpValues[i];
736  }
737  else {
738  for(int i = 0; i < NumEntries; ++i)
739  if(Graph_.ColMap().MyGID(Indices[i]))
740  values[loc++] = tmpValues[i];
741  }
742  if(NumEntries != loc)
743  ierr = 2; //Some columns excluded
744  NumEntries = loc;
745  }
746 
747  int start = Graph().NumMyIndices(Row);
748  int stop = start + NumEntries;
749  int NumAllocatedEntries = Values_alloc_lengths_[Row];
750  if(stop > NumAllocatedEntries) {
751  if (Graph().StaticProfile() && stop > Graph().NumAllocatedMyIndices(Row)) {
752  EPETRA_CHK_ERR(-2); // Cannot expand graph storage if graph created using StaticProfile
753  }
754  if(NumAllocatedEntries == 0) {
755  Values_[Row] = new double[NumEntries]; // Row was never allocated, so do it
756  Values_alloc_lengths_[Row] = NumEntries;
757  }
758  else {
759  ierr = 1; // Out of room. Must delete and allocate more space...
760  double* tmp_Values = new double[stop];
761  for(j = 0; j < start; j++)
762  tmp_Values[j] = Values_[Row][j]; // Copy existing entries
763  delete[] Values_[Row]; // Delete old storage
764  Values_[Row] = tmp_Values; // Set pointer to new storage
765  Values_alloc_lengths_[Row] = stop;
766  }
767  }
768 
769  for(j = start; j < stop; j++)
770  Values_[Row][j] = values[j-start];
771 
772 
773  NumEntries = tmpNumEntries;
774  if(Graph_.HaveColMap())
775  delete[] values;
776  }
777 
778  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
779  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
780  NormFrob_ = -1.0;
781 
782  if(!StaticGraph()) {
783  EPETRA_CHK_ERR(Graph_.InsertIndices(Row, NumEntries, Indices));
784  }
785 
786  EPETRA_CHK_ERR(ierr);
787  return(0);
788 }
789 
790 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
791 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
792  double* values,
793  int* Indices)
794 {
795  if(RowMap().GlobalIndicesInt() || (RowMap().GlobalIndicesLongLong() && IndicesAreLocal()))
796  return InsertValues<int>(Row, NumEntries, values, Indices);
797  else
798  throw ReportError("Epetra_CrsMatrix::InsertValues int version called for a matrix that is not int.", -1);
799 }
800 #endif
801 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
802 int Epetra_CrsMatrix::InsertValues(int Row, int NumEntries,
803  double* values,
804  long long* Indices)
805 {
806  if(RowMap().GlobalIndicesLongLong())
807  return InsertValues<long long>(Row, NumEntries, values, Indices);
808  else
809  throw ReportError("Epetra_CrsMatrix::InsertValues long long version called for a matrix that is not long long.", -1);
810 }
811 #endif
812 
813 //==========================================================================
814 int Epetra_CrsMatrix::InsertOffsetValues(long long Row, int NumEntries,
815  double* values,
816  int* Indices)
817 {
818  return ReplaceOffsetValues(Row, NumEntries, values, Indices);
819 }
820 
821 //==========================================================================
822 template<typename int_type>
823 int Epetra_CrsMatrix::TReplaceGlobalValues(int_type Row, int NumEntries, const double * srcValues, const int_type *Indices) {
824 
825  int j;
826  int ierr = 0;
827  int Loc;
828 
829  int locRow = Graph_.LRID(Row); // Normalize row range
830 
831  if (locRow < 0 || locRow >= NumMyRows_) {
832  EPETRA_CHK_ERR(-1); // Not in Row range
833  }
834  double * targValues = Values(locRow);
835  for (j=0; j<NumEntries; j++) {
836  int_type Index = Indices[j];
837  if (Graph_.FindGlobalIndexLoc(locRow,Index,j,Loc))
838  targValues[Loc] = srcValues[j];
839  else
840  ierr = 2; // Value Excluded
841  }
842 
843  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
844  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
845  NormFrob_ = -1.0;
846 
847  EPETRA_CHK_ERR(ierr);
848  return(0);
849 }
850 
851 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
852 int Epetra_CrsMatrix::ReplaceGlobalValues(int Row, int NumEntries, const double * srcValues, const int *Indices) {
853  if(RowMap().GlobalIndicesInt())
854  return TReplaceGlobalValues<int>(Row, NumEntries, srcValues, Indices);
855  else
856  throw ReportError("Epetra_CrsMatrix::ReplaceGlobalValues int version called for a matrix that is not int.", -1);
857 }
858 #endif
859 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
860 int Epetra_CrsMatrix::ReplaceGlobalValues(long long Row, int NumEntries, const double * srcValues, const long long *Indices) {
861  if(RowMap().GlobalIndicesLongLong())
862  return TReplaceGlobalValues<long long>(Row, NumEntries, srcValues, Indices);
863  else
864  throw ReportError("Epetra_CrsMatrix::ReplaceGlobalValues long long version called for a matrix that is not long long.", -1);
865 }
866 #endif
867 
868 //==========================================================================
869 int Epetra_CrsMatrix::ReplaceMyValues(int Row, int NumEntries, const double * srcValues, const int *Indices) {
870 
871  if (!IndicesAreLocal())
872  EPETRA_CHK_ERR(-4); // Indices must be local.
873 
874  int j;
875  int ierr = 0;
876  int Loc;
877 
878  if (Row < 0 || Row >= NumMyRows_) {
879  EPETRA_CHK_ERR(-1); // Not in Row range
880  }
881 
882  double* RowValues = Values(Row);
883  for (j=0; j<NumEntries; j++) {
884  int Index = Indices[j];
885  if (Graph_.FindMyIndexLoc(Row,Index,j,Loc))
886  RowValues[Loc] = srcValues[j];
887  else
888  ierr = 2; // Value Excluded
889  }
890 
891  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
892  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
893  NormFrob_ = -1.0;
894 
895  EPETRA_CHK_ERR(ierr);
896  return(0);
897 }
898 
899 //==========================================================================
900 int Epetra_CrsMatrix::ReplaceOffsetValues(long long Row, int NumEntries,
901  const double * srcValues, const int *Offsets)
902 {
903  int j;
904  int ierr = 0;
905 
906  // Normalize row range
907 #ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
908  int locRow = LRID((int) Row);
909 #else
910  int locRow = LRID(Row);
911 #endif
912 
913  if (locRow < 0 || locRow >= NumMyRows_) {
914  EPETRA_CHK_ERR(-1); // Not in Row range
915  }
916 
917  double* RowValues = Values(locRow);
918  for(j=0; j<NumEntries; j++) {
919  if( Offsets[j] != -1 )
920  RowValues[Offsets[j]] = srcValues[j];
921  }
922 
923  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
924  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
925  NormFrob_ = -1.0;
926 
927  EPETRA_CHK_ERR(ierr);
928  return(0);
929 }
930 
931 //==========================================================================
932 template<typename int_type>
933 int Epetra_CrsMatrix::TSumIntoGlobalValues(int_type Row,
934  int NumEntries,
935  const double * srcValues,
936  const int_type *Indices)
937 {
938  int j;
939  int ierr = 0;
940  int Loc = 0;
941 
942 
943  int locRow = Graph_.LRID(Row); // Normalize row range
944 
945  if (locRow < 0 || locRow >= NumMyRows_) {
946  EPETRA_CHK_ERR(-1); // Not in Row range
947  }
948 
949  if (StaticGraph() && !Graph_.HaveColMap()) {
950  EPETRA_CHK_ERR(-1);
951  }
952 
953  double * RowValues = Values(locRow);
954 
955  if (!StaticGraph()) {
956  for (j=0; j<NumEntries; j++) {
957  int_type Index = Indices[j];
958  if (Graph_.FindGlobalIndexLoc(locRow,Index,j,Loc))
959 #ifdef EPETRA_HAVE_OMP
960 #ifdef EPETRA_HAVE_OMP_NONASSOCIATIVE
961 #pragma omp atomic
962 #endif
963 #endif
964  RowValues[Loc] += srcValues[j];
965  else
966  ierr = 2; // Value Excluded
967  }
968  }
969  else {
970  const Epetra_BlockMap& colmap = Graph_.ColMap();
971  int NumColIndices = Graph_.NumMyIndices(locRow);
972  const int* ColIndices = Graph_.Indices(locRow);
973 
974  if (Graph_.Sorted()) {
975  int insertPoint;
976  for (j=0; j<NumEntries; j++) {
977  int Index = colmap.LID(Indices[j]);
978 
979  // Check whether the next added element is the subsequent element in
980  // the graph indices, then we can skip the binary search
981  if (Loc < NumColIndices && Index == ColIndices[Loc])
982 #ifdef EPETRA_HAVE_OMP
983 #ifdef EPETRA_HAVE_OMP_NONASSOCIATIVE
984 #pragma omp atomic
985 #endif
986 #endif
987  RowValues[Loc] += srcValues[j];
988  else {
989  Loc = Epetra_Util_binary_search(Index, ColIndices, NumColIndices, insertPoint);
990  if (Loc > -1)
991 #ifdef EPETRA_HAVE_OMP
992 #ifdef EPETRA_HAVE_OMP_NONASSOCIATIVE
993 #pragma omp atomic
994 #endif
995 #endif
996  RowValues[Loc] += srcValues[j];
997  else
998  ierr = 2; // Value Excluded
999  }
1000  ++Loc;
1001  }
1002  }
1003  else
1004  for (j=0; j<NumEntries; j++) {
1005  int Index = colmap.LID(Indices[j]);
1006  if (Graph_.FindMyIndexLoc(NumColIndices,ColIndices,Index,j,Loc))
1007 #ifdef EPETRA_HAVE_OMP
1008 #ifdef EPETRA_HAVE_OMP_NONASSOCIATIVE
1009 #pragma omp atomic
1010 #endif
1011 #endif
1012  RowValues[Loc] += srcValues[j];
1013  else
1014  ierr = 2; // Value Excluded
1015  }
1016  }
1017 
1018  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
1019  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
1020  NormFrob_ = -1.0;
1021 
1022  EPETRA_CHK_ERR(ierr);
1023 
1024  return(0);
1025 }
1026 
1027 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1028 int Epetra_CrsMatrix::SumIntoGlobalValues(int Row,
1029  int NumEntries,
1030  const double * srcValues,
1031  const int *Indices)
1032 {
1033  if(RowMap().GlobalIndicesInt())
1034  return TSumIntoGlobalValues<int>(Row, NumEntries, srcValues, Indices);
1035  else
1036  throw ReportError("Epetra_CrsMatrix::SumIntoGlobalValues int version called for a matrix that is not int.", -1);
1037 }
1038 #endif
1039 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1040 int Epetra_CrsMatrix::SumIntoGlobalValues(long long Row,
1041  int NumEntries,
1042  const double * srcValues,
1043  const long long *Indices)
1044 {
1045  if(RowMap().GlobalIndicesLongLong())
1046  return TSumIntoGlobalValues<long long>(Row, NumEntries, srcValues, Indices);
1047  else
1048  throw ReportError("Epetra_CrsMatrix::SumIntoGlobalValues long long version called for a matrix that is not long long.", -1);
1049 }
1050 #endif
1051 
1052 //==========================================================================
1053 int Epetra_CrsMatrix::SumIntoMyValues(int Row, int NumEntries, const double * srcValues, const int *Indices) {
1054 
1055  if (!IndicesAreLocal())
1056  EPETRA_CHK_ERR(-4); // Indices must be local.
1057 
1058  int j;
1059  int ierr = 0;
1060  int Loc = 0;
1061  int insertPoint;
1062 
1063  if (Row < 0 || Row >= NumMyRows_) {
1064  EPETRA_CHK_ERR(-1); // Not in Row range
1065  }
1066 
1067  double* RowValues = Values(Row);
1068  int NumColIndices = Graph_.NumMyIndices(Row);
1069  const int* ColIndices = Graph_.Indices(Row);
1070  if (Graph_.Sorted()) {
1071  for (j=0; j<NumEntries; j++) {
1072  int Index = Indices[j];
1073 
1074  // Check whether the next added element is the subsequent element in
1075  // the graph indices.
1076  if (Loc < NumColIndices && Index == ColIndices[Loc])
1077  RowValues[Loc] += srcValues[j];
1078  else {
1079  Loc = Epetra_Util_binary_search(Index, ColIndices, NumColIndices, insertPoint);
1080  if (Loc > -1)
1081  RowValues[Loc] += srcValues[j];
1082  else
1083  ierr = 2; // Value Excluded
1084  }
1085  ++Loc;
1086  }
1087  }
1088  else {
1089  for (j=0; j<NumEntries; j++) {
1090  int Index = Indices[j];
1091  if (Graph_.FindMyIndexLoc(Row,Index,j,Loc))
1092  RowValues[Loc] += srcValues[j];
1093  else
1094  ierr = 2; // Value Excluded
1095  }
1096  }
1097 
1098  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
1099  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
1100  NormFrob_ = -1.0;
1101 
1102  EPETRA_CHK_ERR(ierr);
1103 
1104  return(0);
1105 }
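//==============================================================================
// Illustrative sketch (not part of the Epetra source): SumIntoMyValues adds
// into existing entries addressed by local column indices, so it is only valid
// once the matrix is in local index space (after FillComplete). The helper
// name example_AddLocal is an assumption.
static int example_AddLocal(Epetra_CrsMatrix& A, int localRow,
                            int numEntries, const double* vals, const int* localCols)
{
  if (!A.IndicesAreLocal()) return -1;   // matrix must be in local index space
  EPETRA_CHK_ERR(A.SumIntoMyValues(localRow, numEntries, vals, localCols));
  return 0;
}
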
1106 
1107 //==========================================================================
1108 int Epetra_CrsMatrix::SumIntoOffsetValues(long long Row, int NumEntries, const double * srcValues, const int *Offsets) {
1109 
1110  int j;
1111  int ierr = 0;
1112 
1113  // Normalize row range
1114 #ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
1115  int locRow = LRID((int) Row);
1116 #else
1117  int locRow = LRID(Row);
1118 #endif
1119 
1120  if (locRow < 0 || locRow >= NumMyRows_) {
1121  EPETRA_CHK_ERR(-1); // Not in Row range
1122  }
1123 
1124  double* RowValues = Values(locRow);
1125  for (j=0; j<NumEntries; j++) {
1126  if( Offsets[j] != -1 )
1127  RowValues[Offsets[j]] += srcValues[j];
1128  }
1129 
1130  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
1131  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
1132  NormFrob_ = -1.0;
1133 
1134  EPETRA_CHK_ERR(ierr);
1135 
1136  return(0);
1137 }
1138 
1139 //==========================================================================
1140 int Epetra_CrsMatrix::FillComplete(bool OptimizeDataStorage) {
1141  squareFillCompleteCalled_ = true;
1142  EPETRA_CHK_ERR(FillComplete(RowMap(), RowMap(), OptimizeDataStorage));
1143  return(0);
1144 }
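//==============================================================================
// Illustrative sketch (not part of the Epetra source): the no-argument
// FillComplete above assumes a square operator (domain == range == row map);
// a rectangular matrix must pass its domain and range maps explicitly, as in
// this assumed helper example_FillRectangular.
static int example_FillRectangular(Epetra_CrsMatrix& A,
                                   const Epetra_Map& domainMap,
                                   const Epetra_Map& rangeMap)
{
  EPETRA_CHK_ERR(A.FillComplete(domainMap, rangeMap));   // rectangular operator
  return 0;
}
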
1145 
1146 //==========================================================================
1147 int Epetra_CrsMatrix::FillComplete(const Epetra_Map& domain_map,
1148  const Epetra_Map& range_map, bool OptimizeDataStorage)
1149 {
1150  int returnValue = 0;
1151 
1152  if (Graph_.Filled()) {
1153  if (!constructedWithFilledGraph_) {
1154  returnValue = 2;
1155  }
1156  }
1157 
1158  if (!StaticGraph()) {
1159  if (Graph_.MakeIndicesLocal(domain_map, range_map) < 0) {
1160  return(-1);
1161  }
1162  }
1163  SortEntries(); // Sort column entries from smallest to largest
1164  MergeRedundantEntries(); // Get rid of any redundant index values
1165  if (!StaticGraph()) {
1166  if (Graph_.FillComplete(domain_map, range_map) < 0) {
1167  return(-2);
1168  }
1169  }
1170 
1171  matrixFillCompleteCalled_ = true;
1172 
1173  if (squareFillCompleteCalled_) {
1174  if (DomainMap().NumGlobalElements64() != RangeMap().NumGlobalElements64()) {
1175  returnValue = 3;
1176  }
1177  squareFillCompleteCalled_ = false;
1178  EPETRA_CHK_ERR(returnValue);
1179  }
1180 
1181  if (OptimizeDataStorage) { EPETRA_CHK_ERR(OptimizeStorage()); }
1182 
1183  return(returnValue);
1184 }
1185 
1186 //==========================================================================
1187 int Epetra_CrsMatrix::TransformToLocal() {
1188  EPETRA_CHK_ERR(FillComplete());
1189  return(0);
1190 }
1191 
1192 //==========================================================================
1193 int Epetra_CrsMatrix::TransformToLocal(const Epetra_Map* domainMap, const Epetra_Map* rangeMap) {
1194  EPETRA_CHK_ERR(FillComplete(*domainMap, *rangeMap));
1195  return(0);
1196 }
1197 
1198 //==========================================================================
1199 int Epetra_CrsMatrix::SortEntries() {
1200 
1201  if(!IndicesAreLocal())
1202  EPETRA_CHK_ERR(-1);
1203  if(Sorted())
1204  return(0);
1205 
1206  // For each row, sort column entries from smallest to largest.
1207  // Use shell sort. Stable sort so it is fast if indices are already sorted.
1208 
1209 
1210  for(int i = 0; i < NumMyRows_; i++){
1211 
1212  double* locValues = Values(i);
1213  int NumEntries = Graph().NumMyIndices(i);
1214  int* locIndices = Graph().Indices(i);
1215 
1216  int n = NumEntries;
1217  int m = n/2;
1218 
1219  while(m > 0) {
1220  int max = n - m;
1221  for(int j = 0; j < max; j++) {
1222  for(int k = j; k >= 0; k-=m) {
1223  if(locIndices[k+m] >= locIndices[k])
1224  break;
1225  double dtemp = locValues[k+m];
1226  locValues[k+m] = locValues[k];
1227  locValues[k] = dtemp;
1228  int itemp = locIndices[k+m];
1229  locIndices[k+m] = locIndices[k];
1230  locIndices[k] = itemp;
1231  }
1232  }
1233  m = m/2;
1234  }
1235  }
1236  Graph_.SetSorted(true); // This also sorted the graph
1237  return(0);
1238 }
1239 
1240 //==========================================================================
1241 int Epetra_CrsMatrix::MergeRedundantEntries() {
1242 
1243  int i;
1244 
1245  if(NoRedundancies())
1246  return(0);
1247  if(!Sorted())
1248  EPETRA_CHK_ERR(-1); // Must have sorted entries
1249 
1250  // For each row, remove column indices that are repeated.
1251  // Also, determine if matrix is upper or lower triangular or has no diagonal (Done in graph)
1252  // Note: This function assumes that SortEntries was already called.
1253 
1254  for(i = 0; i<NumMyRows_; i++) {
1255  int NumEntries = Graph().NumMyIndices(i);
1256  if(NumEntries > 1) {
1257  double* const locValues = Values(i);
1258  int* const locIndices = Graph().Indices(i);
1259  int curEntry =0;
1260  double curValue = locValues[0];
1261  for(int k = 1; k < NumEntries; k++) {
1262  if(locIndices[k] == locIndices[k-1])
1263  curValue += locValues[k];
1264  else {
1265  locValues[curEntry++] = curValue;
1266  curValue = locValues[k];
1267  }
1268  }
1269  locValues[curEntry] = curValue;
1270 
1271  }
1272  }
1273 
1274  EPETRA_CHK_ERR(Graph_.RemoveRedundantIndices()); // Remove redundant indices and then return
1275  return(0);
1276 }
1277 
1278 //==========================================================================
1279 int Epetra_CrsMatrix::OptimizeStorage() {
1280 
1281 
1282  if (StorageOptimized())
1283  return(0); // Have we been here before?
1284  if (!Filled()) EPETRA_CHK_ERR(-1); // Cannot optimize storage before calling FillComplete()
1285 
1286 
1287  int ierr = Graph_.OptimizeStorage();
1288  if (ierr!=0) EPETRA_CHK_ERR(ierr); // In order for OptimizeStorage to make sense for the matrix, it must work on the graph.
1289 
1290  bool Contiguous = true; // Assume contiguous is true
1291  for (int i=1; i<NumMyRows_; i++){
1292  int NumEntries = Graph().NumMyIndices(i-1);
1293 
1294  // check if end of beginning of current row starts immediately after end of previous row.
1295  if (Values_[i]!=Values_[i-1]+NumEntries) {
1296  Contiguous = false;
1297  break;
1298  }
1299  }
1300 
1301  // NOTE: At the end of the above loop set, there is a possibility that NumEntries and NumAllocatedEntries
1302  // for the last row could be different, but I don't think it matters.
1303 
1304 
1305  if ((CV_==View) && !Contiguous)
1306  EPETRA_CHK_ERR(-1); // This is user data, it's not contiguous and we can't make it so.
1307 
1308 
1309  if(!Contiguous) { // Must pack indices if not already contiguous. Pack values into All_values_
1310 
1311  if (All_Values_==0) {
1312  // Compute Number of Nonzero entries (Done in FillComplete, but we may not have been there yet.)
1313  int numMyNonzeros = Graph_.NumMyNonzeros();
1314 
1315  // Allocate one big array for all values
1316  All_Values_ = new double[numMyNonzeros];
1317  if(All_Values_ == 0) throw ReportError("Error with All_Values_ allocation.", -99);
1318 
1319  const int* IndexOffset = Graph().IndexOffset();
1320  int numMyRows = NumMyRows_;
1321  double ** Values_s = Values_;
1322  double * All_Values_s = All_Values_;
1323 #ifdef EPETRA_HAVE_OMP
1324 #pragma omp parallel for default(none) shared(Values_s,All_Values_s,numMyRows,IndexOffset)
1325 #endif
1326  for (int i=0; i<numMyRows; i++) {
1327  int NumEntries = Graph().NumMyIndices(i);
1328  int curOffset = IndexOffset[i];
1329  double * values = Values_s[i];
1330  double * newValues = All_Values_s+curOffset;
1331  for (int j=0; j<NumEntries; j++) newValues[j] = values[j];
1332  }
1333  }
1334  else { // Static Profile, so just pack into existing storage (can't be threaded)
1335  double * tmp = All_Values_;
1336  for (int i=0; i<NumMyRows_; i++) {
1337  int NumEntries = Graph().NumMyIndices(i);
1338  double * values = Values_[i];
1339  if (tmp!=values) // Copy values if not pointing to same location
1340  for (int j=0; j<NumEntries; j++) tmp[j] = values[j];
1341  tmp += NumEntries;
1342  }
1343  }
1344 
1345  // Free Values_ arrays
1346  for (int i=0;i<NumMyRows_; ++i) {
1347  if (Values_alloc_lengths_[i] != 0) delete [] Values_[i];
1348  }
1349 
1351  } // End of !Contiguous section
1352  else {
1353  //if already contiguous, we'll simply set All_Values_ to be
1354  //a copy of Values_[0].
1355  if (All_Values_!=0 && All_Values_!=Values_[0]) delete [] All_Values_;
1356  All_Values_ = NumMyRows_ > 0 ? Values_[0] : NULL;
1357  }
1358 
1359  // Delete unneeded storage
1360  delete [] Values_; Values_=0;
1361 
1362 #ifdef Epetra_ENABLE_CASK
1363  if (cask == NULL && Graph().StorageOptimized() ) {
1364  int * Indices = Graph().All_Indices();
1365  int * IndexOffset = Graph().IndexOffset();
1366  int NumMyCols_ = NumMyCols();
1367  cask_handler_initialize(&cask);
1368  cask_csr_analysis(NumMyRows_, NumMyCols_, IndexOffset, Indices,
1369  NumGlobalNonzeros64(),cask);
1370  }
1371 #endif
1372 
1373 
1374  StorageOptimized_ = true;
1375 
1376 
1377  return(0);
1378 }
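//==============================================================================
// Illustrative sketch (not part of the Epetra source): after OptimizeStorage
// the values live in one contiguous array addressed through the graph's
// IndexOffset array (classic packed CRS). A minimal traversal, assuming the
// helper name example_PackedRowSum, might look like this.
static double example_PackedRowSum(const Epetra_CrsMatrix& A, int localRow)
{
  double sum = 0.0;
  int numEntries;
  double* vals;
  int* inds;
  // ExtractMyRowView returns pointers into the packed storage when optimized.
  if (A.ExtractMyRowView(localRow, numEntries, vals, inds) == 0)
    for (int j = 0; j < numEntries; ++j) sum += vals[j];
  return sum;
}
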
1379 
1380 //==========================================================================
1381 template<typename int_type>
1382 int Epetra_CrsMatrix::ExtractGlobalRowCopy(int_type Row, int Length, int & NumEntries, double * values,
1383  int_type * Indices) const
1384 {
1385 
1386  int ierr = Graph_.ExtractGlobalRowCopy(Row, Length, NumEntries, Indices);
1387  if (ierr)
1388  EPETRA_CHK_ERR(ierr);
1389 
1390  EPETRA_CHK_ERR(ExtractGlobalRowCopy(Row, Length, NumEntries, values));
1391  return(0);
1392 }
1393 
1394 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1395 int Epetra_CrsMatrix::ExtractGlobalRowCopy(int Row, int Length, int & NumEntries, double * values,
1396  int * Indices) const
1397 {
1398  if(RowMap().GlobalIndicesInt())
1399  return ExtractGlobalRowCopy<int>(Row, Length, NumEntries, values, Indices);
1400  else
1401  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowCopy int version called for a matrix that is not int.", -1);
1402 }
1403 #endif
1404 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1405 int Epetra_CrsMatrix::ExtractGlobalRowCopy(long long Row, int Length, int & NumEntries, double * values,
1406  long long * Indices) const
1407 {
1408  if(RowMap().GlobalIndicesLongLong())
1409  return ExtractGlobalRowCopy<long long>(Row, Length, NumEntries, values, Indices);
1410  else
1411  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowCopy long long version called for a matrix that is not long long.", -1);
1412 }
1413 #endif
1414 
1415 //==========================================================================
1416 int Epetra_CrsMatrix::ExtractMyRowCopy(int Row, int Length, int & NumEntries, double * values,
1417  int * Indices) const
1418 {
1419 
1420  int ierr = Graph_.ExtractMyRowCopy(Row, Length, NumEntries, Indices);
1421  if (ierr)
1422  EPETRA_CHK_ERR(ierr);
1423 
1424  EPETRA_CHK_ERR(ExtractMyRowCopy(Row, Length, NumEntries, values));
1425  return(0);
1426 }
1427 //==========================================================================
1428 int Epetra_CrsMatrix::NumMyRowEntries(int Row, int & NumEntries) const
1429 {
1430 
1431  if (!MyLRID(Row))
1432  EPETRA_CHK_ERR(-1); // Not in the range of local rows
1433  NumEntries = NumMyEntries(Row);
1434  return(0);
1435 }
1436 
1437 //==========================================================================
1438 template<typename int_type>
1439 int Epetra_CrsMatrix::ExtractGlobalRowCopy(int_type Row, int Length, int & NumEntries, double * values) const
1440 {
1441  int Row0 = Graph_.RowMap().LID(Row); // Normalize row range
1442 
1443  EPETRA_CHK_ERR(ExtractMyRowCopy(Row0, Length, NumEntries, values));
1444  return(0);
1445 }
1446 
1447 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1448 int Epetra_CrsMatrix::ExtractGlobalRowCopy(int Row, int Length, int & NumEntries, double * values) const
1449 {
1450  if(RowMap().GlobalIndicesInt())
1451  return ExtractGlobalRowCopy<int>(Row, Length, NumEntries, values);
1452  else
1453  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowCopy int version called for a matrix that is not int.", -1);
1454 }
1455 #endif
1456 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1457 int Epetra_CrsMatrix::ExtractGlobalRowCopy(long long Row, int Length, int & NumEntries, double * values) const
1458 {
1459  if(RowMap().GlobalIndicesLongLong())
1460  return ExtractGlobalRowCopy<long long>(Row, Length, NumEntries, values);
1461  else
1462  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowCopy long long version called for a matrix that is not long long.", -1);
1463 }
1464 #endif
1465 
1466 //==========================================================================
1467 int Epetra_CrsMatrix::ExtractMyRowCopy(int Row, int Length, int & NumEntries, double * targValues) const
1468 {
1469  int j;
1470 
1471  if (Row < 0 || Row >= NumMyRows_)
1472  EPETRA_CHK_ERR(-1); // Not in Row range
1473 
1474  NumEntries = Graph().NumMyIndices(Row);
1475  if (Length < NumEntries)
1476  EPETRA_CHK_ERR(-2); // Not enough space for copy. Needed size is passed back in NumEntries
1477 
1478  double * srcValues = Values(Row);
1479 
1480  for(j=0; j<NumEntries; j++)
1481  targValues[j] = srcValues[j];
1482 
1483  return(0);
1484 }
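//==============================================================================
// Illustrative sketch (not part of the Epetra source): ExtractMyRowCopy needs
// caller-provided buffers at least MaxNumEntries() long and returns -2 when the
// supplied Length is too small. example_RowOneNorm is an assumed helper name.
static double example_RowOneNorm(const Epetra_CrsMatrix& A, int localRow)
{
  const int maxLen = A.MaxNumEntries();
  double* vals = new double[maxLen];
  int*    inds = new int[maxLen];
  int numEntries = 0;
  double norm = 0.0;
  if (A.ExtractMyRowCopy(localRow, maxLen, numEntries, vals, inds) == 0)
    for (int j = 0; j < numEntries; ++j) norm += std::abs(vals[j]);
  delete [] vals;
  delete [] inds;
  return norm;
}
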
1485 
1486 //==============================================================================
1487 int Epetra_CrsMatrix::ExtractDiagonalCopy(Epetra_Vector & Diagonal) const {
1488 
1489  if(!Filled())
1490  EPETRA_CHK_ERR(-1); // Can't get diagonal unless matrix is filled (and in local index space)
1491  if(!RowMap().SameAs(Diagonal.Map()))
1492  EPETRA_CHK_ERR(-2); // Maps must be the same
1493 
1494  for(int i = 0; i < NumMyRows_; i++) {
1495  long long ii = GRID64(i);
1496  int NumEntries = Graph().NumMyIndices(i);
1497  int* Indices = Graph().Indices(i);
1498  double * srcValues = Values(i);
1499 
1500  Diagonal[i] = 0.0;
1501  for(int j = 0; j < NumEntries; j++) {
1502  if(ii == GCID64(Indices[j])) {
1503  Diagonal[i] = srcValues[j];
1504  break;
1505  }
1506  }
1507  }
1508  return(0);
1509 }
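//==============================================================================
// Illustrative sketch (not part of the Epetra source): ExtractDiagonalCopy
// fills a vector built on the row map with the local diagonal entries, the
// usual starting point for a Jacobi-type scaling. example_DiagonalVector is an
// assumed helper name.
static int example_DiagonalVector(const Epetra_CrsMatrix& A)
{
  Epetra_Vector diag(A.RowMap());                 // same distribution as the rows
  EPETRA_CHK_ERR(A.ExtractDiagonalCopy(diag));    // diag[i] = A(i,i), 0 if absent
  return 0;
}
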
1510 //==============================================================================
1511 int Epetra_CrsMatrix::ReplaceDiagonalValues(const Epetra_Vector & Diagonal) {
1512 
1513  if(!Filled())
1514  EPETRA_CHK_ERR(-1); // Can't replace diagonal unless matrix is filled (and in local index space)
1515  if(!RowMap().SameAs(Diagonal.Map()))
1516  EPETRA_CHK_ERR(-2); // Maps must be the same
1517 
1518  int ierr = 0;
1519  for(int i = 0; i < NumMyRows_; i++) {
1520  long long ii = GRID64(i);
1521  int NumEntries = Graph().NumMyIndices(i);
1522  int* Indices = Graph().Indices(i);
1523  double * targValues = Values(i);
1524  bool DiagMissing = true;
1525  for(int j = 0; j < NumEntries; j++) {
1526  if(ii == GCID64(Indices[j])) {
1527  targValues[j] = Diagonal[i];
1528  DiagMissing = false;
1529  break;
1530  }
1531  }
1532  if(DiagMissing)
1533  ierr = 1; // flag a warning error
1534  }
1535 
1536  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
1537  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
1538  NormFrob_ = -1.0;
1539 
1540  EPETRA_CHK_ERR(ierr);
1541 
1542  return(0);
1543 }
1544 
1545 //==========================================================================
1546 template<typename int_type>
1547 int Epetra_CrsMatrix::ExtractGlobalRowView(int_type Row, int & NumEntries, double *& values, int_type *& Indices) const
1548 {
1549  int ierr = Graph_.ExtractGlobalRowView(Row, NumEntries, Indices);
1550  if (ierr)
1551  EPETRA_CHK_ERR(ierr);
1552 
1553  EPETRA_CHK_ERR(ExtractGlobalRowView(Row, NumEntries, values));
1554  return(0);
1555 }
1556 
1557 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1558 int Epetra_CrsMatrix::ExtractGlobalRowView(int Row, int & NumEntries, double *& values, int *& Indices) const
1559 {
1560  if(RowMap().GlobalIndicesInt())
1561  return ExtractGlobalRowView<int>(Row, NumEntries, values, Indices);
1562  else
1563  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowView int version called for a matrix that is not int.", -1);
1564 }
1565 #endif
1566 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1567 int Epetra_CrsMatrix::ExtractGlobalRowView(long long Row, int & NumEntries, double *& values, long long *& Indices) const
1568 {
1569  if(RowMap().GlobalIndicesLongLong())
1570  return ExtractGlobalRowView<long long>(Row, NumEntries, values, Indices);
1571  else
1572  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowView long long version called for a matrix that is not long long.", -1);
1573 }
1574 #endif
1575 
1576 //==========================================================================
1577 int Epetra_CrsMatrix::ExtractMyRowView(int Row, int & NumEntries, double *& values, int *& Indices) const
1578 {
1579  int ierr = Graph_.ExtractMyRowView(Row, NumEntries, Indices);
1580  if (ierr)
1581  EPETRA_CHK_ERR(ierr);
1582 
1583  EPETRA_CHK_ERR(ExtractMyRowView(Row, NumEntries, values));
1584  return(0);
1585 }
1586 //==========================================================================
1587 template<typename int_type>
1588 int Epetra_CrsMatrix::ExtractGlobalRowView(int_type Row, int & NumEntries, double *& values) const
1589 {
1590  int Row0 = Graph_.RowMap().LID(Row); // Normalize row range
1591 
1592  EPETRA_CHK_ERR(ExtractMyRowView(Row0, NumEntries, values));
1593  return(0);
1594 }
1595 
1596 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
1597 int Epetra_CrsMatrix::ExtractGlobalRowView(int Row, int & NumEntries, double *& values) const
1598 {
1599  if(RowMap().GlobalIndicesInt())
1600  return ExtractGlobalRowView<int>(Row, NumEntries, values);
1601  else
1602  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowView int version called for a matrix that is not int.", -1);
1603 }
1604 #endif
1605 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
1606 int Epetra_CrsMatrix::ExtractGlobalRowView(long long Row, int & NumEntries, double *& values) const
1607 {
1608  if(RowMap().GlobalIndicesLongLong())
1609  return ExtractGlobalRowView<long long>(Row, NumEntries, values);
1610  else
1611  throw ReportError("Epetra_CrsMatrix::ExtractGlobalRowView long long version called for a matrix that is not long long.", -1);
1612 }
1613 #endif
1614 
1615 //==========================================================================
1616 int Epetra_CrsMatrix::ExtractMyRowView(int Row, int & NumEntries, double *& targValues) const
1617 {
1618  if (Row < 0 || Row >= NumMyRows_)
1619  EPETRA_CHK_ERR(-1); // Not in Row range
1620 
1621  NumEntries = Graph().NumMyIndices(Row);
1622 
1623  targValues = Values(Row);
1624 
1625  return(0);
1626 }
1627 
1628 //=============================================================================
1629 int Epetra_CrsMatrix::Solve(bool Upper, bool Trans, bool UnitDiagonal,
1630  const Epetra_Vector& x, Epetra_Vector& y) const
1631 {
1632 
1633 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
1634  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Solve(Upper,Trans,UnitDiag,x,y)");
1635 #endif
1636 
1637  //
1638  // This function finds y such that Ly = x or Uy = x or the transpose cases.
1639  //
1640 
1641  // First short-circuit to the pre-5.0 version if no storage optimization was performed
1642  if (!StorageOptimized() && !Graph().StorageOptimized()) {
1643  EPETRA_CHK_ERR(Solve1(Upper, Trans, UnitDiagonal, x, y));
1644  return(0);
1645  }
1646 
1647  if (!Filled()) {
1648  EPETRA_CHK_ERR(-1); // Matrix must be filled.
1649  }
1650 
1651  if ((Upper) && (!UpperTriangular()))
1652  EPETRA_CHK_ERR(-2);
1653  if ((!Upper) && (!LowerTriangular()))
1654  EPETRA_CHK_ERR(-3);
1655  if ((!UnitDiagonal) && (NoDiagonal()))
1656  EPETRA_CHK_ERR(-4); // If matrix has no diagonal, we must use UnitDiagonal
1657  if ((!UnitDiagonal) && (NumMyDiagonals()<NumMyRows_))
1658  EPETRA_CHK_ERR(-5); // Need each row to have a diagonal
1659 
1660  double *xp = (double*)x.Values();
1661  double *yp = (double*)y.Values();
1662 
1663 
1664  GeneralSV(Upper, Trans, UnitDiagonal, xp, yp);
1665 
1666  UpdateFlops(2 * NumGlobalNonzeros64());
1667  return(0);
1668 }
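//==============================================================================
// Illustrative sketch (not part of the Epetra source): Solve performs a local
// triangular solve, so the matrix must actually be lower or upper triangular
// and Filled(). The assumed helper example_LowerSolve solves L y = x with a
// unit diagonal.
static int example_LowerSolve(const Epetra_CrsMatrix& L,
                              const Epetra_Vector& x, Epetra_Vector& y)
{
  const bool upper = false, trans = false, unitDiag = true;
  EPETRA_CHK_ERR(L.Solve(upper, trans, unitDiag, x, y));
  return 0;
}
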
1669 
1670 //=============================================================================
1671 int Epetra_CrsMatrix::Solve(bool Upper, bool Trans, bool UnitDiagonal, const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {
1672 
1673 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
1674  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Solve(Upper,Trans,UnitDiag,X,Y)");
1675 #endif
1676 
1677  //
1678  // This function find Y such that LY = X or UY = X or the transpose cases.
1679  //
1680 
1681  // First short-circuit to the pre-5.0 version if no storage optimization was performed
1682  if (!StorageOptimized() && !Graph().StorageOptimized()) {
1683  EPETRA_CHK_ERR(Solve1(Upper, Trans, UnitDiagonal, X, Y));
1684  return(0);
1685  }
1686  if(!Filled())
1687  EPETRA_CHK_ERR(-1); // Matrix must be filled.
1688 
1689  if((Upper) && (!UpperTriangular()))
1690  EPETRA_CHK_ERR(-2);
1691  if((!Upper) && (!LowerTriangular()))
1692  EPETRA_CHK_ERR(-3);
1693  if((!UnitDiagonal) && (NoDiagonal()))
1694  EPETRA_CHK_ERR(-4); // If matrix has no diagonal, we must use UnitDiagonal
1695  if((!UnitDiagonal) && (NumMyDiagonals()<NumMyRows_))
1696  EPETRA_CHK_ERR(-5); // Need each row to have a diagonal
1697 
1698  double** Xp = (double**) X.Pointers();
1699  double** Yp = (double**) Y.Pointers();
1700  int LDX = X.ConstantStride() ? X.Stride() : 0;
1701  int LDY = Y.ConstantStride() ? Y.Stride() : 0;
1702  int NumVectors = X.NumVectors();
1703 
1704 
1705  // Do actual computation
1706  if (NumVectors==1)
1707  GeneralSV(Upper, Trans, UnitDiagonal, *Xp, *Yp);
1708  else
1709  GeneralSM(Upper, Trans, UnitDiagonal, Xp, LDX, Yp, LDY, NumVectors);
1710 
1711  UpdateFlops(2 * NumVectors * NumGlobalNonzeros64());
1712  return(0);
1713 }
1714 
1715 //=============================================================================
1716 int Epetra_CrsMatrix::InvRowSums(Epetra_Vector& x) const {
1717  //
1718  // Put inverse of the sum of absolute values of the ith row of A in x[i].
1719  //
1720 
1721  if (!Filled()) EPETRA_CHK_ERR(-1); // Matrix must be filled.
1722  int ierr = 0;
1723  int i, j;
1724  x.PutScalar(0.0); // Make sure we sum into a vector of zeros.
1725  double * xp = (double*)x.Values();
1726  if (Graph().RangeMap().SameAs(x.Map()) && Exporter() != 0) {
1727  Epetra_Vector x_tmp(RowMap());
1728  x_tmp.PutScalar(0.0);
1729  double * x_tmp_p = (double*)x_tmp.Values();
1730  for (i=0; i < NumMyRows_; i++) {
1731  int NumEntries = NumMyEntries(i);
1732  double * RowValues = Values(i);
1733  for (j=0; j < NumEntries; j++) x_tmp_p[i] += std::abs(RowValues[j]);
1734  }
1735  EPETRA_CHK_ERR(x.Export(x_tmp, *Exporter(), Add)); //Export partial row sums to x.
1736  int myLength = x.MyLength();
1737  for (i=0; i<myLength; i++) {
1738  if (xp[i]<Epetra_MinDouble) {
1739  if (xp[i]==0.0) ierr = 1; // Set error to 1 to signal that zero rowsum found (supercedes ierr = 2)
1740  else if (ierr!=1) ierr = 2;
1741  xp[i] = Epetra_MaxDouble;
1742  }
1743  else
1744  xp[i] = 1.0/xp[i];
1745  }
1746  }
1747  else if (Graph().RowMap().SameAs(x.Map())) {
1748  for (i=0; i < NumMyRows_; i++) {
1749  int NumEntries = NumMyEntries(i);
1750  double * RowValues = Values(i);
1751  double scale = 0.0;
1752  for (j=0; j < NumEntries; j++) scale += std::abs(RowValues[j]);
1753  if (scale<Epetra_MinDouble) {
1754  if (scale==0.0) ierr = 1; // Set error to 1 to signal that zero rowsum found (supercedes ierr = 2)
1755  else if (ierr!=1) ierr = 2;
1756  xp[i] = Epetra_MaxDouble;
1757  }
1758  else
1759  xp[i] = 1.0/scale;
1760  }
1761  }
1762  else { // x.Map different than both Graph().RowMap() and Graph().RangeMap()
1763  EPETRA_CHK_ERR(-2); // The map of x must be the RowMap or RangeMap of A.
1764  }
1766  EPETRA_CHK_ERR(ierr);
1767  return(0);
1768 }
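//==============================================================================
// Illustrative sketch (not part of the Epetra source): InvRowSums is commonly
// paired with LeftScale to row-equilibrate a matrix so every row has unit
// 1-norm. example_RowEquilibrate is an assumed helper name.
static int example_RowEquilibrate(Epetra_CrsMatrix& A)
{
  Epetra_Vector rowScale(A.RowMap());
  EPETRA_CHK_ERR(A.InvRowSums(rowScale));   // rowScale[i] = 1 / sum_j |A(i,j)|
  EPETRA_CHK_ERR(A.LeftScale(rowScale));    // A(i,j) <- rowScale[i] * A(i,j)
  return 0;
}
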
1769 
1770 //=============================================================================
1771 int Epetra_CrsMatrix::InvRowMaxs(Epetra_Vector& x) const {
1772  //
1773  // Put inverse of the max of absolute values of the ith row of A in x[i].
1774  //
1775 
1776  if (!Filled()) EPETRA_CHK_ERR(-1); // Matrix must be filled.
1777  int ierr = 0;
1778  int i, j;
1779  bool needExport = false;
1780  double * xp = (double*)x.Values();
1781  Epetra_Vector* x_tmp = 0;
1782  if (Graph().RangeMap().SameAs(x.Map())) {
1783  if (Exporter() != 0) {
1784  needExport = true; //Having this information later avoids a .SameAs
1785  x_tmp = new Epetra_Vector(RowMap()); // Create import vector if needed
1786  xp = (double*)x_tmp->Values();
1787  }
1788  }
1789  else if (!Graph().RowMap().SameAs(x.Map())) {
1790  EPETRA_CHK_ERR(-2); // The map of x must be the RowMap or RangeMap of A.
1791  }
1792  for (i=0; i < NumMyRows_; i++) {
1793  int NumEntries = NumMyEntries(i);
1794  double * RowValues = Values(i);
1795  double scale = 0.0;
1796  for (j=0; j < NumEntries; j++) scale = EPETRA_MAX(std::abs(RowValues[j]),scale);
1797  if (scale<Epetra_MinDouble) {
1798  if (scale==0.0) ierr = 1; // Set error to 1 to signal that a zero row max was found (supersedes ierr = 2)
1799  else if (ierr!=1) ierr = 2;
1800  xp[i] = Epetra_MaxDouble;
1801  }
1802  else
1803  xp[i] = 1.0/scale;
1804  }
1805  if(needExport) {
1806  x.PutScalar(0.0);
1807  EPETRA_CHK_ERR(x.Export(*x_tmp, *Exporter(), Insert)); // Fill x with values from temp vector
1808  delete x_tmp;
1809  }
1811  EPETRA_CHK_ERR(ierr);
1812  return(0);
1813 }
1814 
1815 //=============================================================================
1816 int Epetra_CrsMatrix::InvColSums(Epetra_Vector& x) const {
1817  //
1818  // Put inverse of the sum of absolute values of the jth column of A in x[j].
1819  //
1820 
1821  if(!Filled()) EPETRA_CHK_ERR(-1); // Matrix must be filled.
1822  int ierr = 0;
1823  int i, j;
1824  int MapNumMyElements = x.Map().NumMyElements();
1825  x.PutScalar(0.0); // Make sure we sum into a vector of zeros.
1826  double* xp = (double*)x.Values();
1827  if(Graph().DomainMap().SameAs(x.Map()) && Importer() != 0) {
1828  Epetra_Vector x_tmp(ColMap());
1829  x_tmp.PutScalar(0.0);
1830  double * x_tmp_p = (double*)x_tmp.Values();
1831  for(i = 0; i < NumMyRows_; i++) {
1832  int NumEntries = NumMyEntries(i);
1833  int* ColIndices = Graph().Indices(i);
1834  double* RowValues = Values(i);
1835  for(j = 0; j < NumEntries; j++)
1836  x_tmp_p[ColIndices[j]] += std::abs(RowValues[j]);
1837  }
1838  EPETRA_CHK_ERR(x.Export(x_tmp, *Importer(), Add)); // Fill x with partial column sums
1839  }
1840  else if(Graph().ColMap().SameAs(x.Map())) {
1841  for(i = 0; i < NumMyRows_; i++) {
1842  int NumEntries = NumMyEntries(i);
1843  int* ColIndices = Graph().Indices(i);
1844  double* RowValues = Values(i);
1845  for(j = 0; j < NumEntries; j++)
1846  xp[ColIndices[j]] += std::abs(RowValues[j]);
1847  }
1848  }
1849  else { //x.Map different than both Graph().ColMap() and Graph().DomainMap()
1850  EPETRA_CHK_ERR(-2); // x must have the same distribution as the domain of A
1851  }
1852 
1853  // Invert values, don't allow them to get too large
1854  for(i = 0; i < MapNumMyElements; i++) {
1855  double scale = xp[i];
1856  if(scale < Epetra_MinDouble) {
1857  if(scale == 0.0)
1858  ierr = 1; // Set error to 1 to signal that a zero column sum was found (supersedes ierr = 2)
1859  else if(ierr != 1)
1860  ierr = 2;
1861  xp[i] = Epetra_MaxDouble;
1862  }
1863  else
1864  xp[i] = 1.0 / scale;
1865  }
1866 
1868  EPETRA_CHK_ERR(ierr);
1869  return(0);
1870 }
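// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): column scaling
// built from InvColSums() and RightScale().  The matrix A and the helper name
// exampleColEquilibrate are hypothetical; A is assumed to be filled.
static int exampleColEquilibrate(Epetra_CrsMatrix& A) {
  Epetra_Vector invColSums(A.DomainMap());
  EPETRA_CHK_ERR(A.InvColSums(invColSums));  // invColSums[j] = 1 / sum_i |A(i,j)|
  EPETRA_CHK_ERR(A.RightScale(invColSums));  // each column of A now has unit 1-norm
  return 0;
}
// ---------------------------------------------------------------------------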
1871 
1872 //=============================================================================
1873 int Epetra_CrsMatrix::InvColMaxs(Epetra_Vector& x) const {
1874  //
1875  // Put inverse of the max of absolute values of the jth column of A in x[j].
1876  //
1877 
1878  if(!Filled()) EPETRA_CHK_ERR(-1); // Matrix must be filled.
1879  int ierr = 0;
1880  int i, j;
1881  int MapNumMyElements = x.Map().NumMyElements();
1882  x.PutScalar(0.0); // Start from a vector of zeros before taking the column maxes.
1883  double* xp = (double*)x.Values();
1884  if(Graph().DomainMap().SameAs(x.Map()) && Importer() != 0) {
1885  Epetra_Vector x_tmp(ColMap());
1886  x_tmp.PutScalar(0.0);
1887  double * x_tmp_p = (double*)x_tmp.Values();
1888  for(i = 0; i < NumMyRows_; i++) {
1889  int NumEntries = NumMyEntries(i);
1890  int* ColIndices = Graph().Indices(i);
1891  double* RowValues = Values(i);
1892  for(j = 0; j < NumEntries; j++)
1893  x_tmp_p[ColIndices[j]] = EPETRA_MAX(std::abs(RowValues[j]),x_tmp_p[ColIndices[j]]);
1894  }
1895  EPETRA_CHK_ERR(x.Export(x_tmp, *Importer(), AbsMax)); // Fill x with partial column maxes
1896  }
1897  else if(Graph().ColMap().SameAs(x.Map())) {
1898  for(i = 0; i < NumMyRows_; i++) {
1899  int NumEntries = NumMyEntries(i);
1900  int* ColIndices = Graph().Indices(i);
1901  double* RowValues = Values(i);
1902  for(j = 0; j < NumEntries; j++)
1903  xp[ColIndices[j]] = EPETRA_MAX(std::abs(RowValues[j]),xp[ColIndices[j]]);
1904  }
1905  }
1906  else { //x.Map different than both Graph().ColMap() and Graph().DomainMap()
1907  EPETRA_CHK_ERR(-2); // x must have the same distribution as the domain of A
1908  }
1909 
1910  // Invert values, don't allow them to get too large
1911  for(i = 0; i < MapNumMyElements; i++) {
1912  double scale = xp[i];
1913  if(scale < Epetra_MinDouble) {
1914  if(scale == 0.0)
1915  ierr = 1; // Set error to 1 to signal that a zero column max was found (supersedes ierr = 2)
1916  else if(ierr != 1)
1917  ierr = 2;
1918  xp[i] = Epetra_MaxDouble;
1919  }
1920  else
1921  xp[i] = 1.0 / scale;
1922  }
1923 
1925  EPETRA_CHK_ERR(ierr);
1926  return(0);
1927 }
1928 
1929 //=============================================================================
1930 int Epetra_CrsMatrix::LeftScale(const Epetra_Vector& x) {
1931  //
1932  // This function scales the ith row of A by x[i].
1933  //
1934 
1935  if(!Filled())
1936  EPETRA_CHK_ERR(-1); // Matrix must be filled.
1937  double* xp = 0;
1938  if(Graph().RangeMap().SameAs(x.Map()))
1939  // If we have a non-trivial exporter, we must import elements that are
1940  // permuted or are on other processors. (We will use the exporter to
1941  // perform the import.)
1942  if(Exporter() != 0) {
1943  UpdateExportVector(1);
1944  EPETRA_CHK_ERR(ExportVector_->Import(x, *Exporter(), Insert));
1945  xp = (double*) ExportVector_->Values();
1946  }
1947  else
1948  xp = (double*)x.Values();
1949  else if (Graph().RowMap().SameAs(x.Map()))
1950  xp = (double*)x.Values();
1951  else {
1952  EPETRA_CHK_ERR(-2); // The Map of x must be the RowMap or RangeMap of A.
1953  }
1954  int i, j;
1955 
1956  for(i = 0; i < NumMyRows_; i++) {
1957  int NumEntries = NumMyEntries(i);
1958  double* RowValues = Values(i);
1959  double scale = xp[i];
1960  for(j = 0; j < NumEntries; j++)
1961  RowValues[j] *= scale;
1962  }
1963  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
1964  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
1965  NormFrob_ = -1.0;
1966 
1968 
1969  return(0);
1970 }
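// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): LeftScale(x)
// replaces each stored entry A(i,j) by x[i]*A(i,j), i.e. it forms diag(x)*A in
// place.  Below is a minimal symmetric (Jacobi-style) scaling, assuming a
// filled, square matrix with a nonzero diagonal whose row, range and domain
// maps coincide; the names A and exampleSymmetricScale are hypothetical.
static int exampleSymmetricScale(Epetra_CrsMatrix& A) {
  Epetra_Vector d(A.RowMap());
  EPETRA_CHK_ERR(A.ExtractDiagonalCopy(d));    // d[i] = A(i,i)
  for (int i = 0; i < d.MyLength(); ++i)
    d[i] = 1.0 / std::sqrt(std::abs(d[i]));    // assumes A(i,i) != 0
  EPETRA_CHK_ERR(A.LeftScale(d));              // A := diag(d) * A
  EPETRA_CHK_ERR(A.RightScale(d));             // A := A * diag(d)
  return 0;
}
// ---------------------------------------------------------------------------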
1971 
1972 //=============================================================================
1973 int Epetra_CrsMatrix::RightScale(const Epetra_Vector& x) {
1974  //
1975  // This function scales the jth column of A by x[j].
1976  //
1977 
1978  if(!Filled())
1979  EPETRA_CHK_ERR(-1); // Matrix must be filled.
1980  double* xp = 0;
1981  if(Graph().DomainMap().SameAs(x.Map()))
1982  // If we have a non-trivial importer, we must import elements that are
1983  // permuted or are on other processors.
1984  if(Importer() != 0) {
1985  UpdateImportVector(1);
1986  EPETRA_CHK_ERR(ImportVector_->Import(x, *Importer(), Insert));
1987  xp = (double*) ImportVector_->Values();
1988  }
1989  else
1990  xp = (double*)x.Values();
1991  else if(Graph().ColMap().SameAs(x.Map()))
1992  xp = (double*)x.Values();
1993  else
1994  EPETRA_CHK_ERR(-2); // The Map of x must be the ColMap or DomainMap of A.
1995  int i, j;
1996 
1997  for(i = 0; i < NumMyRows_; i++) {
1998  int NumEntries = NumMyEntries(i);
1999  int* ColIndices = Graph().Indices(i);
2000  double* RowValues = Values(i);
2001  for(j = 0; j < NumEntries; j++)
2002  RowValues[j] *= xp[ColIndices[j]];
2003  }
2004  NormOne_ = -1.0; // Reset Norm so it will be recomputed.
2005  NormInf_ = -1.0; // Reset Norm so it will be recomputed.
2006  NormFrob_ = -1.0;
2007 
2009  return(0);
2010 }
2011 
2012 //=============================================================================
2013 double Epetra_CrsMatrix::NormInf() const {
2014 
2015 #if 0
2016  //
2017  // Commenting this section out disables caching, i.e.,
2018  // causes the norm to be computed each time NormInf is called.
2019  // See bug #1151 for a full discussion.
2020  //
2021  double MinNorm ;
2022  Comm().MinAll( &NormInf_, &MinNorm, 1 ) ;
2023 
2024  if( MinNorm >= 0.0)
2025  return(NormInf_);
2026 #endif
2027 
2028  if(!Filled())
2029  EPETRA_CHK_ERR(-1); // Matrix must be filled.
2030 
2031  Epetra_Vector x(RangeMap()); // Need temp vector for row sums
2032  double* xp = (double*)x.Values();
2033  Epetra_MultiVector* x_tmp = 0;
2034 
2035  // If we have a non-trivial exporter, we must export elements that are permuted or belong to other processors
2036  if(Exporter() != 0) {
2037  x_tmp = new Epetra_Vector(RowMap()); // Create temporary import vector if needed
2038  xp = (double*)x_tmp->Values();
2039  }
2040  int i, j;
2041 
2042  for(i = 0; i < NumMyRows_; i++) {
2043  xp[i] = 0.0;
2044  int NumEntries = NumMyEntries(i);
2045  double* RowValues = Values(i);
2046  for(j = 0; j < NumEntries; j++)
2047  xp[i] += std::abs(RowValues[j]);
2048  }
2049  if(Exporter() != 0) {
2050  x.PutScalar(0.0);
2051  EPETRA_CHK_ERR(x.Export(*x_tmp, *Exporter(), Add)); // Fill x with Values from temp vector
2052  }
2053  x.MaxValue(&NormInf_); // Find max
2054  if(x_tmp != 0)
2055  delete x_tmp;
2057  return(NormInf_);
2058 }
2059 //=============================================================================
2060 double Epetra_CrsMatrix::NormOne() const {
2061 
2062 #if 0
2063  //
2064  // Commenting this section out disables caching, i.e.,
2065  // causes the norm to be computed each time NormOne is called.
2066  // See bug #1151 for a full discussion.
2067  //
2068  double MinNorm ;
2069  Comm().MinAll( &NormOne_, &MinNorm, 1 ) ;
2070 
2071  if( MinNorm >= 0.0)
2072  return(NormOne_);
2073 #endif
2074 
2075  if(!Filled())
2076  EPETRA_CHK_ERR(-1); // Matrix must be filled.
2077 
2078  Epetra_Vector x(DomainMap()); // Need temp vector for column sums
2079 
2080  double* xp = (double*)x.Values();
2081  Epetra_MultiVector* x_tmp = 0;
2082  int NumCols = NumMyCols();
2083 
2084 
2085  // If we have a non-trivial importer, we must export elements that are permuted or belong to other processors
2086  if(Importer() != 0) {
2087  x_tmp = new Epetra_Vector(ColMap()); // Create temporary import vector if needed
2088  xp = (double*)x_tmp->Values();
2089  }
2090  int i, j;
2091 
2092  for(i = 0; i < NumCols; i++)
2093  xp[i] = 0.0;
2094 
2095  for(i = 0; i < NumMyRows_; i++) {
2096  int NumEntries = NumMyEntries(i);
2097  int* ColIndices = Graph().Indices(i);
2098  double* RowValues = Values(i);
2099  for(j = 0; j < NumEntries; j++)
2100  xp[ColIndices[j]] += std::abs(RowValues[j]);
2101  }
2102  if(Importer() != 0) {
2103  x.PutScalar(0.0);
2104  EPETRA_CHK_ERR(x.Export(*x_tmp, *Importer(), Add)); // Fill x with Values from temp vector
2105  }
2106  x.MaxValue(&NormOne_); // Find max
2107  if(x_tmp != 0)
2108  delete x_tmp;
2110  return(NormOne_);
2111 }
2112 //=============================================================================
2113 double Epetra_CrsMatrix::NormFrobenius() const {
2114 
2115 #if 0
2116  //
2117  // Commenting this section out disables caching, i.e.,
2118  // causes the norm to be computed each time NormFrobenius is called.
2119  // See bug #1151 for a full discussion.
2120  //
2121  double MinNorm ;
2122  Comm().MinAll( &NormFrob_, &MinNorm, 1 ) ;
2123 
2124  if( MinNorm >= 0.0)
2125  return(NormFrob_);
2126 #endif
2127 
2128  if(!Filled())
2129  EPETRA_CHK_ERR(-1); // Matrix must be filled.
2130 
2131  double local_sum = 0.0;
2132 
2133  for(int i = 0; i < NumMyRows_; i++) {
2134  int NumEntries = NumMyEntries(i);
2135  double* RowValues = Values(i);
2136  for(int j = 0; j < NumEntries; j++) {
2137  local_sum += RowValues[j]*RowValues[j];
2138  }
2139  }
2140 
2141  double global_sum = 0.0;
2142  Comm().SumAll(&local_sum, &global_sum, 1);
2143 
2144  NormFrob_ = std::sqrt(global_sum);
2145 
2147 
2148  return(NormFrob_);
2149 }
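// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): querying the three
// cached matrix norms computed by NormInf(), NormOne() and NormFrobenius()
// above.  The matrix A and the helper name examplePrintNorms are hypothetical;
// A must be filled.
static void examplePrintNorms(const Epetra_CrsMatrix& A, std::ostream& os) {
  os << "||A||_inf = " << A.NormInf()       << std::endl   // max absolute row sum
     << "||A||_1   = " << A.NormOne()       << std::endl   // max absolute column sum
     << "||A||_F   = " << A.NormFrobenius() << std::endl;  // sqrt of the global sum of squares
}
// ---------------------------------------------------------------------------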
2150 //=========================================================================
2151 int Epetra_CrsMatrix::CheckSizes(const Epetra_SrcDistObject & Source) {
2152  try {
2153  const Epetra_CrsMatrix & A = dynamic_cast<const Epetra_CrsMatrix &>(Source);
2154  if (!A.Graph().GlobalConstantsComputed()) EPETRA_CHK_ERR(-1); // Must have global constants to proceed
2155  }
2156  catch (...) {
2157  return(0); // No error at this point, object could be a RowMatrix
2158  }
2159  return(0);
2160 }
2161 
2162 //=========================================================================
2163 int Epetra_CrsMatrix::CopyAndPermute(const Epetra_SrcDistObject & Source,
2164  int NumSameIDs,
2165  int NumPermuteIDs,
2166  int * PermuteToLIDs,
2167  int *PermuteFromLIDs,
2168  const Epetra_OffsetIndex * Indexor ,
2169  Epetra_CombineMode CombineMode) {
2170 
2171  if(!Source.Map().GlobalIndicesTypeMatch(RowMap()))
2172  throw ReportError("Epetra_CrsMatrix::CopyAndPermute: Incoming global index type does not match the one for *this",-1);
2173 
2174  try {
2175  const Epetra_CrsMatrix & A = dynamic_cast<const Epetra_CrsMatrix &>(Source);
2176  EPETRA_CHK_ERR(CopyAndPermuteCrsMatrix(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs,
2177  PermuteFromLIDs,Indexor,CombineMode));
2178  }
2179  catch (...) {
2180  try {
2181  const Epetra_RowMatrix & A = dynamic_cast<const Epetra_RowMatrix &>(Source);
2182  EPETRA_CHK_ERR(CopyAndPermuteRowMatrix(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs,
2183  PermuteFromLIDs,Indexor,CombineMode));
2184  }
2185  catch (...) {
2186  EPETRA_CHK_ERR(-1); // Incompatible SrcDistObject
2187  }
2188  }
2189 
2190  return(0);
2191 }
2192 
2193 //=========================================================================
2194 template<typename int_type>
2195 int Epetra_CrsMatrix::TCopyAndPermuteCrsMatrix(const Epetra_CrsMatrix & A,
2196  int NumSameIDs,
2197  int NumPermuteIDs,
2198  int * PermuteToLIDs,
2199  int *PermuteFromLIDs,
2200  const Epetra_OffsetIndex * Indexor,
2201  Epetra_CombineMode CombineMode) {
2202 
2203  int i, ierr = -1;
2204 
2205  int_type Row;
2206  int NumEntries;
2207  int maxNumEntries = A.MaxNumEntries();
2208  int_type * Indices = 0;
2209  double * values = 0;
2210 
2211  if (maxNumEntries>0 && A.IndicesAreLocal() ) { //Need Temp Space
2212  Indices = new int_type[maxNumEntries];
2213  values = new double[maxNumEntries];
2214  }
2215 
2216  // Do copy first
2217  if (NumSameIDs>0) {
2218  if (A.IndicesAreLocal()) {
2219  if (StaticGraph() || IndicesAreLocal()) {
2220  if(Indexor) {
2221  for (i=0; i<NumSameIDs; i++) {
2222  Row = (int_type) GRID64(i);
2223  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(Row, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2224  if (CombineMode==Epetra_AddLocalAlso)
2225  ierr = SumIntoOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2226  else
2227  ierr = ReplaceOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2228  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2229  }
2230  }
2231  else {
2232  for (i=0; i<NumSameIDs; i++) {
2233  Row = (int_type) GRID64(i);
2234  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(Row, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2235  if (CombineMode==Epetra_AddLocalAlso)
2236  ierr = SumIntoGlobalValues(Row, NumEntries, values, Indices);
2237  else
2238  ierr = ReplaceGlobalValues(Row, NumEntries, values, Indices);
2239  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2240  }
2241  }
2242  }
2243  else {
2244  if(Indexor) {
2245  for (i=0; i<NumSameIDs; i++) {
2246  Row = (int_type) GRID64(i);
2247  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(Row, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2248  ierr = InsertOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2249  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2250  }
2251  }
2252  else {
2253  for (i=0; i<NumSameIDs; i++) {
2254  Row = (int_type) GRID64(i);
2255  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(Row, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2256  ierr = InsertGlobalValues(Row, NumEntries, values, Indices);
2257  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2258  }
2259  }
2260  }
2261  }
2262  else { // A.IndicesAreGlobal()
2263  if (StaticGraph() || IndicesAreLocal()) {
2264  if(Indexor) {
2265  for (i=0; i<NumSameIDs; i++) {
2266  Row = (int_type) GRID64(i);
2267  EPETRA_CHK_ERR(A.ExtractGlobalRowView(Row, NumEntries, values, Indices)); // Set pointers
2268  if (CombineMode==Epetra_AddLocalAlso)
2269  ierr = SumIntoOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2270  else
2271  ierr = ReplaceOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2272  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2273  }
2274  }
2275  else {
2276  for (i=0; i<NumSameIDs; i++) {
2277  Row = (int_type) GRID64(i);
2278  EPETRA_CHK_ERR(A.ExtractGlobalRowView(Row, NumEntries, values, Indices)); // Set pointers
2279  if (CombineMode==Epetra_AddLocalAlso)
2280  ierr = SumIntoGlobalValues(Row, NumEntries, values, Indices);
2281  else
2282  ierr = ReplaceGlobalValues(Row, NumEntries, values, Indices);
2283  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2284  }
2285  }
2286  }
2287  else {
2288  if(Indexor) {
2289  for (i=0; i<NumSameIDs; i++) {
2290  Row = (int_type) GRID64(i);
2291  EPETRA_CHK_ERR(A.ExtractGlobalRowView(Row, NumEntries, values, Indices)); // Set pointers
2292  ierr = InsertOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2293  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2294  }
2295  }
2296  else {
2297  for (i=0; i<NumSameIDs; i++) {
2298  Row = (int_type) GRID64(i);
2299  EPETRA_CHK_ERR(A.ExtractGlobalRowView(Row, NumEntries, values, Indices)); // Set pointers
2300  ierr = InsertGlobalValues(Row, NumEntries, values, Indices);
2301  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2302  }
2303  }
2304  }
2305  }
2306  }
2307 
2308  // Do local permutation next
2309  int_type FromRow, ToRow;
2310  if (NumPermuteIDs>0) {
2311  if (A.IndicesAreLocal()) {
2312  if (StaticGraph() || IndicesAreLocal()) {
2313  if(Indexor) {
2314  for (i=0; i<NumPermuteIDs; i++) {
2315  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2316  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2317  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(FromRow, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2318  if (CombineMode==Epetra_AddLocalAlso)
2319  ierr = SumIntoOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2320  else
2321  ierr = ReplaceOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2322  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2323  }
2324  }
2325  else {
2326  for (i=0; i<NumPermuteIDs; i++) {
2327  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2328  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2329  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(FromRow, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2330  if (CombineMode==Epetra_AddLocalAlso)
2331  ierr = SumIntoGlobalValues(ToRow, NumEntries, values, Indices);
2332  else
2333  ierr = ReplaceGlobalValues(ToRow, NumEntries, values, Indices);
2334  if( ierr<0 ) EPETRA_CHK_ERR(ierr);
2335  }
2336  }
2337  }
2338  else {
2339  if(Indexor) {
2340  for (i=0; i<NumPermuteIDs; i++) {
2341  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2342  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2343  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(FromRow, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2344  ierr = InsertOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2345  if (ierr<0) EPETRA_CHK_ERR(ierr);
2346  }
2347  }
2348  else {
2349  for (i=0; i<NumPermuteIDs; i++) {
2350  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2351  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2352  EPETRA_CHK_ERR(A.ExtractGlobalRowCopy(FromRow, maxNumEntries, NumEntries, values, Indices)); // Set pointers
2353  ierr = InsertGlobalValues(ToRow, NumEntries, values, Indices);
2354  if (ierr<0) EPETRA_CHK_ERR(ierr);
2355  }
2356  }
2357  }
2358  }
2359  else { // A.IndicesAreGlobal()
2360  if (StaticGraph() || IndicesAreLocal()) {
2361  if(Indexor) {
2362  for (i=0; i<NumPermuteIDs; i++) {
2363  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2364  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2365  EPETRA_CHK_ERR(A.ExtractGlobalRowView(FromRow, NumEntries, values, Indices)); // Set pointers
2366  if (CombineMode==Epetra_AddLocalAlso)
2367  ierr = SumIntoOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2368  else
2369  ierr = ReplaceOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2370  if (ierr<0) EPETRA_CHK_ERR(ierr);
2371  }
2372  }
2373  else {
2374  for (i=0; i<NumPermuteIDs; i++) {
2375  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2376  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2377  EPETRA_CHK_ERR(A.ExtractGlobalRowView(FromRow, NumEntries, values, Indices)); // Set pointers
2378  if (CombineMode==Epetra_AddLocalAlso)
2379  ierr = SumIntoGlobalValues(ToRow, NumEntries, values, Indices);
2380  else
2381  ierr = ReplaceGlobalValues(ToRow, NumEntries, values, Indices);
2382  if (ierr<0) EPETRA_CHK_ERR(ierr);
2383  }
2384  }
2385  }
2386  else {
2387  if(Indexor) {
2388  for (i=0; i<NumPermuteIDs; i++) {
2389  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2390  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2391  EPETRA_CHK_ERR(A.ExtractGlobalRowView(FromRow, NumEntries, values, Indices)); // Set pointers
2392  ierr = InsertOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2393  if (ierr<0) EPETRA_CHK_ERR(ierr);
2394  }
2395  }
2396  else {
2397  for (i=0; i<NumPermuteIDs; i++) {
2398  FromRow = (int_type) A.GRID64(PermuteFromLIDs[i]);
2399  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2400  EPETRA_CHK_ERR(A.ExtractGlobalRowView(FromRow, NumEntries, values, Indices)); // Set pointers
2401  ierr = InsertGlobalValues(ToRow, NumEntries, values, Indices);
2402  if (ierr<0) EPETRA_CHK_ERR(ierr);
2403  }
2404  }
2405  }
2406  }
2407  }
2408 
2409  if (maxNumEntries>0 && A.IndicesAreLocal() ) { // Delete Temp Space
2410  delete [] values;
2411  delete [] Indices;
2412  }
2413 
2414  return(0);
2415 }
2416 
2417 int Epetra_CrsMatrix::CopyAndPermuteCrsMatrix(const Epetra_CrsMatrix & A,
2418  int NumSameIDs,
2419  int NumPermuteIDs,
2420  int * PermuteToLIDs,
2421  int *PermuteFromLIDs,
2422  const Epetra_OffsetIndex * Indexor,
2423  Epetra_CombineMode CombineMode)
2424 {
2425  if(!A.RowMap().GlobalIndicesTypeMatch(RowMap()))
2426  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteCrsMatrix: Incoming global index type does not match the one for *this",-1);
2427 
2428  if(A.RowMap().GlobalIndicesInt())
2429 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
2430  return TCopyAndPermuteCrsMatrix<int>(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs, PermuteFromLIDs, Indexor, CombineMode);
2431 #else
2432  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteCrsMatrix: ERROR, GlobalIndicesInt but no API for it.",-1);
2433 #endif
2434 
2435  if(A.RowMap().GlobalIndicesLongLong())
2436 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
2437  return TCopyAndPermuteCrsMatrix<long long>(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs, PermuteFromLIDs, Indexor, CombineMode);
2438 #else
2439  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteCrsMatrix: ERROR, GlobalIndicesLongLong but no API for it.",-1);
2440 #endif
2441 
2442  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteCrsMatrix: Internal error, unable to determine global index type of maps", -1);
2443 }
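// ---------------------------------------------------------------------------
// Editorial note (not part of the Epetra sources): which template branch the
// dispatcher above selects depends only on how the maps were constructed.  A
// minimal sketch with hypothetical names; the long long map constructor is
// available only when 64-bit global indices are enabled in the build.
//
//   Epetra_SerialComm comm;
//   Epetra_Map map32(100, 0, comm);     // int GIDs       -> TCopyAndPermuteCrsMatrix<int>
//   #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
//   Epetra_Map map64(100LL, 0, comm);   // long long GIDs -> TCopyAndPermuteCrsMatrix<long long>
//   #endif
// ---------------------------------------------------------------------------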
2444 
2445 //=========================================================================
2446 template<typename int_type>
2447 int Epetra_CrsMatrix::TCopyAndPermuteRowMatrix(const Epetra_RowMatrix & A,
2448  int NumSameIDs,
2449  int NumPermuteIDs,
2450  int * PermuteToLIDs,
2451  int *PermuteFromLIDs,
2452  const Epetra_OffsetIndex * Indexor,
2453  Epetra_CombineMode /* CombineMode */) {
2454  int i, j, ierr;
2455 
2456  int_type Row, ToRow;
2457  int NumEntries;
2458  int FromRow;
2459  int maxNumEntries = A.MaxNumEntries();
2460  int_type * gen_Indices = 0; // gen = general (int or long long)
2461  int * int_Indices = 0;
2462  double * values = 0;
2463 
2464  if (maxNumEntries>0) {
2465  if(StaticGraph() || IndicesAreLocal() || Indexor) {
2466  int_Indices = new int[maxNumEntries];
2467  }
2468  else {
2469  gen_Indices = new int_type[maxNumEntries];
2470  int_Indices = reinterpret_cast<int*>(gen_Indices);
2471  }
2472 
2473  values = new double[maxNumEntries]; // Must extract values even though we discard them
2474  }
2475 
2476  const Epetra_Map & rowMap = A.RowMatrixRowMap();
2477  const Epetra_Map & colMap = A.RowMatrixColMap();
2478 
2479  // Do copy first
2480  if (NumSameIDs>0) {
2481  if (StaticGraph() || IndicesAreLocal()) {
2482  if( Indexor ) {
2483  for (i=0; i<NumSameIDs; i++) {
2484  Row = (int_type) GRID64(i);
2485  int AlocalRow = rowMap.LID(Row);
2486  EPETRA_CHK_ERR(A.ExtractMyRowCopy(AlocalRow, maxNumEntries, NumEntries, values, int_Indices));
2487  ierr = ReplaceOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2488  if (ierr<0) EPETRA_CHK_ERR(ierr);
2489  }
2490  }
2491  else {
2492  for (i=0; i<NumSameIDs; i++) {
2493  Row = (int_type) GRID64(i);
2494  int AlocalRow = rowMap.LID(Row);
2495  EPETRA_CHK_ERR(A.ExtractMyRowCopy(AlocalRow, maxNumEntries, NumEntries, values, int_Indices));
2496  for(j=0; j<NumEntries; ++j) {
2497  int_Indices[j] = LCID((int_type) colMap.GID64(int_Indices[j]));
2498  }
2499  ierr = ReplaceMyValues(i, NumEntries, values, int_Indices);
2500  if (ierr<0) EPETRA_CHK_ERR(ierr);
2501  }
2502  }
2503  }
2504  else {
2505  if( Indexor ) {
2506  for (i=0; i<NumSameIDs; i++) {
2507  EPETRA_CHK_ERR(A.ExtractMyRowCopy(i, maxNumEntries, NumEntries, values, int_Indices));
2508  Row = (int_type) GRID64(i);
2509  ierr = InsertOffsetValues(Row, NumEntries, values, Indexor->SameOffsets()[i]);
2510  if (ierr<0) EPETRA_CHK_ERR(ierr);
2511  }
2512  }
2513  else {
2514  for (i=0; i<NumSameIDs; i++) {
2515  EPETRA_CHK_ERR(A.ExtractMyRowCopy(i, maxNumEntries, NumEntries, values, int_Indices));
2516  Row = (int_type) GRID64(i);
2517 
2518  // convert to GIDs, start from right.
2519  for(j = NumEntries; j > 0;) {
2520  --j;
2521  gen_Indices[j] = (int_type) colMap.GID64(int_Indices[j]);
2522  }
2523  ierr = InsertGlobalValues(Row, NumEntries, values, gen_Indices);
2524  if (ierr<0) EPETRA_CHK_ERR(ierr);
2525  }
2526  }
2527  }
2528  }
2529 
2530  // Do local permutation next
2531  if (NumPermuteIDs>0) {
2532  if (StaticGraph() || IndicesAreLocal()) {
2533  if( Indexor ) {
2534  for (i=0; i<NumPermuteIDs; i++) {
2535  FromRow = PermuteFromLIDs[i];
2536  EPETRA_CHK_ERR(A.ExtractMyRowCopy(FromRow, maxNumEntries, NumEntries, values, int_Indices));
2537  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2538  ierr = ReplaceOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2539  if (ierr<0) EPETRA_CHK_ERR(ierr);
2540  }
2541  }
2542  else {
2543  for (i=0; i<NumPermuteIDs; i++) {
2544  FromRow = PermuteFromLIDs[i];
2545  EPETRA_CHK_ERR(A.ExtractMyRowCopy(FromRow, maxNumEntries, NumEntries, values, int_Indices));
2546  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2547  for(j=0; j<NumEntries; ++j) {
2548  int_Indices[j] = LCID((int_type) colMap.GID64(int_Indices[j]));
2549  }
2550  ierr = ReplaceMyValues((int) ToRow, NumEntries, values, int_Indices);
2551  if (ierr<0) EPETRA_CHK_ERR(ierr);
2552  }
2553  }
2554  }
2555  else {
2556  if( Indexor ) {
2557  for (i=0; i<NumPermuteIDs; i++) {
2558  FromRow = PermuteFromLIDs[i];
2559  EPETRA_CHK_ERR(A.ExtractMyRowCopy(FromRow, maxNumEntries, NumEntries, values, int_Indices));
2560  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2561  ierr = InsertOffsetValues(ToRow, NumEntries, values, Indexor->PermuteOffsets()[i]);
2562  if (ierr<0) EPETRA_CHK_ERR(ierr);
2563  }
2564  }
2565  else {
2566  for (i=0; i<NumPermuteIDs; i++) {
2567  FromRow = PermuteFromLIDs[i];
2568  EPETRA_CHK_ERR(A.ExtractMyRowCopy(FromRow, maxNumEntries, NumEntries, values, int_Indices));
2569 
2570  // convert to GIDs, start from right.
2571  for(j = NumEntries; j > 0;) {
2572  --j;
2573  gen_Indices[j] = (int_type) colMap.GID64(int_Indices[j]);
2574  }
2575 
2576  ToRow = (int_type) GRID64(PermuteToLIDs[i]);
2577  ierr = InsertGlobalValues(ToRow, NumEntries, values, gen_Indices);
2578  if (ierr<0) EPETRA_CHK_ERR(ierr);
2579  }
2580  }
2581  }
2582  }
2583 
2584  if (maxNumEntries>0) {
2585  delete [] values;
2586  if(StaticGraph() || IndicesAreLocal() || Indexor) {
2587  delete [] int_Indices;
2588  }
2589  else {
2590  delete [] gen_Indices;
2591  }
2592  }
2593  return(0);
2594 }
2595 
2596 int Epetra_CrsMatrix::CopyAndPermuteRowMatrix(const Epetra_RowMatrix & A,
2597  int NumSameIDs,
2598  int NumPermuteIDs,
2599  int * PermuteToLIDs,
2600  int *PermuteFromLIDs,
2601  const Epetra_OffsetIndex * Indexor,
2602  Epetra_CombineMode CombineMode)
2603 {
2604  if(!A.Map().GlobalIndicesTypeMatch(RowMap()))
2605  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteRowMatrix: Incoming global index type does not match the one for *this",-1);
2606 
2607  if(A.RowMatrixRowMap().GlobalIndicesInt())
2608 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
2609  return TCopyAndPermuteRowMatrix<int>(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs, PermuteFromLIDs, Indexor, CombineMode);
2610 #else
2611  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteRowMatrix: ERROR, GlobalIndicesInt but no API for it.",-1);
2612 #endif
2613 
2614  if(A.RowMatrixRowMap().GlobalIndicesLongLong())
2615 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
2616  return TCopyAndPermuteRowMatrix<long long>(A, NumSameIDs, NumPermuteIDs, PermuteToLIDs, PermuteFromLIDs, Indexor, CombineMode);
2617 #else
2618  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteRowMatrix: ERROR, GlobalIndicesLongLong but no API for it.",-1);
2619 #endif
2620 
2621  throw ReportError("Epetra_CrsMatrix::CopyAndPermuteRowMatrix: Internal error, unable to determine global index type of maps", -1);
2622 }
2623 
2624 //=========================================================================
2625 int Epetra_CrsMatrix::PackAndPrepare(const Epetra_SrcDistObject & Source,
2626  int NumExportIDs,
2627  int * ExportLIDs,
2628  int & LenExports,
2629  char *& Exports,
2630  int & SizeOfPacket,
2631  int * Sizes,
2632  bool & VarSizes,
2633  Epetra_Distributor & Distor)
2634 {
2635  if(!Source.Map().GlobalIndicesTypeMatch(RowMap()))
2636  throw ReportError("Epetra_CrsMatrix::PackAndPrepare: Incoming global index type does not match the one for *this",-1);
2637 
2638  (void)Distor;
2639  // Rest of work can be done using RowMatrix only
2640  const Epetra_RowMatrix & A = dynamic_cast<const Epetra_RowMatrix &>(Source);
2641 
2642  VarSizes = true; //enable variable block size data comm
2643 
2644  int TotalSendLength = 0;
2645  int * IntSizes = 0;
2646  if( NumExportIDs>0 ) IntSizes = new int[NumExportIDs];
2647 
2648  int SizeofIntType = -1;
2649  if(Source.Map().GlobalIndicesInt())
2650  SizeofIntType = (int)sizeof(int);
2651  else if(Source.Map().GlobalIndicesLongLong())
2652  SizeofIntType = (int)sizeof(long long);
2653  else
2654  throw ReportError("Epetra_CrsMatrix::PackAndPrepare: Unable to determine source global index type",-1);
2655 
2656  for( int i = 0; i < NumExportIDs; ++i )
2657  {
2658  int NumEntries;
2659  A.NumMyRowEntries( ExportLIDs[i], NumEntries );
2660  // Each row contributes NumEntries doubles and NumEntries+2 ints, packed interleaved.
2661  Sizes[i] = NumEntries;
2662  IntSizes[i] = 1 + (((NumEntries+2)*SizeofIntType)/(int)sizeof(double));
2663  TotalSendLength += (Sizes[i]+IntSizes[i]);
2664  }
2665 
2666  double * DoubleExports = 0;
2667  SizeOfPacket = (int)sizeof(double);
2668 
2669  //setup buffer locally for memory management by this object
2670  if( TotalSendLength*SizeOfPacket > LenExports )
2671  {
2672  if( LenExports > 0 ) delete [] Exports;
2673  LenExports = TotalSendLength*SizeOfPacket;
2674  DoubleExports = new double[TotalSendLength];
2675  for( int i = 0; i < TotalSendLength; ++i ) DoubleExports[i] = 0.0;
2676  Exports = (char *) DoubleExports;
2677  }
2678 
2679  int NumEntries;
2680  double * values;
2681  double * valptr, * dintptr;
2682 
2683  // Each segment of Exports holds one packed row of the source matrix, laid out as follows:
2684  // 1st int: GRID, the global row ID of the row in the source matrix
2685  // next int: NumEntries, the number of entries in the row
2686  // next NumEntries ints: the global column indices for the row; the int block is padded out to whole doubles and is followed by NumEntries doubles holding the row's values.
2687 
2688  const Epetra_Map & rowMap = A.RowMatrixRowMap();
2689  const Epetra_Map & colMap = A.RowMatrixColMap();
2690 
2691  if( NumExportIDs > 0 )
2692  {
2693  if(Source.Map().GlobalIndicesInt()) {
2694  int * Indices;
2695  int FromRow;
2696  int * intptr;
2697 
2698  int maxNumEntries = A.MaxNumEntries();
2699  dintptr = (double *) Exports;
2700  valptr = dintptr + IntSizes[0];
2701  intptr = (int *) dintptr;
2702  for (int i=0; i<NumExportIDs; i++)
2703  {
2704  FromRow = (int) rowMap.GID64(ExportLIDs[i]);
2705  intptr[0] = FromRow;
2706  values = valptr;
2707  Indices = intptr + 2;
2708  EPETRA_CHK_ERR(A.ExtractMyRowCopy(ExportLIDs[i], maxNumEntries, NumEntries, values, Indices));
2709  for (int j=0; j<NumEntries; j++) Indices[j] = (int) colMap.GID64(Indices[j]); // convert to GIDs
2710  intptr[1] = NumEntries; // Load second slot of segment
2711  if( i < (NumExportIDs-1) )
2712  {
2713  dintptr += (IntSizes[i]+Sizes[i]);
2714  valptr = dintptr + IntSizes[i+1];
2715  intptr = (int *) dintptr;
2716  }
2717  }
2718  }
2719  else if(Source.Map().GlobalIndicesLongLong()) {
2720  long long * LL_Indices;
2721  long long FromRow;
2722  long long * LLptr;
2723 
2724  int maxNumEntries = A.MaxNumEntries();
2725  dintptr = (double *) Exports;
2726  valptr = dintptr + IntSizes[0];
2727  LLptr = (long long *) dintptr;
2728  for (int i=0; i<NumExportIDs; i++)
2729  {
2730  FromRow = rowMap.GID64(ExportLIDs[i]);
2731  LLptr[0] = FromRow;
2732  values = valptr;
2733  LL_Indices = LLptr + 2;
2734  int * int_indices = reinterpret_cast<int*>(LL_Indices);
2735  EPETRA_CHK_ERR(A.ExtractMyRowCopy(ExportLIDs[i], maxNumEntries, NumEntries, values, int_indices));
2736 
2737  // convert to GIDs, start from right.
2738  for(int j = NumEntries; j > 0;) {
2739  --j;
2740  LL_Indices[j] = colMap.GID64(int_indices[j]);
2741  }
2742 
2743  LLptr[1] = NumEntries; // Load second slot of segment
2744  if( i < (NumExportIDs-1) )
2745  {
2746  dintptr += (IntSizes[i]+Sizes[i]);
2747  valptr = dintptr + IntSizes[i+1];
2748  LLptr = (long long *) dintptr;
2749  }
2750  }
2751  }
2752 
2753  for( int i = 0; i < NumExportIDs; ++i )
2754  Sizes[i] += IntSizes[i];
2755  }
2756 
2757  if( IntSizes ) delete [] IntSizes;
2758 
2759  return(0);
2760 }
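// ---------------------------------------------------------------------------
// Editorial worked example (not part of the Epetra sources): layout of one
// packed export segment produced above, for a row with GID 42 and 3 entries,
// assuming 32-bit global indices (sizeof(int)==4, sizeof(double)==8):
//
//   IntSizes[i] = 1 + ((3+2)*4)/8 = 3   doubles reserved for the integer header
//   Sizes[i]    = 3                     doubles holding the row's values
//
//   header  : [ 42 | 3 | c0 c1 c2 | pad ]   <- packed into 3 doubles
//   payload : [ v0 | v1 | v2 ]              <- the 3 values of the row
//
// TUnpackAndCombine() recomputes IntSize from NumEntries and walks the
// receive buffer with the same arithmetic.
// ---------------------------------------------------------------------------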
2761 
2762 //=========================================================================
2763 template<typename int_type>
2764 int Epetra_CrsMatrix::TUnpackAndCombine(const Epetra_SrcDistObject & Source,
2765  int NumImportIDs,
2766  int * ImportLIDs,
2767  int LenImports,
2768  char * Imports,
2769  int & SizeOfPacket,
2770  Epetra_Distributor & Distor,
2771  Epetra_CombineMode CombineMode,
2772  const Epetra_OffsetIndex * Indexor )
2773 {
2774  (void)Source;
2775  (void)LenImports;
2776  (void)SizeOfPacket;
2777  (void)Distor;
2778  if (NumImportIDs<=0) return(0);
2779 
2780  if ( CombineMode != Add
2781  && CombineMode != Insert
2782  && CombineMode != Zero )
2783  EPETRA_CHK_ERR(-1); //Unsupported CombineMode, defaults to Zero
2784 
2785  int NumEntries;
2786  int_type * Indices;
2787  double * values;
2788  int_type ToRow;
2789  int i, ierr;
2790  int IntSize;
2791 
2792  double * valptr, *dintptr;
2793  int_type * intptr;
2794 
2795  // Each segment of Imports holds one packed row of the source matrix, laid out as follows:
2796  // 1st int: GRID, the global row ID of the row in the source matrix
2797  // next int: NumEntries, the number of entries in the row
2798  // next NumEntries ints: the global column indices for the row; the int block is padded out to whole doubles and is followed by NumEntries doubles holding the row's values.
2799 
2800  dintptr = (double *) Imports;
2801  intptr = (int_type *) dintptr;
2802  NumEntries = (int) intptr[1];
2803  IntSize = 1 + (((NumEntries+2)*(int)sizeof(int_type))/(int)sizeof(double));
2804  valptr = dintptr + IntSize;
2805 
2806  for (i=0; i<NumImportIDs; i++)
2807  {
2808  ToRow = (int_type) GRID64(ImportLIDs[i]);
2809  assert((intptr[0])==ToRow); // Sanity check
2810  values = valptr;
2811  Indices = intptr + 2;
2812 
2813  if (CombineMode==Add) {
2814  if (StaticGraph() || IndicesAreLocal()) {
2815  if( Indexor )
2816  ierr = SumIntoOffsetValues(ToRow, NumEntries, values, Indexor->RemoteOffsets()[i]);
2817  else
2818  ierr = SumIntoGlobalValues(ToRow, NumEntries, values, Indices);
2819  }
2820  else {
2821  if( Indexor )
2822  ierr = InsertOffsetValues(ToRow, NumEntries, values, Indexor->RemoteOffsets()[i]);
2823  else
2824  ierr = InsertGlobalValues(ToRow, NumEntries, values, Indices);
2825  }
2826  if (ierr<0) EPETRA_CHK_ERR(ierr);
2827  }
2828  else if (CombineMode==Insert) {
2829  if (StaticGraph() || IndicesAreLocal()) {
2830  if( Indexor )
2831  ierr = ReplaceOffsetValues(ToRow, NumEntries, values, Indexor->RemoteOffsets()[i]);
2832  else
2833  ierr = ReplaceGlobalValues(ToRow, NumEntries, values, Indices);
2834  }
2835  else {
2836  if( Indexor )
2837  ierr = InsertOffsetValues(ToRow, NumEntries, values, Indexor->RemoteOffsets()[i]);
2838  else
2839  ierr = InsertGlobalValues(ToRow, NumEntries, values, Indices);
2840  }
2841  if (ierr<0) EPETRA_CHK_ERR(ierr);
2842  }
2843 
2844  if( i < (NumImportIDs-1) )
2845  {
2846  dintptr += IntSize + NumEntries;
2847  intptr = (int_type *) dintptr;
2848  NumEntries = (int) intptr[1];
2849  IntSize = 1 + (((NumEntries+2)*(int)sizeof(int_type))/(int)sizeof(double));
2850  valptr = dintptr + IntSize;
2851  }
2852  }
2853 
2854  return(0);
2855 }
2856 
2857 int Epetra_CrsMatrix::UnpackAndCombine(const Epetra_SrcDistObject & Source,
2858  int NumImportIDs,
2859  int * ImportLIDs,
2860  int LenImports,
2861  char * Imports,
2862  int & SizeOfPacket,
2863  Epetra_Distributor & Distor,
2864  Epetra_CombineMode CombineMode,
2865  const Epetra_OffsetIndex * Indexor )
2866 {
2867  if(!Source.Map().GlobalIndicesTypeMatch(RowMap()))
2868  throw ReportError("Epetra_CrsMatrix::UnpackAndCombine: Incoming global index type does not match the one for *this",-1);
2869 
2870  if(Source.Map().GlobalIndicesInt())
2871 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
2872  return TUnpackAndCombine<int>(Source, NumImportIDs, ImportLIDs, LenImports,
2873  Imports, SizeOfPacket, Distor, CombineMode, Indexor);
2874 #else
2875  throw ReportError("Epetra_CrsMatrix::UnpackAndCombine: ERROR, GlobalIndicesInt but no API for it.",-1);
2876 #endif
2877 
2878  if(Source.Map().GlobalIndicesLongLong())
2879 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
2880  return TUnpackAndCombine<long long>(Source, NumImportIDs, ImportLIDs, LenImports,
2881  Imports, SizeOfPacket, Distor, CombineMode, Indexor);
2882 #else
2883  throw ReportError("Epetra_CrsMatrix::UnpackAndCombine: ERROR, GlobalIndicesLongLong but no API for it.",-1);
2884 #endif
2885 
2886  throw ReportError("Epetra_CrsMatrix::UnpackAndCombine: Internal error, unable to determine global index type of maps", -1);
2887 }
2888 
2889 //=========================================================================
2890 
2891 void Epetra_CrsMatrix::Print(std::ostream& os) const {
2892  int MyPID = RowMap().Comm().MyPID();
2893  int NumProc = RowMap().Comm().NumProc();
2894 
2895  for (int iproc=0; iproc < NumProc; iproc++) {
2896  if (MyPID==iproc) {
2897  /* const Epetra_fmtflags olda = os.setf(ios::right,ios::adjustfield);
2898  const Epetra_fmtflags oldf = os.setf(ios::scientific,ios::floatfield);
2899  const int oldp = os.precision(12); */
2900  if (MyPID==0) {
2901  os << "\nNumber of Global Rows = "; os << NumGlobalRows64(); os << std::endl;
2902  os << "Number of Global Cols = "; os << NumGlobalCols64(); os << std::endl;
2903  os << "Number of Global Diagonals = "; os << NumGlobalDiagonals64(); os << std::endl;
2904  os << "Number of Global Nonzeros = "; os << NumGlobalNonzeros64(); os << std::endl;
2905  os << "Global Maximum Num Entries = "; os << GlobalMaxNumEntries(); os << std::endl;
2906  if (LowerTriangular()) { os << " ** Matrix is Lower Triangular **"; os << std::endl; }
2907  if (UpperTriangular()) { os << " ** Matrix is Upper Triangular **"; os << std::endl; }
2908  if (NoDiagonal()) { os << " ** Matrix has no diagonal **"; os << std::endl; os << std::endl; }
2909  }
2910 
2911  os << "\nNumber of My Rows = "; os << NumMyRows(); os << std::endl;
2912  os << "Number of My Cols = "; os << NumMyCols(); os << std::endl;
2913  os << "Number of My Diagonals = "; os << NumMyDiagonals(); os << std::endl;
2914  os << "Number of My Nonzeros = "; os << NumMyNonzeros(); os << std::endl;
2915  os << "My Maximum Num Entries = "; os << MaxNumEntries(); os << std::endl; os << std::endl;
2916 
2917  os << std::flush;
2918 
2919  // Reset os flags
2920 
2921  /* os.setf(olda,ios::adjustfield);
2922  os.setf(oldf,ios::floatfield);
2923  os.precision(oldp); */
2924  }
2925  // Do a few global ops to give I/O a chance to complete
2926  Comm().Barrier();
2927  Comm().Barrier();
2928  Comm().Barrier();
2929  }
2930 
2931  {for (int iproc=0; iproc < NumProc; iproc++) {
2932  if (MyPID==iproc) {
2933  int NumMyRows1 = NumMyRows();
2934  int MaxNumIndices = MaxNumEntries();
2935 
2936  int * Indices_int = 0;
2937  long long * Indices_LL = 0;
2938  if(RowMap().GlobalIndicesInt()) {
2939  Indices_int = new int[MaxNumIndices];
2940  }
2941  else if(RowMap().GlobalIndicesLongLong()) {
2942  Indices_LL = new long long[MaxNumIndices];
2943  }
2944  else
2945  throw ReportError("Epetra_CrsGraph::Print: Unable to determine source global index type",-1);
2946 
2947  double * values = new double[MaxNumIndices];
2948 #if !defined(EPETRA_NO_32BIT_GLOBAL_INDICES) || !defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
2949  int NumIndices;
2950  int j;
2951 #endif
2952  int i;
2953 
2954  if (MyPID==0) {
2955  os.width(8);
2956  os << " Processor ";
2957  os.width(10);
2958  os << " Row Index ";
2959  os.width(10);
2960  os << " Col Index ";
2961  os.width(20);
2962  os << " Value ";
2963  os << std::endl;
2964  }
2965  for (i=0; i<NumMyRows1; i++) {
2966  if(RowMap().GlobalIndicesInt()) {
2967 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
2968  int Row = (int) GRID64(i); // Get global row number
2969  ExtractGlobalRowCopy(Row, MaxNumIndices, NumIndices, values, Indices_int);
2970 
2971  for (j = 0; j < NumIndices ; j++) {
2972  os.width(8);
2973  os << MyPID ; os << " ";
2974  os.width(10);
2975  os << Row ; os << " ";
2976  os.width(10);
2977  os << Indices_int[j]; os << " ";
2978  os.width(20);
2979  os << values[j]; os << " ";
2980  os << std::endl;
2981  }
2982 #else
2983  throw ReportError("Epetra_CrsMatrix::Print: ERROR, GlobalIndicesInt but no API for it.",-1);
2984 #endif
2985  }
2986  else if(RowMap().GlobalIndicesLongLong()) {
2987 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
2988  long long Row = GRID64(i); // Get global row number
2989  ExtractGlobalRowCopy(Row, MaxNumIndices, NumIndices, values, Indices_LL);
2990 
2991  for (j = 0; j < NumIndices ; j++) {
2992  os.width(8);
2993  os << MyPID ; os << " ";
2994  os.width(10);
2995  os << Row ; os << " ";
2996  os.width(10);
2997  os << Indices_LL[j]; os << " ";
2998  os.width(20);
2999  os << values[j]; os << " ";
3000  os << std::endl;
3001  }
3002 #else
3003  throw ReportError("Epetra_CrsMatrix::Print: ERROR, GlobalIndicesLongLong but no API for it.",-1);
3004 #endif
3005  }
3006  }
3007 
3008  if(RowMap().GlobalIndicesInt()) {
3009  delete [] Indices_int;
3010  }
3011  else if(RowMap().GlobalIndicesLongLong()) {
3012  delete [] Indices_LL;
3013  }
3014  delete [] values;
3015 
3016  os << std::flush;
3017 
3018  }
3019  // Do a few global ops to give I/O a chance to complete
3020  RowMap().Comm().Barrier();
3021  RowMap().Comm().Barrier();
3022  RowMap().Comm().Barrier();
3023  }}
3024 
3025  return;
3026 }
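// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): Print() writes the
// global and per-processor summaries and then every stored entry, one
// processor at a time.  The matrix A and the helper name examplePrintMatrix
// are hypothetical.
static void examplePrintMatrix(const Epetra_CrsMatrix& A, std::ostream& os) {
  A.Print(os);   // summary information followed by one line per stored entry
}
// ---------------------------------------------------------------------------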
3027 //=============================================================================
3028 int Epetra_CrsMatrix::Multiply(bool TransA, const Epetra_Vector& x, Epetra_Vector& y) const {
3029 
3030 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
3031  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Multiply(TransA,x,y)");
3032 #endif
3033 
3034  //
3035  // This function forms the product y = A * x or y = A' * x
3036  //
3037 
3038  if(!Filled())
3039  EPETRA_CHK_ERR(-1); // Matrix must be filled.
3040 
3041  double* xp = (double*) x.Values();
3042  double* yp = (double*) y.Values();
3043 
3044  Epetra_Vector * xcopy = 0;
3045  if (&x==&y && Importer()==0 && Exporter()==0) {
3046  xcopy = new Epetra_Vector(x);
3047  xp = (double *) xcopy->Values();
3048  }
3049  UpdateImportVector(1); // Refresh import and output vectors if needed
3050  UpdateExportVector(1);
3051 
3052  if(!TransA) {
3053 
3054  // If we have a non-trivial importer, we must import elements that are permuted or are on other processors
3055  if(Importer() != 0) {
3056  EPETRA_CHK_ERR(ImportVector_->Import(x, *Importer(), Insert));
3057  xp = (double*) ImportVector_->Values();
3058  }
3059 
3060  // If we have a non-trivial exporter, we must export elements that are permuted or belong to other processors
3061  if(Exporter() != 0) yp = (double*) ExportVector_->Values();
3062 
3063  // Do actual computation
3064  GeneralMV(xp, yp);
3065 
3066  if(Exporter() != 0) {
3067  y.PutScalar(0.0); // Make sure target is zero
3068  EPETRA_CHK_ERR(y.Export(*ExportVector_, *Exporter(), Add)); // Fill y with Values from export vector
3069  }
3070  // Handle case of rangemap being a local replicated map
3071  if (!Graph().RangeMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(y.Reduce());
3072  }
3073 
3074  else { // Transpose operation
3075 
3076  // If we have a non-trivial exporter, we must import elements that are permuted or are on other processors
3077  if(Exporter() != 0) {
3078  EPETRA_CHK_ERR(ExportVector_->Import(x, *Exporter(), Insert));
3079  xp = (double*) ExportVector_->Values();
3080  }
3081 
3082  // If we have a non-trivial importer, we must export elements that are permuted or belong to other processors
3083  if(Importer() != 0) yp = (double*) ImportVector_->Values();
3084 
3085  // Do actual computation
3086  GeneralMTV(xp, yp);
3087 
3088  if(Importer() != 0) {
3089  y.PutScalar(0.0); // Make sure target is zero
3090  EPETRA_CHK_ERR(y.Export(*ImportVector_, *Importer(), Add)); // Fill y with values from the import vector
3091  }
3092  // Handle case of domainmap being a local replicated map
3093  if (!Graph().DomainMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(y.Reduce());
3094  }
3095 
3096 
3097  UpdateFlops(2 * NumGlobalNonzeros64());
3098  if (xcopy!=0) {
3099  delete xcopy;
3100  EPETRA_CHK_ERR(1); // Return positive code to alert the user about needing extra copy of x
3101  return(1);
3102  }
3103  return(0);
3104 }
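// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): sparse matrix-vector
// product with the routine above.  The matrix A and the helper name
// exampleMatVec are hypothetical; A must be filled.  x lives in the domain map
// and y in the range map, which is what the Import/Export logic above expects.
static int exampleMatVec(const Epetra_CrsMatrix& A) {
  Epetra_Vector x(A.DomainMap());
  Epetra_Vector y(A.RangeMap());
  x.Random();
  EPETRA_CHK_ERR(A.Multiply(false /*TransA*/, x, y));  // y = A * x
  return 0;
}
// ---------------------------------------------------------------------------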
3105 
3106 //=============================================================================
3107 int Epetra_CrsMatrix::Multiply(bool TransA, const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {
3108 
3109 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
3110  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Multiply(TransA,X,Y)");
3111 #endif
3112 
3113 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3114  Teuchos::RCP<Teuchos::FancyOStream>
3115  out = Teuchos::VerboseObjectBase::getDefaultOStream();
3116  Teuchos::OSTab tab(out);
3117  if(Epetra_CrsMatrixTraceDumpMultiply) {
3118  *out << std::boolalpha;
3119  *out << "\nEntering Epetra_CrsMatrix::Multiply("<<TransA<<",X,Y) ...\n";
3120  if(!TransA) {
3121  *out << "\nDomainMap =\n";
3122  this->DomainMap().Print(Teuchos::OSTab(out).o());
3123  }
3124  else {
3125  *out << "\nRangeMap =\n";
3126  this->RangeMap().Print(Teuchos::OSTab(out).o());
3127  }
3128  *out << "\nInitial input X with " << ( TransA ? "RangeMap" : "DomainMap" ) << " =\n\n";
3129  X.Print(Teuchos::OSTab(out).o());
3130  }
3131 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3132 
3133  //
3134  // This function forms the product Y = A * X or Y = A' * X
3135  //
3136  if(!Filled()) {
3137  EPETRA_CHK_ERR(-1); // Matrix must be filled.
3138  }
3139 
3140  int NumVectors = X.NumVectors();
3141  if (NumVectors!=Y.NumVectors()) {
3142  EPETRA_CHK_ERR(-2); // Need same number of vectors in each MV
3143  }
3144 
3145  double** Xp = (double**) X.Pointers();
3146  double** Yp = (double**) Y.Pointers();
3147 
3148  int LDX = X.ConstantStride() ? X.Stride() : 0;
3149  int LDY = Y.ConstantStride() ? Y.Stride() : 0;
3150 
3151  Epetra_MultiVector * Xcopy = 0;
3152  if (&X==&Y && Importer()==0 && Exporter()==0) {
3153  Xcopy = new Epetra_MultiVector(X);
3154  Xp = (double **) Xcopy->Pointers();
3155  LDX = Xcopy->ConstantStride() ? Xcopy->Stride() : 0;
3156  }
3157  UpdateImportVector(NumVectors); // Make sure Import and Export Vectors are compatible
3158  UpdateExportVector(NumVectors);
3159 
3160  if (!TransA) {
3161 
3162  // If we have a non-trivial importer, we must import elements that are permuted or are on other processors
3163  if (Importer()!=0) {
3164  EPETRA_CHK_ERR(ImportVector_->Import(X, *Importer(), Insert));
3165  Xp = (double**)ImportVector_->Pointers();
3167 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3168  if(Epetra_CrsMatrixTraceDumpMultiply) {
3169  *out << "\nColMap =\n";
3170  this->ColMap().Print(Teuchos::OSTab(out).o());
3171  *out << "\nX after import from DomainMap to ColMap =\n\n";
3172  ImportVector_->Print(Teuchos::OSTab(out).o());
3173  }
3174 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3175  }
3176 
3177  // If we have a non-trivial exporter, we must export elements that are permuted or belong to other processors
3178  if (Exporter()!=0) {
3179  Yp = (double**)ExportVector_->Pointers();
3181  }
3182 
3183  // Do actual computation
3184  if (NumVectors==1)
3185  GeneralMV(*Xp, *Yp);
3186  else
3187  GeneralMM(Xp, LDX, Yp, LDY, NumVectors);
3188 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3189  if(Epetra_CrsMatrixTraceDumpMultiply) {
3190  *out << "\nRowMap =\n";
3191  this->RowMap().Print(Teuchos::OSTab(out).o());
3192  *out << "\nY after local mat-vec where Y has RowMap =\n\n";
3193  if(Exporter()!=0)
3194  ExportVector_->Print(Teuchos::OSTab(out).o());
3195  else
3196  Y.Print(Teuchos::OSTab(out).o());
3197  }
3198 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3199  if (Exporter()!=0) {
3200  Y.PutScalar(0.0); // Make sure target is zero
3201  Y.Export(*ExportVector_, *Exporter(), Add); // Fill Y with Values from export vector
3202 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3203  if(Epetra_CrsMatrixTraceDumpMultiply) {
3204  *out << "\nRangeMap =\n";
3205  this->RangeMap().Print(Teuchos::OSTab(out).o());
3206  *out << "\nY after export from RowMap to RangeMap = \n\n";
3207  Y.Print(Teuchos::OSTab(out).o());
3208  }
3209 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3210  }
3211  // Handle case of rangemap being a local replicated map
3212  if (!Graph().RangeMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(Y.Reduce());
3213  }
3214  else { // Transpose operation
3215 
3216  // If we have a non-trivial exporter, we must import elements that are permuted or are on other processors
3217 
3218  if (Exporter()!=0) {
3219  EPETRA_CHK_ERR(ExportVector_->Import(X, *Exporter(), Insert));
3220  Xp = (double**)ExportVector_->Pointers();
3222 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3223  if(Epetra_CrsMatrixTraceDumpMultiply) {
3224  *out << "\nRowMap =\n";
3225  this->RowMap().Print(Teuchos::OSTab(out).o());
3226  *out << "\nX after import from RangeMap to RowMap =\n\n";
3227  ExportVector_->Print(Teuchos::OSTab(out).o());
3228  }
3229 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3230  }
3231 
3232  // If we have a non-trivial importer, we must export elements that are permuted or belong to other processors
3233  if (Importer()!=0) {
3234  Yp = (double**)ImportVector_->Pointers();
3236  }
3237 
3238  // Do actual computation
3239  if (NumVectors==1)
3240  GeneralMTV(*Xp, *Yp);
3241  else
3242  GeneralMTM(Xp, LDX, Yp, LDY, NumVectors);
3243 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3244  if(Epetra_CrsMatrixTraceDumpMultiply) {
3245  *out << "\nColMap =\n";
3246  this->ColMap().Print(Teuchos::OSTab(out).o());
3247  *out << "\nY after local transpose mat-vec where Y has ColMap =\n\n";
3248  if(Importer()!=0)
3249  ImportVector_->Print(Teuchos::OSTab(out).o());
3250  else
3251  Y.Print(Teuchos::OSTab(out).o());
3252  }
3253 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3254  if (Importer()!=0) {
3255  Y.PutScalar(0.0); // Make sure target is zero
3256  EPETRA_CHK_ERR(Y.Export(*ImportVector_, *Importer(), Add)); // Fill Y with values from the import vector
3257 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3258  if(Epetra_CrsMatrixTraceDumpMultiply) {
3259  *out << "\nDomainMap =\n";
3260  this->DomainMap().Print(Teuchos::OSTab(out).o());
3261  *out << "\nY after export from ColMap to DomainMap =\n\n";
3262  Y.Print(Teuchos::OSTab(out).o());
3263  }
3264 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3265  }
3266  // Handle case of domainmap being a local replicated map
3267  if (!Graph().DomainMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(Y.Reduce());
3268  }
3269 
3270  UpdateFlops(2*NumVectors*NumGlobalNonzeros64());
3271  if (Xcopy!=0) {
3272  delete Xcopy;
3273  EPETRA_CHK_ERR(1); // Return positive code to alert the user about needing extra copy of X
3274  return(1);
3275  }
3276 
3277 #ifdef EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3278  if(Epetra_CrsMatrixTraceDumpMultiply) {
3279  *out << "\nFinal output Y is the last Y printed above!\n";
3280  *out << "\nLeaving Epetra_CrsMatrix::Multiply("<<TransA<<",X,Y) ...\n";
3281  }
3282 #endif // EPETRA_CRS_MATRIX_TRACE_DUMP_MULTIPLY
3283 
3284  return(0);
3285 }
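// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the Epetra sources): the multivector
// form applies A to several vectors at once, dispatching to GeneralMV() for a
// single vector and to GeneralMM() otherwise.  The names A and
// exampleMatMultiVec are hypothetical; A must be filled.
static int exampleMatMultiVec(const Epetra_CrsMatrix& A, int numVectors) {
  Epetra_MultiVector X(A.DomainMap(), numVectors);
  Epetra_MultiVector Y(A.RangeMap(), numVectors);
  X.Random();
  EPETRA_CHK_ERR(A.Multiply(false /*TransA*/, X, Y));  // Y = A * X, column by column
  return 0;
}
// ---------------------------------------------------------------------------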
3286 //=======================================================================================================
3287 void Epetra_CrsMatrix::UpdateImportVector(int NumVectors) const {
3288  if(Importer() != 0) {
3289  if(ImportVector_ != 0) {
3290  if(ImportVector_->NumVectors() != NumVectors) {
3291  delete ImportVector_;
3292  ImportVector_= 0;
3293  }
3294  }
3295  if(ImportVector_ == 0)
3296  ImportVector_ = new Epetra_MultiVector(ColMap(),NumVectors); // Create import vector if needed
3297  }
3298  return;
3299 }
3300 //=======================================================================================================
3301 void Epetra_CrsMatrix::UpdateExportVector(int NumVectors) const {
3302  if(Exporter() != 0) {
3303  if(ExportVector_ != 0) {
3304  if(ExportVector_->NumVectors() != NumVectors) {
3305  delete ExportVector_;
3306  ExportVector_= 0;
3307  }
3308  }
3309  if(ExportVector_ == 0)
3310  ExportVector_ = new Epetra_MultiVector(RowMap(),NumVectors); // Create Export vector if needed
3311  }
3312  return;
3313 }
3314 //=======================================================================================================
3315 void Epetra_CrsMatrix::GeneralMV(double * x, double * y) const {
3316 
3317 if (StorageOptimized() && Graph().StorageOptimized()) {
3318 
3319  double * values = All_Values();
3320  int * Indices = Graph().All_Indices();
3321  int * IndexOffset = Graph().IndexOffset();
3322 #if defined(Epetra_ENABLE_MKL_SPARSE)
3323  char transa = 'n';
3324  int m = NumMyRows_;
3325  int NumCols = NumMyCols();
3326  double alpha = 1, beta = 0;
3327  // MKL should ignore '/'. G = General, C = 0-based indexing.
3328  char matdescra[6] = "G//C/";
3329  mkl_dcsrmv(&transa, &m, &NumCols, &alpha, matdescra, values, Indices, IndexOffset, IndexOffset + 1, x, &beta, y);
3330 #elif defined(EPETRA_HAVE_OMP)
3331  int numMyRows = NumMyRows_;
3332 #pragma omp parallel for default(none) shared(IndexOffset,values,Indices,y,x,numMyRows)
3333  for (int row=0; row<numMyRows; ++row)
3334  {
3335  const int curOffset = IndexOffset[row];
3336  const double *val_ptr = values+curOffset;
3337  const int *colnum_ptr = Indices+curOffset;
3338  double s = 0.;
3339  const double *const val_end_of_row = &values[IndexOffset[row+1]];
3340  while (val_ptr != val_end_of_row)
3341  s += *val_ptr++ * x[*colnum_ptr++];
3342  y[row] = s;
3343  }
3344 #elif defined(Epetra_ENABLE_CASK)
3345  cask_csr_dax_new(NumMyRows_, IndexOffset, Indices,
3346  values, x, y, cask);
3347 #elif !defined(FORTRAN_DISABLED)
3348  int ione = 0;
3349  int NumCols = NumMyCols();
3350  //std::cout << "Entering DCRSMV" << std::endl;
3351  EPETRA_DCRSMV_F77(&ione, &NumMyRows_, &NumCols, values, Indices, IndexOffset, x, y);
3352 #else
3353  const double *val_ptr = values;
3354  const int *colnum_ptr = Indices;
3355  double * dst_ptr = y;
3356  for (int row=0; row<NumMyRows_; ++row)
3357  {
3358  double s = 0.;
3359  const double *const val_end_of_row = &values[IndexOffset[row+1]];
3360  while (val_ptr != val_end_of_row)
3361  s += *val_ptr++ * x[*colnum_ptr++];
3362  *dst_ptr++ = s;
3363  }
3364 #endif // Epetra_ENABLE_CASK or EPETRA_HAVE_OMP or Epetra_ENABLE_MKL_SPARSE
3365 
3366  return;
3367  }
3368  else if (!StorageOptimized() && !Graph().StorageOptimized()) {
3369 
3370 
3371  int* NumEntriesPerRow = Graph().NumIndicesPerRow();
3372  int** Indices = Graph().Indices();
3373  double** srcValues = Values();
3374  int numMyRows = NumMyRows_;
3375 
3376  // Do actual computation
3377 #ifdef EPETRA_HAVE_OMP
3378 #pragma omp parallel for default(none) shared(NumEntriesPerRow,Indices,srcValues,y,x,numMyRows)
3379 #endif
3380  for(int i = 0; i < numMyRows; i++) {
3381  int NumEntries = NumEntriesPerRow[i];
3382  int* RowIndices = Indices[i];
3383  double* RowValues = srcValues[i];
3384  double sum = 0.0;
3385  for(int j = 0; j < NumEntries; j++)
3386  sum += *RowValues++ * x[*RowIndices++];
3387 
3388  y[i] = sum;
3389 
3390  }
3391  }
3392  else { // Case where StorageOptimized is incompatible: Use general accessors.
3393 
3394  int numMyRows = NumMyRows_;
3395 
3396  // Do actual computation
3397 #ifdef EPETRA_HAVE_OMP
3398 #pragma omp parallel for default(none) shared(x,y,numMyRows)
3399 #endif
3400  for(int i = 0; i < numMyRows; i++) {
3401  int NumEntries = NumMyEntries(i);
3402  int* RowIndices = Graph().Indices(i);
3403  double* RowValues = Values(i);
3404  double sum = 0.0;
3405  for(int j = 0; j < NumEntries; j++)
3406  sum += *RowValues++ * x[*RowIndices++];
3407 
3408  y[i] = sum;
3409 
3410  }
3411  }
3412  return;
3413 }
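// ---------------------------------------------------------------------------
// Editorial worked example (not part of the Epetra sources): the
// storage-optimized branch of GeneralMV() above is a plain CSR kernel.  For
// the 2x2 matrix [[2, 1], [0, 3]] the packed arrays are
//
//   IndexOffset = {0, 2, 3}    (row i owns entries IndexOffset[i] .. IndexOffset[i+1]-1)
//   Indices     = {0, 1, 1}
//   values      = {2.0, 1.0, 3.0}
//
// and with x = {1, 2} the kernel computes y = {2*1 + 1*2, 3*2} = {4, 6}.
// ---------------------------------------------------------------------------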
3414 //=======================================================================================================
3415 void Epetra_CrsMatrix::GeneralMTV(double * x, double * y) const {
3416 
3417 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || defined(Epetra_ENABLE_MKL_SPARSE)
3418  if (StorageOptimized() && Graph().StorageOptimized()) {
3419  double * values = All_Values_;
3420  int * Indices = Graph().All_Indices();
3421  int * IndexOffset = Graph().IndexOffset();
3422  int NumCols = NumMyCols();
3423 #if defined(Epetra_ENABLE_MKL_SPARSE)
3424  char transa = 't';
3425  int m = NumMyRows_;
3426  double alpha = 1, beta = 0;
3427  // MKL should ignore '/'. G = General, C = 0-based indexing.
3428  char matdescra[6] = "G//C/";
3429  mkl_dcsrmv(&transa, &m, &NumCols, &alpha, matdescra, values, Indices, IndexOffset, IndexOffset + 1, x, &beta, y);
3430 #elif defined(Epetra_ENABLE_CASK)
3431  cask_csr_datx( NumMyRows_, NumCols, IndexOffset, Indices, values,x ,y );
3432 #else
3433  int ione = 1;
3434  EPETRA_DCRSMV_F77(&ione, &NumMyRows_, &NumCols, values, Indices, IndexOffset, x, y);
3435 #endif
3436 
3437  return;
3438  }
3439 #endif // FORTRAN_DISABLED etc
3440  int NumCols = NumMyCols();
3441  for(int i = 0; i < NumCols; i++)
3442  y[i] = 0.0; // Initialize y for transpose multiply
3443 
3444  if (StorageOptimized() && Graph().StorageOptimized()) {
3445  double * values = All_Values_;
3446  int * Indices = Graph().All_Indices();
3447  int * IndexOffset = Graph().IndexOffset();
3448  for(int i = 0; i < NumMyRows_; ++i) {
3449  int prevOffset = *IndexOffset++;
3450  int NumEntries = *IndexOffset - prevOffset;
3451  double xi = x[i];
3452  for(int j = 0; j < NumEntries; j++)
3453  y[*Indices++] += *values++ * xi;
3454  }
3455  }
3456  else if (!StorageOptimized() && !Graph().StorageOptimized()) {
3457 
3458  int* NumEntriesPerRow = Graph().NumIndicesPerRow();
3459  int** Indices = Graph().Indices();
3460  double** srcValues = Values();
3461 
3462  for(int i = 0; i < NumMyRows_; i++) {
3463  int NumEntries = *NumEntriesPerRow++;
3464  int* RowIndices = *Indices++;
3465  double* RowValues = *srcValues++;
3466  double xi = x[i];
3467  for(int j = 0; j < NumEntries; j++)
3468  y[*RowIndices++] += *RowValues++ * xi;
3469  }
3470  }
3471  else { // Case where StorageOptimized is incompatible: Use general accessors.
3472 
3473  for(int i = 0; i < NumMyRows_; i++) {
3474  int NumEntries = NumMyEntries(i);
3475  int* RowIndices = Graph().Indices(i);
3476  double* RowValues = Values(i);
3477  double xi = x[i];
3478  for(int j = 0; j < NumEntries; j++)
3479  y[*RowIndices++] += *RowValues++ * xi;
3480  }
3481  }
3482 
3483  return;
3484 }
3485 //=======================================================================================================
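// GeneralMM is the multi-vector (block) version of GeneralMV: Y = A*X for NumVectors
// right-hand sides. Nonzero LDX/LDY means X and Y are stored as contiguous column-major
// arrays with those leading dimensions, which is what the MKL, CASK and Fortran kernels
// require; otherwise the code loops over rows and vectors directly.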
3486 void Epetra_CrsMatrix::GeneralMM(double ** X, int LDX, double ** Y, int LDY, int NumVectors) const {
3487 
3488 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || (defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM))
3489  if (StorageOptimized() && Graph().StorageOptimized()) {
3490  double * values = All_Values_;
3491  int * Indices = Graph().All_Indices();
3492  int * IndexOffset = Graph().IndexOffset();
3493 
3494  if (LDX!=0 && LDY!=0) {
3495 #if defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM)
3496  int * IndicesPlus1 = Graph().All_IndicesPlus1();
3497  char transa = 'n';
3498  int NumRows = NumMyRows_;
3499  int NumCols = NumMyCols();
3500  double alpha = 1, beta = 0;
3501  // MKL should ignore '/'. G = General, C = 0-based indexing, F = 1-based.
3502  // 1-based is used because X and Y are column-oriented and MKL does not
3503  // do 0-based and column-oriented.
3504  char matdescra[6] = "G//F/";
3505  mkl_dcsrmm(&transa, &NumRows, &NumVectors, &NumCols, &alpha, matdescra, values, IndicesPlus1, IndexOffset, IndexOffset + 1, *X, &LDX, &beta, *Y, &LDY);
3506 #elif defined(Epetra_ENABLE_CASK)
3507  cask_csr_dgesmm_new(0, 1.0, NumMyRows_, NumMyRows_, NumVectors,
3508  IndexOffset, Indices, values, *X, LDX, 0.0, *Y, LDY,cask);
3509 #else
3510  int izero = 0;
3511  EPETRA_DCRSMM_F77(&izero, &NumMyRows_, &NumMyRows_, values, Indices, IndexOffset, *X, &LDX, *Y, &LDY, &NumVectors);
3512 #endif
3513  return;
3514  }
3515 
3516  double ** xp = X;
3517  double ** yp = Y;
3518  int numMyRows = NumMyRows_;
3519 #ifdef EPETRA_HAVE_OMP
3520 #pragma omp parallel for default(none) shared(IndexOffset,Indices,values,NumVectors,numMyRows,xp,yp)
3521 #endif
3522  for (int i=0; i < numMyRows; i++) {
3523  int prevOffset = IndexOffset[i];
3524  int NumEntries = IndexOffset[i+1] - prevOffset;
3525  int * RowIndices = Indices+prevOffset;
3526  double * RowValues = values+prevOffset;
3527  for (int k=0; k<NumVectors; k++) {
3528  double sum = 0.0;
3529  const double * const x = xp[k];
3530  double * const y = yp[k];
3531  for (int j=0; j < NumEntries; j++) sum += RowValues[j] * x[RowIndices[j]];
3532  y[i] = sum;
3533  }
3534  }
3535  }
3536  else if (!StorageOptimized() && !Graph().StorageOptimized()) {
3537 #else
3538  if (!StorageOptimized() && !Graph().StorageOptimized()) {
3539 #endif // FORTRAN_DISABLED etc
3540 
3541  int* NumEntriesPerRow = Graph().NumIndicesPerRow();
3542  int** Indices = Graph().Indices();
3543  double** srcValues = Values();
3544  double ** xp = X;
3545  double ** yp = Y;
3546  int numMyRows = NumMyRows_;
3547 
3548 #ifdef EPETRA_HAVE_OMP
3549 #pragma omp parallel for default(none) shared(NumEntriesPerRow,Indices,srcValues,NumVectors,numMyRows,xp,yp)
3550 #endif
3551  for (int i=0; i < numMyRows; i++) {
3552  int NumEntries = NumEntriesPerRow[i];
3553  int * RowIndices = Indices[i];
3554  double * RowValues = srcValues[i];
3555  for (int k=0; k<NumVectors; k++) {
3556  double sum = 0.0;
3557  const double * const x = xp[k];
3558  double * const y = yp[k];
3559  for (int j=0; j < NumEntries; j++) sum += RowValues[j] * x[RowIndices[j]];
3560  y[i] = sum;
3561  }
3562  }
3563  }
3564  else {
3565 
3566  double ** xp = X;
3567  double ** yp = Y;
3568  int numMyRows = NumMyRows_;
3569 #ifdef EPETRA_HAVE_OMP
3570 #pragma omp parallel for default(none) shared(NumVectors,numMyRows,xp,yp)
3571 #endif
3572  for (int i=0; i < numMyRows; i++) {
3573  int NumEntries = NumMyEntries(i);
3574  int* RowIndices = Graph().Indices(i);
3575  double* RowValues = Values(i);
3576  for (int k=0; k<NumVectors; k++) {
3577  double sum = 0.0;
3578  double * x = xp[k];
3579  for (int j=0; j < NumEntries; j++) sum += RowValues[j] * x[RowIndices[j]];
3580  yp[k][i] = sum;
3581  }
3582  }
3583  }
3584  return;
3585 }
3586 //=======================================================================================================
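// GeneralMTM computes Y = A^T*X for NumVectors right-hand sides. As in GeneralMM,
// strided column-major X/Y (LDX, LDY nonzero) can be passed straight to the vendor
// kernels; the fallback zeroes Y over the local column space and scatters row by row.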
3587 void Epetra_CrsMatrix::GeneralMTM(double ** X, int LDX, double ** Y, int LDY, int NumVectors) const{
3588 
3589  int NumCols = NumMyCols();
3590 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || (defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM))
3591  if (StorageOptimized() && Graph().StorageOptimized()) {
3592  if (LDX!=0 && LDY!=0) {
3593  double * values = All_Values_;
3594  int * Indices = Graph().All_Indices();
3595  int * IndexOffset = Graph().IndexOffset();
3596 #if defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM)
3597  int * IndicesPlus1 = Graph().All_IndicesPlus1();
3598  char transa = 't';
3599  int NumRows = NumMyRows_;
3600  int NumCols = NumMyCols();
3601  double alpha = 1, beta = 0;
3602  // MKL should ignore '/'. G = General, C = 0-based indexing, F = 1-based.
3603  // 1-based is used because X and Y are column-oriented and MKL does not
3604  // do 0-based and column-oriented.
3605  char matdescra[6] = "G//F/";
3606  mkl_dcsrmm(&transa, &NumRows, &NumVectors, &NumCols, &alpha, matdescra, values, IndicesPlus1, IndexOffset, IndexOffset + 1, *X, &LDX, &beta, *Y, &LDY);
3607 #elif defined(Epetra_ENABLE_CASK)
3608  cask_csr_dgesmm_new(1, 1.0, NumMyRows_, NumCols, NumVectors,
3609  IndexOffset, Indices, values, *X, LDX, 0.0,
3610  *Y, LDY, cask);
3611 #else
3612  int ione = 1;
3613  EPETRA_DCRSMM_F77(&ione, &NumMyRows_, &NumCols, values, Indices, IndexOffset, *X, &LDX, *Y, &LDY, &NumVectors);
3614 #endif
3615  return;
3616  }
3617  }
3618 #endif // FORTRAN_DISABLED etc
3619  for (int k=0; k<NumVectors; k++)
3620  for (int i=0; i < NumCols; i++)
3621  Y[k][i] = 0.0; // Initialize y for transpose multiply
3622 
3623  if (StorageOptimized() && Graph().StorageOptimized()) {
3624  double * values = All_Values_;
3625  int * Indices = Graph().All_Indices();
3626  int * IndexOffset = Graph().IndexOffset();
3627  for (int i=0; i < NumMyRows_; i++) {
3628  int prevOffset = *IndexOffset++;
3629  int NumEntries = *IndexOffset - prevOffset;
3630  int * RowIndices = Indices+prevOffset;
3631  double * RowValues = values+prevOffset;
3632 
3633  for (int k=0; k<NumVectors; k++) {
3634  double * y = Y[k];
3635  double * x = X[k];
3636  for (int j=0; j < NumEntries; j++)
3637  y[RowIndices[j]] += RowValues[j] * x[i];
3638  }
3639  }
3640  }
3641  else if (!StorageOptimized() && !Graph().StorageOptimized()) {
3642 
3643  int* NumEntriesPerRow = Graph().NumIndicesPerRow();
3644  int** Indices = Graph().Indices();
3645  double** srcValues = Values();
3646 
3647  for (int i=0; i < NumMyRows_; i++) {
3648  int NumEntries = *NumEntriesPerRow++;
3649  int * RowIndices = *Indices++;
3650  double * RowValues = *srcValues++;
3651  for (int k=0; k<NumVectors; k++) {
3652  double * y = Y[k];
3653  double * x = X[k];
3654  for (int j=0; j < NumEntries; j++)
3655  y[RowIndices[j]] += RowValues[j] * x[i];
3656  }
3657  }
3658  }
3659  else { // Case where StorageOptimized is incompatible: Use general accessors.
3660 
3661  for (int i=0; i < NumMyRows_; i++) {
3662  int NumEntries = NumMyEntries(i);
3663  int* RowIndices = Graph().Indices(i);
3664  double* RowValues = Values(i);
3665  for (int k=0; k<NumVectors; k++) {
3666  double * y = Y[k];
3667  double * x = X[k];
3668  for (int j=0; j < NumEntries; j++)
3669  y[RowIndices[j]] += RowValues[j] * x[i];
3670  }
3671  }
3672  }
3673  return;
3674 }
3675 //=======================================================================================================
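// GeneralSV performs a local triangular solve: given x, find y with L*y = x or U*y = x
// (or the transposed systems). The loops below assume the entries of each row are sorted,
// so that the diagonal, when present, is the first entry of an upper-triangular row and
// the last entry of a lower-triangular row; UnitDiagonal skips the division entirely.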
3676 void Epetra_CrsMatrix::GeneralSV(bool Upper, bool Trans, bool UnitDiagonal, double * xp, double * yp) const {
3677 
3678 
3679  int i, j, j0;
3680 
3681 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || defined(Epetra_ENABLE_MKL_SPARSE)
3682  if (StorageOptimized() && Graph().StorageOptimized() && ((UnitDiagonal && NoDiagonal())|| (!UnitDiagonal && !NoDiagonal()))) {
3683  double * values = All_Values();
3684  int * Indices = Graph().All_Indices();
3685  int * IndexOffset = Graph().IndexOffset();
3686 
3687 #if !defined(Epetra_ENABLE_MKL_SPARSE)
3688  int iupper = Upper ? 1:0;
3689  int itrans = Trans ? 1:0;
3690  int udiag = UnitDiagonal ? 1:0;
3691  int nodiag = NoDiagonal() ? 1:0;
3692  int xysame = (xp==yp) ? 1:0;
3693 #endif
3694 
3695 #if defined(Epetra_ENABLE_MKL_SPARSE)
3696  char transa = Trans ? 't' : 'n';
3697  int m = NumMyRows_;
3698  double alpha = 1;
3699  // T = Triangular, C = 0-based indexing. '/' should be ignored by MKL
3700  char matdescra[6] = {'T', Upper ? 'U' : 'L', UnitDiagonal ? 'U' : 'N', 'C', '/', '\0'};
3701  mkl_dcsrsv(&transa, &m, &alpha, matdescra, values, Indices, IndexOffset, IndexOffset + 1, xp, yp);
3702 #elif defined(Epetra_ENABLE_CASK)
3703  cask_csr_dtrsv_new( iupper, itrans, udiag, nodiag, 0, xysame, NumMyRows_,
3704  NumMyRows_, IndexOffset, Indices, values, xp, yp, cask);
3705 #else
3706  EPETRA_DCRSSV_F77( &iupper, &itrans, &udiag, &nodiag, &NumMyRows_, &NumMyRows_, values, Indices, IndexOffset, xp, yp, &xysame);
3707 #endif
3708  return;
3709  }
3710  //=================================================================
3711  else { // !StorageOptimized()
3712  //=================================================================
3713 #endif
3714  if (!Trans) {
3715 
3716  if (Upper) {
3717 
3718  j0 = 1;
3719  if (NoDiagonal())
3720  j0--; // Include first term if no diagonal
3721  for (i=NumMyRows_-1; i >=0; i--) {
3722  int NumEntries = NumMyEntries(i);
3723  int * RowIndices = Graph().Indices(i);
3724  double * RowValues = Values(i);
3725  double sum = 0.0;
3726  for (j=j0; j < NumEntries; j++)
3727  sum += RowValues[j] * yp[RowIndices[j]];
3728 
3729  if (UnitDiagonal)
3730  yp[i] = xp[i] - sum;
3731  else
3732  yp[i] = (xp[i] - sum)/RowValues[0];
3733 
3734  }
3735  }
3736  else {
3737  j0 = 1;
3738  if (NoDiagonal())
3739  j0--; // Include first term if no diagonal
3740  for (i=0; i < NumMyRows_; i++) {
3741  int NumEntries = NumMyEntries(i) - j0;
3742  int * RowIndices = Graph().Indices(i);
3743  double * RowValues = Values(i);
3744  double sum = 0.0;
3745  for (j=0; j < NumEntries; j++)
3746  sum += RowValues[j] * yp[RowIndices[j]];
3747 
3748  if (UnitDiagonal)
3749  yp[i] = xp[i] - sum;
3750  else
3751  yp[i] = (xp[i] - sum)/RowValues[NumEntries];
3752 
3753  }
3754  }
3755  }
3756 
3757  // *********** Transpose case *******************************
3758 
3759  else {
3760 
3761  if (xp!=yp)
3762  for (i=0; i < NumMyRows_; i++)
3763  yp[i] = xp[i]; // Initialize y for transpose solve
3764 
3765  if (Upper) {
3766 
3767  j0 = 1;
3768  if (NoDiagonal())
3769  j0--; // Include first term if no diagonal
3770 
3771  for (i=0; i < NumMyRows_; i++) {
3772  int NumEntries = NumMyEntries(i);
3773  int * RowIndices = Graph().Indices(i);
3774  double * RowValues = Values(i);
3775  if (!UnitDiagonal)
3776  yp[i] = yp[i]/RowValues[0];
3777  double ytmp = yp[i];
3778  for (j=j0; j < NumEntries; j++)
3779  yp[RowIndices[j]] -= RowValues[j] * ytmp;
3780  }
3781  }
3782  else {
3783 
3784  j0 = 1;
3785  if (NoDiagonal())
3786  j0--; // Include first term if no diagonal
3787 
3788  for (i=NumMyRows_-1; i >= 0; i--) {
3789  int NumEntries = NumMyEntries(i) - j0;
3790  int * RowIndices = Graph().Indices(i);
3791  double * RowValues = Values(i);
3792  if (!UnitDiagonal)
3793  yp[i] = yp[i]/RowValues[NumEntries];
3794  double ytmp = yp[i];
3795  for (j=0; j < NumEntries; j++)
3796  yp[RowIndices[j]] -= RowValues[j] * ytmp;
3797  }
3798  }
3799 
3800  }
3801 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || defined(Epetra_ENABLE_MKL_SPARSE)
3802  }
3803 #endif
3804  return;
3805 }
3806 //=======================================================================================================
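// GeneralSM is the multi-vector form of GeneralSV: a local triangular solve applied to
// NumVectors right-hand sides. The inverse of the diagonal is computed once per row and
// reused across vectors; the same sorted-row assumptions as in GeneralSV apply.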
3807 void Epetra_CrsMatrix::GeneralSM(bool Upper, bool Trans, bool UnitDiagonal, double ** Xp, int LDX, double ** Yp, int LDY, int NumVectors) const {
3808 
3809  int i, j, j0, k;
3810  double diag = 0.0;
3811 
3812  if (StorageOptimized() && Graph().StorageOptimized()) {
3813  double * values = All_Values();
3814  int * Indices = Graph().All_Indices();
3815  int * IndexOffset = Graph().IndexOffset();
3816 #if !defined(FORTRAN_DISABLED) || defined(Epetra_ENABLE_CASK) || (defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM))
3817  if (LDX!=0 && LDY!=0 && ((UnitDiagonal && NoDiagonal()) || (!UnitDiagonal && !NoDiagonal()))) {
3818 
3819 #if !defined(Epetra_ENABLE_MKL_SPARSE) || defined(Epetra_DISABLE_MKL_SPARSE_MM)
3820  int iupper = Upper ? 1:0;
3821  int itrans = Trans ? 1:0;
3822  int udiag = UnitDiagonal ? 1:0;
3823  int nodiag = NoDiagonal() ? 1:0;
3824  int xysame = (Xp==Yp) ? 1:0;
3825 #endif
3826 
3827 #if defined(Epetra_ENABLE_MKL_SPARSE) && !defined(Epetra_DISABLE_MKL_SPARSE_MM)
3828  int * IndicesPlus1 = Graph().All_IndicesPlus1();
3829  char transa = Trans ? 't' : 'n';
3830  int NumRows = NumMyRows_;
3831  double alpha = 1;
3832  // T = Triangular, C = 0-based indexing, F = 1-based. '/' should be ignored by MKL.
3833  // 1-based is used because X and Y are column-oriented and MKL does not
3834  // do 0-based and column-oriented.
3835  char matdescra[6] = {'T', Upper ? 'U' : 'L', UnitDiagonal ? 'U' : 'N', 'F', '/', '\0'};
3836  mkl_dcsrsm(&transa, &NumRows, &NumVectors, &alpha, matdescra, values, IndicesPlus1, IndexOffset, IndexOffset + 1, *Xp, &LDX, *Yp, &LDY);
3837 #elif defined(Epetra_ENABLE_CASK)
3838  cask_csr_dtrsm( iupper, itrans, udiag, nodiag, 0, xysame, NumMyRows_,
3839  NumMyRows_, NumVectors, IndexOffset, Indices, values,
3840  *Xp, LDX, *Yp, LDY);
3841 #else
3842  EPETRA_DCRSSM_F77( &iupper, &itrans, &udiag, &nodiag, &NumMyRows_, &NumMyRows_, values, Indices, IndexOffset,
3843  *Xp, &LDX, *Yp, &LDY, &xysame, &NumVectors);
3844 #endif
3845  return;
3846  }
3847 #endif // FORTRAN_DISABLED etc
3848  if(!Trans) {
3849  if(Upper) {
3850  j0 = 1;
3851  if(NoDiagonal())
3852  j0--; // Include first term if no diagonal
3853  for(i = NumMyRows_ - 1; i >= 0; i--) {
3854  int Offset = IndexOffset[i];
3855  int NumEntries = IndexOffset[i+1]-Offset;
3856  int * RowIndices = Indices+Offset;
3857  double * RowValues = values+Offset;
3858  if(!UnitDiagonal)
3859  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
3860  for(k = 0; k < NumVectors; k++) {
3861  double sum = 0.0;
3862  for(j = j0; j < NumEntries; j++)
3863  sum += RowValues[j] * Yp[k][RowIndices[j]];
3864 
3865  if(UnitDiagonal)
3866  Yp[k][i] = Xp[k][i] - sum;
3867  else
3868  Yp[k][i] = (Xp[k][i] - sum) * diag;
3869  }
3870  }
3871  }
3872  else {
3873  j0 = 1;
3874  if(NoDiagonal())
3875  j0--; // Include first term if no diagonal
3876  for(i = 0; i < NumMyRows_; i++) {
3877  int Offset = IndexOffset[i];
3878  int NumEntries = IndexOffset[i+1]-Offset - j0;
3879  int * RowIndices = Indices+Offset;
3880  double * RowValues = values+Offset;
3881  if(!UnitDiagonal)
3882  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
3883  for(k = 0; k < NumVectors; k++) {
3884  double sum = 0.0;
3885  for(j = 0; j < NumEntries; j++)
3886  sum += RowValues[j] * Yp[k][RowIndices[j]];
3887 
3888  if(UnitDiagonal)
3889  Yp[k][i] = Xp[k][i] - sum;
3890  else
3891  Yp[k][i] = (Xp[k][i] - sum)*diag;
3892  }
3893  }
3894  }
3895  }
3896  // *********** Transpose case *******************************
3897 
3898  else {
3899  for(k = 0; k < NumVectors; k++)
3900  if(Yp[k] != Xp[k])
3901  for(i = 0; i < NumMyRows_; i++)
3902  Yp[k][i] = Xp[k][i]; // Initialize y for transpose multiply
3903 
3904  if(Upper) {
3905  j0 = 1;
3906  if(NoDiagonal())
3907  j0--; // Include first term if no diagonal
3908 
3909  for(i = 0; i < NumMyRows_; i++) {
3910  int Offset = IndexOffset[i];
3911  int NumEntries = IndexOffset[i+1]-Offset;
3912  int * RowIndices = Indices+Offset;
3913  double * RowValues = values+Offset;
3914  if(!UnitDiagonal)
3915  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
3916  for(k = 0; k < NumVectors; k++) {
3917  if(!UnitDiagonal)
3918  Yp[k][i] = Yp[k][i]*diag;
3919  double ytmp = Yp[k][i];
3920  for(j = j0; j < NumEntries; j++)
3921  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
3922  }
3923  }
3924  }
3925  else {
3926  j0 = 1;
3927  if(NoDiagonal())
3928  j0--; // Include first term if no diagonal
3929  for(i = NumMyRows_ - 1; i >= 0; i--) {
3930  int Offset = IndexOffset[i];
3931  int NumEntries = IndexOffset[i+1]-Offset - j0;
3932  int * RowIndices = Indices+Offset;
3933  double * RowValues = values+Offset;
3934  if(!UnitDiagonal)
3935  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
3936  for(k = 0; k < NumVectors; k++) {
3937  if(!UnitDiagonal)
3938  Yp[k][i] = Yp[k][i]*diag;
3939  double ytmp = Yp[k][i];
3940  for(j = 0; j < NumEntries; j++)
3941  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
3942  }
3943  }
3944  }
3945  }
3946  }
3947  // ========================================================
3948  else { // !StorageOptimized()
3949  // ========================================================
3950 
3951  if(!Trans) {
3952  if(Upper) {
3953  j0 = 1;
3954  if(NoDiagonal())
3955  j0--; // Include first term if no diagonal
3956  for(i = NumMyRows_ - 1; i >= 0; i--) {
3957  int NumEntries = NumMyEntries(i);
3958  int* RowIndices = Graph().Indices(i);
3959  double* RowValues = Values(i);
3960  if(!UnitDiagonal)
3961  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
3962  for(k = 0; k < NumVectors; k++) {
3963  double sum = 0.0;
3964  for(j = j0; j < NumEntries; j++)
3965  sum += RowValues[j] * Yp[k][RowIndices[j]];
3966 
3967  if(UnitDiagonal)
3968  Yp[k][i] = Xp[k][i] - sum;
3969  else
3970  Yp[k][i] = (Xp[k][i] - sum) * diag;
3971  }
3972  }
3973  }
3974  else {
3975  j0 = 1;
3976  if(NoDiagonal())
3977  j0--; // Include first term if no diagonal
3978  for(i = 0; i < NumMyRows_; i++) {
3979  int NumEntries = NumMyEntries(i) - j0;
3980  int* RowIndices = Graph().Indices(i);
3981  double* RowValues = Values(i);
3982  if(!UnitDiagonal)
3983  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
3984  for(k = 0; k < NumVectors; k++) {
3985  double sum = 0.0;
3986  for(j = 0; j < NumEntries; j++)
3987  sum += RowValues[j] * Yp[k][RowIndices[j]];
3988 
3989  if(UnitDiagonal)
3990  Yp[k][i] = Xp[k][i] - sum;
3991  else
3992  Yp[k][i] = (Xp[k][i] - sum)*diag;
3993  }
3994  }
3995  }
3996  }
3997  // *********** Transpose case *******************************
3998 
3999  else {
4000  for(k = 0; k < NumVectors; k++)
4001  if(Yp[k] != Xp[k])
4002  for(i = 0; i < NumMyRows_; i++)
4003  Yp[k][i] = Xp[k][i]; // Initialize y for transpose multiply
4004 
4005  if(Upper) {
4006  j0 = 1;
4007  if(NoDiagonal())
4008  j0--; // Include first term if no diagonal
4009 
4010  for(i = 0; i < NumMyRows_; i++) {
4011  int NumEntries = NumMyEntries(i);
4012  int* RowIndices = Graph().Indices(i);
4013  double* RowValues = Values(i);
4014  if(!UnitDiagonal)
4015  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
4016  for(k = 0; k < NumVectors; k++) {
4017  if(!UnitDiagonal)
4018  Yp[k][i] = Yp[k][i]*diag;
4019  double ytmp = Yp[k][i];
4020  for(j = j0; j < NumEntries; j++)
4021  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
4022  }
4023  }
4024  }
4025  else {
4026  j0 = 1;
4027  if(NoDiagonal())
4028  j0--; // Include first term if no diagonal
4029  for(i = NumMyRows_ - 1; i >= 0; i--) {
4030  int NumEntries = NumMyEntries(i) - j0;
4031  int* RowIndices = Graph().Indices(i);
4032  double* RowValues = Values(i);
4033  if(!UnitDiagonal)
4034  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
4035  for(k = 0; k < NumVectors; k++) {
4036  if(!UnitDiagonal)
4037  Yp[k][i] = Yp[k][i]*diag;
4038  double ytmp = Yp[k][i];
4039  for(j = 0; j < NumEntries; j++)
4040  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
4041  }
4042  }
4043  }
4044  }
4045  }
4046 
4047  return;
4048 }
4049 //=============================================================================
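// Multiply1 is the non-storage-optimized fallback for Multiply: it forms y = A*x or
// y = A'*x including the distributed-memory bookkeeping. Off-process values of x are
// staged into ImportVector_ or ExportVector_ (work vectors built on the column and row
// maps), the local row-by-row kernel is applied, and partial results are combined into
// y with an Export using Add; a locally replicated result map is finished with a Reduce.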
4050 int Epetra_CrsMatrix::Multiply1(bool TransA, const Epetra_Vector& x, Epetra_Vector& y) const {
4051 
4052 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
4053  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Multiply1(TransA,x,y)");
4054 #endif
4055 
4056  //
4057  // This function forms the product y = A * x or y = A' * x
4058  //
4059 
4060  if(!Filled())
4061  EPETRA_CHK_ERR(-1); // Matrix must be filled.
4062 
4063  int i, j;
4064  double* xp = (double*) x.Values();
4065  double* yp = (double*) y.Values();
4066  int NumMyCols_ = NumMyCols();
4067 
4068  if(!TransA) {
4069 
4070  // If we have a non-trivial importer, we must import elements that are permuted or are on other processors
4071  if(Importer() != 0) {
4072  if(ImportVector_ != 0) {
4073  if(ImportVector_->NumVectors() != 1) {
4074  delete ImportVector_;
4075  ImportVector_= 0;
4076  }
4077  }
4078  if(ImportVector_ == 0)
4079  ImportVector_ = new Epetra_MultiVector(ColMap(),1); // Create import vector if needed
4080  EPETRA_CHK_ERR(ImportVector_->Import(x, *Importer(), Insert));
4081  xp = (double*) ImportVector_->Values();
4082  }
4083 
4084  // If we have a non-trivial exporter, we must export elements that are permuted or belong to other processors
4085  if(Exporter() != 0) {
4086  if(ExportVector_ != 0) {
4087  if(ExportVector_->NumVectors() != 1) {
4088  delete ExportVector_;
4089  ExportVector_= 0;
4090  }
4091  }
4092  if(ExportVector_ == 0)
4093  ExportVector_ = new Epetra_MultiVector(RowMap(),1); // Create Export vector if needed
4094  yp = (double*) ExportVector_->Values();
4095  }
4096 
4097  // Do actual computation
4098  for(i = 0; i < NumMyRows_; i++) {
4099  int NumEntries = NumMyEntries(i);
4100  int* RowIndices = Graph().Indices(i);
4101  double* RowValues = Values(i);
4102  double sum = 0.0;
4103  for(j = 0; j < NumEntries; j++)
4104  sum += RowValues[j] * xp[RowIndices[j]];
4105 
4106  yp[i] = sum;
4107 
4108  }
4109  if(Exporter() != 0) {
4110  y.PutScalar(0.0); // Make sure target is zero
4111  EPETRA_CHK_ERR(y.Export(*ExportVector_, *Exporter(), Add)); // Fill y with Values from export vector
4112  }
4113  // Handle case of rangemap being a local replicated map
4114  if (!Graph().RangeMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(y.Reduce());
4115  }
4116 
4117  else { // Transpose operation
4118 
4119  // If we have a non-trivial exporter, we must import elements that are permuted or are on other processors
4120  if(Exporter() != 0) {
4121  if(ExportVector_ != 0) {
4122  if(ExportVector_->NumVectors() != 1) {
4123  delete ExportVector_;
4124  ExportVector_= 0;
4125  }
4126  }
4127  if(ExportVector_ == 0)
4128  ExportVector_ = new Epetra_MultiVector(RowMap(),1); // Create Export vector if needed
4129  EPETRA_CHK_ERR(ExportVector_->Import(x, *Exporter(), Insert));
4130  xp = (double*) ExportVector_->Values();
4131  }
4132 
4133  // If we have a non-trivial importer, we must export elements that are permuted or belong to other processors
4134  if(Importer() != 0) {
4135  if(ImportVector_ != 0) {
4136  if(ImportVector_->NumVectors() != 1) {
4137  delete ImportVector_;
4138  ImportVector_= 0;
4139  }
4140  }
4141  if(ImportVector_ == 0)
4142  ImportVector_ = new Epetra_MultiVector(ColMap(),1); // Create import vector if needed
4143  yp = (double*) ImportVector_->Values();
4144  }
4145 
4146  // Do actual computation
4147  for(i = 0; i < NumMyCols_; i++)
4148  yp[i] = 0.0; // Initialize y for transpose multiply
4149 
4150  for(i = 0; i < NumMyRows_; i++) {
4151  int NumEntries = NumMyEntries(i);
4152  int* RowIndices = Graph().Indices(i);
4153  double* RowValues = Values(i);
4154  for(j = 0; j < NumEntries; j++)
4155  yp[RowIndices[j]] += RowValues[j] * xp[i];
4156  }
4157  if(Importer() != 0) {
4158  y.PutScalar(0.0); // Make sure target is zero
4159  EPETRA_CHK_ERR(y.Export(*ImportVector_, *Importer(), Add)); // Fill y with Values from export vector
4160  }
4161  // Handle case of the domain map being a locally replicated map
4162  if (!Graph().DomainMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(y.Reduce());
4163  }
4164 
4165  UpdateFlops(2 * NumGlobalNonzeros64());
4166  return(0);
4167 }
4168 //=============================================================================
4169 int Epetra_CrsMatrix::Multiply1(bool TransA, const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {
4170 
4171 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
4172  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Multiply1(TransA,X,Y)");
4173 #endif
4174 
4175  //
4176  // This function forms the product Y = A * X or Y = A' * X
4177  //
4178  if((X.NumVectors() == 1) && (Y.NumVectors() == 1)) {
4179  double* xp = (double*) X[0];
4180  double* yp = (double*) Y[0];
4181  Epetra_Vector x(View, X.Map(), xp);
4182  Epetra_Vector y(View, Y.Map(), yp);
4183  EPETRA_CHK_ERR(Multiply1(TransA, x, y));
4184  return(0);
4185  }
4186  if(!Filled()) {
4187  EPETRA_CHK_ERR(-1); // Matrix must be filled.
4188  }
4189 
4190  int i, j, k;
4191 
4192  double** Xp = (double**) X.Pointers();
4193  double** Yp = (double**) Y.Pointers();
4194 
4195  int NumVectors = X.NumVectors();
4196  int NumMyCols_ = NumMyCols();
4197 
4198 
4199  // Need to better manage the Import and Export vectors:
4200  // - Need accessor functions
4201  // - Need to make the NumVector match (use a View to do this)
4202  // - Need to look at RightScale and ColSum routines too.
4203 
4204  if (!TransA) {
4205 
4206  // If we have a non-trivial importer, we must import elements that are permuted or are on other processors
4207  if (Importer()!=0) {
4208  if (ImportVector_!=0) {
4209  if (ImportVector_->NumVectors()!=NumVectors) {
4210  delete ImportVector_; ImportVector_= 0;}
4211  }
4212  if (ImportVector_==0)
4213  ImportVector_ = new Epetra_MultiVector(ColMap(),NumVectors); // Create import vector if needed
4214  EPETRA_CHK_ERR(ImportVector_->Import(X, *Importer(), Insert));
4215  Xp = (double**)ImportVector_->Pointers();
4216  }
4217 
4218  // If we have a non-trivial exporter, we must export elements that are permuted or belong to other processors
4219  if (Exporter()!=0) {
4220  if (ExportVector_!=0) {
4221  if (ExportVector_->NumVectors()!=NumVectors) {
4222  delete ExportVector_; ExportVector_= 0;}
4223  }
4224  if (ExportVector_==0)
4225  ExportVector_ = new Epetra_MultiVector(RowMap(),NumVectors); // Create Export vector if needed
4226  Yp = (double**)ExportVector_->Pointers();
4227  }
4228 
4229  // Do actual computation
4230 
4231  for (i=0; i < NumMyRows_; i++) {
4232  int NumEntries = NumMyEntries(i);
4233  int * RowIndices = Graph().Indices(i);
4234  double * RowValues = Values(i);
4235  for (k=0; k<NumVectors; k++) {
4236  double sum = 0.0;
4237  for (j=0; j < NumEntries; j++) sum += RowValues[j] * Xp[k][RowIndices[j]];
4238  Yp[k][i] = sum;
4239  }
4240  }
4241  if (Exporter()!=0) {
4242  Y.PutScalar(0.0); // Make sure target is zero
4243  EPETRA_CHK_ERR(Y.Export(*ExportVector_, *Exporter(), Add)); // Fill Y with Values from export vector
4244  }
4245  // Handle case of rangemap being a local replicated map
4246  if (!Graph().RangeMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(Y.Reduce());
4247  }
4248  else { // Transpose operation
4249 
4250 
4251  // If we have a non-trivial exporter, we must import elements that are permuted or are on other processors
4252 
4253  if (Exporter()!=0) {
4254  if (ExportVector_!=0) {
4255  if (ExportVector_->NumVectors()!=NumVectors) {
4256  delete ExportVector_; ExportVector_= 0;}
4257  }
4258  if (ExportVector_==0)
4259  ExportVector_ = new Epetra_MultiVector(RowMap(),NumVectors); // Create Export vector if needed
4260  EPETRA_CHK_ERR(ExportVector_->Import(X, *Exporter(), Insert));
4261  Xp = (double**)ExportVector_->Pointers();
4262  }
4263 
4264  // If we have a non-trivial importer, we must export elements that are permuted or belong to other processors
4265  if (Importer()!=0) {
4266  if (ImportVector_!=0) {
4267  if (ImportVector_->NumVectors()!=NumVectors) {
4268  delete ImportVector_; ImportVector_= 0;}
4269  }
4270  if (ImportVector_==0)
4271  ImportVector_ = new Epetra_MultiVector(ColMap(),NumVectors); // Create import vector if needed
4272  Yp = (double**)ImportVector_->Pointers();
4273  }
4274 
4275  // Do actual computation
4276 
4277 
4278 
4279  for (k=0; k<NumVectors; k++)
4280  for (i=0; i < NumMyCols_; i++)
4281  Yp[k][i] = 0.0; // Initialize y for transpose multiply
4282 
4283  for (i=0; i < NumMyRows_; i++) {
4284  int NumEntries = NumMyEntries(i);
4285  int * RowIndices = Graph().Indices(i);
4286  double * RowValues = Values(i);
4287  for (k=0; k<NumVectors; k++) {
4288  for (j=0; j < NumEntries; j++)
4289  Yp[k][RowIndices[j]] += RowValues[j] * Xp[k][i];
4290  }
4291  }
4292  if (Importer()!=0) {
4293  Y.PutScalar(0.0); // Make sure target is zero
4294  EPETRA_CHK_ERR(Y.Export(*ImportVector_, *Importer(), Add)); // Fill Y with Values from export vector
4295  }
4296  // Handle case of the domain map being a locally replicated map
4297  if (!Graph().DomainMap().DistributedGlobal() && Comm().NumProc()>1) EPETRA_CHK_ERR(Y.Reduce());
4298  }
4299 
4300  UpdateFlops(2*NumVectors*NumGlobalNonzeros64());
4301  return(0);
4302 }
4303 
4304 //=============================================================================
4305 int Epetra_CrsMatrix::Solve1(bool Upper, bool Trans, bool UnitDiagonal,
4306  const Epetra_Vector& x, Epetra_Vector& y) const
4307 {
4308 
4309 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
4310  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Solve1(Upper,Trans,UnitDiag,x,y)");
4311 #endif
4312 
4313  //
4314  // This function finds y such that Ly = x or Uy = x or the transpose cases.
4315  //
4316 
4317  if (!Filled()) {
4318  EPETRA_CHK_ERR(-1); // Matrix must be filled.
4319  }
4320 
4321  if ((Upper) && (!UpperTriangular()))
4322  EPETRA_CHK_ERR(-2);
4323  if ((!Upper) && (!LowerTriangular()))
4324  EPETRA_CHK_ERR(-3);
4325  if ((!UnitDiagonal) && (NoDiagonal()))
4326  EPETRA_CHK_ERR(-4); // If matrix has no diagonal, we must use UnitDiagonal
4327  if ((!UnitDiagonal) && (NumMyDiagonals()<NumMyRows_))
4328  EPETRA_CHK_ERR(-5); // Need each row to have a diagonal
4329 
4330 
4331  int i, j, j0;
4332  int * NumEntriesPerRow = Graph().NumIndicesPerRow();
4333  int ** Indices = Graph().Indices();
4334  double ** Vals = Values();
4335  int NumMyCols_ = NumMyCols();
4336 
4337  // If upper, point to last row
4338  if ((Upper && !Trans) || (!Upper && Trans)) {
4339  NumEntriesPerRow += NumMyRows_-1;
4340  Indices += NumMyRows_-1;
4341  Vals += NumMyRows_-1;
4342  }
4343 
4344  double *xp = (double*)x.Values();
4345  double *yp = (double*)y.Values();
4346 
4347  if (!Trans) {
4348 
4349  if (Upper) {
4350 
4351  j0 = 1;
4352  if (NoDiagonal())
4353  j0--; // Include first term if no diagonal
4354  for (i=NumMyRows_-1; i >=0; i--) {
4355  int NumEntries = *NumEntriesPerRow--;
4356  int * RowIndices = *Indices--;
4357  double * RowValues = *Vals--;
4358  double sum = 0.0;
4359  for (j=j0; j < NumEntries; j++)
4360  sum += RowValues[j] * yp[RowIndices[j]];
4361 
4362  if (UnitDiagonal)
4363  yp[i] = xp[i] - sum;
4364  else
4365  yp[i] = (xp[i] - sum)/RowValues[0];
4366 
4367  }
4368  }
4369  else {
4370  j0 = 1;
4371  if (NoDiagonal())
4372  j0--; // Include first term if no diagonal
4373  for (i=0; i < NumMyRows_; i++) {
4374  int NumEntries = *NumEntriesPerRow++ - j0;
4375  int * RowIndices = *Indices++;
4376  double * RowValues = *Vals++;
4377  double sum = 0.0;
4378  for (j=0; j < NumEntries; j++)
4379  sum += RowValues[j] * yp[RowIndices[j]];
4380 
4381  if (UnitDiagonal)
4382  yp[i] = xp[i] - sum;
4383  else
4384  yp[i] = (xp[i] - sum)/RowValues[NumEntries];
4385 
4386  }
4387  }
4388  }
4389 
4390  // *********** Transpose case *******************************
4391 
4392  else {
4393 
4394  if (xp!=yp)
4395  for (i=0; i < NumMyCols_; i++)
4396  yp[i] = xp[i]; // Initialize y for transpose solve
4397 
4398  if (Upper) {
4399 
4400  j0 = 1;
4401  if (NoDiagonal())
4402  j0--; // Include first term if no diagonal
4403 
4404  for (i=0; i < NumMyRows_; i++) {
4405  int NumEntries = *NumEntriesPerRow++;
4406  int * RowIndices = *Indices++;
4407  double * RowValues = *Vals++;
4408  if (!UnitDiagonal)
4409  yp[i] = yp[i]/RowValues[0];
4410  double ytmp = yp[i];
4411  for (j=j0; j < NumEntries; j++)
4412  yp[RowIndices[j]] -= RowValues[j] * ytmp;
4413  }
4414  }
4415  else {
4416 
4417  j0 = 1;
4418  if (NoDiagonal())
4419  j0--; // Include first term if no diagonal
4420 
4421  for (i=NumMyRows_-1; i >= 0; i--) {
4422  int NumEntries = *NumEntriesPerRow-- - j0;
4423  int * RowIndices = *Indices--;
4424  double * RowValues = *Vals--;
4425  if (!UnitDiagonal)
4426  yp[i] = yp[i]/RowValues[NumEntries];
4427  double ytmp = yp[i];
4428  for (j=0; j < NumEntries; j++)
4429  yp[RowIndices[j]] -= RowValues[j] * ytmp;
4430  }
4431  }
4432 
4433  }
4434  UpdateFlops(2 * NumGlobalNonzeros64());
4435  return(0);
4436 }
4437 
4438 //=============================================================================
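// Multi-vector form of Solve1: forward/back substitution applied to each of the
// NumVectors columns of X. The row pointers are walked from the last row when solving
// an upper system forward or a lower system transposed, mirroring the single-vector code.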
4439 int Epetra_CrsMatrix::Solve1(bool Upper, bool Trans, bool UnitDiagonal, const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {
4440 
4441 #ifdef EPETRA_CRSMATRIX_TEUCHOS_TIMERS
4442  TEUCHOS_FUNC_TIME_MONITOR("Epetra_CrsMatrix::Solve(Upper,Trans,UnitDiag,X,Y)");
4443 #endif
4444 
4445  //
4446  // This function finds Y such that LY = X or UY = X or the transpose cases.
4447  //
4448  if((X.NumVectors() == 1) && (Y.NumVectors() == 1)) {
4449  double* xp = (double*) X[0];
4450  double* yp = (double*) Y[0];
4451  Epetra_Vector x(View, X.Map(), xp);
4452  Epetra_Vector y(View, Y.Map(), yp);
4453  EPETRA_CHK_ERR(Solve1(Upper, Trans, UnitDiagonal, x, y));
4454  return(0);
4455  }
4456  if(!Filled())
4457  EPETRA_CHK_ERR(-1); // Matrix must be filled.
4458 
4459  if((Upper) && (!UpperTriangular()))
4460  EPETRA_CHK_ERR(-2);
4461  if((!Upper) && (!LowerTriangular()))
4462  EPETRA_CHK_ERR(-3);
4463  if((!UnitDiagonal) && (NoDiagonal()))
4464  EPETRA_CHK_ERR(-4); // If matrix has no diagonal, we must use UnitDiagonal
4465  if((!UnitDiagonal) && (NumMyDiagonals()<NumMyRows_))
4466  EPETRA_CHK_ERR(-5); // Need each row to have a diagonal
4467 
4468  int i, j, j0, k;
4469  int* NumEntriesPerRow = Graph().NumIndicesPerRow();
4470  int** Indices = Graph().Indices();
4471  double** Vals = Values();
4472  double diag = 0.0;
4473 
4474  // If upper, point to last row
4475  if((Upper && !Trans) || (!Upper && Trans)) {
4476  NumEntriesPerRow += NumMyRows_-1;
4477  Indices += NumMyRows_-1;
4478  Vals += NumMyRows_-1;
4479  }
4480 
4481  double** Xp = (double**) X.Pointers();
4482  double** Yp = (double**) Y.Pointers();
4483 
4484  int NumVectors = X.NumVectors();
4485 
4486  if(!Trans) {
4487  if(Upper) {
4488  j0 = 1;
4489  if(NoDiagonal())
4490  j0--; // Include first term if no diagonal
4491  for(i = NumMyRows_ - 1; i >= 0; i--) {
4492  int NumEntries = *NumEntriesPerRow--;
4493  int* RowIndices = *Indices--;
4494  double* RowValues = *Vals--;
4495  if(!UnitDiagonal)
4496  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
4497  for(k = 0; k < NumVectors; k++) {
4498  double sum = 0.0;
4499  for(j = j0; j < NumEntries; j++)
4500  sum += RowValues[j] * Yp[k][RowIndices[j]];
4501 
4502  if(UnitDiagonal)
4503  Yp[k][i] = Xp[k][i] - sum;
4504  else
4505  Yp[k][i] = (Xp[k][i] - sum) * diag;
4506  }
4507  }
4508  }
4509  else {
4510  j0 = 1;
4511  if(NoDiagonal())
4512  j0--; // Include first term if no diagonal
4513  for(i = 0; i < NumMyRows_; i++) {
4514  int NumEntries = *NumEntriesPerRow++ - j0;
4515  int* RowIndices = *Indices++;
4516  double* RowValues = *Vals++;
4517  if(!UnitDiagonal)
4518  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
4519  for(k = 0; k < NumVectors; k++) {
4520  double sum = 0.0;
4521  for(j = 0; j < NumEntries; j++)
4522  sum += RowValues[j] * Yp[k][RowIndices[j]];
4523 
4524  if(UnitDiagonal)
4525  Yp[k][i] = Xp[k][i] - sum;
4526  else
4527  Yp[k][i] = (Xp[k][i] - sum)*diag;
4528  }
4529  }
4530  }
4531  }
4532  // *********** Transpose case *******************************
4533 
4534  else {
4535  for(k = 0; k < NumVectors; k++)
4536  if(Yp[k] != Xp[k])
4537  for(i = 0; i < NumMyRows_; i++)
4538  Yp[k][i] = Xp[k][i]; // Initialize y for transpose multiply
4539 
4540  if(Upper) {
4541  j0 = 1;
4542  if(NoDiagonal())
4543  j0--; // Include first term if no diagonal
4544 
4545  for(i = 0; i < NumMyRows_; i++) {
4546  int NumEntries = *NumEntriesPerRow++;
4547  int* RowIndices = *Indices++;
4548  double* RowValues = *Vals++;
4549  if(!UnitDiagonal)
4550  diag = 1.0/RowValues[0]; // Take inverse of diagonal once for later use
4551  for(k = 0; k < NumVectors; k++) {
4552  if(!UnitDiagonal)
4553  Yp[k][i] = Yp[k][i]*diag;
4554  double ytmp = Yp[k][i];
4555  for(j = j0; j < NumEntries; j++)
4556  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
4557  }
4558  }
4559  }
4560  else {
4561  j0 = 1;
4562  if(NoDiagonal())
4563  j0--; // Include first term if no diagonal
4564  for(i = NumMyRows_ - 1; i >= 0; i--) {
4565  int NumEntries = *NumEntriesPerRow-- - j0;
4566  int* RowIndices = *Indices--;
4567  double* RowValues = *Vals--;
4568  if (!UnitDiagonal)
4569  diag = 1.0/RowValues[NumEntries]; // Take inverse of diagonal once for later use
4570  for(k = 0; k < NumVectors; k++) {
4571  if(!UnitDiagonal)
4572  Yp[k][i] = Yp[k][i]*diag;
4573  double ytmp = Yp[k][i];
4574  for(j = 0; j < NumEntries; j++)
4575  Yp[k][RowIndices[j]] -= RowValues[j] * ytmp;
4576  }
4577  }
4578  }
4579  }
4580 
4581  UpdateFlops(2 * NumVectors * NumGlobalNonzeros64());
4582  return(0);
4583 }
4584 
4585 
4586 
4587 //=============================================================================
4588 Epetra_IntSerialDenseVector& Epetra_CrsMatrix::ExpertExtractIndexOffset() {
4589    return Graph_.CrsGraphData_->IndexOffset_;
4590  }
4591 
4592 //=============================================================================
4593 Epetra_IntSerialDenseVector& Epetra_CrsMatrix::ExpertExtractIndices() {
4594    return Graph_.CrsGraphData_->data->All_Indices_;
4595  }
4596 
4597 //=============================================================================
4598 int Epetra_CrsMatrix::ExpertMakeUniqueCrsGraphData() {
4599    if(Graph_.CrsGraphData_->ReferenceCount() > 1){
4600      Graph_.CrsGraphData_->DecrementReferenceCount();
4601      Graph_.CrsGraphData_=new Epetra_CrsGraphData(Copy, RowMap(),ColMap(),true); // true is for StaticProfile
4602  }
4603  return (0);
4604  }
4605 
4606 //=============================================================================
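// ExpertStaticFillComplete stitches user-supplied CSR arrays (already held by this
// matrix and its graph) into a filled, storage-optimized matrix without the usual
// FillComplete communication: it installs the domain/range maps, reuses or rebuilds the
// importer/exporter, and recomputes local and global constants directly from the index
// offsets. Passing numMyDiagonals == -1 makes the routine count the diagonals itself
// while it checks the triangular flags.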
4607 int Epetra_CrsMatrix::ExpertStaticFillComplete(const Epetra_Map & theDomainMap,const Epetra_Map & theRangeMap, const Epetra_Import * theImporter, const Epetra_Export * theExporter, int numMyDiagonals){
4608 
4609  Epetra_CrsGraphData& D = *Graph_.CrsGraphData_;
4610  int m=D.RowMap_.NumMyElements();
4611 
4612  bool UseLL=false;
4613  if(D.RowMap_.GlobalIndicesLongLong()) UseLL=true;
4614 
4615  // This only works for constant element size matrices w/ size=1
4616  if(!D.RowMap_.ConstantElementSize() || D.RowMap_.ElementSize()!=1 ||
4617     !D.ColMap_.ConstantElementSize() || D.ColMap_.ElementSize()!=1)
4618    EPETRA_CHK_ERR(-1);
4619 
4620  // Maps
4621  D.DomainMap_ = theDomainMap;
4622  D.RangeMap_ = theRangeMap;
4623 
4624  // Create import, if needed
4625  if (!D.ColMap_.SameAs(D.DomainMap_)) {
4626  if (D.Importer_ != 0) {
4627  delete D.Importer_;
4628  D.Importer_ = 0;
4629  }
4630  if(theImporter && theImporter->SourceMap().SameAs(D.DomainMap_) && theImporter->TargetMap().SameAs(D.ColMap_)){
4631  D.Importer_=theImporter;
4632  }
4633  else {
4634  delete theImporter;
4635  D.Importer_ = new Epetra_Import(D.ColMap_, D.DomainMap_);
4636  }
4637  } else {
4638  if (D.Importer_ != 0) {
4639  delete D.Importer_;
4640  D.Importer_ = 0;
4641  }
4642  }
4643 
4644  // Create export, if needed
4645  if (!D.RowMap_.SameAs(D.RangeMap_)) {
4646  if (D.Exporter_ != 0) {
4647  delete D.Exporter_;
4648  D.Exporter_ = 0;
4649  }
4650  if(theExporter && theExporter->SourceMap().SameAs(D.RowMap_) && theExporter->TargetMap().SameAs(D.RangeMap_)){
4651  D.Exporter_=theExporter;
4652  }
4653  else {
4654  delete theExporter;
4655      D.Exporter_ = new Epetra_Export(D.RowMap_, D.RangeMap_);
4656    }
4657  } else {
4658  if (D.Exporter_ != 0) {
4659  delete D.Exporter_;
4660  D.Exporter_ = 0;
4661  }
4662  }
4663 
4664 
4665  // Matrix constants
4666  Allocated_ = true;
4667  StaticGraph_ = true;
4668  UseTranspose_ = false;
4671  StorageOptimized_ = true;
4672  squareFillCompleteCalled_ = false; // Always false for the full FillComplete
4673  // Note: Entries in D.Indices_ array point to memory we don't need to dealloc, but the D.Indices_
4674  // array itself needs deallocation.
4675 
4676  // Cleanup existing data
4677  for(int i=0;i<m;i++){
4678  if(Values_) delete [] Values_[i];
4679  }
4680  D.data->SortedEntries_.clear();
4681 
4682  delete [] Values_; Values_=0;
4684  delete [] D.data->Indices_; D.data->Indices_=0;
4686 
4687 
4688  // GraphData constants
4689  D.Filled_ = true;
4690  D.Allocated_ = true;
4691  D.Sorted_ = false;
4692  D.StorageOptimized_ = true;
4693  D.NoRedundancies_ = true;
4694  D.IndicesAreGlobal_ = false;
4695  D.IndicesAreLocal_ = true;
4696  D.IndicesAreContiguous_ = true;
4697  D.GlobalConstantsComputed_ = true;
4698  D.StaticProfile_ = true;
4700  D.HaveColMap_ = true;
4701 
4702  // Don't change the index base
4703 
4704  // Local quantities (no computing)
4705  int nnz=D.IndexOffset_[m]-D.IndexOffset_[0];
4706  D.NumMyRows_ = D.NumMyBlockRows_ = m;
4708  D.NumMyNonzeros_ = D.NumMyEntries_ = nnz;
4709  D.MaxRowDim_ = 1;
4710  D.MaxColDim_ = 1;
4711 
4712  // A priori globals
4713  D.GlobalMaxRowDim_ = 1;
4714  D.GlobalMaxColDim_ = 1;
4715 
4716  // Compute max NZ per row, indices, diagonals, triangular
4717  D.MaxNumIndices_=0;
4718  for(int i=0; i<m; i++){
4719  int NumIndices=D.IndexOffset_[i+1]-D.IndexOffset_[i];
4720  D.MaxNumIndices_=EPETRA_MAX(D.MaxNumIndices_,NumIndices);
4721 
4722  // Code copied from Epetra_CrsGraph.Determine_Triangular()
4723  if(NumIndices > 0) {
4724  const int* col_indices = & D.data->All_Indices_[D.IndexOffset_[i]];
4725 
4726  int jl_0 = col_indices[0];
4727  int jl_n = col_indices[NumIndices-1];
4728 
4729  if(jl_n > i) D.LowerTriangular_ = false;
4730  if(jl_0 < i) D.UpperTriangular_ = false;
4731 
4732 
4733  if(numMyDiagonals == -1) {
4734  // Binary search in GID space
4735  // NOTE: This turns out to be noticeably faster than doing a single LID lookup and then binary searching
4736  // on the LIDs directly.
4737  int insertPoint = -1;
4738  if(UseLL) {
4739 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
4740  long long jg = D.RowMap_.GID64(i);
4741  if (Epetra_Util_binary_search_aux(jg, col_indices, D.ColMap_.MyGlobalElements64(), NumIndices, insertPoint)>-1) {
4742        D.NumMyBlockDiagonals_++;
4743        D.NumMyDiagonals_++;
4744  }
4745 #endif
4746  }
4747  else {
4748 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
4749  int jg = D.RowMap_.GID(i);
4750  if (Epetra_Util_binary_search_aux(jg, col_indices, D.ColMap_.MyGlobalElements(), NumIndices, insertPoint)>-1) {
4751        D.NumMyBlockDiagonals_++;
4752        D.NumMyDiagonals_++;
4753  }
4754 #endif
4755  }
4756  }
4757  }
4758  } // end DetermineTriangular
4759 
4760  if(numMyDiagonals > -1) D.NumMyDiagonals_ = D.NumMyBlockDiagonals_ = numMyDiagonals;
4761 
4763 
4764  // Compute global constants - Code copied from Epetra_CrsGraph.ComputeGlobalConstants()
4765  Epetra_IntSerialDenseVector tempvec(8); // Temp space
4766  tempvec[0] = D.NumMyEntries_;
4767  tempvec[1] = D.NumMyBlockDiagonals_;
4768  tempvec[2] = D.NumMyDiagonals_;
4769  tempvec[3] = D.NumMyNonzeros_;
4770 
4771  Comm().SumAll(&tempvec[0], &tempvec[4], 4);
4772 
4773  D.NumGlobalEntries_ = tempvec[4];
4774  D.NumGlobalBlockDiagonals_ = tempvec[5];
4775  D.NumGlobalDiagonals_ = tempvec[6];
4776  D.NumGlobalNonzeros_ = tempvec[7];
4777 
4778  tempvec[0] = D.MaxNumIndices_;
4779  tempvec[1] = D.MaxNumNonzeros_;
4780 
4781  Comm().MaxAll(&tempvec[0], &tempvec[2], 2);
4782 
4783  D.GlobalMaxNumIndices_ = tempvec[2];
4784  D.GlobalMaxNumNonzeros_ = tempvec[3];
4785  //end Epetra_CrsGraph.ComputeGlobalConstants()
4786 
4787 
4788  // Global Info
4789  if(UseLL) {
4792  }
4793  else {
4796  }
4797  return 0;
4798 }
4799 
4800 // ===================================================================
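// Fused transfer: builds this matrix by importing/exporting SourceMatrix through
// RowTransfer (an Epetra_Import or Epetra_Export) and then finishing with the expert
// fill-complete path above. It optionally restricts the communicator to the processes
// that still own rows, and accepts a separate DomainTransfer for rectangular cases.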
4801 template<class TransferType>
4802 void
4803 Epetra_CrsMatrix::FusedTransfer(const Epetra_CrsMatrix & SourceMatrix,
4804  const TransferType& RowTransfer,
4805  const TransferType* DomainTransfer,
4806  const Epetra_Map* theDomainMap,
4807  const Epetra_Map* theRangeMap,
4808  const bool RestrictCommunicator)
4809 {
4810  // Fused constructor, import & FillComplete
4811  int rv;
4812  int N = NumMyRows();
4813  bool communication_needed = RowTransfer.SourceMap().DistributedGlobal();
4814 
4815  bool UseLL=false;
4816  if(SourceMatrix.RowMap().GlobalIndicesInt()) {
4817  UseLL=false;
4818  }
4819  else if(SourceMatrix.RowMap().GlobalIndicesLongLong()) {
4820  UseLL=true;
4821  }
4822  else
4823  throw ReportError("Epetra_CrsMatrix: Fused import/export constructor unable to determine source global index type",-1);
4824 
4825  // The basic algorithm here is:
4826  // 1) Call Distor.Do to handle the import.
4827  // 2) Copy all the Imported and Copy/Permuted data into the raw Epetra_CrsMatrix / Epetra_CrsGraphData pointers, still using GIDs.
4828  // 3) Call an optimized version of MakeColMap that avoids the Directory lookups (since the importer knows who owns all the gids) AND
4829  // reindexes to LIDs.
4830  // 4) Call ExpertStaticFillComplete()
4831 
4832  // Sanity Check
4833  if (!SourceMatrix.RowMap().SameAs(RowTransfer.SourceMap()))
4834  throw ReportError("Epetra_CrsMatrix: Fused import/export constructor requires RowTransfer.SourceMap() to match SourceMatrix.RowMap()",-2);
4835 
4836  // Owning PIDs
4837  std::vector<int> SourcePids;
4838  std::vector<int> TargetPids;
4839  int MyPID = Comm().MyPID();
4840 
4841  // The new Domain & Range maps
4842  const Epetra_Map* MyRowMap = &RowMap();
4843  const Epetra_Map* MyDomainMap = theDomainMap ? theDomainMap : &SourceMatrix.DomainMap();
4844  const Epetra_Map* MyRangeMap = theRangeMap ? theRangeMap : &RowMap();
4845  const Epetra_Map* BaseRowMap = &RowMap();
4846  const Epetra_Map* BaseDomainMap = MyDomainMap;
4847 
4848  // Temp variables for sub-communicators
4849  Epetra_Map *ReducedRowMap=0, *ReducedDomainMap=0, *ReducedRangeMap=0;
4850  const Epetra_Comm * ReducedComm = 0;
4851 
4852  /***************************************************/
4853  /***** 1) First communicator restriction phase ****/
4854  /***************************************************/
4855  if(RestrictCommunicator) {
4856  ReducedRowMap = BaseRowMap->RemoveEmptyProcesses();
4857  ReducedComm = ReducedRowMap ? &ReducedRowMap->Comm() : 0;
4858 
4859  // Now we have to strip down the communicators for the Domain & Range Maps. Check first if we're lucky
4860  ReducedDomainMap = RowMap().DataPtr() == MyDomainMap->DataPtr() ? ReducedRowMap : MyDomainMap->ReplaceCommWithSubset(ReducedComm);
4861  ReducedRangeMap = RowMap().DataPtr() == MyRangeMap->DataPtr() ? ReducedRowMap : MyRangeMap->ReplaceCommWithSubset(ReducedComm);
4862 
4863  // Reset the "my" maps
4864  MyRowMap = ReducedRowMap;
4865  MyDomainMap = ReducedDomainMap;
4866  MyRangeMap = ReducedRangeMap;
4867 
4868  // Update my PID, if we've restricted the communicator
4869  if(ReducedComm) MyPID = ReducedComm->MyPID();
4870  else MyPID=-2; // For Debugging
4871  }
4872 
4873  /***************************************************/
4874  /***** 2) From Epetra_DistObject::DoTransfer() *****/
4875  /***************************************************/
4876 
4877  // Get the owning PIDs
4878  // can we avoid the SameAs calls?
4879  const Epetra_Import *MyImporter= SourceMatrix.Importer();
4880 
4881  // check whether the domain map of the source matrix and the base domain map are the same
4882  bool bSameDomainMap = BaseDomainMap->SameAs(SourceMatrix.DomainMap());
4883 
4884  // Different use cases
4885  if(!RestrictCommunicator && MyImporter && bSameDomainMap) {
4886  // Same domain map as source matrix (no restriction of communicator)
4887  // NOTE: This won't work for restrictComm (because the Importer doesn't know the restricted PIDs), though
4888  // writing an optimized version for that case would be easy (Import an IntVector of the new PIDs). Might
4889  // want to add this later.
4890  Epetra_Util::GetPids(*MyImporter,SourcePids,false);
4891  }
4892  else if (RestrictCommunicator && MyImporter && bSameDomainMap) {
4893  // Same domain map as source matrix (restricted communicator)
4894  // We need one import from the domain to the column map
4895  Epetra_IntVector SourceDomain_pids(SourceMatrix.DomainMap(),true);
4896 
4897  SourcePids.resize(SourceMatrix.ColMap().NumMyElements(),0);
4898  Epetra_IntVector SourceCol_pids(View,SourceMatrix.ColMap(), Epetra_Util_data_ptr(SourcePids));
4899  // SourceDomain_pids contains the restricted pids
4900  SourceDomain_pids.PutValue(MyPID);
4901 
4902  SourceCol_pids.Import(SourceDomain_pids,*MyImporter,Insert);
4903  }
4904  else if(!MyImporter && bSameDomainMap) {
4905  // Same domain map as source matrix (no off-processor entries)
4906  // Matrix has no off-processor entries
4907  // Special case for multigrid (tentative transfer operators...)
4908  SourcePids.resize(SourceMatrix.ColMap().NumMyElements());
4909  SourcePids.assign(SourceMatrix.ColMap().NumMyElements(),MyPID);
4910  }
4911  else if (MyImporter && DomainTransfer) {
4912  // general implementation for rectangular matrices with
4913  // domain map different than SourceMatrix domain map.
4914  // User has to provide a DomainTransfer object. We need
4916  // two communications (import/export)
4916 
4917  // TargetDomain_pids lives on the rebalanced new domain map
4918  Epetra_IntVector TargetDomain_pids(*theDomainMap,true);
4919  TargetDomain_pids.PutValue(MyPID);
4920 
4921  // SourceDomain_pids lives on the non-rebalanced old domain map
4922  Epetra_IntVector SourceDomain_pids(SourceMatrix.DomainMap());
4923 
4924  // Associate SourcePids vector with non-rebalanced column map
4925  SourcePids.resize(SourceMatrix.ColMap().NumMyElements(),0);
4926  Epetra_IntVector SourceCol_pids(View,SourceMatrix.ColMap(), Epetra_Util_data_ptr(SourcePids));
4927 
4928  // create an Import object between non-rebalanced (=source) and rebalanced domain map (=target)
4929  if(typeid(TransferType)==typeid(Epetra_Import) && DomainTransfer->TargetMap().SameBlockMapDataAs(*theDomainMap))
4930  SourceDomain_pids.Export(TargetDomain_pids,*DomainTransfer,Insert);
4931  else if(typeid(TransferType)==typeid(Epetra_Export) && DomainTransfer->SourceMap().SameBlockMapDataAs(*theDomainMap))
4932  SourceDomain_pids.Import(TargetDomain_pids,*DomainTransfer,Insert);
4933  else
4934  throw ReportError("Epetra_CrsMatrix: Fused import/export constructor TransferType must be Epetra_Import or Epetra_Export",-31);
4935 
4936  SourceCol_pids.Import(SourceDomain_pids,*MyImporter,Insert);
4937  }
4938  else if(BaseDomainMap->SameAs(*BaseRowMap) && SourceMatrix.DomainMap().SameAs(SourceMatrix.RowMap())){
4939  // Special case for square matrices where the user has only provided a RowTransfer object.
4940  // This branch can probably be removed
4941 
4942  // We can use the RowTransfer + SourceMatrix's importer to find out who owns what.
4943  Epetra_IntVector TargetRow_pids(*theDomainMap,true);
4944  Epetra_IntVector SourceRow_pids(SourceMatrix.RowMap());
4945  SourcePids.resize(SourceMatrix.ColMap().NumMyElements(),0);
4946  Epetra_IntVector SourceCol_pids(View,SourceMatrix.ColMap(), Epetra_Util_data_ptr(SourcePids));
4947 
4948  TargetRow_pids.PutValue(MyPID);
4949  if(typeid(TransferType)==typeid(Epetra_Import))
4950  SourceRow_pids.Export(TargetRow_pids,RowTransfer,Insert);
4951  else if(typeid(TransferType)==typeid(Epetra_Export))
4952  SourceRow_pids.Import(TargetRow_pids,RowTransfer,Insert);
4953  else
4954  throw ReportError("Epetra_CrsMatrix: Fused import/export constructor TransferType must be Epetra_Import or Epetra_Export",-31);
4955  SourceCol_pids.Import(SourceRow_pids,*MyImporter,Insert);
4956  }
4957  else {
4958  // The only remaining possibility is an error, e.g. MyImporter is null.
4959  std::cout << "Error in FusedTransfer:" << std::endl;
4960  std::cout << "SourceMatrix.RangeMap " << SourceMatrix.RangeMap().NumMyElements() << std::endl;
4961  std::cout << "SourceMatrix.DomainMap " << SourceMatrix.DomainMap().NumMyElements() << std::endl;
4962  std::cout << "BaseRowMap " << BaseRowMap->NumMyElements() << std::endl;
4963  std::cout << "BaseDomainMap" << BaseDomainMap->NumMyElements() << std::endl;
4964  if(DomainTransfer == NULL) std::cout << "DomainTransfer = NULL" << std::endl;
4965  else std::cout << "DomainTransfer is not NULL" << std::endl;
4966  throw ReportError("Epetra_CrsMatrix: Fused import/export constructor only supports *theDomainMap==SourceMatrix.DomainMap() || DomainTransfer != NULL || *theDomainMap==RowTransfer.TargetMap() && SourceMatrix.DomainMap() == SourceMatrix.RowMap().",-4);
4967  }
4968 
4969  // Get information from the Importer
4970  int NumSameIDs = RowTransfer.NumSameIDs();
4971  int NumPermuteIDs = RowTransfer.NumPermuteIDs();
4972  int NumRemoteIDs = RowTransfer.NumRemoteIDs();
4973  int NumExportIDs = RowTransfer.NumExportIDs();
4974  int* ExportLIDs = RowTransfer.ExportLIDs();
4975  int* RemoteLIDs = RowTransfer.RemoteLIDs();
4976  int* PermuteToLIDs = RowTransfer.PermuteToLIDs();
4977  int* PermuteFromLIDs = RowTransfer.PermuteFromLIDs();
4978  Epetra_Distributor& Distor = RowTransfer.Distributor();
4979 
4980  // Pull pointers & allocate memory (rowptr should be the right size already)
4981  Epetra_IntSerialDenseVector& CSR_rowptr = ExpertExtractIndexOffset();
4982  Epetra_IntSerialDenseVector& CSR_colind = ExpertExtractIndices();
4983  double *& CSR_vals = ExpertExtractValues();
4984  CSR_rowptr.Resize(N+1);
4985 
4986  // Unused if we're not in LL mode
4987  std::vector<long long> CSR_colind_LL;
4988 
4989  rv=CheckSizes(SourceMatrix);
4990  if(rv) throw ReportError("Epetra_CrsMatrix: Fused import/export constructor failed in CheckSizes()",-3);
4991 
4992  int SizeOfPacket;
4993  bool VarSizes = false;
4994  if( NumExportIDs > 0) {
4995  delete [] Sizes_;
4996  Sizes_ = new int[NumExportIDs];
4997  }
4998 
4999  // Pack & Prepare w/ owning PIDs
5000  rv=Epetra_Import_Util::PackAndPrepareWithOwningPIDs(SourceMatrix,
5001                                                      NumExportIDs,ExportLIDs,
5002  LenExports_,Exports_,SizeOfPacket,
5003  Sizes_,VarSizes,SourcePids);
5004  if(rv) throw ReportError("Epetra_CrsMatrix: Fused import/export constructor failed in PackAndPrepareWithOwningPIDs()",-5);
5005 
5006  if (communication_needed) {
5007  // Do the exchange of remote data
5008  if( VarSizes )
5009  rv=Distor.Do(Exports_, SizeOfPacket, Sizes_, LenImports_, Imports_);
5010  else
5011  rv=Distor.Do(Exports_, SizeOfPacket, LenImports_, Imports_);
5012  }
5013  if(rv) throw ReportError("Epetra_CrsMatrix: Fused import/export constructor failed in Distor.Do",-6);
5014 
5015 
5016  /*********************************************************************/
5017  /**** 3) Copy all of the Same/Permute/Remote data into CSR_arrays ****/
5018  /*********************************************************************/
5019  // Count nnz
5020  int mynnz = Epetra_Import_Util::UnpackWithOwningPIDsCount(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_);
5021  // Presume the RowPtr is the right size
5022  // Allocate CSR_colind & CSR_values arrays
5023  CSR_colind.Resize(mynnz);
5024  if(UseLL) CSR_colind_LL.resize(mynnz);
5025  delete [] CSR_vals; // should be a noop.
5026  CSR_vals = new double[mynnz];
5027 
5028  // Unpack into arrays
5029  if(UseLL)
5030  Epetra_Import_Util::UnpackAndCombineIntoCrsArrays(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_,NumMyRows(),mynnz,MyPID,CSR_rowptr.Values(),Epetra_Util_data_ptr(CSR_colind_LL),CSR_vals,SourcePids,TargetPids);
5031  else
5032  Epetra_Import_Util::UnpackAndCombineIntoCrsArrays(SourceMatrix,NumSameIDs,NumRemoteIDs,RemoteLIDs,NumPermuteIDs,PermuteToLIDs,PermuteFromLIDs,LenImports_,Imports_,NumMyRows(),mynnz,MyPID,CSR_rowptr.Values(),CSR_colind.Values(),CSR_vals,SourcePids,TargetPids);
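 // The target's CSR_rowptr / CSR_colind / CSR_vals arrays now hold the fully
 // combined matrix, but the column indices are still global ids; step 5 below
 // builds the column map and reindexes them to local ids.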
5033 
5034  /***************************************************/
5035  /**** 4) Second communicator restriction phase ****/
5036  /***************************************************/
5037  if(RestrictCommunicator) {
5038  // Dangerous: If we're not in the new communicator, call it quits here. The user is then responsible
5039  // for not breaking anything on the return. Thankfully, the dummy RowMap should report no local unknowns,
5040  // so the user can at least test for this particular case.
5041  RemoveEmptyProcessesInPlace(ReducedRowMap);
5042 
5043  if(ReducedComm) {
5044  // Replace the RowMap
5045  Graph_.CrsGraphData_->RowMap_ = *MyRowMap;
5047  }
5048  else {
5049  // Replace all the maps with dummy maps with SerialComm, then quit
5050 #if defined(EPETRA_NO_32BIT_GLOBAL_INDICES) && defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
5051  Epetra_Map DummyMap;
5052 #else
5053  Epetra_SerialComm SComm;
5054  Epetra_Map DummyMap(0,0,SComm);
5055 #endif
5056  Graph_.CrsGraphData_->RowMap_ = DummyMap;
5057  Graph_.CrsGraphData_->ColMap_ = DummyMap;
5058  Graph_.CrsGraphData_->RangeMap_ = DummyMap;
5059  Graph_.CrsGraphData_->DomainMap_ = DummyMap;
5061  return;
5062  }
5063  }
5064 
5065  /**************************************************************/
5066  /**** 5) Call Optimized MakeColMap w/ no Directory Lookups ****/
5067  /**************************************************************/
5068  // Call an optimized version of MakeColMap that avoids the Directory lookups (since the importer knows who owns all the GIDs).
5069  std::vector<int> RemotePIDs;
5070  int * pids_ptr = Epetra_Util_data_ptr(TargetPids);
5071  if(UseLL) {
5072 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
5073  long long * CSR_colind_LL_ptr = Epetra_Util_data_ptr(CSR_colind_LL);
5074  Epetra_Import_Util::LowCommunicationMakeColMapAndReindex(N,CSR_rowptr.Values(),CSR_colind.Values(),CSR_colind_LL_ptr,
5075  *MyDomainMap,pids_ptr,
5079 #endif
5080  }
5081  else {
5082 #ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
5083  Epetra_Import_Util::LowCommunicationMakeColMapAndReindex(N,CSR_rowptr.Values(),CSR_colind.Values(),*MyDomainMap,pids_ptr,
5087 #endif
5088  }
5089 
5090  /***************************************************/
5091  /**** 6) Sort (and merge if needed) ****/
5092  /***************************************************/
5093  // Sort the entries
5094  const Epetra_Import* xferAsImport = dynamic_cast<const Epetra_Import*> (&RowTransfer);
5095  if(xferAsImport)
5096  Epetra_Util::SortCrsEntries(N, CSR_rowptr.Values(), CSR_colind.Values(), CSR_vals);
5097  else
5098  Epetra_Util::SortAndMergeCrsEntries(N, CSR_rowptr.Values(), CSR_colind.Values(), CSR_vals);
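 // With an Epetra_Import each target row is received from exactly one source row,
 // so sorting the column entries is sufficient; with an Epetra_Export several
 // source rows may contribute to the same target row, so duplicate column
 // entries are merged as well.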
5099 
5100  /***************************************************/
5101  /**** 7) Build Importer & Call ESFC ****/
5102  /***************************************************/
5103  // Pre-build the importer using the existing PIDs
5104  Epetra_Import * MyImport=0;
5105  int NumRemotePIDs = RemotePIDs.size();
5106  int *RemotePIDs_ptr = Epetra_Util_data_ptr(RemotePIDs);
5107  // if(!RestrictCommunicator && !MyDomainMap->SameAs(ColMap()))
5108  if(!MyDomainMap->SameAs(ColMap()))
5109  MyImport = new Epetra_Import(ColMap(),*MyDomainMap,NumRemotePIDs,RemotePIDs_ptr);
5110 
5111  // Note: At the moment, the RemotePIDs_ptr won't work with the restricted communicator.
5112  // This should be fixed.
5113  ExpertStaticFillComplete(*MyDomainMap,*MyRangeMap,MyImport);
5114 
5115  // Note: ExpertStaticFillComplete assumes ownership of the importer, if we made one,
5116  // so we must not deallocate it here.
5117 
5118  // Cleanup
5119  if(ReducedDomainMap!=ReducedRowMap) delete ReducedDomainMap;
5120  if(ReducedRangeMap !=ReducedRowMap) delete ReducedRangeMap;
5121  delete ReducedRowMap;
5122 }// end FusedTransfer
5123 
5124 
5125 
5126 // ===================================================================
5127 Epetra_CrsMatrix::Epetra_CrsMatrix(const Epetra_CrsMatrix & SourceMatrix, const Epetra_Import & RowImporter,const Epetra_Map * theDomainMap, const Epetra_Map * theRangeMap, bool RestrictCommunicator)
5128  : Epetra_DistObject(RowImporter.TargetMap(), "Epetra::CrsMatrix"),
5129  Epetra_CompObject(),
5130  Epetra_BLAS(),
5131  Graph_(Copy, RowImporter.TargetMap(), 0, false),
5132  Values_(0),
5133  Values_alloc_lengths_(0),
5134  All_Values_(0),
5135  NumMyRows_(RowImporter.TargetMap().NumMyPoints()),
5136  ImportVector_(0),
5137  ExportVector_(0),
5138  CV_(Copy)
5139 {
5140  FusedTransfer<Epetra_Import>(SourceMatrix,RowImporter,NULL,theDomainMap,theRangeMap,RestrictCommunicator);
5141 }// end fused import constructor
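// Illustrative sketch (A and NewRowMap are placeholder names, not part of this
// file): the fused import constructor above performs, in one pass, roughly what
// the usual three-step rebalancing sequence does:
//
//   Epetra_CrsMatrix B(Copy, NewRowMap, 0);
//   B.Import(A, Importer, Insert);
//   B.FillComplete(A.DomainMap(), A.RangeMap());
//
// becomes the single call
//
//   Epetra_Import Importer(NewRowMap, A.RowMap());
//   Epetra_CrsMatrix B(A, Importer, &A.DomainMap(), &A.RangeMap(), false);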
5142 
5143 
5144 // ===================================================================
5145 Epetra_CrsMatrix::Epetra_CrsMatrix(const Epetra_CrsMatrix & SourceMatrix, const Epetra_Export & RowExporter,const Epetra_Map * theDomainMap, const Epetra_Map * theRangeMap, bool RestrictCommunicator)
5146  : Epetra_DistObject(RowExporter.TargetMap(), "Epetra::CrsMatrix"),
5147  Epetra_CompObject(),
5148  Epetra_BLAS(),
5149  Graph_(Copy, RowExporter.TargetMap(), 0, false),
5150  Values_(0),
5151  Values_alloc_lengths_(0),
5152  All_Values_(0),
5153  NumMyRows_(RowExporter.TargetMap().NumMyPoints()),
5154  ImportVector_(0),
5155  ExportVector_(0),
5156  CV_(Copy)
5157 {
5158  FusedTransfer<Epetra_Export>(SourceMatrix,RowExporter,NULL,theDomainMap,theRangeMap,RestrictCommunicator);
5159 } // end fused export constructor
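// Illustrative sketch (A, OverlapRowMap, UniqueRowMap, UniqueDomainMap and
// UniqueRangeMap are placeholder names): the fused export constructor is
// typically used to gather an overlapped, locally assembled matrix onto a
// uniquely owned row map in one pass, e.g.
//
//   Epetra_Export Exporter(OverlapRowMap, UniqueRowMap);   // source map, target map
//   Epetra_CrsMatrix B(A, Exporter, &UniqueDomainMap, &UniqueRangeMap, false);
//
// instead of constructing B on UniqueRowMap, exporting A into it, and then
// calling FillComplete().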
5160 
5161 // ===================================================================
5162 Epetra_CrsMatrix::Epetra_CrsMatrix(const Epetra_CrsMatrix & SourceMatrix, const Epetra_Import & RowImporter, const Epetra_Import * DomainImporter, const Epetra_Map * theDomainMap, const Epetra_Map * theRangeMap, bool RestrictCommunicator)
5163  : Epetra_DistObject(RowImporter.TargetMap(), "Epetra::CrsMatrix"),
5164  Epetra_CompObject(),
5165  Epetra_BLAS(),
5166  Graph_(Copy, RowImporter.TargetMap(), 0, false),
5167  Values_(0),
5168  Values_alloc_lengths_(0),
5169  All_Values_(0),
5170  NumMyRows_(RowImporter.TargetMap().NumMyPoints()),
5171  ImportVector_(0),
5172  ExportVector_(0),
5173  CV_(Copy)
5174 {
5175  FusedTransfer<Epetra_Import>(SourceMatrix,RowImporter,DomainImporter,theDomainMap,theRangeMap,RestrictCommunicator);
5176 }// end fused import constructor
5177 
5178 
5179 // ===================================================================
5180 Epetra_CrsMatrix::Epetra_CrsMatrix(const Epetra_CrsMatrix & SourceMatrix, const Epetra_Export & RowExporter, const Epetra_Export * DomainExporter, const Epetra_Map * theDomainMap, const Epetra_Map * theRangeMap, bool RestrictCommunicator)
5181  : Epetra_DistObject(RowExporter.TargetMap(), "Epetra::CrsMatrix"),
5182  Epetra_CompObject(),
5183  Epetra_BLAS(),
5184  Graph_(Copy, RowExporter.TargetMap(), 0, false),
5185  Values_(0),
5186  Values_alloc_lengths_(0),
5187  All_Values_(0),
5188  NumMyRows_(RowExporter.TargetMap().NumMyPoints()),
5189  ImportVector_(0),
5190  ExportVector_(0),
5191  CV_(Copy)
5192 {
5193  FusedTransfer<Epetra_Export>(SourceMatrix,RowExporter,DomainExporter,theDomainMap,theRangeMap,RestrictCommunicator);
5194 } // end fused export constructor
5195 
5196 // ===================================================================
5197 void Epetra_CrsMatrix::FusedImport(const Epetra_CrsMatrix & SourceMatrix,
5198  const Epetra_Import & RowImporter,
5199  const Epetra_Map * theDomainMap,
5200  const Epetra_Map * theRangeMap,
5201  bool RestrictCommunicator) {
5202  FusedTransfer<Epetra_Import>(SourceMatrix,RowImporter,NULL,theDomainMap,theRangeMap,RestrictCommunicator);
5203 } // end fused import non-constructor
5204 
5205 // ===================================================================
5206 void Epetra_CrsMatrix::FusedExport(const Epetra_CrsMatrix & SourceMatrix,
5207  const Epetra_Export & RowExporter,
5208  const Epetra_Map * theDomainMap,
5209  const Epetra_Map * theRangeMap,
5210  bool RestrictCommunicator) {
5211  FusedTransfer<Epetra_Export>(SourceMatrix,RowExporter,NULL,theDomainMap,theRangeMap,RestrictCommunicator);
5212 } // end fused export non-constructor
5213 
5214 void Epetra_CrsMatrix::FusedImport(const Epetra_CrsMatrix & SourceMatrix,
5215  const Epetra_Import & RowImporter,
5216  const Epetra_Import * DomainImporter,
5217  const Epetra_Map * theDomainMap,
5218  const Epetra_Map * theRangeMap,
5219  bool RestrictCommunicator) {
5220  FusedTransfer<Epetra_Import>(SourceMatrix,RowImporter,DomainImporter,theDomainMap,theRangeMap,RestrictCommunicator);
5221 } // end fused import non-constructor
5222 
5223 // ===================================================================
5224 void Epetra_CrsMatrix::FusedExport(const Epetra_CrsMatrix & SourceMatrix,
5225  const Epetra_Export & RowExporter,
5226  const Epetra_Export * DomainExporter,
5227  const Epetra_Map * theDomainMap,
5228  const Epetra_Map * theRangeMap,
5229  bool RestrictCommunicator) {
5230  FusedTransfer<Epetra_Export>(SourceMatrix,RowExporter,DomainExporter,theDomainMap,theRangeMap,RestrictCommunicator);
5231 } // end fused export non-constructor