TpetraExt_TripleMatrixMultiply_def.hpp
// @HEADER
// ***********************************************************************
//
// Tpetra: Templated Linear Algebra Services Package
// Copyright (2008) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ************************************************************************
// @HEADER
#ifndef TPETRA_TRIPLEMATRIXMULTIPLY_DEF_HPP
#define TPETRA_TRIPLEMATRIXMULTIPLY_DEF_HPP

#include "TpetraExt_MatrixMatrix_ExtraKernels_decl.hpp" // for UnmanagedView
#include "Teuchos_VerboseObject.hpp"
#include "Teuchos_Array.hpp"
#include "Tpetra_Util.hpp"
#include "Tpetra_ConfigDefs.hpp"
#include "Tpetra_CrsMatrix.hpp"
#include "Tpetra_RowMatrixTransposer.hpp"
#include "Tpetra_Map.hpp"
#include "Tpetra_Export.hpp"
#include "Tpetra_Import_Util.hpp"
#include "Tpetra_Import_Util2.hpp"
#include <algorithm>
#include <cmath>
#include "Teuchos_FancyOStream.hpp"
// #include "KokkosSparse_spgemm.hpp"

/*********************************************************************************************************/
// Include the architecture-specific kernel partial specializations here
// NOTE: This needs to be outside all namespaces
#include "TpetraExt_MatrixMatrix_OpenMP.hpp"
#include "TpetraExt_MatrixMatrix_Cuda.hpp"

namespace Tpetra {

  namespace TripleMatrixMultiply {

    //
    // This method forms the matrix-matrix product Ac = op(R) * op(A) * op(P), where
    //   op(A) == A   if transposeA is false,
    //   op(A) == A^T if transposeA is true,
    // and similarly for op(R) and op(P).
    //
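    // For illustration (this example is not part of the original source): the
    // most common use case is the Galerkin coarse-grid product Ac = P^T * A * P,
    // obtained by passing the same matrix P as both the R and P arguments with
    // transposeR = true; that case dispatches to the specialized PT_A_P
    // kernels below. A minimal sketch, assuming fill-complete CrsMatrix
    // objects A and P and the declaration's default values for the trailing
    // parameters (call_FillComplete_on_result, label, params):
    //
    //   typedef Tpetra::CrsMatrix<> crs_matrix_type;
    //   Teuchos::RCP<crs_matrix_type> Ac =
    //     Teuchos::rcp (new crs_matrix_type (P->getDomainMap (), 0));
    //   Tpetra::TripleMatrixMultiply::MultiplyRAP (*P, true,   // op(R) = P^T
    //                                              *A, false,  // op(A) = A
    //                                              *P, false,  // op(P) = P
    //                                              *Ac);       // result, fill-completed on return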
    template <class Scalar,
              class LocalOrdinal,
              class GlobalOrdinal,
              class Node>
    void MultiplyRAP(const CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& R,
                     bool transposeR,
                     const CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& A,
                     bool transposeA,
                     const CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& P,
                     bool transposeP,
                     CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
                     bool call_FillComplete_on_result,
                     const std::string& label,
                     const Teuchos::RCP<Teuchos::ParameterList>& params)
    {
      using Teuchos::null;
      using Teuchos::RCP;
      typedef Scalar SC;
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;
      typedef CrsMatrix<SC,LO,GO,NO> crs_matrix_type;
      typedef Import<LO,GO,NO> import_type;
      typedef Export<LO,GO,NO> export_type;
      typedef CrsMatrixStruct<SC,LO,GO,NO> crs_matrix_struct_type;
      typedef Map<LO,GO,NO> map_type;
      typedef RowMatrixTransposer<SC,LO,GO,NO> transposer_type;

#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      RCP<Teuchos::TimeMonitor> MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP All Setup"))));
#endif

      const std::string prefix = "TpetraExt::TripleMatrixMultiply::MultiplyRAP(): ";

      // TEUCHOS_FUNC_TIME_MONITOR_DIFF("My Matrix Mult", mmm_multiply);

      // The input matrices R, A and P must all be fillComplete.
      TEUCHOS_TEST_FOR_EXCEPTION(!R.isFillComplete(), std::runtime_error, prefix << "Matrix R is not fill complete.");
      TEUCHOS_TEST_FOR_EXCEPTION(!A.isFillComplete(), std::runtime_error, prefix << "Matrix A is not fill complete.");
      TEUCHOS_TEST_FOR_EXCEPTION(!P.isFillComplete(), std::runtime_error, prefix << "Matrix P is not fill complete.");

      // If transposeR is true, then Rprime will be the transpose of R
      // (computed explicitly via RowMatrixTransposer). Otherwise, Rprime
      // will just be a pointer to R.
      RCP<const crs_matrix_type> Rprime = null;
      // If transposeA is true, then Aprime will be the transpose of A
      // (computed explicitly via RowMatrixTransposer). Otherwise, Aprime
      // will just be a pointer to A.
      RCP<const crs_matrix_type> Aprime = null;
      // If transposeP is true, then Pprime will be the transpose of P
      // (computed explicitly via RowMatrixTransposer). Otherwise, Pprime
      // will just be a pointer to P.
      RCP<const crs_matrix_type> Pprime = null;

      // Is this a "clean" matrix?
      //
      // mfh 27 Sep 2016: Historically, if Epetra_CrsMatrix was neither
      // locally nor globally indexed, then it was empty. I don't like
      // this, because the most straightforward implementation presumes
      // lazy allocation of indices. However, historical precedent
      // demands that we keep around this predicate as a way to test
      // whether the matrix is empty.
      const bool newFlag = !Ac.getGraph()->isLocallyIndexed() && !Ac.getGraph()->isGloballyIndexed();

      using Teuchos::ParameterList;
      RCP<ParameterList> transposeParams (new ParameterList);
      transposeParams->set ("sort", false);

      if (transposeR && &R != &P) {
        transposer_type transposer(rcpFromRef (R));
        Rprime = transposer.createTranspose (transposeParams);
      } else {
        Rprime = rcpFromRef(R);
      }

      if (transposeA) {
        transposer_type transposer(rcpFromRef (A));
        Aprime = transposer.createTranspose (transposeParams);
      } else {
        Aprime = rcpFromRef(A);
      }

      if (transposeP) {
        transposer_type transposer(rcpFromRef (P));
        Pprime = transposer.createTranspose (transposeParams);
      } else {
        Pprime = rcpFromRef(P);
      }

      // Check size compatibility
      global_size_t numRCols = R.getDomainMap()->getGlobalNumElements();
      global_size_t numACols = A.getDomainMap()->getGlobalNumElements();
      global_size_t numPCols = P.getDomainMap()->getGlobalNumElements();
      global_size_t Rleft  = transposeR ? numRCols             : R.getGlobalNumRows();
      global_size_t Rright = transposeR ? R.getGlobalNumRows() : numRCols;
      global_size_t Aleft  = transposeA ? numACols             : A.getGlobalNumRows();
      global_size_t Aright = transposeA ? A.getGlobalNumRows() : numACols;
      global_size_t Pleft  = transposeP ? numPCols             : P.getGlobalNumRows();
      global_size_t Pright = transposeP ? P.getGlobalNumRows() : numPCols;
      TEUCHOS_TEST_FOR_EXCEPTION(Rright != Aleft, std::runtime_error,
                                 prefix << "ERROR, inner dimensions of op(R) and op(A) "
                                 "must match for matrix-matrix product. op(R) is "
                                 << Rleft << "x" << Rright << ", op(A) is "<< Aleft << "x" << Aright);

      TEUCHOS_TEST_FOR_EXCEPTION(Aright != Pleft, std::runtime_error,
                                 prefix << "ERROR, inner dimensions of op(A) and op(P) "
                                 "must match for matrix-matrix product. op(A) is "
                                 << Aleft << "x" << Aright << ", op(P) is "<< Pleft << "x" << Pright);

      // The result matrix Ac must at least have a row-map that reflects the correct
      // row-size. Don't check the number of columns because rectangular matrices
      // which were constructed with only one map can still end up having the
      // correct capacity and dimensions when filled.
      TEUCHOS_TEST_FOR_EXCEPTION(Rleft > Ac.getGlobalNumRows(), std::runtime_error,
                                 prefix << "ERROR, dimensions of result Ac must "
                                 "match dimensions of op(R) * op(A) * op(P). Ac has " << Ac.getGlobalNumRows()
                                 << " rows, should have at least " << Rleft << std::endl);

      // It doesn't matter whether Ac is already Filled or not. If it is already
      // Filled, it must have space allocated for the positions that will be
      // referenced in forming Ac = op(R)*op(A)*op(P). If it doesn't have enough space,
      // we'll error out later when trying to store result values.

      // CGB: However, matrix must be in active-fill
      TEUCHOS_TEST_FOR_EXCEPT( Ac.isFillActive() == false );

      // We're going to need to import remotely-owned sections of P if
      // more than one process is participating, depending on the scenario.
      int numProcs = P.getComm()->getSize();

      // Declare a couple of structs that will be used to hold views of the data
      // of R, A and P, to be used for fast access during the matrix-multiplication.
      crs_matrix_struct_type Rview;
      crs_matrix_struct_type Aview;
      crs_matrix_struct_type Pview;

      RCP<const map_type> targetMap_R = Rprime->getRowMap();
      RCP<const map_type> targetMap_A = Aprime->getRowMap();
      RCP<const map_type> targetMap_P = Pprime->getRowMap();

#ifdef HAVE_TPETRA_MMM_TIMINGS
      MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP All I&X"))));
#endif

      // Now import any needed remote rows and populate the Aview struct
      // NOTE: We assert that an import isn't needed --- since we do the transpose
      // above to handle that.
      RCP<const import_type> dummyImporter;

      if (!(transposeR && &R == &P))
        MMdetails::import_and_extract_views(*Rprime, targetMap_R, Rview, dummyImporter, true, label, params);

      MMdetails::import_and_extract_views(*Aprime, targetMap_A, Aview, dummyImporter, true, label, params);

      // We will also need local access to all rows of P that correspond to the
      // column-map of op(A).
      if (numProcs > 1)
        targetMap_P = Aprime->getColMap();

      // Import any needed remote rows and populate the Pview struct.
      MMdetails::import_and_extract_views(*Pprime, targetMap_P, Pview, Aprime->getGraph()->getImporter(), false, label, params);


      RCP<Tpetra::CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> > Actemp;

      bool needs_final_export = !Pprime->getGraph()->getImporter().is_null();
      if (needs_final_export)
        Actemp = rcp(new Tpetra::CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>(Pprime->getColMap(),0));
      else
        Actemp = rcp(&Ac,false); // don't allow deallocation

#ifdef HAVE_TPETRA_MMM_TIMINGS
      MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP All Multiply"))));
#endif

      // Call the appropriate method to perform the actual multiplication.
      if (call_FillComplete_on_result && newFlag) {
        if (transposeR && &R == &P)
          MMdetails::mult_PT_A_P_newmatrix(Aview, Pview, *Actemp, label, params);
        else
          MMdetails::mult_R_A_P_newmatrix(Rview, Aview, Pview, *Actemp, label, params);
      } else if (call_FillComplete_on_result) {
        if (transposeR && &R == &P)
          MMdetails::mult_PT_A_P_reuse(Aview, Pview, *Actemp, label, params);
        else
          MMdetails::mult_R_A_P_reuse(Rview, Aview, Pview, *Actemp, label, params);
      } else {
        // mfh 27 Sep 2016: Is this the "slow" case? This
        // "CrsWrapper_CrsMatrix" thing could perhaps be made to support
        // thread-parallel inserts, but that may take some effort.
        // CrsWrapper_CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> crsmat(Ac);

        // MMdetails::mult_A_B(Aview, Bview, crsmat, label,params);

        // #ifdef HAVE_TPETRA_MMM_TIMINGS
        //   MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP All FillComplete"))));
        // #endif
        // if (call_FillComplete_on_result) {
        //   // We'll call FillComplete on the C matrix before we exit, and give it a
        //   // domain-map and a range-map.
        //   // The domain-map will be the domain-map of B, unless
        //   // op(B)==transpose(B), in which case the range-map of B will be used.
        //   // The range-map will be the range-map of A, unless op(A)==transpose(A),
        //   // in which case the domain-map of A will be used.
        //   if (!C.isFillComplete())
        //     C.fillComplete(Bprime->getDomainMap(), Aprime->getRangeMap());
        // }

        // The case without a final fillComplete is not implemented yet;
        // fall through to the newmatrix kernels for now.
        if (transposeR && &R == &P)
          MMdetails::mult_PT_A_P_newmatrix(Aview, Pview, *Actemp, label, params);
        else
          MMdetails::mult_R_A_P_newmatrix(Rview, Aview, Pview, *Actemp, label, params);
      }

      if (needs_final_export) {
#ifdef HAVE_TPETRA_MMM_TIMINGS
        MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP exportAndFillComplete"))));
#endif
        Teuchos::ParameterList labelList;
        labelList.set("Timer Label", label);
        Teuchos::ParameterList& labelList_subList = labelList.sublist("matrixmatrix: kernel params",false);

        RCP<crs_matrix_type> Acprime = rcpFromRef(Ac);
        bool isMM = true;
        bool overrideAllreduce = false;
        int mm_optimization_core_count = ::Tpetra::Details::Behavior::TAFC_OptimizationCoreCount();
        if(!params.is_null()) {
          Teuchos::ParameterList& params_sublist = params->sublist("matrixmatrix: kernel params",false);
          mm_optimization_core_count = ::Tpetra::Details::Behavior::TAFC_OptimizationCoreCount();
          mm_optimization_core_count = params_sublist.get("MM_TAFC_OptimizationCoreCount",mm_optimization_core_count);
          int mm_optimization_core_count2 = params->get("MM_TAFC_OptimizationCoreCount",mm_optimization_core_count);
          if(mm_optimization_core_count2 < mm_optimization_core_count) mm_optimization_core_count = mm_optimization_core_count2;
          isMM = params_sublist.get("isMatrixMatrix_TransferAndFillComplete",false);
          overrideAllreduce = params_sublist.get("MM_TAFC_OverrideAllreduceCheck",false);

          labelList.set("compute global constants",params->get("compute global constants",true));
        }
        labelList_subList.set("MM_TAFC_OptimizationCoreCount",mm_optimization_core_count,"Core Count above which the optimized neighbor discovery is used");

        labelList_subList.set("isMatrixMatrix_TransferAndFillComplete",isMM,
                              "This parameter should be set to true only for MatrixMatrix operations: the optimization in Epetra that was ported to Tpetra does _not_ take into account the possibility that for any given source PID, a particular GID may not exist on the target PID: i.e. a transfer operation. A fix for this general case is in development.");
        labelList_subList.set("MM_TAFC_OverrideAllreduceCheck",overrideAllreduce);

        export_type exporter = export_type(*Pprime->getGraph()->getImporter());
        Actemp->exportAndFillComplete(Acprime,
                                      exporter,
                                      Acprime->getDomainMap(),
                                      Acprime->getRangeMap(),
                                      rcp(&labelList,false));

      }
#ifdef HAVE_TPETRA_MMM_STATISTICS
      printMultiplicationStatistics(Actemp->getGraph()->getExporter(), label+std::string(" RAP MMM"));
#endif

    }

  } // End namespace TripleMatrixMultiply

  namespace MMdetails{

    // Kernel method for computing the local portion of Ac = R*A*P
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node>
    void mult_R_A_P_newmatrix(
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Rview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
      CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
      const std::string& label,
      const Teuchos::RCP<Teuchos::ParameterList>& params)
    {
      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      //typedef Scalar SC; // unused
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;

      typedef Import<LO,GO,NO> import_type;
      typedef Map<LO,GO,NO> map_type;

      // Kokkos typedefs
      typedef typename map_type::local_map_type local_map_type;
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::non_const_type lno_view_t;
      typedef typename NO::execution_space execution_space;
      typedef Kokkos::RangePolicy<execution_space, size_t> range_type;
      typedef Kokkos::View<LO*, typename lno_view_t::array_layout, typename lno_view_t::device_type> lo_view_t;

#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      RCP<TimeMonitor> MM = rcp(new TimeMonitor(*(TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP M5 Cmap")))));
#endif
      LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();

      // Build the final importer / column map, hash table lookups for Ac
      RCP<const import_type> Cimport;
      RCP<const map_type> Ccolmap;
      RCP<const import_type> Pimport = Pview.origMatrix->getGraph()->getImporter();
      RCP<const import_type> Iimport = Pview.importMatrix.is_null() ? Teuchos::null : Pview.importMatrix->getGraph()->getImporter();
      local_map_type Acolmap_local = Aview.colMap->getLocalMap();
      local_map_type Prowmap_local = Pview.origMatrix->getRowMap()->getLocalMap();
      local_map_type Irowmap_local; if(!Pview.importMatrix.is_null()) Irowmap_local = Pview.importMatrix->getRowMap()->getLocalMap();
      local_map_type Pcolmap_local = Pview.origMatrix->getColMap()->getLocalMap();
      local_map_type Icolmap_local; if(!Pview.importMatrix.is_null()) Icolmap_local = Pview.importMatrix->getColMap()->getLocalMap();

      // mfh 27 Sep 2016: Pcol2Ccol is a table that maps from local column
      // indices of P, to local column indices of Ac. (P and Ac have the
      // same number of columns.) The kernel uses this, instead of
      // copying the entire input matrix P and converting its column
      // indices to those of Ac.
      lo_view_t Pcol2Ccol(Kokkos::ViewAllocateWithoutInitializing("Pcol2Ccol"),Pview.colMap->getNodeNumElements()), Icol2Ccol;

      if (Pview.importMatrix.is_null()) {
        // mfh 27 Sep 2016: P has no "remotes," so P and Ac have the same column Map.
        Cimport = Pimport;
        Ccolmap = Pview.colMap;
        const LO colMapSize = static_cast<LO>(Pview.colMap->getNodeNumElements());
        // Pcol2Ccol is trivial
        Kokkos::parallel_for("Tpetra::mult_R_A_P_newmatrix::Pcol2Ccol_fill",
                             Kokkos::RangePolicy<execution_space, LO>(0, colMapSize),
                             KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = i;
        });
      }
      else {
        // mfh 27 Sep 2016: P has "remotes," so we need to build the
        // column Map of Ac, as well as Ac's Import object (from its domain
        // Map to its column Map). Ac's column Map is the union of the
        // column Maps of (the local part of) P, and the "remote" part of
        // P. Ditto for the Import. We have optimized this "setUnion"
        // operation on Import objects and Maps.

        // Choose the right variant of setUnion
        if (!Pimport.is_null() && !Iimport.is_null()) {
          Cimport = Pimport->setUnion(*Iimport);
        }
        else if (!Pimport.is_null() && Iimport.is_null()) {
          Cimport = Pimport->setUnion();
        }
        else if (Pimport.is_null() && !Iimport.is_null()) {
          Cimport = Iimport->setUnion();
        }
        else {
          throw std::runtime_error("TpetraExt::RAP status of matrix importers is nonsensical");
        }
        Ccolmap = Cimport->getTargetMap();

        // FIXME (mfh 27 Sep 2016) This error check requires an all-reduce
        // in general. We should get rid of it in order to reduce
        // communication costs of sparse matrix-matrix multiply.
        TEUCHOS_TEST_FOR_EXCEPTION(!Cimport->getSourceMap()->isSameAs(*Pview.origMatrix->getDomainMap()),
                                   std::runtime_error, "Tpetra::RAP: Import setUnion messed with the DomainMap in an unfortunate way");

        // NOTE: This is not efficient and should be folded into setUnion
        //
        // mfh 27 Sep 2016: What the above comment means, is that the
        // setUnion operation on Import objects could also compute these
        // local index - to - local index look-up tables.
        Kokkos::resize(Icol2Ccol,Pview.importMatrix->getColMap()->getNodeNumElements());
        local_map_type Ccolmap_local = Ccolmap->getLocalMap();
        Kokkos::parallel_for("Tpetra::mult_R_A_P_newmatrix::Pcol2Ccol_getGlobalElement",range_type(0,Pview.origMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = Ccolmap_local.getLocalElement(Pcolmap_local.getGlobalElement(i));
        });
        Kokkos::parallel_for("Tpetra::mult_R_A_P_newmatrix::Icol2Ccol_getGlobalElement",range_type(0,Pview.importMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Icol2Ccol(i) = Ccolmap_local.getLocalElement(Icolmap_local.getGlobalElement(i));
        });

      }

      // Replace the column map
      //
      // mfh 27 Sep 2016: We do this because Ac was originally created
      // without a column Map. Now we have its column Map.
      Ac.replaceColMap(Ccolmap);

      // mfh 27 Sep 2016: Construct tables that map from local column
      // indices of A, to local row indices of either P_local (the locally
      // owned part of P), or P_remote (the "imported" remote part of P).
      //
      // For column index Aik in row i of A, if the corresponding row of P
      // exists in the local part of P ("orig") (which I'll call P_local),
      // then targetMapToOrigRow[Aik] is the local index of that row of P.
      // Otherwise, targetMapToOrigRow[Aik] is "invalid" (a flag value).
      //
      // For column index Aik in row i of A, if the corresponding row of P
      // exists in the remote part of P ("Import") (which I'll call
      // P_remote), then targetMapToImportRow[Aik] is the local index of
      // that row of P. Otherwise, targetMapToImportRow[Aik] is "invalid"
      // (a flag value).

      // Run through all the hash table lookups once and for all
      lo_view_t targetMapToOrigRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToOrigRow"),Aview.colMap->getNodeNumElements());
      lo_view_t targetMapToImportRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToImportRow"),Aview.colMap->getNodeNumElements());
      Kokkos::fence();
      Kokkos::parallel_for("Tpetra::mult_R_A_P_newmatrix::construct_tables",range_type(Aview.colMap->getMinLocalIndex(), Aview.colMap->getMaxLocalIndex()+1),KOKKOS_LAMBDA(const LO i) {
        GO aidx = Acolmap_local.getGlobalElement(i);
        LO P_LID = Prowmap_local.getLocalElement(aidx);
        if (P_LID != LO_INVALID) {
          targetMapToOrigRow(i)   = P_LID;
          targetMapToImportRow(i) = LO_INVALID;
        } else {
          LO I_LID = Irowmap_local.getLocalElement(aidx);
          targetMapToOrigRow(i)   = LO_INVALID;
          targetMapToImportRow(i) = I_LID;
        }
      });

      // Call the actual kernel. We'll rely on partial template specialization to call the correct one ---
      // Either the straight-up Tpetra code (SerialNode) or the KokkosKernels one (other NGP node types)
      KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,lo_view_t>::
        mult_R_A_P_newmatrix_kernel_wrapper(Rview, Aview, Pview,
                                            targetMapToOrigRow,targetMapToImportRow, Pcol2Ccol, Icol2Ccol,
                                            Ac, Cimport, label, params);
    }

    // Kernel method for computing the local portion of Ac = R*A*P (reuse mode)
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node>
    void mult_R_A_P_reuse(
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Rview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
      CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
      const std::string& label,
      const Teuchos::RCP<Teuchos::ParameterList>& params)
    {
      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      //typedef Scalar SC; // unused
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;

      typedef Import<LO,GO,NO> import_type;
      typedef Map<LO,GO,NO> map_type;

      // Kokkos typedefs
      typedef typename map_type::local_map_type local_map_type;
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::non_const_type lno_view_t;
      typedef typename NO::execution_space execution_space;
      typedef Kokkos::RangePolicy<execution_space, size_t> range_type;
      typedef Kokkos::View<LO*, typename lno_view_t::array_layout, typename lno_view_t::device_type> lo_view_t;

#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      RCP<TimeMonitor> MM = rcp(new TimeMonitor(*(TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP M5 Cmap")))));
#endif
      LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();

      // Build the final importer / column map, hash table lookups for Ac
      RCP<const import_type> Cimport = Ac.getGraph()->getImporter();
      RCP<const map_type> Ccolmap = Ac.getColMap();
      RCP<const import_type> Pimport = Pview.origMatrix->getGraph()->getImporter();
      RCP<const import_type> Iimport = Pview.importMatrix.is_null() ? Teuchos::null : Pview.importMatrix->getGraph()->getImporter();
      local_map_type Acolmap_local = Aview.colMap->getLocalMap();
      local_map_type Prowmap_local = Pview.origMatrix->getRowMap()->getLocalMap();
      local_map_type Irowmap_local; if(!Pview.importMatrix.is_null()) Irowmap_local = Pview.importMatrix->getRowMap()->getLocalMap();
      local_map_type Pcolmap_local = Pview.origMatrix->getColMap()->getLocalMap();
      local_map_type Icolmap_local; if(!Pview.importMatrix.is_null()) Icolmap_local = Pview.importMatrix->getColMap()->getLocalMap();
      local_map_type Ccolmap_local = Ccolmap->getLocalMap();

      // Build the hash table lookups for Ac's column map
      lo_view_t Pcol2Ccol(Kokkos::ViewAllocateWithoutInitializing("Pcol2Ccol"),Pview.colMap->getNodeNumElements()), Icol2Ccol;
      {
        // Pcol2Ccol may not be trivial, as Ccolmap was compressed during fillComplete in newmatrix mode.
        // So, the column map of Ac may be a strict subset of the column map of P.
        Kokkos::parallel_for(range_type(0,Pview.origMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = Ccolmap_local.getLocalElement(Pcolmap_local.getGlobalElement(i));
        });

        if (!Pview.importMatrix.is_null()) {
          TEUCHOS_TEST_FOR_EXCEPTION(!Cimport->getSourceMap()->isSameAs(*Pview.origMatrix->getDomainMap()),
                                     std::runtime_error, "Tpetra::RAP: Import setUnion messed with the DomainMap in an unfortunate way");

          Kokkos::resize(Icol2Ccol,Pview.importMatrix->getColMap()->getNodeNumElements());
          Kokkos::parallel_for(range_type(0,Pview.importMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
            Icol2Ccol(i) = Ccolmap_local.getLocalElement(Icolmap_local.getGlobalElement(i));
          });
        }
      }

      // Run through all the hash table lookups once and for all
      lo_view_t targetMapToOrigRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToOrigRow"),Aview.colMap->getNodeNumElements());
      lo_view_t targetMapToImportRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToImportRow"),Aview.colMap->getNodeNumElements());
      Kokkos::parallel_for(range_type(Aview.colMap->getMinLocalIndex(), Aview.colMap->getMaxLocalIndex()+1),KOKKOS_LAMBDA(const LO i) {
        GO aidx = Acolmap_local.getGlobalElement(i);
        LO P_LID = Prowmap_local.getLocalElement(aidx);
        if (P_LID != LO_INVALID) {
          targetMapToOrigRow(i)   = P_LID;
          targetMapToImportRow(i) = LO_INVALID;
        } else {
          LO I_LID = Irowmap_local.getLocalElement(aidx);
          targetMapToOrigRow(i)   = LO_INVALID;
          targetMapToImportRow(i) = I_LID;
        }
      });

      // Call the actual kernel. We'll rely on partial template specialization to call the correct one ---
      // Either the straight-up Tpetra code (SerialNode) or the KokkosKernels one (other NGP node types)
      KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,lo_view_t>::
        mult_R_A_P_reuse_kernel_wrapper(Rview, Aview, Pview,
                                        targetMapToOrigRow,targetMapToImportRow, Pcol2Ccol, Icol2Ccol,
                                        Ac, Cimport, label, params);
    }

    // Kernel method for computing the local portion of Ac = P^T*A*P
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node>
    void mult_PT_A_P_newmatrix(
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
      CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
      const std::string& label,
      const Teuchos::RCP<Teuchos::ParameterList>& params)
    {
      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      //typedef Scalar SC; // unused
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;

      typedef Import<LO,GO,NO> import_type;
      typedef Map<LO,GO,NO> map_type;

      // Kokkos typedefs
      typedef typename map_type::local_map_type local_map_type;
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::non_const_type lno_view_t;
      typedef typename NO::execution_space execution_space;
      typedef Kokkos::RangePolicy<execution_space, size_t> range_type;
      typedef Kokkos::View<LO*, typename lno_view_t::array_layout, typename lno_view_t::device_type> lo_view_t;

#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      RCP<TimeMonitor> MM = rcp(new TimeMonitor(*(TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP M5 Cmap")))));
#endif
      LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();

      // Build the final importer / column map, hash table lookups for Ac
      RCP<const import_type> Cimport;
      RCP<const map_type> Ccolmap;
      RCP<const import_type> Pimport = Pview.origMatrix->getGraph()->getImporter();
      RCP<const import_type> Iimport = Pview.importMatrix.is_null() ? Teuchos::null : Pview.importMatrix->getGraph()->getImporter();
      local_map_type Acolmap_local = Aview.colMap->getLocalMap();
      local_map_type Prowmap_local = Pview.origMatrix->getRowMap()->getLocalMap();
      local_map_type Irowmap_local; if(!Pview.importMatrix.is_null()) Irowmap_local = Pview.importMatrix->getRowMap()->getLocalMap();
      local_map_type Pcolmap_local = Pview.origMatrix->getColMap()->getLocalMap();
      local_map_type Icolmap_local; if(!Pview.importMatrix.is_null()) Icolmap_local = Pview.importMatrix->getColMap()->getLocalMap();

      // mfh 27 Sep 2016: Pcol2Ccol is a table that maps from local column
      // indices of P, to local column indices of Ac. (P and Ac have the
      // same number of columns.) The kernel uses this, instead of
      // copying the entire input matrix P and converting its column
      // indices to those of Ac.
      lo_view_t Pcol2Ccol(Kokkos::ViewAllocateWithoutInitializing("Pcol2Ccol"),Pview.colMap->getNodeNumElements()), Icol2Ccol;

      if (Pview.importMatrix.is_null()) {
        // mfh 27 Sep 2016: P has no "remotes," so P and Ac have the same column Map.
        Cimport = Pimport;
        Ccolmap = Pview.colMap;
        const LO colMapSize = static_cast<LO>(Pview.colMap->getNodeNumElements());
        // Pcol2Ccol is trivial
        Kokkos::parallel_for("Tpetra::mult_PT_A_P_newmatrix::Pcol2Ccol_fill",
                             Kokkos::RangePolicy<execution_space, LO>(0, colMapSize),
                             KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = i;
        });
      }
      else {
        // mfh 27 Sep 2016: P has "remotes," so we need to build the
        // column Map of Ac, as well as Ac's Import object (from its domain
        // Map to its column Map). Ac's column Map is the union of the
        // column Maps of (the local part of) P, and the "remote" part of
        // P. Ditto for the Import. We have optimized this "setUnion"
        // operation on Import objects and Maps.

        // Choose the right variant of setUnion
        if (!Pimport.is_null() && !Iimport.is_null()) {
          Cimport = Pimport->setUnion(*Iimport);
        }
        else if (!Pimport.is_null() && Iimport.is_null()) {
          Cimport = Pimport->setUnion();
        }
        else if (Pimport.is_null() && !Iimport.is_null()) {
          Cimport = Iimport->setUnion();
        }
        else {
          throw std::runtime_error("TpetraExt::RAP status of matrix importers is nonsensical");
        }
        Ccolmap = Cimport->getTargetMap();

        // FIXME (mfh 27 Sep 2016) This error check requires an all-reduce
        // in general. We should get rid of it in order to reduce
        // communication costs of sparse matrix-matrix multiply.
        TEUCHOS_TEST_FOR_EXCEPTION(!Cimport->getSourceMap()->isSameAs(*Pview.origMatrix->getDomainMap()),
                                   std::runtime_error, "Tpetra::RAP: Import setUnion messed with the DomainMap in an unfortunate way");

        // NOTE: This is not efficient and should be folded into setUnion
        //
        // mfh 27 Sep 2016: What the above comment means, is that the
        // setUnion operation on Import objects could also compute these
        // local index - to - local index look-up tables.
        Kokkos::resize(Icol2Ccol,Pview.importMatrix->getColMap()->getNodeNumElements());
        local_map_type Ccolmap_local = Ccolmap->getLocalMap();
        Kokkos::parallel_for("Tpetra::mult_PT_A_P_newmatrix::Pcol2Ccol_getGlobalElement",range_type(0,Pview.origMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = Ccolmap_local.getLocalElement(Pcolmap_local.getGlobalElement(i));
        });
        Kokkos::parallel_for("Tpetra::mult_PT_A_P_newmatrix::Icol2Ccol_getGlobalElement",range_type(0,Pview.importMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Icol2Ccol(i) = Ccolmap_local.getLocalElement(Icolmap_local.getGlobalElement(i));
        });

      }

      // Replace the column map
      //
      // mfh 27 Sep 2016: We do this because Ac was originally created
      // without a column Map. Now we have its column Map.
      Ac.replaceColMap(Ccolmap);

      // mfh 27 Sep 2016: Construct tables that map from local column
      // indices of A, to local row indices of either P_local (the locally
      // owned part of P), or P_remote (the "imported" remote part of P).
      //
      // For column index Aik in row i of A, if the corresponding row of P
      // exists in the local part of P ("orig") (which I'll call P_local),
      // then targetMapToOrigRow[Aik] is the local index of that row of P.
      // Otherwise, targetMapToOrigRow[Aik] is "invalid" (a flag value).
      //
      // For column index Aik in row i of A, if the corresponding row of P
      // exists in the remote part of P ("Import") (which I'll call
      // P_remote), then targetMapToImportRow[Aik] is the local index of
      // that row of P. Otherwise, targetMapToImportRow[Aik] is "invalid"
      // (a flag value).

      // Run through all the hash table lookups once and for all
      lo_view_t targetMapToOrigRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToOrigRow"),Aview.colMap->getNodeNumElements());
      lo_view_t targetMapToImportRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToImportRow"),Aview.colMap->getNodeNumElements());
      Kokkos::fence();
      Kokkos::parallel_for("Tpetra::mult_PT_A_P_newmatrix::construct_tables",range_type(Aview.colMap->getMinLocalIndex(), Aview.colMap->getMaxLocalIndex()+1),KOKKOS_LAMBDA(const LO i) {
        GO aidx = Acolmap_local.getGlobalElement(i);
        LO P_LID = Prowmap_local.getLocalElement(aidx);
        if (P_LID != LO_INVALID) {
          targetMapToOrigRow(i)   = P_LID;
          targetMapToImportRow(i) = LO_INVALID;
        } else {
          LO I_LID = Irowmap_local.getLocalElement(aidx);
          targetMapToOrigRow(i)   = LO_INVALID;
          targetMapToImportRow(i) = I_LID;
        }
      });

      // Call the actual kernel. We'll rely on partial template specialization to call the correct one ---
      // Either the straight-up Tpetra code (SerialNode) or the KokkosKernels one (other NGP node types)
      KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,lo_view_t>::
        mult_PT_A_P_newmatrix_kernel_wrapper(Aview, Pview,
                                             targetMapToOrigRow,targetMapToImportRow, Pcol2Ccol, Icol2Ccol,
                                             Ac, Cimport, label, params);
    }

    // Kernel method for computing the local portion of Ac = P^T*A*P (reuse mode)
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node>
    void mult_PT_A_P_reuse(
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
      CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
      CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
      const std::string& label,
      const Teuchos::RCP<Teuchos::ParameterList>& params)
    {
      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      //typedef Scalar SC; // unused
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;

      typedef Import<LO,GO,NO> import_type;
      typedef Map<LO,GO,NO> map_type;

      // Kokkos typedefs
      typedef typename map_type::local_map_type local_map_type;
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::non_const_type lno_view_t;
      typedef typename NO::execution_space execution_space;
      typedef Kokkos::RangePolicy<execution_space, size_t> range_type;
      typedef Kokkos::View<LO*, typename lno_view_t::array_layout, typename lno_view_t::device_type> lo_view_t;

#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      RCP<TimeMonitor> MM = rcp(new TimeMonitor(*(TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP M5 Cmap")))));
#endif
      LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();

      // Build the final importer / column map, hash table lookups for Ac
      RCP<const import_type> Cimport = Ac.getGraph()->getImporter();
      RCP<const map_type> Ccolmap = Ac.getColMap();
      RCP<const import_type> Pimport = Pview.origMatrix->getGraph()->getImporter();
      RCP<const import_type> Iimport = Pview.importMatrix.is_null() ? Teuchos::null : Pview.importMatrix->getGraph()->getImporter();
      local_map_type Acolmap_local = Aview.colMap->getLocalMap();
      local_map_type Prowmap_local = Pview.origMatrix->getRowMap()->getLocalMap();
      local_map_type Irowmap_local; if(!Pview.importMatrix.is_null()) Irowmap_local = Pview.importMatrix->getRowMap()->getLocalMap();
      local_map_type Pcolmap_local = Pview.origMatrix->getColMap()->getLocalMap();
      local_map_type Icolmap_local; if(!Pview.importMatrix.is_null()) Icolmap_local = Pview.importMatrix->getColMap()->getLocalMap();
      local_map_type Ccolmap_local = Ccolmap->getLocalMap();

      // Build the hash table lookups for Ac's column map
      lo_view_t Pcol2Ccol(Kokkos::ViewAllocateWithoutInitializing("Pcol2Ccol"),Pview.colMap->getNodeNumElements()), Icol2Ccol;
      {
        // Pcol2Ccol may not be trivial, as Ccolmap was compressed during fillComplete in newmatrix mode.
        // So, the column map of Ac may be a strict subset of the column map of P.
        Kokkos::parallel_for(range_type(0,Pview.origMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
          Pcol2Ccol(i) = Ccolmap_local.getLocalElement(Pcolmap_local.getGlobalElement(i));
        });

        if (!Pview.importMatrix.is_null()) {
          TEUCHOS_TEST_FOR_EXCEPTION(!Cimport->getSourceMap()->isSameAs(*Pview.origMatrix->getDomainMap()),
                                     std::runtime_error, "Tpetra::RAP: Import setUnion messed with the DomainMap in an unfortunate way");

          Kokkos::resize(Icol2Ccol,Pview.importMatrix->getColMap()->getNodeNumElements());
          Kokkos::parallel_for(range_type(0,Pview.importMatrix->getColMap()->getNodeNumElements()),KOKKOS_LAMBDA(const LO i) {
            Icol2Ccol(i) = Ccolmap_local.getLocalElement(Icolmap_local.getGlobalElement(i));
          });
        }
      }

      // Run through all the hash table lookups once and for all
      lo_view_t targetMapToOrigRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToOrigRow"),Aview.colMap->getNodeNumElements());
      lo_view_t targetMapToImportRow(Kokkos::ViewAllocateWithoutInitializing("targetMapToImportRow"),Aview.colMap->getNodeNumElements());
      Kokkos::parallel_for(range_type(Aview.colMap->getMinLocalIndex(), Aview.colMap->getMaxLocalIndex()+1),KOKKOS_LAMBDA(const LO i) {
        GO aidx = Acolmap_local.getGlobalElement(i);
        LO P_LID = Prowmap_local.getLocalElement(aidx);
        if (P_LID != LO_INVALID) {
          targetMapToOrigRow(i)   = P_LID;
          targetMapToImportRow(i) = LO_INVALID;
        } else {
          LO I_LID = Irowmap_local.getLocalElement(aidx);
          targetMapToOrigRow(i)   = LO_INVALID;
          targetMapToImportRow(i) = I_LID;
        }
      });

      // Call the actual kernel. We'll rely on partial template specialization to call the correct one ---
      // Either the straight-up Tpetra code (SerialNode) or the KokkosKernels one (other NGP node types)
      KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,lo_view_t>::
        mult_PT_A_P_reuse_kernel_wrapper(Aview, Pview,
                                         targetMapToOrigRow,targetMapToImportRow, Pcol2Ccol, Icol2Ccol,
                                         Ac, Cimport, label, params);
    }

    /*********************************************************************************************************/
    // RAP NewMatrix Kernel wrappers (Default non-threaded version)
    // Computes R * A * P -> Ac using the classic Gustavson approach
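    //
    // A distilled sketch of the kernel (illustration only; the real loop
    // below also routes each row of P through either the locally owned part
    // or the imported "remote" part, and grows its allocation on the fly):
    //
    //   for each row i of R:
    //     for each entry R(i,k) in that row:
    //       for each entry A(k,l) in row k of A:
    //         for each entry P(l,j) in row l of P:
    //           Ac(i,j) += R(i,k) * A(k,l) * P(l,j)
    //
    // Duplicate (i,j) contributions within a row are merged on the fly via
    // the ac_status lookup array described further down.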
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node,
             class LocalOrdinalViewType>
    void KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,LocalOrdinalViewType>::
    mult_R_A_P_newmatrix_kernel_wrapper(CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Rview,
                                        CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
                                        CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
                                        const LocalOrdinalViewType & Acol2Prow,
                                        const LocalOrdinalViewType & Acol2PIrow,
                                        const LocalOrdinalViewType & Pcol2Accol,
                                        const LocalOrdinalViewType & PIcol2Accol,
                                        CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
                                        Teuchos::RCP<const Import<LocalOrdinal,GlobalOrdinal,Node> > Acimport,
                                        const std::string& label,
                                        const Teuchos::RCP<Teuchos::ParameterList>& params) {
#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      Teuchos::RCP<Teuchos::TimeMonitor> MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Newmatrix SerialCore"))));
#endif

      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      // Lots and lots of typedefs
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::const_type c_lno_view_t;
      typedef typename graph_t::row_map_type::non_const_type lno_view_t;
      typedef typename graph_t::entries_type::non_const_type lno_nnz_view_t;
      typedef typename KCRS::values_type::non_const_type scalar_view_t;

      typedef Scalar SC;
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;
      typedef Map<LO,GO,NO> map_type;
      const size_t ST_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
      const LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
      const SC SC_ZERO = Teuchos::ScalarTraits<Scalar>::zero();

      // Sizes
      RCP<const map_type> Accolmap = Ac.getColMap();
      size_t m = Rview.origMatrix->getNodeNumRows();
      size_t n = Accolmap->getNodeNumElements();
      size_t p_max_nnz_per_row = Pview.origMatrix->getNodeMaxNumRowEntries();

      // Grab the Kokkos::SparseCrsMatrices & inner stuff
      const KCRS & Amat = Aview.origMatrix->getLocalMatrix();
      const KCRS & Pmat = Pview.origMatrix->getLocalMatrix();
      const KCRS & Rmat = Rview.origMatrix->getLocalMatrix();

      c_lno_view_t Arowptr = Amat.graph.row_map, Prowptr = Pmat.graph.row_map, Rrowptr = Rmat.graph.row_map;
      const lno_nnz_view_t Acolind = Amat.graph.entries, Pcolind = Pmat.graph.entries, Rcolind = Rmat.graph.entries;
      const scalar_view_t Avals = Amat.values, Pvals = Pmat.values, Rvals = Rmat.values;

      c_lno_view_t Irowptr;
      lno_nnz_view_t Icolind;
      scalar_view_t Ivals;
      if(!Pview.importMatrix.is_null()) {
        Irowptr = Pview.importMatrix->getLocalMatrix().graph.row_map;
        Icolind = Pview.importMatrix->getLocalMatrix().graph.entries;
        Ivals = Pview.importMatrix->getLocalMatrix().values;
        p_max_nnz_per_row = std::max(p_max_nnz_per_row,Pview.importMatrix->getNodeMaxNumRowEntries());
      }

#ifdef HAVE_TPETRA_MMM_TIMINGS
      RCP<TimeMonitor> MM2 = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Newmatrix SerialCore - Compare"))));
#endif

      // Classic csr assembly (low memory edition)
      //
      // mfh 27 Sep 2016: Ac_estimate_nnz does not promise an upper bound.
      // The method loops over rows of R, and may resize after processing
      // each row. Chris Siefert says that this reflects experience in
      // ML; for the non-threaded case, ML found it faster to spend less
      // effort on estimation and risk an occasional reallocation.
      size_t CSR_alloc = std::max(C_estimate_nnz(*Aview.origMatrix, *Pview.origMatrix), n);
      lno_view_t Crowptr(Kokkos::ViewAllocateWithoutInitializing("Crowptr"),m+1);
      lno_nnz_view_t Ccolind(Kokkos::ViewAllocateWithoutInitializing("Ccolind"),CSR_alloc);
      scalar_view_t Cvals(Kokkos::ViewAllocateWithoutInitializing("Cvals"),CSR_alloc);

      // mfh 27 Sep 2016: The ac_status array is an implementation detail
      // of the local sparse matrix-matrix multiply routine.

      // The status array will contain the index into colind where this entry was last deposited.
      //   ac_status[i] <  nnz - not in the row yet
      //   ac_status[i] >= nnz - this is the entry where you can find the data
      // We start with this filled with INVALID's indicating that there are no entries yet.
      // Sadly, this complicates the code due to the fact that size_t's are unsigned.
      const size_t INVALID = Teuchos::OrdinalTraits<size_t>::invalid();
      Array<size_t> ac_status(n, ST_INVALID);
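
      // Worked micro-example (illustration only, not from the original
      // source): suppose row i begins with nnz_old == nnz == 5 and produces
      // two contributions to column 7. The first finds ac_status[7] either
      // still INVALID from initialization or stale (< nnz_old), so it appends
      // a new entry: ac_status[7] = 5, Ccolind[5] = 7, Cvals[5] = Rik*Akl*Plj,
      // and nnz becomes 6. The second sees ac_status[7] == 5 >= nnz_old and
      // accumulates into Cvals[5] instead of appending. Stale positions left
      // over from earlier rows can never be mistaken for live ones, because
      // each row only trusts positions at or beyond its own nnz_old.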

      // mfh 27 Sep 2016: Here is the local sparse matrix-matrix multiply
      // routine. The routine computes Ac := R * A * (P_local + P_remote).
      //
      // For column index Aik in row i of A, Acol2Prow[Aik] tells
      // you whether the corresponding row of P belongs to P_local
      // ("orig") or P_remote ("Import").

      // For each row of R
      size_t nnz = 0, nnz_old = 0;
      for (size_t i = 0; i < m; i++) {
        // mfh 27 Sep 2016: m is the number of rows in the input matrix R
        // on the calling process.
        Crowptr[i] = nnz;

        // mfh 27 Sep 2016: For each entry of R in the current row of R
        for (size_t kk = Rrowptr[i]; kk < Rrowptr[i+1]; kk++) {
          LO k = Rcolind[kk]; // local column index of current entry of R
          const SC Rik = Rvals[kk]; // value of current entry of R
          if (Rik == SC_ZERO)
            continue; // skip explicitly stored zero values in R
          // For each entry of A in the current row of A
          for (size_t ll = Arowptr[k]; ll < Arowptr[k+1]; ll++) {
            LO l = Acolind[ll]; // local column index of current entry of A
            const SC Akl = Avals[ll]; // value of current entry of A
            if (Akl == SC_ZERO)
              continue; // skip explicitly stored zero values in A

            if (Acol2Prow[l] != LO_INVALID) {
              // mfh 27 Sep 2016: If the entry of Acol2Prow
              // corresponding to the current entry of A is populated, then
              // the corresponding row of P is in P_local (i.e., it lives on
              // the calling process).

              // Local matrix
              size_t Pl = Teuchos::as<size_t>(Acol2Prow[l]);

              // mfh 27 Sep 2016: Go through all entries in that row of P_local.
              for (size_t jj = Prowptr[Pl]; jj < Prowptr[Pl+1]; jj++) {
                LO j = Pcolind[jj];
                LO Acj = Pcol2Accol[j];
                SC Plj = Pvals[jj];

                if (ac_status[Acj] == INVALID || ac_status[Acj] < nnz_old) {
#ifdef HAVE_TPETRA_DEBUG
                  // Ac_estimate_nnz() is probably not perfect yet. If this happens, we need to allocate more memory.
                  TEUCHOS_TEST_FOR_EXCEPTION(nnz >= Teuchos::as<size_t>(Ccolind.size()),
                                             std::runtime_error,
                                             label << " ERROR, not enough memory allocated for matrix product. Allocated: " << Ccolind.extent(0) << std::endl);
#endif
                  // New entry
                  ac_status[Acj] = nnz;
                  Ccolind[nnz] = Acj;
                  Cvals[nnz] = Rik*Akl*Plj;
                  nnz++;
                } else {
                  Cvals[ac_status[Acj]] += Rik*Akl*Plj;
                }
              }
            } else {
              // mfh 27 Sep 2016: If the entry of Acol2Prow
              // corresponding to the current entry of A is NOT populated (has
              // a flag "invalid" value), then the corresponding row of P is
              // in P_remote (i.e., it does not live on the calling process).

              // Remote matrix
              size_t Il = Teuchos::as<size_t>(Acol2PIrow[l]);
              for (size_t jj = Irowptr[Il]; jj < Irowptr[Il+1]; jj++) {
                LO j = Icolind[jj];
                LO Acj = PIcol2Accol[j];
                SC Plj = Ivals[jj];

                if (ac_status[Acj] == INVALID || ac_status[Acj] < nnz_old) {
#ifdef HAVE_TPETRA_DEBUG
                  // Ac_estimate_nnz() is probably not perfect yet. If this happens, we need to allocate more memory.
                  TEUCHOS_TEST_FOR_EXCEPTION(nnz >= Teuchos::as<size_t>(Ccolind.size()),
                                             std::runtime_error,
                                             label << " ERROR, not enough memory allocated for matrix product. Allocated: " << Ccolind.extent(0) << std::endl);
#endif
                  // New entry
                  ac_status[Acj] = nnz;
                  Ccolind[nnz] = Acj;
                  Cvals[nnz] = Rik*Akl*Plj;
                  nnz++;
                } else {
                  Cvals[ac_status[Acj]] += Rik*Akl*Plj;
                }
              }
            }
          }
        }
        // Resize for next pass if needed
        if (nnz + n > CSR_alloc) {
          CSR_alloc *= 2;
          Kokkos::resize(Ccolind,CSR_alloc);
          Kokkos::resize(Cvals,CSR_alloc);
        }
        nnz_old = nnz;
      }

      Crowptr[m] = nnz;

      // Downward resize
      Kokkos::resize(Ccolind,nnz);
      Kokkos::resize(Cvals,nnz);

#ifdef HAVE_TPETRA_MMM_TIMINGS
      MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Newmatrix Final Sort"))));
#endif

      // Final sort & set of CRS arrays
      if (params.is_null() || params->get("sort entries",true))
        Import_Util::sortCrsEntries(Crowptr,Ccolind, Cvals);
      Ac.setAllValues(Crowptr, Ccolind, Cvals);

#ifdef HAVE_TPETRA_MMM_TIMINGS
      MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Newmatrix ESFC"))));
#endif

      // Final FillComplete
      //
      // mfh 27 Sep 2016: So-called "expert static fill complete" bypasses
      // Import (from domain Map to column Map) construction (which costs
      // lots of communication) by taking the previously constructed
      // Import object. We should be able to do this without interfering
      // with the implementation of the local part of sparse matrix-matrix
      // multiply above.
      RCP<Teuchos::ParameterList> labelList = rcp(new Teuchos::ParameterList);
      labelList->set("Timer Label",label);
      if(!params.is_null()) labelList->set("compute global constants",params->get("compute global constants",true));
      RCP<const Export<LO,GO,NO> > dummyExport;
      Ac.expertStaticFillComplete(Pview.origMatrix->getDomainMap(),
                                  Rview.origMatrix->getRangeMap(),
                                  Acimport,
                                  dummyExport,
                                  labelList);

    }

    /*********************************************************************************************************/
    // RAP Reuse Kernel wrappers (Default non-threaded version)
    // Computes R * A * P -> Ac using Gustavson, reusing the existing sparsity of Ac
    template<class Scalar,
             class LocalOrdinal,
             class GlobalOrdinal,
             class Node,
             class LocalOrdinalViewType>
    void KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,LocalOrdinalViewType>::
    mult_R_A_P_reuse_kernel_wrapper(CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Rview,
                                    CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
                                    CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
                                    const LocalOrdinalViewType & Acol2Prow,
                                    const LocalOrdinalViewType & Acol2PIrow,
                                    const LocalOrdinalViewType & Pcol2Accol,
                                    const LocalOrdinalViewType & PIcol2Accol,
                                    CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
                                    Teuchos::RCP<const Import<LocalOrdinal,GlobalOrdinal,Node> > Acimport,
                                    const std::string& label,
                                    const Teuchos::RCP<Teuchos::ParameterList>& params) {
#ifdef HAVE_TPETRA_MMM_TIMINGS
      std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
      using Teuchos::TimeMonitor;
      Teuchos::RCP<Teuchos::TimeMonitor> MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Reuse SerialCore"))));
#endif

      using Teuchos::Array;
      using Teuchos::ArrayRCP;
      using Teuchos::ArrayView;
      using Teuchos::RCP;
      using Teuchos::rcp;

      // Lots and lots of typedefs
      typedef typename Tpetra::CrsMatrix<Scalar,LocalOrdinal,GlobalOrdinal,Node>::local_matrix_type KCRS;
      typedef typename KCRS::StaticCrsGraphType graph_t;
      typedef typename graph_t::row_map_type::const_type c_lno_view_t;
      typedef typename graph_t::entries_type::non_const_type lno_nnz_view_t;
      typedef typename KCRS::values_type::non_const_type scalar_view_t;

      typedef Scalar SC;
      typedef LocalOrdinal LO;
      typedef GlobalOrdinal GO;
      typedef Node NO;
      typedef Map<LO,GO,NO> map_type;
      const size_t ST_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
      const LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
      const SC SC_ZERO = Teuchos::ScalarTraits<Scalar>::zero();

      // Sizes
      RCP<const map_type> Accolmap = Ac.getColMap();
      size_t m = Rview.origMatrix->getNodeNumRows();
      size_t n = Accolmap->getNodeNumElements();
      size_t p_max_nnz_per_row = Pview.origMatrix->getNodeMaxNumRowEntries();

      // Grab the Kokkos::SparseCrsMatrices & inner stuff
      const KCRS & Amat = Aview.origMatrix->getLocalMatrix();
      const KCRS & Pmat = Pview.origMatrix->getLocalMatrix();
      const KCRS & Rmat = Rview.origMatrix->getLocalMatrix();
      const KCRS & Cmat = Ac.getLocalMatrix();

      c_lno_view_t Arowptr = Amat.graph.row_map, Prowptr = Pmat.graph.row_map, Rrowptr = Rmat.graph.row_map, Crowptr = Cmat.graph.row_map;
      const lno_nnz_view_t Acolind = Amat.graph.entries, Pcolind = Pmat.graph.entries, Rcolind = Rmat.graph.entries, Ccolind = Cmat.graph.entries;
      const scalar_view_t Avals = Amat.values, Pvals = Pmat.values, Rvals = Rmat.values;
      scalar_view_t Cvals = Cmat.values;

      c_lno_view_t Irowptr;
      lno_nnz_view_t Icolind;
      scalar_view_t Ivals;
      if(!Pview.importMatrix.is_null()) {
        Irowptr = Pview.importMatrix->getLocalMatrix().graph.row_map;
        Icolind = Pview.importMatrix->getLocalMatrix().graph.entries;
        Ivals = Pview.importMatrix->getLocalMatrix().values;
        p_max_nnz_per_row = std::max(p_max_nnz_per_row,Pview.importMatrix->getNodeMaxNumRowEntries());
      }

#ifdef HAVE_TPETRA_MMM_TIMINGS
      RCP<TimeMonitor> MM2 = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Reuse SerialCore - Compare"))));
#endif
1200  // mfh 27 Sep 2016: The ac_status array is an implementation detail
1201  // of the local sparse matrix-matrix multiply routine.
1202 
1203  // The status array will contain the index into Ccolind/Cvals where each column's
1204  // entry in the current row of Ac was deposited:
1205  //   OLD_ip <= ac_status[j] < CSR_ip - column j is in the row; its value lives at Cvals[ac_status[j]]
1206  //   anything else - column j is absent (a new entry, which the static graph forbids)
1207  // We start with this filled with INVALID's indicating that there are no entries yet.
1208  // Sadly, size_t being unsigned complicates the range test below.
1208  Array<size_t> ac_status(n, ST_INVALID);
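 // For example (illustrative only): if row i of Ac occupies slots [5,8) of
 // Cvals with column indices {2, 7, 9}, the reset loop below sets
 //   ac_status[2] = 5, ac_status[7] = 6, ac_status[9] = 7,
 // while every other ac_status entry still holds ST_INVALID; a product term
 // landing on any column outside {2, 7, 9} then fails the [OLD_ip, CSR_ip)
 // range test and triggers the static-graph exception.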
1209 
1210  // mfh 27 Sep 2016: Here is the local sparse matrix-matrix multiply
1211  // routine. The routine computes Ac := R * A * (P_local + P_remote).
1212  //
1213  // For column index Aik in row i of A, Acol2Prow[Aik] tells
1214  // you whether the corresponding row of P belongs to P_local
1215  // ("orig") or P_remote ("Import").
1216 
1217  // For each row of R
1218  size_t OLD_ip = 0, CSR_ip = 0;
1219  for (size_t i = 0; i < m; i++) {
1220  // First fill the ac_status array w/ locations where we're allowed to
1221  // generate nonzeros for this row
1222  OLD_ip = Crowptr[i];
1223  CSR_ip = Crowptr[i+1];
1224  for (size_t k = OLD_ip; k < CSR_ip; k++) {
1225  ac_status[Ccolind[k]] = k;
1226 
1227  // Reset values in the row of C
1228  Cvals[k] = SC_ZERO;
1229  }
1230 
1231  // mfh 27 Sep 2016: For each entry of R in the current row of R
1232  for (size_t kk = Rrowptr[i]; kk < Rrowptr[i+1]; kk++) {
1233  LO k = Rcolind[kk]; // local column index of current entry of R
1234  const SC Rik = Rvals[kk]; // value of current entry of R
1235  if (Rik == SC_ZERO)
1236  continue; // skip explicitly stored zero values in R
1237  // For each entry of A in the current row of A
1238  for (size_t ll = Arowptr[k]; ll < Arowptr[k+1]; ll++) {
1239  LO l = Acolind[ll]; // local column index of current entry of A
1240  const SC Akl = Avals[ll]; // value of current entry of A
1241  if (Akl == SC_ZERO)
1242  continue; // skip explicitly stored zero values in A
1243 
1244 
1245  if (Acol2Prow[l] != LO_INVALID) {
1246  // mfh 27 Sep 2016: If the entry of Acol2Prow
1247  // corresponding to the current entry of A is populated, then
1248  // the corresponding row of P is in P_local (i.e., it lives on
1249  // the calling process).
1250 
1251  // Local matrix
1252  size_t Pl = Teuchos::as<size_t>(Acol2Prow[l]);
1253 
1254  // mfh 27 Sep 2016: Go through all entries in that row of P_local.
1255  for (size_t jj = Prowptr[Pl]; jj < Prowptr[Pl+1]; jj++) {
1256  LO j = Pcolind[jj];
1257  LO Cij = Pcol2Accol[j];
1258  SC Plj = Pvals[jj];
1259 
1260  TEUCHOS_TEST_FOR_EXCEPTION(ac_status[Cij] < OLD_ip || ac_status[Cij] >= CSR_ip,
1261  std::runtime_error, "Trying to insert a new entry (" << i << "," << Cij << ") into a static graph " <<
1262  "(c_status = " << ac_status[Cij] << " of [" << OLD_ip << "," << CSR_ip << "))");
1263 
1264  Cvals[ac_status[Cij]] += Rik*Akl*Plj;
1265  }
1266  } else {
1267  // mfh 27 Sep 2016: If the entry of Acol2Prow
1268  // corresponding to the current entry of A is NOT populated (has
1269  // a flag "invalid" value), then the corresponding row of P is
1270  // in P_remote (i.e., it does not live on the calling process).
1271 
1272  // Remote matrix
1273  size_t Il = Teuchos::as<size_t>(Acol2PIrow[l]);
1274  for (size_t jj = Irowptr[Il]; jj < Irowptr[Il+1]; jj++) {
1275  LO j = Icolind[jj];
1276  LO Cij = PIcol2Accol[j];
1277  SC Plj = Ivals[jj];
1278 
1279  TEUCHOS_TEST_FOR_EXCEPTION(ac_status[Cij] < OLD_ip || ac_status[Cij] >= CSR_ip,
1280  std::runtime_error, "Trying to insert a new entry (" << i << "," << Cij << ") into a static graph " <<
1281  "(c_status = " << ac_status[Cij] << " of [" << OLD_ip << "," << CSR_ip << "))");
1282 
1283  Cvals[ac_status[Cij]] += Rik*Akl*Plj;
1284  }
1285  }
1286  }
1287  }
1288  }
1289 
1290 #ifdef HAVE_TPETRA_MMM_TIMINGS
1291  auto MM3 = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("RAP Reuse ESFC"))));
1292 #endif
1293 
1294  Ac.fillComplete(Ac.getDomainMap(), Ac.getRangeMap());
1295 
1296 }
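// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Tpetra): the reuse-mode Gustavson
// accumulation above, in miniature.  Plain std:: containers stand in for the
// Kokkos views, there is no remote (Import) part of P, and -- as in the
// serial square case -- a column index of A is used directly as a row index
// of P, so no Acol2Prow remapping is needed.  C's sparsity pattern is taken
// as given; only its values are recomputed, exactly as in the kernel above.
// Assumes <vector> and <limits>; MiniCSR and miniRAPReuse are hypothetical
// names introduced only for this example.
// ---------------------------------------------------------------------------
namespace { // example only

  struct MiniCSR {
    std::vector<size_t> rowptr; // row offsets, length numRows+1
    std::vector<int>    colind; // column indices, length nnz
    std::vector<double> vals;   // values, length nnz
  };

  // Recompute C.vals = (R * A * P).vals, assuming C's pattern already
  // contains every entry the product generates.
  inline void miniRAPReuse (const MiniCSR& R, const MiniCSR& A,
                            const MiniCSR& P, MiniCSR& C,
                            const size_t numColsC)
  {
    const size_t INVALID = std::numeric_limits<size_t>::max ();
    // status[j] = slot in C.vals holding column j of the current row
    std::vector<size_t> status (numColsC, INVALID);
    const size_t numRows = C.rowptr.size () - 1;
    for (size_t i = 0; i < numRows; ++i) {
      // Mark the slots we are allowed to write in this row, and zero them.
      for (size_t k = C.rowptr[i]; k < C.rowptr[i+1]; ++k) {
        status[C.colind[k]] = k;
        C.vals[k] = 0.0;
      }
      for (size_t kk = R.rowptr[i]; kk < R.rowptr[i+1]; ++kk) {
        const double Rik = R.vals[kk];
        const int k = R.colind[kk];
        for (size_t ll = A.rowptr[k]; ll < A.rowptr[k+1]; ++ll) {
          const double Akl = A.vals[ll];
          const int l = A.colind[ll];
          for (size_t jj = P.rowptr[l]; jj < P.rowptr[l+1]; ++jj) {
            // status[...] is in-range because C's pattern is assumed to
            // contain the entry (i, P.colind[jj]).
            C.vals[status[P.colind[jj]]] += Rik * Akl * P.vals[jj];
          }
        }
      }
    }
  }

} // anonymous namespace (example only)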
1297 
1298 
1299  /*********************************************************************************************************/
1300  // PT_A_P NewMatrix Kernel wrappers (Default, general, non-threaded version)
1301  // Computes P.T * A * P -> Ac
1302  template<class Scalar,
1303  class LocalOrdinal,
1304  class GlobalOrdinal,
1305  class Node,
1306  class LocalOrdinalViewType>
1307  void KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,LocalOrdinalViewType>::mult_PT_A_P_newmatrix_kernel_wrapper(CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
1308  CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
1309  const LocalOrdinalViewType & Acol2Prow,
1310  const LocalOrdinalViewType & Acol2PIrow,
1311  const LocalOrdinalViewType & Pcol2Accol,
1312  const LocalOrdinalViewType & PIcol2Accol,
1313  CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
1314  Teuchos::RCP<const Import<LocalOrdinal,GlobalOrdinal,Node> > Acimport,
1315  const std::string& label,
1316  const Teuchos::RCP<Teuchos::ParameterList>& params) {
1317 #ifdef HAVE_TPETRA_MMM_TIMINGS
1318  std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
1319  using Teuchos::TimeMonitor;
1320  Teuchos::TimeMonitor MM(*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP local transpose")));
1321 #endif
1322 
1323  // We don't need a kernel-level PTAP; we just transpose P locally here and delegate to the RAP kernel
1324  typedef RowMatrixTransposer<Scalar,LocalOrdinal,GlobalOrdinal, Node> transposer_type;
1325  transposer_type transposer (Pview.origMatrix,label+std::string("XP: "));
1326 
1327  using Teuchos::ParameterList;
1328  using Teuchos::RCP;
1329  RCP<ParameterList> transposeParams (new ParameterList);
1330  transposeParams->set ("sort", false);
1331 
1332  if (! params.is_null ()) {
1333  transposeParams->set ("compute global constants",
1334  params->get ("compute global constants: temporaries",
1335  false));
1336  }
1337  RCP<CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> > Ptrans =
1338  transposer.createTransposeLocal (transposeParams);
1339  CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node> Rview;
1340  Rview.origMatrix = Ptrans;
1341 
1342  mult_R_A_P_newmatrix_kernel_wrapper(Rview,Aview,Pview,Acol2Prow,Acol2PIrow,Pcol2Accol,PIcol2Accol,Ac,Acimport,label,params);
1343  }
1344 
1345  /*********************************************************************************************************/
1346  // PT_A_P Reuse Kernel wrappers (Default, general, non-threaded version)
1347  // Computes P.T * A * P -> Ac
1348  template<class Scalar,
1349  class LocalOrdinal,
1350  class GlobalOrdinal,
1351  class Node,
1352  class LocalOrdinalViewType>
1353  void KernelWrappers3<Scalar,LocalOrdinal,GlobalOrdinal,Node,LocalOrdinalViewType>::mult_PT_A_P_reuse_kernel_wrapper(CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
1354  CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
1355  const LocalOrdinalViewType & Acol2Prow,
1356  const LocalOrdinalViewType & Acol2PIrow,
1357  const LocalOrdinalViewType & Pcol2Accol,
1358  const LocalOrdinalViewType & PIcol2Accol,
1359  CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
1360  Teuchos::RCP<const Import<LocalOrdinal,GlobalOrdinal,Node> > Acimport,
1361  const std::string& label,
1362  const Teuchos::RCP<Teuchos::ParameterList>& params) {
1363 #ifdef HAVE_TPETRA_MMM_TIMINGS
1364  std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
1365  using Teuchos::TimeMonitor;
1366  Teuchos::TimeMonitor MM(*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP local transpose")));
1367 #endif
1368 
1369  // We don't need a kernel-level PTAP; we just transpose P locally here and delegate to the RAP kernel
1370  typedef RowMatrixTransposer<Scalar,LocalOrdinal,GlobalOrdinal, Node> transposer_type;
1371  transposer_type transposer (Pview.origMatrix,label+std::string("XP: "));
1372 
1373  using Teuchos::ParameterList;
1374  using Teuchos::RCP;
1375  RCP<ParameterList> transposeParams (new ParameterList);
1376  transposeParams->set ("sort", false);
1377 
1378  if (! params.is_null ()) {
1379  transposeParams->set ("compute global constants",
1380  params->get ("compute global constants: temporaries",
1381  false));
1382  }
1383  RCP<CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> > Ptrans =
1384  transposer.createTransposeLocal (transposeParams);
1385  CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node> Rview;
1386  Rview.origMatrix = Ptrans;
1387 
1388  mult_R_A_P_reuse_kernel_wrapper(Rview,Aview,Pview,Acol2Prow,Acol2PIrow,Pcol2Accol,PIcol2Accol,Ac,Acimport,label,params);
1389  }
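 // Usage sketch (illustrative, not a prescription): user code normally
 // reaches the wrappers above through Tpetra::TripleMatrixMultiply::MultiplyRAP
 // rather than calling them directly.  Passing P transposed in the R slot
 // yields Ac = P^T * A * P; A, P, and Ac are assumed to be suitably
 // constructed CrsMatrix objects.
 //
 //   Tpetra::TripleMatrixMultiply::MultiplyRAP (P, true,   // op(R) = P^T
 //                                              A, false,  // op(A) = A
 //                                              P, false,  // op(P) = P
 //                                              Ac,
 //                                              true,      // fillComplete the result
 //                                              "PtAP");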
1390 
1391  /*********************************************************************************************************/
1392  // PT_A_P NewMatrix Kernel wrappers (Default non-threaded, 2-pass version)
1393  // Computes P.T * A * P -> Ac using a 2-pass algorithm.
1394  // This turned out to be slower on SerialNode, but it might still be helpful when going to Kokkos, so I left it in.
1395  // Currently, this implementation never gets called.
1396  template<class Scalar,
1397  class LocalOrdinal,
1398  class GlobalOrdinal,
1399  class Node>
1400  void KernelWrappers3MMM<Scalar,LocalOrdinal,GlobalOrdinal,Node>::mult_PT_A_P_newmatrix_kernel_wrapper_2pass(CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Aview,
1401  CrsMatrixStruct<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Pview,
1402  const Teuchos::Array<LocalOrdinal> & Acol2PRow,
1403  const Teuchos::Array<LocalOrdinal> & Acol2PRowImport,
1404  const Teuchos::Array<LocalOrdinal> & Pcol2Accol,
1405  const Teuchos::Array<LocalOrdinal> & PIcol2Accol,
1406  CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& Ac,
1407  Teuchos::RCP<const Import<LocalOrdinal,GlobalOrdinal,Node> > Acimport,
1408  const std::string& label,
1409  const Teuchos::RCP<Teuchos::ParameterList>& params) {
1410 #ifdef HAVE_TPETRA_MMM_TIMINGS
1411  std::string prefix_mmm = std::string("TpetraExt ") + label + std::string(": ");
1412  using Teuchos::TimeMonitor;
1413  Teuchos::RCP<Teuchos::TimeMonitor> MM = rcp(new TimeMonitor(*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP Newmatrix SerialCore"))));
1414 #endif
1415 
1416  using Teuchos::Array;
1417  using Teuchos::ArrayRCP;
1418  using Teuchos::ArrayView;
1419  using Teuchos::RCP;
1420  using Teuchos::rcp;
1421 
1422  typedef Scalar SC;
1423  typedef LocalOrdinal LO;
1424  typedef GlobalOrdinal GO;
1425  typedef Node NO;
1426  typedef RowMatrixTransposer<SC,LO,GO,NO> transposer_type;
1427  const LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
1428  const SC SC_ZERO = Teuchos::ScalarTraits<Scalar>::zero();
1429 
1430  // number of rows on the process of the fine matrix (rows of P and A; used by the second, numeric pass)
1431  size_t m = Pview.origMatrix->getNodeNumRows();
1432  // number of rows on the process of the coarse matrix
1433  size_t n = Ac.getRowMap()->getNodeNumElements();
1434  LO maxAccol = Ac.getColMap()->getMaxLocalIndex();
1435 
1436  // Get Data Pointers
1437  ArrayRCP<const size_t> Arowptr_RCP, Prowptr_RCP, Irowptr_RCP;
1438  ArrayRCP<size_t> Acrowptr_RCP;
1439  ArrayRCP<const LO> Acolind_RCP, Pcolind_RCP, Icolind_RCP;
1440  ArrayRCP<LO> Accolind_RCP;
1441  ArrayRCP<const Scalar> Avals_RCP, Pvals_RCP, Ivals_RCP;
1442  ArrayRCP<SC> Acvals_RCP;
1443 
1444  // mfh 27 Sep 2016: "getAllValues" just gets the three CSR arrays
1445  // out of the CrsMatrix. This code computes R * A * (P_local +
1446  // P_remote), where P_local contains the locally owned rows of P,
1447  // and P_remote the (previously Import'ed) remote rows of P.
1448 
1449  Aview.origMatrix->getAllValues(Arowptr_RCP, Acolind_RCP, Avals_RCP);
1450  Pview.origMatrix->getAllValues(Prowptr_RCP, Pcolind_RCP, Pvals_RCP);
1451 
1452  if (!Pview.importMatrix.is_null())
1453  Pview.importMatrix->getAllValues(Irowptr_RCP, Icolind_RCP, Ivals_RCP);
1454 
1455  // mfh 27 Sep 2016: Remark below "For efficiency" refers to an issue
1456  // where Teuchos::ArrayRCP::operator[] may be slower than
1457  // Teuchos::ArrayView::operator[].
1458 
1459  // For efficiency
1460  ArrayView<const size_t> Arowptr, Prowptr, Irowptr;
1461  ArrayView<const LO> Acolind, Pcolind, Icolind;
1462  ArrayView<const SC> Avals, Pvals, Ivals;
1463  ArrayView<size_t> Acrowptr;
1464  ArrayView<LO> Accolind;
1465  ArrayView<SC> Acvals;
1466  Arowptr = Arowptr_RCP(); Acolind = Acolind_RCP(); Avals = Avals_RCP();
1467  Prowptr = Prowptr_RCP(); Pcolind = Pcolind_RCP(); Pvals = Pvals_RCP();
1468  if (!Pview.importMatrix.is_null()) {
1469  Irowptr = Irowptr_RCP(); Icolind = Icolind_RCP(); Ivals = Ivals_RCP();
1470  }
1471 
1473  // In a first pass, determine the graph of Ac.
1475 
1477  // Get the graph of Ac. This gets the local transpose of P,
1478  // then loops over R, A, P to get the graph of Ac.
1480 
1481 #ifdef HAVE_TPETRA_MMM_TIMINGS
1482  MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP local transpose"))));
1483 #endif
1484 
1486  // Get the local transpose of the graph of P by locally transposing
1487  // all of P
1488 
1489  ArrayRCP<const size_t> Rrowptr_RCP;
1490  ArrayRCP<const LO> Rcolind_RCP;
1491  ArrayRCP<const Scalar> Rvals_RCP;
1492  ArrayView<const size_t> Rrowptr;
1493  ArrayView<const LO> Rcolind;
1494  ArrayView<const SC> Rvals;
1495 
1496  transposer_type transposer (Pview.origMatrix,label+std::string("XP: "));
1497 
1498  using Teuchos::ParameterList;
1499  RCP<ParameterList> transposeParams (new ParameterList);
1500  transposeParams->set ("sort", false);
1501  if (! params.is_null ()) {
1502  transposeParams->set ("compute global constants",
1503  params->get ("compute global constants: temporaries",
1504  false));
1505  }
1506  RCP<CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> > Ptrans =
1507  transposer.createTransposeLocal (transposeParams);
1508 
1509  Ptrans->getAllValues(Rrowptr_RCP, Rcolind_RCP, Rvals_RCP);
1510  Rrowptr = Rrowptr_RCP();
1511  Rcolind = Rcolind_RCP();
1512  Rvals = Rvals_RCP();
1513 
1515  // Construct graph
1516 
1517  #ifdef HAVE_TPETRA_MMM_TIMINGS
1518  MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP graph"))));
1519  #endif
1520 
1521  const size_t ST_INVALID = Teuchos::OrdinalTraits<size_t>::invalid();
1522  Array<size_t> ac_status(maxAccol + 1, ST_INVALID);
1523 
1524  size_t nnz_alloc = std::max(Ac_estimate_nnz(*Aview.origMatrix, *Pview.origMatrix), n);
1525  size_t nnzPerRowA = 100;
1526  if (Aview.origMatrix->getNodeNumEntries() > 0)
1527  nnzPerRowA = Aview.origMatrix->getNodeNumEntries()/Aview.origMatrix->getNodeNumRows();
1528  Acrowptr_RCP.resize(n+1);
1529  Acrowptr = Acrowptr_RCP();
1530  Accolind_RCP.resize(nnz_alloc);
1531  Accolind = Accolind_RCP();
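 // Worked example (illustrative only): an A with 1000 local rows and 9000
 // local entries gives nnzPerRowA = 9, so the resize guard at the bottom of
 // the row loop below keeps at least max(5*9, n) = max(45, n) free slots
 // before each new row, doubling nnz_alloc (or more) whenever it cannot.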
1532 
1533  size_t nnz = 0, nnz_old = 0;
1534  for (size_t i = 0; i < n; i++) {
1535  // mfh 27 Sep 2016: n is the number of rows in the input matrix R
1536  // (the local transpose of P) on the calling process.
1537  Acrowptr[i] = nnz;
1538 
1539  // mfh 27 Sep 2016: For each entry of R in the current row of R
1540  for (size_t kk = Rrowptr[i]; kk < Rrowptr[i+1]; kk++) {
1541  LO k = Rcolind[kk]; // local column index of current entry of R
1542  // For each entry of A in the current row of A
1543  for (size_t ll = Arowptr[k]; ll < Arowptr[k+1]; ll++) {
1544  LO l = Acolind[ll]; // local column index of current entry of A
1545 
1546  if (Acol2PRow[l] != LO_INVALID) {
1547  // mfh 27 Sep 2016: If the entry of Acol2PRow
1548  // corresponding to the current entry of A is populated, then
1549  // the corresponding row of P is in P_local (i.e., it lives on
1550  // the calling process).
1551 
1552  // Local matrix
1553  size_t Pl = Teuchos::as<size_t>(Acol2PRow[l]);
1554 
1555  // mfh 27 Sep 2016: Go through all entries in that row of P_local.
1556  for (size_t jj = Prowptr[Pl]; jj < Prowptr[Pl+1]; jj++) {
1557  LO j = Pcolind[jj];
1558  LO Acj = Pcol2Accol[j];
1559 
1560  if (ac_status[Acj] == ST_INVALID || ac_status[Acj] < nnz_old) {
1561  // New entry
1562  ac_status[Acj] = nnz;
1563  Accolind[nnz] = Acj;
1564  nnz++;
1565  }
1566  }
1567  } else {
1568  // mfh 27 Sep 2016: If the entry of Acol2PRow
1569  // corresponding to the current entry of A is NOT populated (has
1570  // a flag "invalid" value), then the corresponding row of P is
1571  // in P_remote (i.e., it does not live on the calling process).
1572 
1573  // Remote matrix
1574  size_t Il = Teuchos::as<size_t>(Acol2PRowImport[l]);
1575  for (size_t jj = Irowptr[Il]; jj < Irowptr[Il+1]; jj++) {
1576  LO j = Icolind[jj];
1577  LO Acj = PIcol2Accol[j];
1578 
1579  if (ac_status[Acj] == ST_INVALID || ac_status[Acj] < nnz_old){
1580  // New entry
1581  ac_status[Acj] = nnz;
1582  Accolind[nnz] = Acj;
1583  nnz++;
1584  }
1585  }
1586  }
1587  }
1588  }
1589  // Resize for next pass if needed
1590  // cag: Maybe we can do something more subtle here, and not double
1591  // the size right away.
1592  if (nnz + std::max(5*nnzPerRowA, n) > nnz_alloc) {
1593  nnz_alloc *= 2;
1594  nnz_alloc = std::max(nnz_alloc, nnz + std::max(5*nnzPerRowA, n));
1595  Accolind_RCP.resize(nnz_alloc); Accolind = Accolind_RCP();
1596  Acvals_RCP.resize(nnz_alloc); Acvals = Acvals_RCP();
1597  }
1598  nnz_old = nnz;
1599  }
1600  Acrowptr[n] = nnz;
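 // Worked example (illustrative only) of the "new entry" test above: suppose
 // row 0 inserted columns {3, 5} at slots 0 and 1, so nnz_old == 2 when
 // row 1 begins.  If row 1 generates column 3 again, ac_status[3] == 0 <
 // nnz_old, so it counts as new for this row and is appended at slot 2; a
 // second occurrence of column 3 within row 1 then sees ac_status[3] == 2 >=
 // nnz_old and is skipped.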
1601 
1602  // Downward resize
1603  Accolind_RCP.resize(nnz);
1604  Accolind = Accolind_RCP();
1605 
1606  // Allocate Acvals
1607  Acvals_RCP.resize(nnz, SC_ZERO);
1608  Acvals = Acvals_RCP();
1609 
1610 
1612  // In a second pass, enter the values into Acvals.
1614 
1615  #ifdef HAVE_TPETRA_MMM_TIMINGS
1616  MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP Newmatrix Fill Matrix"))));
1617  #endif
1618 
1619 
1620  for (size_t k = 0; k < m; k++) { // loop over the m local (fine) rows of P and A
1621  for (size_t ii = Prowptr[k]; ii < Prowptr[k+1]; ii++) {
1622  LO i = Pcolind[ii];
1623  const SC Pki = Pvals[ii];
1624  for (size_t ll = Arowptr[k]; ll < Arowptr[k+1]; ll++) {
1625  LO l = Acolind[ll];
1626  const SC Akl = Avals[ll];
1627  if (Akl == SC_ZERO)
1628  continue; // skip explicitly stored zero values in A
1629  if (Acol2PRow[l] != LO_INVALID) {
1630  // mfh 27 Sep 2016: If the entry of Acol2PRow
1631  // corresponding to the current entry of A is populated, then
1632  // the corresponding row of P is in P_local (i.e., it lives on
1633  // the calling process).
1634 
1635  // Local matrix
1636  size_t Pl = Teuchos::as<size_t>(Acol2PRow[l]);
1637  for (size_t jj = Prowptr[Pl]; jj < Prowptr[Pl+1]; jj++) {
1638  LO j = Pcolind[jj];
1639  LO Acj = Pcol2Accol[j];
1640  size_t pp;
1641  for (pp = Acrowptr[i]; pp < Acrowptr[i+1]; pp++)
1642  if (Accolind[pp] == Acj)
1643  break;
1644  // TEUCHOS_TEST_FOR_EXCEPTION(Accolind[pp] != Acj,
1645  // std::runtime_error, "problem with Ac column indices");
1646  Acvals[pp] += Pki*Akl*Pvals[jj];
1647  }
1648  } else {
1649  // mfh 27 Sep 2016: If the entry of Acol2PRow
1650  // corresponding to the current entry of A is NOT populated (has
1651  // a flag "invalid" value), then the corresponding row of P is
1652  // in P_remote (i.e., it does not live on the calling process).
1653 
1654  // Remote matrix
1655  size_t Il = Teuchos::as<size_t>(Acol2PRowImport[l]);
1656  for (size_t jj = Irowptr[Il]; jj < Irowptr[Il+1]; jj++) {
1657  LO j = Icolind[jj];
1658  LO Acj = PIcol2Accol[j];
1659  size_t pp;
1660  for (pp = Acrowptr[i]; pp < Acrowptr[i+1]; pp++)
1661  if (Accolind[pp] == Acj)
1662  break;
1663  // TEUCHOS_TEST_FOR_EXCEPTION(Accolind[pp] != Acj,
1664  // std::runtime_error, "problem with Ac column indices");
1665  Acvals[pp] += Pki*Akl*Ivals[jj];
1666  }
1667  }
1668  }
1669  }
1670  }
1671 
1672 
1673 #ifdef HAVE_TPETRA_MMM_TIMINGS
1674  MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP sort"))));
1675 #endif
1676 
1677  // Final sort & set of CRS arrays
1678  //
1679  // TODO (mfh 27 Sep 2016) Will the thread-parallel "local" sparse
1680  // matrix-matrix multiply routine sort the entries for us?
1681  Import_Util::sortCrsEntries(Acrowptr_RCP(), Accolind_RCP(), Acvals_RCP());
1682 
1683  // mfh 27 Sep 2016: This just sets pointers.
1684  Ac.setAllValues(Acrowptr_RCP, Accolind_RCP, Acvals_RCP);
1685 
1686 #ifdef HAVE_TPETRA_MMM_TIMINGS
1687  MM = rcp(new TimeMonitor (*TimeMonitor::getNewTimer(prefix_mmm + std::string("PTAP Newmatrix ESFC"))));
1688 #endif
1689 
1690  // Final FillComplete
1691  //
1692  // mfh 27 Sep 2016: So-called "expert static fill complete" bypasses
1693  // Import (from domain Map to column Map) construction (which costs
1694  // lots of communication) by taking the previously constructed
1695  // Import object. We should be able to do this without interfering
1696  // with the implementation of the local part of sparse matrix-matrix
1697  // multiply above.
1698  RCP<Teuchos::ParameterList> labelList = rcp(new Teuchos::ParameterList);
1699  labelList->set("Timer Label",label);
1700  // labelList->set("Sort column Map ghost GIDs")
1701  if(!params.is_null()) labelList->set("compute global constants",params->get("compute global constants",true));
1702  RCP<const Export<LO,GO,NO> > dummyExport;
1703  Ac.expertStaticFillComplete(Pview.origMatrix->getDomainMap(),
1704  Pview.origMatrix->getDomainMap(),
1705  Acimport,
1706  dummyExport, labelList);
1707  }
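// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Tpetra): the two-pass pattern above,
// reduced to a serial C = A * B product (the kernel applies the same pattern
// to R * A * P).  Pass 1 builds the graph with the same
// "status == INVALID || status < nnz_old" newness test; pass 2 fills values
// using the same linear row search.  Reuses the hypothetical MiniCSR struct
// from the sketch after the RAP reuse kernel above; assumes <vector> and
// <limits>.  miniTwoPassMultiply is a name introduced only for this example.
// ---------------------------------------------------------------------------
namespace { // example only

  inline MiniCSR miniTwoPassMultiply (const MiniCSR& A, const MiniCSR& B,
                                      const size_t numRows,
                                      const size_t numColsC)
  {
    const size_t INVALID = std::numeric_limits<size_t>::max ();
    MiniCSR C;
    C.rowptr.assign (numRows + 1, 0);
    std::vector<size_t> status (numColsC, INVALID);

    // Pass 1 (symbolic): determine the column pattern of each row of C.
    size_t nnz = 0, nnz_old = 0;
    for (size_t i = 0; i < numRows; ++i) {
      C.rowptr[i] = nnz;
      for (size_t kk = A.rowptr[i]; kk < A.rowptr[i+1]; ++kk) {
        const int k = A.colind[kk];
        for (size_t jj = B.rowptr[k]; jj < B.rowptr[k+1]; ++jj) {
          const int j = B.colind[jj];
          if (status[j] == INVALID || status[j] < nnz_old) {
            status[j] = nnz;           // first appearance of j in row i
            C.colind.push_back (j);
            ++nnz;
          }
        }
      }
      nnz_old = nnz;
    }
    C.rowptr[numRows] = nnz;
    C.vals.assign (nnz, 0.0);

    // Pass 2 (numeric): accumulate values into the now-fixed pattern.
    for (size_t i = 0; i < numRows; ++i) {
      for (size_t kk = A.rowptr[i]; kk < A.rowptr[i+1]; ++kk) {
        const int k = A.colind[kk];
        for (size_t jj = B.rowptr[k]; jj < B.rowptr[k+1]; ++jj) {
          size_t pp = C.rowptr[i];     // linear search, as in the kernel above
          while (C.colind[pp] != B.colind[jj]) ++pp;
          C.vals[pp] += A.vals[kk] * B.vals[jj];
        }
      }
    }
    return C;
  }

} // anonymous namespace (example only)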
1708 
1709 
1710 
1711  } // End namespace MMdetails
1712 
1713 } //End namespace Tpetra
1714 //
1715 // Explicit instantiation macro
1716 //
1717 // Must be expanded from within the Tpetra namespace!
1718 //
1719 
1720 #define TPETRA_TRIPLEMATRIXMULTIPLY_INSTANT(SCALAR,LO,GO,NODE) \
1721  \
1722  template \
1723  void TripleMatrixMultiply::MultiplyRAP( \
1724  const CrsMatrix< SCALAR , LO , GO , NODE >& R, \
1725  bool transposeR, \
1726  const CrsMatrix< SCALAR , LO , GO , NODE >& A, \
1727  bool transposeA, \
1728  const CrsMatrix< SCALAR , LO , GO , NODE >& P, \
1729  bool transposeP, \
1730  CrsMatrix< SCALAR , LO , GO , NODE >& Ac, \
1731  bool call_FillComplete_on_result, \
1732  const std::string & label, \
1733  const Teuchos::RCP<Teuchos::ParameterList>& params); \
1734  \
1735 
1736 
1737 #endif // TPETRA_TRIPLEMATRIXMULTIPLY_DEF_HPP