Tpetra_BlockCrsMatrix_Helpers_def.hpp
// @HEADER
// ***********************************************************************
//
// Tpetra: Templated Linear Algebra Services Package
// Copyright (2008) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
// @HEADER

#ifndef TPETRA_BLOCKCRSMATRIX_HELPERS_DEF_HPP
#define TPETRA_BLOCKCRSMATRIX_HELPERS_DEF_HPP

#include "Tpetra_BlockCrsMatrix_Helpers_decl.hpp"

#include "Tpetra_BlockCrsMatrix.hpp"
#include "Tpetra_CrsMatrix.hpp"
#include "Tpetra_HashTable.hpp"
#include "Tpetra_Import.hpp"
#include "Tpetra_Map.hpp"
#include "Tpetra_MultiVector.hpp"
#include "Teuchos_ParameterList.hpp"
#include "Teuchos_ScalarTraits.hpp"
#include <ctime>
#include <fstream>

namespace Tpetra {

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::string const &fileName) {
    Teuchos::ParameterList pl;
    std::ofstream out;
    out.open(fileName.c_str());
    blockCrsMatrixWriter(A, out, pl);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::string const &fileName, Teuchos::ParameterList const &params) {
    std::ofstream out;
    out.open(fileName.c_str());
    blockCrsMatrixWriter(A, out, params);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os) {
    Teuchos::ParameterList pl;
    blockCrsMatrixWriter(A, os, pl);
  }

  template<class Scalar, class LO, class GO, class Node>
  void blockCrsMatrixWriter(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os, Teuchos::ParameterList const &params) {

    using Teuchos::RCP;
    using Teuchos::rcp;

    typedef Teuchos::OrdinalTraits<GO> TOT;
    typedef BlockCrsMatrix<Scalar, LO, GO, Node> block_crs_matrix_type;
    typedef Tpetra::Import<LO, GO, Node> import_type;
    typedef Tpetra::Map<LO, GO, Node> map_type;
    typedef Tpetra::MultiVector<GO, LO, GO, Node> mv_type;
    typedef Tpetra::CrsGraph<LO, GO, Node> crs_graph_type;

    RCP<const map_type> const rowMap = A.getRowMap(); // "mesh" map
    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm();
    const int myRank = comm->getRank();
    const size_t numProcs = comm->getSize();

    // If true, force use of the import strip-mining infrastructure.
    // This is useful for debugging on one process.
    bool alwaysUseParallelAlgorithm = false;
    if (params.isParameter("always use parallel algorithm"))
      alwaysUseParallelAlgorithm = params.get<bool>("always use parallel algorithm");
    bool printMatrixMarketHeader = true;
    if (params.isParameter("print MatrixMarket header"))
      printMatrixMarketHeader = params.get<bool>("print MatrixMarket header");

    if (printMatrixMarketHeader && myRank == 0) {
      std::time_t now = std::time(NULL);

      const std::string dataTypeStr =
        Teuchos::ScalarTraits<Scalar>::isComplex ? "complex" : "real";

      // Explanation of first line of file:
      // - "%%MatrixMarket" is the tag for Matrix Market format.
      // - "matrix" is what we're printing.
      // - "coordinate" means sparse (triplet format), rather than dense.
      // - "real" / "complex" is the type (in an output format sense,
      //   not in a C++ sense) of each value of the matrix.  (The
      //   other two possibilities are "integer" (not really necessary
      //   here) and "pattern" (no values, just graph).)
      os << "%%MatrixMarket matrix coordinate " << dataTypeStr << " general" << std::endl;
      os << "% time stamp: " << ctime(&now);
      os << "% written from " << numProcs << " processes" << std::endl;
      os << "% point representation of Tpetra::BlockCrsMatrix" << std::endl;
      size_t numRows = A.getGlobalNumRows();
      size_t numCols = A.getGlobalNumCols();
      os << "% " << numRows << " block rows, " << numCols << " block columns" << std::endl;
      const LO blockSize = A.getBlockSize();
      os << "% block size " << blockSize << std::endl;
      os << numRows*blockSize << " " << numCols*blockSize << " " << A.getGlobalNumEntries()*blockSize*blockSize << std::endl;
    }
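
    // Illustrative example (not from the original source): for a hypothetical
    // matrix with 100 block rows, 100 block columns, 460 stored blocks, and
    // block size 2, the size line above would read "200 200 1840"; every
    // count is reported in point (scalar) entries rather than in blocks.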

    if (numProcs == 1 && !alwaysUseParallelAlgorithm) {
      writeMatrixStrip(A, os, params);
    } else {
      size_t numRows = rowMap->getNodeNumElements();

      // Create source map
      RCP<const map_type> allMeshGidsMap = rcp(new map_type(TOT::invalid(), numRows, A.getIndexBase(), comm));
      // Create and populate vector of mesh GIDs corresponding to this pid's rows.
      // This vector will be imported one pid's worth of information at a time to pid 0.
      mv_type allMeshGids(allMeshGidsMap, 1);
      Teuchos::ArrayRCP<GO> allMeshGidsData = allMeshGids.getDataNonConst(0);

      for (size_t i = 0; i < numRows; i++)
        allMeshGidsData[i] = rowMap->getGlobalElement(i);
      allMeshGidsData = Teuchos::null;

      // Now construct a RowMatrix on PE 0 by strip-mining the rows of the input matrix A.
      size_t stripSize = allMeshGids.getGlobalLength() / numProcs;
      size_t remainder = allMeshGids.getGlobalLength() % numProcs;
      size_t curStart = 0;
      size_t curStripSize = 0;
      Teuchos::Array<GO> importMeshGidList;
      for (size_t i = 0; i < numProcs; i++) {
        if (myRank == 0) { // Only PE 0 does this part
          curStripSize = stripSize;
          if (i < remainder) curStripSize++; // handle leftovers
          importMeshGidList.resize(curStripSize); // Set size of vector to max needed
          for (size_t j = 0; j < curStripSize; j++) importMeshGidList[j] = j + curStart + A.getIndexBase();
          curStart += curStripSize;
        }
        // The following import map should be non-trivial only on PE 0.
        TEUCHOS_TEST_FOR_EXCEPTION(myRank > 0 && curStripSize != 0,
          std::runtime_error, "Tpetra::blockCrsMatrixWriter: (pid "
          << myRank << ") map size should be zero, but is " << curStripSize);
        RCP<map_type> importMeshGidMap = rcp(new map_type(TOT::invalid(), importMeshGidList(), A.getIndexBase(), comm));
        import_type gidImporter(allMeshGidsMap, importMeshGidMap);
        mv_type importMeshGids(importMeshGidMap, 1);
        importMeshGids.doImport(allMeshGids, gidImporter, INSERT);

        // importMeshGids now has a list of GIDs for the current strip of matrix rows.
        // Use these values to build another importer that will get rows of the matrix.

        // The following import map will be non-trivial only on PE 0.
        Teuchos::ArrayRCP<const GO> importMeshGidsData = importMeshGids.getData(0);
        Teuchos::Array<GO> importMeshGidsGO;
        importMeshGidsGO.reserve(importMeshGidsData.size());
        for (typename Teuchos::ArrayRCP<const GO>::size_type j = 0; j < importMeshGidsData.size(); ++j)
          importMeshGidsGO.push_back(importMeshGidsData[j]);
        RCP<const map_type> importMap = rcp(new map_type(TOT::invalid(), importMeshGidsGO(), rowMap->getIndexBase(), comm));

        import_type importer(rowMap, importMap);
        size_t numEntriesPerRow = A.getCrsGraph().getGlobalMaxNumRowEntries();
        RCP<crs_graph_type> graph = createCrsGraph(importMap, numEntriesPerRow);
        RCP<const map_type> domainMap = A.getCrsGraph().getDomainMap();
        graph->doImport(A.getCrsGraph(), importer, INSERT);
        graph->fillComplete(domainMap, importMap);

        block_crs_matrix_type importA(*graph, A.getBlockSize());
        importA.doImport(A, importer, INSERT);

        // Finally we are ready to write this strip of the matrix.
        writeMatrixStrip(importA, os, params);
      }
    }
  }
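
  // Usage sketch (illustrative, not part of the original file): the writer is
  // typically driven through a Teuchos::ParameterList.  Only the parameter
  // names queried above are meaningful; the matrix "A" and the file name are
  // placeholders.
  //
  //   Teuchos::ParameterList params;
  //   params.set("precision", 16);                       // stream precision for values
  //   params.set("zero-based indexing", false);          // default output is one-based
  //   params.set("always use parallel algorithm", false);
  //   params.set("print MatrixMarket header", true);
  //   Tpetra::blockCrsMatrixWriter(A, std::string("A.mm"), params);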

  template<class Scalar, class LO, class GO, class Node>
  void writeMatrixStrip(BlockCrsMatrix<Scalar,LO,GO,Node> const &A, std::ostream &os, Teuchos::ParameterList const &params) {
    using Teuchos::RCP;
    using map_type = Tpetra::Map<LO, GO, Node>;

    size_t numRows = A.getGlobalNumRows();
    RCP<const map_type> rowMap = A.getRowMap();
    RCP<const map_type> colMap = A.getColMap();
    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm();
    const int myRank = comm->getRank();

    const size_t meshRowOffset = rowMap->getIndexBase();
    const size_t meshColOffset = colMap->getIndexBase();
    TEUCHOS_TEST_FOR_EXCEPTION(meshRowOffset != meshColOffset,
      std::runtime_error, "Tpetra::writeMatrixStrip: "
      "mesh row index base != mesh column index base");

    if (myRank != 0) {

      TEUCHOS_TEST_FOR_EXCEPTION(A.getNodeNumRows() != 0,
        std::runtime_error, "Tpetra::writeMatrixStrip: pid "
        << myRank << " should have 0 rows but has " << A.getNodeNumRows());
      TEUCHOS_TEST_FOR_EXCEPTION(A.getNodeNumCols() != 0,
        std::runtime_error, "Tpetra::writeMatrixStrip: pid "
        << myRank << " should have 0 columns but has " << A.getNodeNumCols());

    } else {

      TEUCHOS_TEST_FOR_EXCEPTION(numRows != A.getNodeNumRows(),
        std::runtime_error, "Tpetra::writeMatrixStrip: "
        "number of rows on pid 0 does not match global number of rows");

      int err = 0;
      const LO blockSize = A.getBlockSize();
      const size_t numLocalRows = A.getNodeNumRows();
      bool precisionChanged = false;
      int oldPrecision = 0; // avoid "unused variable" warning
      if (params.isParameter("precision")) {
        oldPrecision = os.precision(params.get<int>("precision"));
        precisionChanged = true;
      }
      int pointOffset = 1;
      if (params.isParameter("zero-based indexing")) {
        if (params.get<bool>("zero-based indexing") == true)
          pointOffset = 0;
      }

      size_t localRowInd;
      for (localRowInd = 0; localRowInd < numLocalRows; ++localRowInd) {

        // Get a view of the current row.
        const LO* localColInds;
        Scalar* vals;
        LO numEntries;
        err = A.getLocalRowView (localRowInd, localColInds, vals, numEntries);
        if (err != 0)
          break;
        GO globalMeshRowID = rowMap->getGlobalElement(localRowInd) - meshRowOffset;

        for (LO k = 0; k < numEntries; ++k) {
          GO globalMeshColID = colMap->getGlobalElement(localColInds[k]) - meshColOffset;
          Scalar* const curBlock = vals + blockSize * blockSize * k;
          // Blocks are stored in row-major format.
          for (LO j = 0; j < blockSize; ++j) {
            GO globalPointRowID = globalMeshRowID * blockSize + j + pointOffset;
            for (LO i = 0; i < blockSize; ++i) {
              GO globalPointColID = globalMeshColID * blockSize + i + pointOffset;
              const Scalar curVal = curBlock[i + j * blockSize];

              os << globalPointRowID << " " << globalPointColID << " ";
              if (Teuchos::ScalarTraits<Scalar>::isComplex) {
                // Matrix Market format wants complex values to be
                // written as space-delimited pairs.  See Bug 6469.
                os << Teuchos::ScalarTraits<Scalar>::real (curVal) << " "
                   << Teuchos::ScalarTraits<Scalar>::imag (curVal);
              }
              else {
                os << curVal;
              }
              os << std::endl;
            }
          }
        }
      }
      if (precisionChanged)
        os.precision(oldPrecision);
      TEUCHOS_TEST_FOR_EXCEPTION(err != 0,
        std::runtime_error, "Tpetra::writeMatrixStrip: "
        "error getting view of local row " << localRowInd);

    }

  }
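
  // Worked example of the index arithmetic above (illustrative): with block
  // size 3 and one-based point indexing (pointOffset == 1), entry (j=1, i=2)
  // of the block at mesh row 4, mesh column 7 is written as point row
  // 4*3 + 1 + 1 = 14 and point column 7*3 + 2 + 1 = 24, and its value is read
  // from curBlock[2 + 1*3] because blocks are stored row-major.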

  template<class Scalar, class LO, class GO, class Node>
  Teuchos::RCP<BlockCrsMatrix<Scalar, LO, GO, Node> >
  convertToBlockCrsMatrix(const Tpetra::CrsMatrix<Scalar, LO, GO, Node>& pointMatrix, const LO &blockSize)
  {

    /*
      ASSUMPTIONS:

        1) In the point matrix, all entries associated with a little block are present (even if they are zero).
        2) For a given mesh DOF, the point DOFs appear consecutively and in ascending order in the row & column maps.
        3) The point column map and the block column map are ordered consistently.
    */

    using Teuchos::Array;
    using Teuchos::ArrayView;
    using Teuchos::RCP;

    typedef Tpetra::BlockCrsMatrix<Scalar,LO,GO,Node> block_crs_matrix_type;
    typedef Tpetra::Map<LO,GO,Node> map_type;
    typedef Tpetra::CrsGraph<LO,GO,Node> crs_graph_type;

    const map_type &pointRowMap = *(pointMatrix.getRowMap());
    RCP<const map_type> meshRowMap = createMeshMap<LO,GO,Node>(blockSize, pointRowMap);

    const map_type &pointColMap = *(pointMatrix.getColMap());
    RCP<const map_type> meshColMap = createMeshMap<LO,GO,Node>(blockSize, pointColMap);

    const map_type &pointDomainMap = *(pointMatrix.getDomainMap());
    RCP<const map_type> meshDomainMap = createMeshMap<LO,GO,Node>(blockSize, pointDomainMap);

    const map_type &pointRangeMap = *(pointMatrix.getRangeMap());
    RCP<const map_type> meshRangeMap = createMeshMap<LO,GO,Node>(blockSize, pointRangeMap);

    // Use the graph ctor that provides a column map and an upper bound on nonzeros per row.
    // We can use StaticProfile because the point graph should have at least as many entries per
    // row as the mesh graph.
    RCP<crs_graph_type> meshCrsGraph = rcp(new crs_graph_type(meshRowMap, meshColMap,
      pointMatrix.getGlobalMaxNumRowEntries(), Tpetra::StaticProfile));
    // Fill the graph by walking through the matrix.  For each mesh row, we query the collection of point
    // rows associated with it.  The point column ids are converted to mesh column ids and put into an array.
    // As each point row collection is finished, the mesh column ids are sorted, made unique, and inserted
    // into the mesh graph.
    ArrayView<const LO> pointColInds;
    ArrayView<const Scalar> pointVals;
    Array<GO> meshColGids;
    meshColGids.reserve(pointMatrix.getGlobalMaxNumRowEntries());
    // Again, we assume that the point GIDs associated with a mesh GID are consecutive.
    // If they are not, this will break!
    for (size_t i = 0; i < pointMatrix.getNodeNumRows()/blockSize; i++) {
      for (int j = 0; j < blockSize; ++j) {
        LO rowLid = i*blockSize + j;
        pointMatrix.getLocalRowView(rowLid, pointColInds, pointVals); // TODO optimization: since we don't care about
                                                                      // TODO values, we should use the graph instead.
        for (int k = 0; k < pointColInds.size(); ++k) {
          GO meshColInd = pointColMap.getGlobalElement(pointColInds[k]) / blockSize;
          meshColGids.push_back(meshColInd);
        }
      }
      // The list of mesh GIDs probably contains duplicates because we looped over all point rows in the block.
      // Sort and make unique.
      std::sort(meshColGids.begin(), meshColGids.end());
      meshColGids.erase(std::unique(meshColGids.begin(), meshColGids.end()), meshColGids.end());
      meshCrsGraph->insertGlobalIndices(meshRowMap->getGlobalElement(i), meshColGids());
      meshColGids.clear();
    }
    meshCrsGraph->fillComplete(meshDomainMap, meshRangeMap);

    // create and populate the block matrix
    RCP<block_crs_matrix_type> blockMatrix = rcp(new block_crs_matrix_type(*meshCrsGraph, blockSize));

    // preallocate the maximum number of (dense) block entries needed by any row
    int maxBlockEntries = blockMatrix->getNodeMaxNumRowEntries();
    Array<Array<Scalar>> blocks(maxBlockEntries);
    for (int i = 0; i < maxBlockEntries; ++i)
      blocks[i].reserve(blockSize*blockSize);
    std::map<int,int> bcol2bentry; // maps block column index to dense block entries
    std::map<int,int>::iterator iter;
    // Fill the block matrix.  We must do this in local index space.
    // TODO: Optimization: We assume the blocks are fully populated in the point matrix.  This means
    // TODO: on the first point row in the block row, we know that we're hitting new block col indices.
    // TODO: on other rows, we know the block col indices have all been seen before.
    //int offset;
    //if (pointMatrix.getIndexBase()) offset = 0;
    //else offset = 1;
    for (size_t i = 0; i < pointMatrix.getNodeNumRows()/blockSize; i++) {
      int blkCnt = 0; // how many unique block entries encountered so far in current block row
      for (int j = 0; j < blockSize; ++j) {
        LO rowLid = i*blockSize + j;
        pointMatrix.getLocalRowView(rowLid, pointColInds, pointVals);
        for (int k = 0; k < pointColInds.size(); ++k) {
          // convert point column to block col
          LO meshColInd = pointColInds[k] / blockSize;
          iter = bcol2bentry.find(meshColInd);
          if (iter == bcol2bentry.end()) {
            // new block column
            bcol2bentry[meshColInd] = blkCnt;
            blocks[blkCnt].push_back(pointVals[k]);
            blkCnt++;
          } else {
            // block column found previously
            int littleBlock = iter->second;
            blocks[littleBlock].push_back(pointVals[k]);
          }
        }
      }
      // TODO This inserts the blocks one block entry at a time.  It is probably more efficient to
      // TODO store all the blocks in a block row contiguously so they can be inserted with a single call.
      for (iter = bcol2bentry.begin(); iter != bcol2bentry.end(); ++iter) {
        LO localBlockCol = iter->first;
        Scalar *vals = (blocks[iter->second]).getRawPtr();
        blockMatrix->replaceLocalValues(i, &localBlockCol, vals, 1);
      }

      // Done with block row.  Zero everything out.
      for (int j = 0; j < maxBlockEntries; ++j)
        blocks[j].clear();
      blkCnt = 0;
      bcol2bentry.clear();
    }

    return blockMatrix;

  }
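
  // Usage sketch (illustrative, not part of the original file): given an
  // RCP<CrsMatrix> "pointA" whose rows and columns satisfy the assumptions
  // listed at the top of convertToBlockCrsMatrix (fully populated blocks,
  // consecutively numbered point DOFs per mesh DOF), a matrix with 3x3 blocks
  // could be obtained as
  //
  //   Teuchos::RCP<Tpetra::BlockCrsMatrix<Scalar,LO,GO,Node> > blockA =
  //     Tpetra::convertToBlockCrsMatrix(*pointA, 3);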

  template<class LO, class GO, class Node>
  Teuchos::RCP<const Tpetra::Map<LO,GO,Node> >
  createMeshMap (const LO& blockSize, const Tpetra::Map<LO,GO,Node>& pointMap)
  {
    typedef Teuchos::OrdinalTraits<Tpetra::global_size_t> TOT;
    typedef Tpetra::Map<LO,GO,Node> map_type;

    // calculate mesh GIDs
    Teuchos::ArrayView<const GO> pointGids = pointMap.getNodeElementList();
    Teuchos::Array<GO> meshGids;
    GO indexBase = pointMap.getIndexBase();

    // Use a hash table to track whether we've encountered this GID previously.  This will happen
    // when striding through the point DOFs in a block.  It should not happen otherwise.
    // I don't use sort/make unique because I don't want to change the ordering.
    meshGids.reserve(pointGids.size());
    Tpetra::Details::HashTable<GO,int> hashTable(pointGids.size());
    for (int i = 0; i < pointGids.size(); ++i) {
      GO meshGid = (pointGids[i] - indexBase) / blockSize + indexBase;
      if (hashTable.get(meshGid) == -1) {
        hashTable.add(meshGid, 1); // (key, value)
        meshGids.push_back(meshGid);
      }
    }

    Teuchos::RCP<const map_type> meshMap = Teuchos::rcp( new map_type(TOT::invalid(), meshGids(), 0, pointMap.getComm()) );
    return meshMap;

  }
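
  // Worked example of the GID arithmetic above (illustrative): with
  // blockSize = 2 and indexBase = 0, a process owning point GIDs
  // {6, 7, 8, 9, 14, 15} ends up with mesh GIDs {3, 4, 7}; the hash table
  // keeps only the first occurrence of each mesh GID, so the ordering of
  // the point map is preserved.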

} // namespace Tpetra

//
// Explicit instantiation macro for blockCrsMatrixWriter (various
// overloads), writeMatrixStrip, and convertToBlockCrsMatrix.
//
// Must be expanded from within the Tpetra namespace!
//
#define TPETRA_BLOCKCRSMATRIX_HELPERS_INSTANT(S,LO,GO,NODE) \
  template void blockCrsMatrixWriter(BlockCrsMatrix<S,LO,GO,NODE> const &A, std::string const &fileName); \
  template void blockCrsMatrixWriter(BlockCrsMatrix<S,LO,GO,NODE> const &A, std::string const &fileName, Teuchos::ParameterList const &params); \
  template void blockCrsMatrixWriter(BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os); \
  template void blockCrsMatrixWriter(BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os, Teuchos::ParameterList const &params); \
  template void writeMatrixStrip(BlockCrsMatrix<S,LO,GO,NODE> const &A, std::ostream &os, Teuchos::ParameterList const &params); \
  template Teuchos::RCP<BlockCrsMatrix<S, LO, GO, NODE> > convertToBlockCrsMatrix(const CrsMatrix<S, LO, GO, NODE>& pointMatrix, const LO &blockSize);

//
// Explicit instantiation macro for createMeshMap.
//
#define TPETRA_CREATEMESHMAP_INSTANT(LO,GO,NODE) \
  template Teuchos::RCP<const Map<LO,GO,NODE> > createMeshMap (const LO& blockSize, const Map<LO,GO,NODE>& pointMap);

#endif // TPETRA_BLOCKCRSMATRIX_HELPERS_DEF_HPP