MueLu_BrickAggregationFactory_def.hpp
// @HEADER
//
// ***********************************************************************
//
//        MueLu: A package for multigrid based preconditioning
//                  Copyright 2012 Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact
//                    Jonathan Hu       (jhu@sandia.gov)
//                    Andrey Prokopenko (aprokop@sandia.gov)
//                    Ray Tuminaro      (rstumin@sandia.gov)
//
// ***********************************************************************
//
// @HEADER
#ifndef MUELU_BRICKAGGREGATIONFACTORY_DEF_HPP_
#define MUELU_BRICKAGGREGATIONFACTORY_DEF_HPP_

#include "MueLu_BrickAggregationFactory_decl.hpp"
#ifdef HAVE_MPI
#include <Teuchos_DefaultMpiComm.hpp>
#include <Teuchos_CommHelpers.hpp>
#endif
#include <Teuchos_OrdinalTraits.hpp>

#include <Xpetra_Import.hpp>
#include <Xpetra_ImportFactory.hpp>
#include <Xpetra_Map.hpp>
#include <Xpetra_MapFactory.hpp>
#include <Xpetra_Matrix.hpp>
#include <Xpetra_MultiVector.hpp>
#include <Xpetra_MultiVectorFactory.hpp>


#include "MueLu_Aggregates.hpp"
#include "MueLu_Level.hpp"
#include "MueLu_MasterList.hpp"
#include "MueLu_Monitor.hpp"
#include "MueLu_Utilities.hpp"
#include "MueLu_GraphBase.hpp"

namespace MueLu {

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  RCP<const ParameterList> BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::GetValidParameterList() const {
    RCP<ParameterList> validParamList = rcp(new ParameterList());

#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
    SET_VALID_ENTRY("aggregation: brick x size");
    SET_VALID_ENTRY("aggregation: brick y size");
    SET_VALID_ENTRY("aggregation: brick z size");
    SET_VALID_ENTRY("aggregation: brick x Dirichlet");
    SET_VALID_ENTRY("aggregation: brick y Dirichlet");
    SET_VALID_ENTRY("aggregation: brick z Dirichlet");
#undef SET_VALID_ENTRY

    validParamList->set< RCP<const FactoryBase> >("A",           Teuchos::null, "Generating factory for matrix");
    validParamList->set< RCP<const FactoryBase> >("Coordinates", Teuchos::null, "Generating factory for coordinates");
    validParamList->set< RCP<const FactoryBase> >("Graph",       Teuchos::null, "Generating factory of the graph");
    validParamList->set< RCP<const FactoryBase> >("DofsPerNode", Teuchos::null, "Generating factory for variable \'DofsPerNode\', usually the same as for \'Graph\'");

    return validParamList;
  }
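
  // Illustrative usage sketch (hypothetical, not part of this file): the
  // entries validated above are normally supplied through the MueLu parameter
  // list before the hierarchy is built, assuming brick aggregation is selected
  // via the MasterList option "aggregation: type" = "brick":
  //
  //   Teuchos::ParameterList mueluParams;
  //   mueluParams.set("aggregation: type",         "brick");
  //   mueluParams.set("aggregation: brick x size", 3);
  //   mueluParams.set("aggregation: brick y size", 3);
  //
  // Entries left unset fall back to the MasterList defaults pulled in by
  // SET_VALID_ENTRY.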

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::DeclareInput(Level& currentLevel) const {
    Input(currentLevel, "A");
    Input(currentLevel, "Coordinates");
  }

  // The current implementation cannot deal with bricks larger than 3x3(x3) in
  // parallel. The reason is that the aggregation infrastructure in place has
  // major drawbacks.
  //
  // The Aggregates class is constructed with the help of a provided map, either
  // taken from a graph or provided directly. This map is usually taken to be
  // the column map of a matrix. The reason for that is that if we have an
  // overlapped aggregation, we want the processor owning an aggregate to store
  // the agg id for all nodes in this aggregate. If we used the row map, there
  // would be no way for the processor to know whether there are some other
  // nodes on a different processor which belong to its aggregate. On the other
  // hand, using the column map allows both the vertex2AggId and procWinner
  // arrays in the Aggregates class to store some extra data, such as whether
  // nodes belonging to a different processor belong to this processor's
  // aggregate.
  //
  // The drawback of this is that it stores only overlap=1 data. For aggressive
  // coarsening, such as a brick aggregation with a large single brick
  // dimension, it could happen that we need to know nodes at depth two or more
  // inside the other processor's subdomain.
  //
  // Another issue is that we may have some implicit connection between the
  // aggregate map and the maps of A used in the construction of the tentative
  // prolongator.
  //
  // Another issue is that some info seems to be unused or not required.
  // Specifically, it seems that if a node belongs to an aggregate on a
  // different processor, we don't actually need to set vertex2AggId and
  // procWinner, despite the following comment in the Aggregates declaration:
  //     vertex2AggId[k] gives a local id
  //     corresponding to the aggregate to which
  //     local id k has been assigned. While k
  //     is the local id on my processor (MyPID),
  //     vertex2AggId[k] is the local id on the
  //     processor which actually owns the
  //     aggregate. This owning processor has id
  //     given by procWinner[k].
  // It is possible that this info is only used during arbitration in
  // CoupledAggregationFactory.
  //
  // The steps that we need to do to resolve this issue:
  //   - Break the link between maps in TentativePFactory, allowing any maps in Aggregates
  //   - Allow Aggregates to construct their own maps, if necessary, OR
  //   - Construct aggregates based on the row map
  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& currentLevel) const {
    FactoryMonitor m(*this, "Build", currentLevel);

    typedef Xpetra::MultiVector<typename Teuchos::ScalarTraits<Scalar>::magnitudeType,LO,GO,NO> MultiVector_d;

    const ParameterList& pL = GetParameterList();
    RCP<MultiVector_d> coords = Get<RCP<MultiVector_d> >(currentLevel, "Coordinates");
    RCP<Matrix>        A      = Get< RCP<Matrix> >      (currentLevel, "A");
    RCP<const Map>     rowMap = A->getRowMap();
    RCP<const Map>     colMap = A->getColMap();
    GO                 GO_INVALID = Teuchos::OrdinalTraits<GO>::invalid();

    RCP<const Teuchos::Comm<int> > comm = rowMap->getComm();
    int numProcs = comm->getSize();
    int myRank   = comm->getRank();

    int numPoints = colMap->getNodeNumElements();

    bx_ = pL.get<int>("aggregation: brick x size");
    by_ = pL.get<int>("aggregation: brick y size");
    bz_ = pL.get<int>("aggregation: brick z size");

    dirichletX_ = pL.get<bool>("aggregation: brick x Dirichlet");
    dirichletY_ = pL.get<bool>("aggregation: brick y Dirichlet");
    dirichletZ_ = pL.get<bool>("aggregation: brick z Dirichlet");
    if (dirichletX_) GetOStream(Runtime0) << "Dirichlet boundaries in the x direction" << std::endl;
    if (dirichletY_) GetOStream(Runtime0) << "Dirichlet boundaries in the y direction" << std::endl;
    if (dirichletZ_) GetOStream(Runtime0) << "Dirichlet boundaries in the z direction" << std::endl;

    if (numProcs > 1) {
      // TODO: deal with block size > 1 (see comments above)
      // TEUCHOS_TEST_FOR_EXCEPTION(bx_ > 3 || by_ > 3 || bz_ > 3, Exceptions::RuntimeError, "Currently cannot deal with brick size > 3");
    }

    RCP<MultiVector_d> overlappedCoords = coords;
    RCP<const Import> importer = ImportFactory::Build(coords->getMap(), colMap);
    if (!importer.is_null()) {
      overlappedCoords = Xpetra::MultiVectorFactory<typename Teuchos::ScalarTraits<Scalar>::magnitudeType,LO,GO,NO>::Build(colMap, coords->getNumVectors());
      overlappedCoords->doImport(*coords, *importer, Xpetra::INSERT);
    }
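    // At this point overlappedCoords holds a coordinate entry for every
    // column-map node, i.e. this process also sees the coordinates of its
    // overlap-1 ghost nodes, which the aggregate assignment below iterates
    // over.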

    // Setup misc structures
    // Logically, we construct enough data to query topological information of a rectangular grid
    Setup(comm, overlappedCoords, colMap);

    GetOStream(Runtime0) << "Using brick size: " << bx_
                         << (nDim_ > 1 ? "x " + toString(by_) : "")
                         << (nDim_ > 2 ? "x " + toString(bz_) : "") << std::endl;

    // Construct aggregates
    RCP<Aggregates> aggregates = rcp(new Aggregates(colMap));
    aggregates->setObjectLabel("Brick");

    ArrayRCP<LO> vertex2AggId = aggregates->GetVertex2AggId()->getDataNonConst(0);
    ArrayRCP<LO> procWinner   = aggregates->GetProcWinner()  ->getDataNonConst(0);

    // In the first pass, we set a mapping from a vertex to an aggregate global id. We deal with a structured
    // rectangular mesh, so we know the structure of the aggregates. For each vertex we can tell exactly
    // which aggregate it belongs to.
    // If we determine that the aggregate does not belong to us (i.e. the root vertex does not belong to this
    // processor, or is outside and we lost the arbitration), we record the global aggregate id in order to
    // fetch the local info from the processor owning the aggregate. This is required for aggregates, as it
    // uses the local aggregate ids of the owning processor.
    std::set<GO> myAggGIDs, remoteAggGIDs;
    for (LO LID = 0; LID < numPoints; LID++) {
      GO aggGID = getAggGID(LID);
      // printf("[%d] (%d,%d,%d) => agg %d\n",LID,(int)(*xMap_)[x_[LID]],nDim_ > 1 ? (int)(*yMap_)[y_[LID]] : -1,nDim_ > 2 ? (int)(*zMap_)[z_[LID]] : -1,(int)aggGID);
      if (aggGID == GO_INVALID) continue;
      // printf("[%d] getRoot = %d\n",(int)LID,(int)getRoot(LID));

      if ((revMap_.find(getRoot(LID)) != revMap_.end()) && rowMap->isNodeGlobalElement(colMap->getGlobalElement(revMap_[getRoot(LID)]))) {
        // Root of the brick aggregate containing GID (<- LID) belongs to us
        vertex2AggId[LID] = aggGID;
        myAggGIDs.insert(aggGID);

        if (isRoot(LID))
          aggregates->SetIsRoot(LID);
        // printf("[%d] initial vertex2AggId = %d\n",(int)LID,(int)vertex2AggId[LID]);
      } else {
        remoteAggGIDs.insert(aggGID);
      }
    }
    size_t numAggregates = myAggGIDs    .size();
    size_t numRemote     = remoteAggGIDs.size();
    aggregates->SetNumAggregates(numAggregates);

    std::map<GO,LO>  AggG2L;  // Map: Agg GID -> Agg LID (possibly on a different processor)
    std::map<GO,int> AggG2R;  // Map: Agg GID -> processor rank owning aggregate

    Array<GO> myAggGIDsArray(numAggregates), remoteAggGIDsArray(numRemote);

    // Fill in the maps for aggregates that we own
    size_t ind = 0;
    for (typename std::set<GO>::const_iterator it = myAggGIDs.begin(); it != myAggGIDs.end(); it++) {
      AggG2L[*it] = ind;
      AggG2R[*it] = myRank;

      myAggGIDsArray[ind++] = *it;
    }

    // The map is a convenient way to fetch remote local indices from global indices.
    RCP<Map> aggMap = MapFactory::Build(rowMap->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(),
                                        myAggGIDsArray, 0, comm);

    ind = 0;
    for (typename std::set<GO>::const_iterator it = remoteAggGIDs.begin(); it != remoteAggGIDs.end(); it++)
      remoteAggGIDsArray[ind++] = *it;

    // Fetch the required aggregate local ids and ranks
    Array<int> remoteProcIDs(numRemote);
    Array<LO>  remoteLIDs   (numRemote);
    aggMap->getRemoteIndexList(remoteAggGIDsArray, remoteProcIDs, remoteLIDs);
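    // Because aggMap lists each aggregate GID exactly once, on its owning rank,
    // getRemoteIndexList resolves every remote aggregate GID to the pair
    // (owning rank, aggregate local id on that rank) consumed below.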

    // Fill in the maps for aggregates that we don't own but which have some of our vertices
    for (size_t i = 0; i < numRemote; i++) {
      AggG2L[remoteAggGIDsArray[i]] = remoteLIDs   [i];
      AggG2R[remoteAggGIDsArray[i]] = remoteProcIDs[i];
    }

    // Remap aggregate GIDs to LIDs and set up owning processors
    for (LO LID = 0; LID < numPoints; LID++) {
      if (revMap_.find(getRoot(LID)) != revMap_.end() && rowMap->isNodeGlobalElement(colMap->getGlobalElement(revMap_[getRoot(LID)]))) {
        GO aggGID = vertex2AggId[LID];
        if (aggGID != MUELU_UNAGGREGATED) {
          vertex2AggId[LID] = AggG2L[aggGID];
          procWinner  [LID] = AggG2R[aggGID];
        }
      }
    }
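    // After this second pass vertex2AggId holds aggregate ids local to the
    // owning process and procWinner holds that owner's rank, matching the
    // contract quoted from the Aggregates declaration above.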


    GO numGlobalRemote;
    MueLu_sumAll(comm, as<GO>(numRemote), numGlobalRemote);
    aggregates->AggregatesCrossProcessors(numGlobalRemote);

    Set(currentLevel, "Aggregates", aggregates);

    GetOStream(Statistics1) << aggregates->description() << std::endl;
  }

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  Setup(const RCP<const Teuchos::Comm<int> >& comm, const RCP<Xpetra::MultiVector<typename Teuchos::ScalarTraits<Scalar>::magnitudeType,LO,GO,NO> >& coords, const RCP<const Map>& /* map */) const {
    nDim_ = coords->getNumVectors();

    x_    = coords->getData(0);
    xMap_ = Construct1DMap(comm, x_);
    nx_   = xMap_->size();

    ny_ = 1;
    if (nDim_ > 1) {
      y_    = coords->getData(1);
      yMap_ = Construct1DMap(comm, y_);
      ny_   = yMap_->size();
    }

    nz_ = 1;
    if (nDim_ > 2) {
      z_    = coords->getData(2);
      zMap_ = Construct1DMap(comm, z_);
      nz_   = zMap_->size();
    }

    for (size_t ind = 0; ind < coords->getLocalLength(); ind++) {
      GO i = (*xMap_)[(coords->getData(0))[ind]], j = 0, k = 0;
      if (nDim_ > 1)
        j = (*yMap_)[(coords->getData(1))[ind]];
      if (nDim_ > 2)
        k = (*zMap_)[(coords->getData(2))[ind]];

      revMap_[k*ny_*nx_ + j*nx_ + i] = ind;
    }
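    // Example: on a 4x3 grid (nx_ = 4, ny_ = 3), the node with indices
    // (i,j,k) = (2,1,0) gets the lexicographic key 0*3*4 + 1*4 + 2 = 6, so
    // revMap_[6] yields its local index. getRoot() and isRoot() use the same
    // k*ny_*nx_ + j*nx_ + i keying.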


    // Get the number of aggregates in each direction, correcting for Dirichlet
    int xboost = dirichletX_ ? 1 : 0;
    int yboost = dirichletY_ ? 1 : 0;
    int zboost = dirichletZ_ ? 1 : 0;
    naggx_ = (nx_-2*xboost)/bx_ + ((nx_-2*xboost) % bx_ ? 1 : 0);

    if (nDim_ > 1)
      naggy_ = (ny_-2*yboost)/by_ + ((ny_-2*yboost) % by_ ? 1 : 0);
    else
      naggy_ = 1;

    if (nDim_ > 2)
      naggz_ = (nz_-2*zboost)/bz_ + ((nz_-2*zboost) % bz_ ? 1 : 0);
    else
      naggz_ = 1;
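    // Each expression above is an integer ceiling division, ceil((n - 2*boost)/b):
    // a partially filled trailing brick still counts as its own aggregate, and
    // with Dirichlet the two boundary layers are excluded from the brick grid.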

  }

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  RCP<typename BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::container>
  BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  Construct1DMap(const RCP<const Teuchos::Comm<int> >& comm,
                 const ArrayRCP<const typename Teuchos::ScalarTraits<Scalar>::magnitudeType>& x) const
  {
    int n = x.size();

    // Step 1: Create a local vector with unique coordinate points
    RCP<container> gMap = rcp(new container);
    for (int i = 0; i < n; i++)
      (*gMap)[x[i]] = 0;

#ifdef HAVE_MPI
    // Step 2: exchange coordinates
    // NOTE: we assume the coordinates are double, or double compatible
    // That means that for the complex case, we assume that all imaginary parts are zero
    int numProcs = comm->getSize();
    if (numProcs > 1) {
      RCP<const Teuchos::MpiComm<int> > dupMpiComm = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm->duplicate());

      MPI_Comm rawComm = (*dupMpiComm->getRawMpiComm())();

      int sendCnt = gMap->size(), cnt = 0, recvSize;
      Array<int> recvCnt(numProcs), Displs(numProcs);
      Array<double> sendBuf, recvBuf;

      sendBuf.resize(sendCnt);
      for (typename container::const_iterator cit = gMap->begin(); cit != gMap->end(); cit++)
        sendBuf[cnt++] = Teuchos::as<double>(STS::real(cit->first));

      // Gather the per-rank counts, compute displacements, then gather every
      // rank's unique coordinates onto all ranks
      MPI_Allgather(&sendCnt, 1, MPI_INT, recvCnt.getRawPtr(), 1, MPI_INT, rawComm);
      Displs[0] = 0;
      for (int i = 0; i < numProcs-1; i++)
        Displs[i+1] = Displs[i] + recvCnt[i];
      recvSize = Displs[numProcs-1] + recvCnt[numProcs-1];
      recvBuf.resize(recvSize);
      MPI_Allgatherv(sendBuf.getRawPtr(), sendCnt, MPI_DOUBLE, recvBuf.getRawPtr(), recvCnt.getRawPtr(), Displs.getRawPtr(), MPI_DOUBLE, rawComm);

      for (int i = 0; i < recvSize; i++)
        (*gMap)[as<SC>(recvBuf[i])] = 0;
    }
#endif

    // Step 3: assign each unique coordinate its index in the sorted order
    GO cnt = 0;
    for (typename container::iterator it = gMap->begin(); it != gMap->end(); it++)
      it->second = cnt++;

    return gMap;
  }
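  // Example: if rank 0 holds x-coordinates {0.0, 1.5} and rank 1 holds
  // {1.5, 3.0}, every rank ends up with the union {0.0, 1.5, 3.0} numbered
  // 0, 1, 2, so (*gMap)[1.5] == 1 everywhere. Each coordinate thus receives a
  // globally consistent 1D grid index without constructing an Xpetra map.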

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  bool BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::isRoot(LocalOrdinal LID) const {
    int i, j, k;
    getIJK(LID, i, j, k);

    return (k*ny_*nx_ + j*nx_ + i) == getRoot(LID);
  }

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  bool BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::isDirichlet(LocalOrdinal LID) const {
    bool boundary = false;
    int i, j, k;
    getIJK(LID, i, j, k);
    if (             dirichletX_ && (i == 0 || i == nx_-1))
      boundary = true;
    if (nDim_ > 1 && dirichletY_ && (j == 0 || j == ny_-1))
      boundary = true;
    if (nDim_ > 2 && dirichletZ_ && (k == 0 || k == nz_-1))
      boundary = true;

    return boundary;
  }


  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  GlobalOrdinal BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::getRoot(LocalOrdinal LID) const {
    if (isDirichlet(LID))
      return Teuchos::OrdinalTraits<GlobalOrdinal>::invalid();

    int aggI, aggJ, aggK;
    getAggIJK(LID, aggI, aggJ, aggK);
    int xboost = dirichletX_ ? 1 : 0;
    int yboost = dirichletY_ ? 1 : 0;
    int zboost = dirichletZ_ ? 1 : 0;

    int i =               xboost + aggI*bx_ + (bx_-1)/2;
    int j = (nDim_ > 1) ? yboost + aggJ*by_ + (by_-1)/2 : 0;
    int k = (nDim_ > 2) ? zboost + aggK*bz_ + (bz_-1)/2 : 0;

    return k*ny_*nx_ + j*nx_ + i;
  }
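  // Example: with bx_ = 3 and no Dirichlet correction, a node in brick column
  // aggI = 2 has root i-index 2*3 + (3-1)/2 = 7, i.e. the center point of its
  // brick; the return value is that center's k*ny_*nx_ + j*nx_ + i grid key.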

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::getIJK(LocalOrdinal LID, int& i, int& j, int& k) const {
    i = (*xMap_)[x_[LID]];
    j = (nDim_ > 1) ? (*yMap_)[y_[LID]] : 0;
    k = (nDim_ > 2) ? (*zMap_)[z_[LID]] : 0;
  }


  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::getAggIJK(LocalOrdinal LID, int& i, int& j, int& k) const {
    int xboost = dirichletX_ ? 1 : 0;
    int yboost = dirichletY_ ? 1 : 0;
    int zboost = dirichletZ_ ? 1 : 0;
    int pointI, pointJ, pointK;
    getIJK(LID, pointI, pointJ, pointK);
    i = (pointI-xboost)/bx_;

    if (nDim_ > 1) j = (pointJ-yboost)/by_;
    else           j = 0;

    if (nDim_ > 2) k = (pointK-zboost)/bz_;
    else           k = 0;
  }

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  GlobalOrdinal BrickAggregationFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::getAggGID(LocalOrdinal LID) const {
    bool boundary = false;

    int i, j, k;
    getIJK(LID, i, j, k);
    int ii, jj, kk;
    getAggIJK(LID, ii, jj, kk);

    if (             dirichletX_ && (i == 0 || i == nx_-1)) boundary = true;
    if (nDim_ > 1 && dirichletY_ && (j == 0 || j == ny_-1)) boundary = true;
    if (nDim_ > 2 && dirichletZ_ && (k == 0 || k == nz_-1)) boundary = true;

    /*
    if (boundary)
      printf("[%d] coord = (%d,%d,%d) {%d,%d,%d} agg = (%d,%d,%d) {%d,%d,%d} => agg %s\n",LID,i,j,k,nx_,ny_,nz_,ii,jj,kk,naggx_,naggy_,naggz_,"BOUNDARY");
    else
      printf("[%d] coord = (%d,%d,%d) {%d,%d,%d} agg = (%d,%d,%d) {%d,%d,%d} => agg %d\n",LID,i,j,k,nx_,ny_,nz_,ii,jj,kk,naggx_,naggy_,naggz_,kk*naggy_*naggx_ + jj*naggx_ + ii);
    */

    if (boundary)
      return Teuchos::OrdinalTraits<GlobalOrdinal>::invalid();
    else
      return Teuchos::as<GlobalOrdinal>(kk*naggy_*naggx_) + Teuchos::as<GlobalOrdinal>(jj*naggx_) + ii;

  }
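  // Aggregate GIDs therefore number the naggx_ x naggy_ x naggz_ brick grid
  // lexicographically, mirroring the node keying in Setup(); Dirichlet boundary
  // nodes return the invalid ordinal and are skipped during aggregation in
  // Build().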


} // namespace MueLu

#endif /* MUELU_BRICKAGGREGATIONFACTORY_DEF_HPP_ */