46 #ifndef MUELU_REPARTITIONFACTORY_DEF_HPP
47 #define MUELU_REPARTITIONFACTORY_DEF_HPP
57 #include <Teuchos_CommHelpers.hpp>
60 #include <Xpetra_Map.hpp>
61 #include <Xpetra_MapFactory.hpp>
62 #include <Xpetra_MultiVectorFactory.hpp>
71 #include "MueLu_Utilities.hpp"
73 #include "MueLu_CloneRepartitionInterface.hpp"
78 #include "MueLu_PerfUtils.hpp"
// NOTE(review): fragment of RepartitionFactory::GetValidParameterList(); the
// enclosing function signature and several original lines are elided in this
// extract, so only comments are added here.
82 template <
class Scalar,
class LocalOrdinal,
class GlobalOrdinal,
class Node>
// SET_VALID_ENTRY pulls a parameter's canonical definition from the MueLu
// MasterList into validParamList; the macro is #undef'd right after use.
86 #define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
92 #undef SET_VALID_ENTRY
// Declare the generating factory for the "number of partitions" datum
// (an instance of RepartitionHeuristicFactory, per the doc string below).
95 validParamList->
set<
RCP<const FactoryBase> >(
"number of partitions", Teuchos::null,
"Instance of RepartitionHeuristicFactory.");
98 return validParamList;
// NOTE(review): fragment of RepartitionFactory::DeclareInput(Level&); the
// function signature line is elided in this extract, so only comments are
// added here.
101 template <
class Scalar,
class LocalOrdinal,
class GlobalOrdinal,
class Node>
// Request the data this factory consumes in Build(): the matrix "A", the
// target partition count, and the per-row partition assignment vector
// "Partition".
103 Input(currentLevel,
"A");
104 Input(currentLevel,
"number of partitions");
105 Input(currentLevel,
"Partition");
// NOTE(review): fragment of RepartitionFactory::Build(Level&). Many original
// lines are elided in this extract (visible as jumps in the fused line
// numbers), and statements are wrapped mid-expression; only comments are
// added here — the code bytes are untouched.
108 template <
class Scalar,
class LocalOrdinal,
class GlobalOrdinal,
class Node>
// Whether partition ids should be remapped onto ranks to reduce data
// movement (parameter "repartition: remap parts").
115 bool remapPartitions = pL.
get<
bool>(
"repartition: remap parts");
// Fetch the matrix; a null A short-circuits the build with a null "Importer".
118 RCP<Matrix> A = Get<RCP<Matrix> >(currentLevel,
"A");
119 if (A == Teuchos::null) {
120 Set<RCP<const Import> >(currentLevel,
"Importer", Teuchos::null);
124 GO indexBase = rowMap->getIndexBase();
130 int myRank = comm->getRank();
131 int numProcs = comm->getSize();
// Target number of partitions, produced by an upstream factory (declared as
// a RepartitionHeuristicFactory product in GetValidParameterList).
138 int numPartitions = Get<int>(currentLevel,
"number of partitions");
// Per-local-row partition assignment produced by the partitioning interface.
143 RCP<GOVector> decomposition = Get<RCP<GOVector> >(currentLevel,
"Partition");
// A decomposition coming from CloneRepartitionInterface must keep its
// original partition-to-rank mapping, so remapping is disabled.
146 if (remapPartitions ==
true && Teuchos::rcp_dynamic_cast<const CloneRepartitionInterface>(GetFactory(
"Partition")) != Teuchos::null) {
150 remapPartitions =
false;
// Special cases: a single partition, or the sentinel -1 meaning the
// partitioner left the distribution unchanged.
154 if (numPartitions == 1) {
159 GetOStream(
Runtime0) <<
"Only one partition: Skip call to the repartitioner." << std::endl;
160 }
else if (numPartitions == -1) {
162 GetOStream(
Runtime0) <<
"No repartitioning necessary: partitions were left unchanged by the repartitioner" << std::endl;
163 Set<RCP<const Import> >(currentLevel,
"Importer", Teuchos::null);
// On the designated node-repartition level remapping is also skipped.
168 const int nodeRepartLevel = pL.
get<
int>(
"repartition: node repartition level");
169 if (currentLevel.GetLevelID() == nodeRepartLevel) {
174 remapPartitions =
false;
// ---- Remap partition ids onto MPI ranks to minimize migration cost. ----
214 if (remapPartitions) {
217 bool acceptPartition = pL.
get<
bool>(
"repartition: remap accept partition");
218 bool allSubdomainsAcceptPartitions;
// Count (via all-reduce) how many ranks are willing to own a partition; if
// fewer than numPartitions, force all ranks to accept so every partition
// can be placed somewhere.
219 int localNumAcceptPartition = acceptPartition;
220 int globalNumAcceptPartition;
221 MueLu_sumAll(comm, localNumAcceptPartition, globalNumAcceptPartition);
222 GetOStream(
Statistics2) <<
"Number of ranks that accept partitions: " << globalNumAcceptPartition << std::endl;
223 if (globalNumAcceptPartition < numPartitions) {
224 GetOStream(
Warnings0) <<
"Not enough ranks are willing to accept a partition, allowing partitions on all ranks." << std::endl;
225 acceptPartition =
true;
226 allSubdomainsAcceptPartitions =
true;
227 }
else if (numPartitions > numProcs) {
229 allSubdomainsAcceptPartitions =
true;
231 allSubdomainsAcceptPartitions =
false;
// Rewrites `decomposition` in place: partition ids become target rank ids.
234 DeterminePartitionPlacement(*A, *decomposition, numPartitions, acceptPartition, allSubdomainsAcceptPartitions);
// ---- Build the list of GIDs each rank will own after migration. ----
246 if (decomposition->getLocalLength() > 0)
247 decompEntries = decomposition->getData(0);
249 #ifdef HAVE_MUELU_DEBUG
// Debug-only check: every decomposition entry must now be a valid rank id
// in [0, numProcs).
251 int incorrectRank = -1;
252 for (
int i = 0; i < decompEntries.
size(); i++)
253 if (decompEntries[i] >= numProcs || decompEntries[i] < 0) {
254 incorrectRank = myRank;
258 int incorrectGlobalRank = -1;
264 myGIDs.
reserve(decomposition->getLocalLength());
// sendMap: target rank -> list of row GIDs this rank must ship there
// (keys are used as MPI destination ranks in the MPI_Isend loop below).
269 typedef std::map<GO, Array<GO> > map_type;
271 for (
LO i = 0; i < decompEntries.
size(); i++) {
272 GO id = decompEntries[i];
273 GO GID = rowMap->getGlobalElement(i);
278 sendMap[id].push_back(GID);
// Release the view of the decomposition data before the communication phase.
280 decompEntries = Teuchos::null;
283 GO numLocalKept = myGIDs.
size(), numGlobalKept, numGlobalRows = A->getGlobalNumRows();
285 GetOStream(
Statistics2) <<
"Unmoved rows: " << numGlobalKept <<
" / " << numGlobalRows <<
" (" << 100 * Teuchos::as<double>(numGlobalKept) / numGlobalRows <<
"%)" << std::endl;
// ---- Determine how many messages this rank will receive: each sender
// exports a 1 per target rank, and an ADD-combine Export accumulates the
// sender count on the owning rank. ----
288 int numSend = sendMap.size(), numRecv;
294 for (
typename map_type::const_iterator it = sendMap.begin(); it != sendMap.end(); it++)
295 myParts[cnt++] = it->first;
301 GO partsIndexBase = 0;
303 RCP<Map> partsIOwn = MapFactory ::Build(lib, numProcs, myPart(), partsIndexBase, comm);
304 RCP<Export> partsExport = ExportFactory::Build(partsIHave, partsIOwn);
309 ArrayRCP<GO> partsISendData = partsISend->getDataNonConst(0);
310 for (
int i = 0; i < numSend; i++)
311 partsISendData[i] = 1;
313 (numPartsIRecv->getDataNonConst(0))[0] = 0;
315 numPartsIRecv->doExport(*partsISend, *partsExport,
Xpetra::ADD);
316 numRecv = (numPartsIRecv->getData(0))[0];
// ---- Exchange GID lists with raw MPI point-to-point messages. ----
320 MPI_Datatype MpiType = Teuchos::Details::MpiTypeTraits<GO>::getType();
326 for (
typename map_type::iterator it = sendMap.begin(); it != sendMap.end(); it++)
327 MPI_Isend(static_cast<void*>(it->second.getRawPtr()), it->second.size(), MpiType, Teuchos::as<GO>(it->first), msgTag, *rawMpiComm, &sendReqs[cnt++]);
330 size_t totalGIDs = myGIDs.size();
331 for (
int i = 0; i < numRecv; i++) {
// Probe first to learn the sender and message length, then size the
// receive buffer and post the matching MPI_Recv.
333 MPI_Probe(MPI_ANY_SOURCE, msgTag, *rawMpiComm, &status);
336 int fromRank = status.MPI_SOURCE, count;
337 MPI_Get_count(&status, MpiType, &count);
339 recvMap[fromRank].resize(count);
340 MPI_Recv(static_cast<void*>(recvMap[fromRank].getRawPtr()), count, MpiType, fromRank, msgTag, *rawMpiComm, &status);
// Complete all non-blocking sends before their buffers go out of scope.
347 Array<MPI_Status> sendStatuses(numSend);
348 MPI_Waitall(numSend, sendReqs.getRawPtr(), sendStatuses.getRawPtr());
// Append the received GID chunks to myGIDs (recvMap is a std::map, so the
// chunks are visited in ascending sender-rank order), then sort.
352 myGIDs.reserve(totalGIDs);
353 for (
typename map_type::const_iterator it = recvMap.begin(); it != recvMap.end(); it++) {
354 int offset = myGIDs.size(), len = it->second.size();
356 myGIDs.resize(offset + len);
357 memcpy(myGIDs.getRawPtr() + offset, it->second.getRawPtr(), len *
sizeof(
GO));
362 std::sort(myGIDs.begin(), myGIDs.end());
// ---- Construct the new row map and the old-to-new importer. ----
367 SubFactoryMonitor m1(*
this,
"Map construction", currentLevel);
368 newRowMap = MapFactory ::Build(lib, rowMap->getGlobalNumElements(), myGIDs(), indexBase, origComm);
371 RCP<const Import> rowMapImporter;
373 RCP<const BlockedMap> blockedRowMap = Teuchos::rcp_dynamic_cast<
const BlockedMap>(rowMap);
376 SubFactoryMonitor m1(*
this,
"Import construction", currentLevel);
// For a blocked row map, build the importer from its flattened (merged) map.
378 if (blockedRowMap.is_null())
379 rowMapImporter = ImportFactory::Build(rowMap, newRowMap);
381 rowMapImporter = ImportFactory::Build(blockedRowMap->getMap(), newRowMap);
// For blocked maps, additionally build one importer per sub-block and
// publish them as "SubImporters".
386 if (!blockedRowMap.is_null()) {
387 SubFactoryMonitor m1(*
this,
"Blocking newRowMap and Importer", currentLevel);
392 size_t numBlocks = blockedRowMap->getNumMaps();
393 std::vector<RCP<const Import> > subImports(numBlocks);
395 for (
size_t i = 0; i < numBlocks; i++) {
396 RCP<const Map> source = blockedRowMap->getMap(i);
397 RCP<const Map> target = blockedTargetMap->getMap(i);
398 subImports[i] = ImportFactory::Build(source, target);
400 Set(currentLevel,
"SubImporters", subImports);
403 Set(currentLevel,
"Importer", rowMapImporter);
// ---- Optional statistics output. ----
408 if (!rowMapImporter.is_null() && IsPrint(
Statistics2)) {
413 if (pL.get<
bool>(
"repartition: print partition distribution") && IsPrint(
Statistics2)) {
415 GetOStream(
Statistics2) <<
"Partition distribution over cores (ownership is indicated by '+')" << std::endl;
// Gather a one-byte active flag per rank on rank 0, then print an ASCII
// ownership grid: '+' = rank owns rows after repartitioning, '.' = empty.
417 char amActive = (myGIDs.size() ? 1 : 0);
418 std::vector<char> areActive(numProcs, 0);
419 MPI_Gather(&amActive, 1, MPI_CHAR, &areActive[0], 1, MPI_CHAR, 0, *rawMpiComm);
// rowWidth ~ sqrt(numProcs), capped at 100 columns per printed row.
421 int rowWidth = std::min(Teuchos::as<int>(ceil(sqrt(numProcs))), 100);
422 for (
int proc = 0; proc < numProcs; proc += rowWidth) {
423 for (
int j = 0; j < rowWidth; j++)
424 if (proc + j < numProcs)
425 GetOStream(
Statistics2) << (areActive[proc + j] ?
"+" :
".");
429 GetOStream(
Statistics2) <<
" " << proc <<
":" << std::min(proc + rowWidth, numProcs) - 1 << std::endl;
// NOTE(review): template headers for the Triplet helper struct and the
// compareTriplets() comparator used by DeterminePartitionPlacement below;
// their bodies are elided in this extract.
436 template <
typename T,
typename W>
441 template <
typename T,
typename W>
// DeterminePartitionPlacement: remaps partition ids to MPI ranks so that a
// partition is assigned to a rank already owning a large share of it (edge
// weight = local nonzero count), reducing data migration. The result is
// written back into `decomposition` in place (partition id -> rank id).
// NOTE(review): several original lines are elided in this extract, so only
// comments are added here.
446 template <
class Scalar,
class LocalOrdinal,
class GlobalOrdinal,
class Node>
447 void RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
448 DeterminePartitionPlacement(
const Matrix& A, GOVector& decomposition,
GO numPartitions,
bool willAcceptPartition,
bool allSubdomainsAcceptPartitions)
const {
452 int numProcs = comm->getSize();
// Each rank advertises at most maxLocal of its heaviest (partition, weight)
// pairs ("repartition: remap num values"); lData/gData flatten them as
// [part, weight] pairs, hence dataSize = 2*maxLocal per rank.
464 const int maxLocal = pL.
get<
int>(
"repartition: remap num values");
465 const int dataSize = 2 * maxLocal;
468 if (decomposition.getLocalLength() > 0)
469 decompEntries = decomposition.getDataNonConst(0);
// lEdges: partition id -> accumulated local nonzero count; only built on
// ranks willing to accept a partition.
481 std::map<GO, GO> lEdges;
482 if (willAcceptPartition)
483 for (
LO i = 0; i < decompEntries.
size(); i++)
484 lEdges[decompEntries[i]] += A.getNumEntriesInLocalRow(i);
// Invert into weight -> partition (multimap sorts by weight) so the
// heaviest edges can be selected by reverse iteration below.
488 std::multimap<GO, GO> revlEdges;
489 for (
typename std::map<GO, GO>::const_iterator it = lEdges.begin(); it != lEdges.end(); it++)
490 revlEdges.insert(std::make_pair(it->second, it->first));
// Fill lData with up to maxLocal heaviest (partition, weight) pairs; unused
// slots keep the -1 sentinel.
495 Array<GO> lData(dataSize, -1), gData(numProcs * dataSize);
497 for (
typename std::multimap<GO, GO>::reverse_iterator rit = revlEdges.rbegin(); rit != revlEdges.rend() && numEdges < maxLocal; rit++) {
498 lData[2 * numEdges + 0] = rit->second;
499 lData[2 * numEdges + 1] = rit->first;
// All-gather every rank's candidate edges so each rank can run the same
// deterministic matching.
505 MPI_Datatype MpiType = Teuchos::Details::MpiTypeTraits<GO>::getType();
506 MPI_Allgather(static_cast<void*>(lData.getRawPtr()), dataSize, MpiType, static_cast<void*>(gData.
getRawPtr()), dataSize, MpiType, *rawMpiComm);
// Unpack gData into (rank, partition, weight) triplets; the owning rank of
// slot i is i / dataSize. A rank that advertised any edge is marked as
// willing to accept a partition.
514 for (
LO i = 0; i < gData.
size(); i += 2) {
515 int procNo = i / dataSize;
516 GO part = gData[i + 0];
517 GO weight = gData[i + 1];
519 gEdges[k].i = procNo;
521 gEdges[k].v = weight;
522 procWillAcceptPartition[procNo] =
true;
// Greedy matching: sort edges with compareTriplets and accept an edge only
// if both its rank and its partition are still unmatched.
530 std::sort(gEdges.
begin(), gEdges.
end(), compareTriplets<int, int>);
533 std::map<int, int> match;
539 if (matchedRanks[rank] == 0 && matchedParts[part] == 0) {
540 matchedRanks[rank] = 1;
541 matchedParts[part] = 1;
546 GetOStream(
Statistics1) <<
"Number of unassigned partitions before cleanup stage: " << (numPartitions - numMatched) <<
" / " << numPartitions << std::endl;
// Cleanup stage: assign each still-unmatched partition to the next free
// rank that is willing to accept one.
553 if (numPartitions - numMatched > 0) {
555 for (
typename std::map<int, int>::const_iterator it = match.begin(); it != match.end(); it++)
556 partitionCounts[it->first] += 1;
557 for (
int part = 0, matcher = 0; part < numPartitions; part++) {
558 if (partitionCounts[part] == 0) {
560 while (matchedRanks[matcher] || !procWillAcceptPartition[matcher])
563 match[part] = matcher++;
// Sanity check: after cleanup every partition must have a rank.
569 TEUCHOS_TEST_FOR_EXCEPTION(numMatched != numPartitions,
Exceptions::RuntimeError,
"MueLu::RepartitionFactory::DeterminePartitionPlacement: Only " << numMatched <<
" partitions out of " << numPartitions <<
" got assigned to ranks.");
// Apply the matching: translate partition ids to target rank ids in place.
572 for (
LO i = 0; i < decompEntries.
size(); i++)
573 decompEntries[i] = match[decompEntries[i]];
578 #endif // ifdef HAVE_MPI
580 #endif // MUELU_REPARTITIONFACTORY_DEF_HPP
Important warning messages (one line)
void Build(Level &currentLevel) const
Build an object with this factory.
void reserve(size_type n)
#define MueLu_sumAll(rcpComm, in, out)
#define MueLu_maxAll(rcpComm, in, out)
T & get(const std::string &name, T def_value)
ParameterList & set(std::string const &name, T const &value, std::string const &docString="", RCP< const ParameterEntryValidator > const &validator=null)
Timer to be used in factories. Similar to Monitor but with additional timers.
#define TEUCHOS_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg)
One-liner description of what is happening.
#define SET_VALID_ENTRY(name)
static std::string PrintImporterInfo(RCP< const Import > importer, const std::string &msgTag)
Print even more statistics.
static bool compareTriplets(const Triplet< T, W > &a, const Triplet< T, W > &b)
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
Class that holds all level-specific information.
Timer to be used in factories. Similar to SubMonitor but adds a timer level by level.
static RCP< const Xpetra::BlockedMap< LocalOrdinal, GlobalOrdinal, Node > > GeneratedBlockedTargetMap(const Xpetra::BlockedMap< LocalOrdinal, GlobalOrdinal, Node > &sourceBlockedMap, const Xpetra::Import< LocalOrdinal, GlobalOrdinal, Node > &Importer)
void resize(size_type new_size, const value_type &x=value_type())
static RCP< Vector > Build(const Teuchos::RCP< const Map > &map, bool zeroOut=true)
void DeclareInput(Level &currentLevel) const
Determines the data that RepartitionFactory needs, and the factories that generate that data...
void push_back(const value_type &x)
Exception throws to report errors in the internal logical of the program.
RCP< const ParameterList > GetValidParameterList() const
Return a const parameter list of valid parameters that setParameterList() will accept.
std::string toString(const T &t)