#ifndef MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP
#define MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP

#include "Kokkos_UnorderedMap.hpp"

#include "MueLu_Aggregates.hpp"
#include "MueLu_AmalgamationInfo.hpp"
#include "MueLu_AmalgamationFactory.hpp"

#include "MueLu_PerfUtils.hpp"
template <class LocalOrdinal, class View>
class ReduceMaxFunctor {
  ReduceMaxFunctor(View view)
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...
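
// Note: ReduceMaxFunctor is a simple max-reduction functor; it is used further below with
// Kokkos::parallel_reduce over the aggregate-size view to find the largest aggregate size
// (maxAggSize). The elided members are presumably the usual operator()/join/init triple of a
// Kokkos reduction functor.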

template <class LOType, class GOType, class SCType, class DeviceType,
          class NspType, class aggRowsType, class maxAggDofSizeType, class agg2RowMapLOType,
          class statusType, class rowsType, class rowsAuxType, class colsAuxType, class valsAuxType>
class LocalQRDecompFunctor {
  typedef typename DeviceType::execution_space execution_space;
  typedef typename Kokkos::ArithTraits<SC>::val_type impl_SC;
  typedef Kokkos::ArithTraits<impl_SC> impl_ATS;
  typedef typename impl_ATS::magnitudeType Magnitude;

  typedef Kokkos::View<impl_SC**, typename execution_space::scratch_memory_space, Kokkos::MemoryUnmanaged> shared_matrix;
  typedef Kokkos::View<impl_SC*, typename execution_space::scratch_memory_space, Kokkos::MemoryUnmanaged> shared_vector;
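
  // This functor performs the local QR decomposition of the nullspace block of one aggregate.
  // It is launched with a Kokkos::TeamPolicy (one team per aggregate, see the call sites below);
  // its working matrices are unmanaged views into team scratch memory (thread.team_shmem()),
  // sized by team_shmem_size() at the end of the functor.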
  LocalQRDecompFunctor(NspType fineNS_, NspType coarseNS_, aggRowsType aggRows_, maxAggDofSizeType maxAggDofSize_,
                       agg2RowMapLOType agg2RowMapLO_, statusType statusAtomic_, rowsType rows_, rowsAuxType rowsAux_,
                       colsAuxType colsAux_, valsAuxType valsAux_, bool doQRStep_)
  // ...

  KOKKOS_INLINE_FUNCTION
  void operator()(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread, size_t& nnz) const {
    auto agg = thread.league_rank();
    // ...
    const impl_SC one   = impl_ATS::one();
    const impl_SC two   = one + one;
    const impl_SC zero  = impl_ATS::zero();
    const auto    zeroM = impl_ATS::magnitude(zero);
    // ...
    shared_matrix r(thread.team_shmem(), m, n);
    for (int j = 0; j < n; j++)
      for (int k = 0; k < m; k++)
        // ...

    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++)
        printf(" %5.3lf ", r(i, j));
      // ...

    shared_matrix q(thread.team_shmem(), m, m);

    bool isSingular = false;
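
    // What follows (partially elided here) initializes qt to the identity and then runs a
    // classical Householder QR on r: for each column k it computes
    //   s = sum_{i>k} |r(i,k)|^2   and   norm = sqrt(|r(k,k)|^2 + s),
    // stores the (normalized) Householder vector in column k of r, applies the reflection
    // I - 2*v*v^T to the trailing columns of r and accumulates it into qt, then restores
    // r(k,k) = norm and clears the subdiagonal of column k. Afterwards r holds the R factor,
    // qt is transposed in place to obtain Q, and R is copied into coarseNS. Here m appears to
    // be the number of fine rows of the aggregate and n the number of nullspace vectors.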
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < m; j++)
        // ...
    }

    for (int k = 0; k < n; k++) {
      // ...
      Magnitude s = zeroM, norm, norm_x;
      for (int i = k + 1; i < m; i++)
        s += pow(impl_ATS::magnitude(r(i, k)), 2);
      norm = sqrt(pow(impl_ATS::magnitude(r(k, k)), 2) + s);
      // ...
      r(k, k) -= norm * one;

      norm_x = sqrt(pow(impl_ATS::magnitude(r(k, k)), 2) + s);
      if (norm_x == zeroM) {
        // ...
        r(k, k) = norm * one;
        // ...
      }

      for (int i = k; i < m; i++)
        // ...

      for (int j = k + 1; j < n; j++) {
        // ...
        for (int i = k; i < m; i++)
          si += r(i, k) * r(i, j);
        for (int i = k; i < m; i++)
          r(i, j) -= two * si * r(i, k);
      }

      for (int j = k; j < m; j++) {
        // ...
        for (int i = k; i < m; i++)
          si += r(i, k) * qt(i, j);
        for (int i = k; i < m; i++)
          qt(i, j) -= two * si * r(i, k);
      }

      r(k, k) = norm * one;
      for (int i = k + 1; i < m; i++)
        // ...
    }

    for (int i = 0; i < m; i++)
      for (int j = 0; j < i; j++) {
        impl_SC tmp = qt(i, j);
        // ...
      }

    for (int j = 0; j < n; j++)
      for (int k = 0; k <= j; k++)
        // ...
      for (int j = 0; j < n; j++)
        for (int k = 0; k < n; k++)
          // ...
            coarseNS(offset + k, j) = (k == j ? one : zero);

      for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
          q(i, j) = (j == i ? one : zero);

      for (int j = 0; j < m; j++) {
        // ...
        size_t rowStart = rowsAux(localRow);
        // ...
        for (int k = 0; k < n; k++) {
          // ...
          if (q(j, k) != zero) {
            colsAux(rowStart + lnnz) = offset + k;
            valsAux(rowStart + lnnz) = q(j, k);
            // ...
          }
        }
        rows(localRow + 1) = lnnz;
        // ...
      }

      for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
          // ...

      for (int i = 0; i < aggSize; i++) {
        for (int j = 0; j < aggSize; j++)
          printf(" %5.3lf ", q(i, j));
      for (int j = 0; j < m; j++) {
        // ...
        size_t rowStart = rowsAux(localRow);
        // ...
        for (int k = 0; k < n; k++) {
          const impl_SC qr_jk = fineNS(localRow, k);
          // ...
          colsAux(rowStart + lnnz) = offset + k;
          valsAux(rowStart + lnnz) = qr_jk;
          // ...
        }
        rows(localRow + 1) = lnnz;
        // ...
      }

      for (int j = 0; j < n; j++)
        // ...

  size_t team_shmem_size(int) const {
    // ...
    return shared_matrix::shmem_size(m, n) +
           shared_matrix::shmem_size(m, m);

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
  // ...
#undef SET_VALID_ENTRY
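
  // Note: this is the body of GetValidParameterList(). SET_VALID_ENTRY registers parameters
  // whose defaults and documentation come from MueLu::MasterList; the entries below (generating
  // factories and the non-recursively validated "matrixmatrix: kernel params" sublist) are
  // added explicitly for this factory.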
  validParamList->set<RCP<const FactoryBase>>("Scaled Nullspace", Teuchos::null, "Generating factory of the scaled nullspace");
  validParamList->set<RCP<const FactoryBase>>("UnAmalgamationInfo", Teuchos::null, "Generating factory of UnAmalgamationInfo");
  // ...
  validParamList->set<RCP<const FactoryBase>>("Node Comm", Teuchos::null, "Generating factory of the node level communicator");

  // ...
  norecurse.disableRecursiveValidation();
  validParamList->set<ParameterList>("matrixmatrix: kernel params", norecurse, "MatrixMatrix kernel parameters");

  return validParamList;

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
  std::string nspName = "Nullspace";
  if (pL.isParameter("Nullspace name")) nspName = pL.get<std::string>("Nullspace name");

  Input(fineLevel, "A");
  Input(fineLevel, "Aggregates");
  Input(fineLevel, nspName);
  Input(fineLevel, "UnAmalgamationInfo");
  Input(fineLevel, "CoarseMap");

  if (/* ... */ pL.get<bool>("tentative: build coarse coordinates")) {
    bTransferCoordinates_ = true;
    Input(fineLevel, "Coordinates");
  } else if (bTransferCoordinates_) {
    Input(fineLevel, "Coordinates");

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
  return BuildP(fineLevel, coarseLevel);

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
  std::string nspName = "Nullspace";
  if (pL.isParameter("Nullspace name")) nspName = pL.get<std::string>("Nullspace name");

  auto A             = Get<RCP<Matrix>>(fineLevel, "A");
  auto aggregates    = Get<RCP<Aggregates>>(fineLevel, "Aggregates");
  auto amalgInfo     = Get<RCP<AmalgamationInfo>>(fineLevel, "UnAmalgamationInfo");
  auto fineNullspace = Get<RCP<MultiVector>>(fineLevel, nspName);
  auto coarseMap     = Get<RCP<const Map>>(fineLevel, "CoarseMap");
  // ...
  if (bTransferCoordinates_) {
    fineCoords = Get<RCP<RealValuedMultiVector>>(fineLevel, "Coordinates");
  // ...

  if (aggregates->GetNumGlobalAggregatesComputeIfNeeded() == 0) {
    Ptentative = Teuchos::null;
    Set(coarseLevel, "P", Ptentative);
    // ...

  if (bTransferCoordinates_) {
    // ...
    if (rcp_dynamic_cast<const StridedMap>(coarseMap) != Teuchos::null)
      blkSize = rcp_dynamic_cast<const StridedMap>(coarseMap)->getFixedBlockSize();
    // ...
      coarseCoordMap = coarseMap;
    // ...
    coarseCoords = RealValuedMultiVectorFactory::Build(coarseCoordMap, fineCoords->getNumVectors());

    auto uniqueMap = fineCoords->getMap();
    // ...
    if (aggregates->AggregatesCrossProcessors()) {
      auto nonUniqueMap = aggregates->GetMap();
      auto importer     = ImportFactory::Build(uniqueMap, nonUniqueMap);
      // ...
      ghostedCoords = RealValuedMultiVectorFactory::Build(nonUniqueMap, fineCoords->getNumVectors());
      // ...

    auto aggGraph = aggregates->GetGraph();
    auto numAggs  = aggGraph.numRows();

    auto fineCoordsView   = fineCoords->getDeviceLocalView(Xpetra::Access::ReadOnly);
    auto coarseCoordsView = coarseCoords->getDeviceLocalView(Xpetra::Access::OverwriteAll);
    // ...
    const auto dim = fineCoords->getNumVectors();
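
    // Coarse coordinates: for every aggregate, average the (possibly ghosted) fine node
    // coordinates of its members, one spatial dimension at a time.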
    for (size_t j = 0; j < dim; j++) {
      Kokkos::parallel_for(
          "MueLu::TentativeP::BuildCoords", Kokkos::RangePolicy<local_ordinal_type, execution_space>(0, numAggs),
          KOKKOS_LAMBDA(const LO i) {
            // ...
            auto aggregate = aggGraph.rowConst(i);

            coordinate_type sum = 0.0;
            for (size_t colID = 0; colID < static_cast<size_t>(aggregate.length); colID++)
              sum += fineCoordsRandomView(aggregate(colID), j);

            coarseCoordsView(i, j) = sum / aggregate.length;
          // ...

  if (!aggregates->AggregatesCrossProcessors()) {
    // ...
      BuildPuncoupledBlockCrs(coarseLevel, A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace,
                              /* ... */);
    // ...
      BuildPuncoupled(coarseLevel, A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace, coarseLevel.GetLevelID());
  // ...
    BuildPcoupled(A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace);
  // ...

  if (A->IsView("stridedMaps") == true)
    Ptentative->CreateView("stridedMaps", A->getRowMap("stridedMaps"), coarseMap);
  else
    Ptentative->CreateView("stridedMaps", Ptentative->getRangeMap(), coarseMap);

  if (bTransferCoordinates_) {
    Set(coarseLevel, "Coordinates", coarseCoords);
  }
  // ...
  if (fineLevel.IsAvailable("Node Comm")) {
    // ...
    Set<RCP<const Teuchos::Comm<int>>>(coarseLevel, "Node Comm", nodeComm);
  }

  Set(coarseLevel, "Nullspace", coarseNullspace);
  Set(coarseLevel, "P", Ptentative);

  // ...
    params->set("printLoadBalancingInfo", true);

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
  auto rowMap = A->getRowMap();
  auto colMap = A->getColMap();
  // ...
  const size_t numRows = rowMap->getLocalNumElements();
  const size_t NSDim   = fineNullspace->getNumVectors();

  typedef Kokkos::ArithTraits<SC> ATS;
  using impl_SC  = typename ATS::val_type;
  using impl_ATS = Kokkos::ArithTraits<impl_SC>;
  const impl_SC zero = impl_ATS::zero(), one = impl_ATS::one();

  // ...
  auto aggRows = aggGraph.row_map;
  auto aggCols = aggGraph.entries;

  // ...
  goodMap = isGoodMap(*rowMap, *colMap);
  // ...
                             "MueLu: TentativePFactory_kokkos: for now works only with good maps "
                             "(i.e. \"matching\" row and column maps)");

  // ...
  LO fullBlockSize, blockID, stridingOffset, stridedBlockSize;
  // ...
  auto procWinner   = aggregates->GetProcWinner()->getDeviceLocalView(Xpetra::Access::ReadOnly);
  auto vertex2AggId = aggregates->GetVertex2AggId()->getDeviceLocalView(Xpetra::Access::ReadOnly);
  // ...
  int myPID = aggregates->GetMap()->getComm()->getRank();

  // ...
  typedef typename Aggregates::aggregates_sizes_type::non_const_type AggSizeType;
  AggSizeType aggDofSizes;

  if (stridedBlockSize == 1) {
    // ...
    aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);
    // ...
    Kokkos::deep_copy(Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(1), numAggregates + 1)), sizesConst);
  } else {
    // ...
    aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);

    auto nodeMap = aggregates->GetMap()->getLocalMap();
    auto dofMap  = colMap->getLocalMap();
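
    // For stridedBlockSize > 1, the number of DOFs per aggregate cannot be read off the node
    // aggregate sizes directly: the kernel below walks every node of every aggregate and counts
    // which of its candidate DOF GIDs are actually present in the column map.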
    Kokkos::parallel_for(
        "MueLu:TentativePF:Build:compute_agg_sizes", range_type(0, numAggregates),
        KOKKOS_LAMBDA(const LO agg) {
          auto aggRowView = aggGraph.rowConst(agg);
          // ...
          for (LO colID = 0; colID < aggRowView.length; colID++) {
            GO nodeGID = nodeMap.getGlobalElement(aggRowView(colID));
            // ...
            for (LO k = 0; k < stridedBlockSize; k++) {
              GO dofGID = (nodeGID - indexBase) * fullBlockSize + k + indexBase + globalOffset + stridingOffset;
              // ...
              if (dofMap.getLocalElement(dofGID) != INVALID)
                // ...
            }
          }
          aggDofSizes(agg + 1) = size;
        // ...

  // ...
  ReduceMaxFunctor<LO, decltype(aggDofSizes)> reduceMax(aggDofSizes);
  Kokkos::parallel_reduce("MueLu:TentativePF:Build:max_agg_size", range_type(0, aggDofSizes.extent(0)), reduceMax, maxAggSize);
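
  // Prefix-sum the per-aggregate sizes in place: aggDofSizes starts as [0, size_0, size_1, ...],
  // and after the scan aggDofSizes(agg) is the offset of the first DOF (row of P) belonging to
  // aggregate agg, with aggDofSizes(numAggregates) the total local count.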
  Kokkos::parallel_scan(
      "MueLu:TentativePF:Build:aggregate_sizes:stage1_scan", range_type(0, numAggregates + 1),
      KOKKOS_LAMBDA(const LO i, LO& update, const bool& final_pass) {
        update += aggDofSizes(i);
        if (final_pass)
          aggDofSizes(i) = update;
      // ...

  // ...
  Kokkos::View<LO*, DeviceType> agg2RowMapLO(Kokkos::ViewAllocateWithoutInitializing("agg2row_map_LO"), numRows);
  // ...
  AggSizeType aggOffsets(Kokkos::ViewAllocateWithoutInitializing("aggOffsets"), numAggregates);
  Kokkos::deep_copy(aggOffsets, Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(0), numAggregates)));
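
  // Build the flat aggregate-to-row map: every locally owned node atomically claims
  // stridedBlockSize consecutive slots inside its aggregate's range (starting at aggOffsets)
  // and records the local row IDs it contributes, so the rows of P end up grouped by aggregate.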
  Kokkos::parallel_for(
      "MueLu:TentativePF:Build:createAgg2RowMap", range_type(0, vertex2AggId.extent(0)),
      KOKKOS_LAMBDA(const LO lnode) {
        if (procWinner(lnode, 0) == myPID) {
          // ...
          auto aggID  = vertex2AggId(lnode, 0);
          // ...
          auto offset = Kokkos::atomic_fetch_add(&aggOffsets(aggID), stridedBlockSize);
          // ...
          for (LO k = 0; k < stridedBlockSize; k++)
            agg2RowMapLO(offset + k) = lnode * stridedBlockSize + k;
        // ...

  // ...
  coarseNullspace = MultiVectorFactory::Build(coarseMap, NSDim);
  // ...
  auto fineNS   = fineNullspace->getDeviceLocalView(Xpetra::Access::ReadWrite);
  auto coarseNS = coarseNullspace->getDeviceLocalView(Xpetra::Access::OverwriteAll);

  // ...
  typedef typename local_matrix_type::row_map_type::non_const_type rows_type;
  typedef typename local_matrix_type::index_type::non_const_type cols_type;
  typedef typename local_matrix_type::values_type::non_const_type vals_type;

  // ...
  typedef Kokkos::View<int[10], DeviceType> status_type;
  status_type status("status");

  // ...
  const bool& doQRStep = pL.get<bool>("tentative: calculate qr");
  // ...
    GetOStream(Runtime1) << "TentativePFactory : bypassing local QR phase" << std::endl;
    // ...
      GetOStream(Warnings0) << "TentativePFactory : for nontrivial nullspace, this may degrade performance" << std::endl;

  // ...
  size_t nnzEstimate = numRows * NSDim;
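
  // Upper bound on the number of nonzeros of Ptent: each fine row can receive at most one entry
  // per nullspace vector (NSDim). The *_aux arrays below are allocated with this bound and are
  // compressed to the actual pattern once the per-row counts are known.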
  rows_type rowsAux(Kokkos::ViewAllocateWithoutInitializing("Ptent_aux_rows"), numRows + 1);
  cols_type colsAux(Kokkos::ViewAllocateWithoutInitializing("Ptent_aux_cols"), nnzEstimate);
  vals_type valsAux("Ptent_aux_vals", nnzEstimate);
  rows_type rows("Ptent_rows", numRows + 1);

  // ...
  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildPuncoupled:for1", range_type(0, numRows + 1),
      KOKKOS_LAMBDA(const LO row) {
        // ...

  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildUncoupled:for2", range_type(0, nnzEstimate),
      KOKKOS_LAMBDA(const LO j) {
        // ...

  // ...
  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
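
  // This appears to be the single-nullspace-vector fast path: the local "QR" reduces to
  // normalizing each aggregate's nullspace entries, so every row of P gets exactly one nonzero;
  // the elided code presumably stores the aggregate norm as the coarse nullspace entry.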
  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildUncoupled:main_loop", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        auto norm = impl_ATS::magnitude(zero);
        // ...
        for (decltype(aggSize) k = 0; k < aggSize; k++) {
          // ...
          norm += dnorm * dnorm;
        // ...
        for (decltype(aggSize) k = 0; k < aggSize; k++) {
          // ...
          rows(localRow + 1) = 1;
        // ...

  // ...
  typename status_type::HostMirror statusHost = Kokkos::create_mirror_view(status);
  Kokkos::deep_copy(statusHost, status);
  for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
    // ...
      std::ostringstream oss;
      oss << "MueLu::TentativePFactory::MakeTentative: ";
      // ...
        case 0: oss << "!goodMap is not implemented"; break;
        case 1: oss << "fine level NS part has a zero column"; break;
      // ...

  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildUncoupled:main_loop_noqr", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        for (decltype(aggSize) k = 0; k < aggSize; k++) {
          // ...
          rows(localRow + 1) = 1;
        // ...

  Kokkos::parallel_reduce(
      "MueLu:TentativeP:CountNNZ", range_type(0, numRows + 1),
      KOKKOS_LAMBDA(const LO i, size_t& nnz_count) {
        nnz_count += rows(i);
      // ...

  // ...
  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
  // ...
  LocalQRDecompFunctor</* ... */
                       decltype(aggDofSizes), decltype(maxAggSize), decltype(agg2RowMapLO),
                       decltype(statusAtomic), decltype(rows), decltype(rowsAux), decltype(colsAux),
                       /* ... */>
      localQRFunctor(fineNSRandom, coarseNS, aggDofSizes, maxAggSize, agg2RowMapLO, statusAtomic,
                     rows, rowsAux, colsAux, valsAux, doQRStep);
  Kokkos::parallel_reduce("MueLu:TentativePF:BuildUncoupled:main_qr_loop", policy, localQRFunctor, nnz);
  typename status_type::HostMirror statusHost = Kokkos::create_mirror_view(status);
  Kokkos::deep_copy(statusHost, status);
  for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
    // ...
      std::ostringstream oss;
      oss << "MueLu::TentativePFactory::MakeTentative: ";
      // ...
        case 0: oss << "!goodMap is not implemented"; break;
        case 1: oss << "fine level NS part has a zero column"; break;
      // ...

  if (nnz != nnzEstimate) {
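    // Some candidate entries were skipped (zero values), so the auxiliary arrays are compressed:
    // a parallel_scan turns the per-row counts in `rows` into offsets, and a second kernel copies
    // only the surviving column indices and values into the final cols/vals arrays.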
    Kokkos::parallel_scan(
        "MueLu:TentativePF:Build:compress_rows", range_type(0, numRows + 1),
        KOKKOS_LAMBDA(const LO i, LO& upd, const bool& final) {
          // ...

    cols = cols_type("Ptent_cols", nnz);
    vals = vals_type("Ptent_vals", nnz);

    // ...
    Kokkos::parallel_for(
        "MueLu:TentativePF:Build:compress_cols_vals", range_type(0, numRows),
        KOKKOS_LAMBDA(const LO i) {
          // ...
            cols(rowStart + lnnz) = colsAux(j);
            vals(rowStart + lnnz) = valsAux(j);
          // ...

  // ...
  GetOStream(Runtime1) << "TentativePFactory : aggregates do not cross process boundaries" << std::endl;

  // ...
  local_matrix_type lclMatrix = local_matrix_type("A", numRows, coarseMap->getLocalNumElements(), nnz, vals, rows, cols);

  // ...
  if (pL.isSublist("matrixmatrix: kernel params"))
    // ...
  FCparams->set("compute global constants", FCparams->get("compute global constants", false));
  FCparams->set("Timer Label", std::string("MueLu::TentativeP-") + toString(levelID));

  // ...
  auto PtentCrs = CrsMatrixFactory::Build(lclMatrix, rowMap, coarseMap, coarseMap, A->getDomainMap());
  Ptentative    = rcp(new CrsMatrixWrap(PtentCrs));

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
  const size_t numFineBlockRows = rowMap->getLocalNumElements();
  // ...
  typedef Kokkos::ArithTraits<SC> ATS;
  using impl_SC  = typename ATS::val_type;
  using impl_ATS = Kokkos::ArithTraits<impl_SC>;
  const impl_SC one = impl_ATS::one();

  // ...
  const size_t NSDim = fineNullspace->getNumVectors();
  // ...
  auto aggRows = aggGraph.row_map;
  auto aggCols = aggGraph.entries;

  // ...
  const size_t numCoarseBlockRows = coarsePointMap->getLocalNumElements() / NSDim;
  RCP<const Map> coarseBlockMap   = MapFactory::Build(coarsePointMap->lib(),
                                                      // ...
                                                      coarsePointMap->getIndexBase(),
                                                      coarsePointMap->getComm());
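
  // In the BlockCrs variant, P is stored with one NSDim x NSDim block per (block) row, so the
  // coarse side needs a "block" map with numCoarseBlockRows = (coarse point rows) / NSDim
  // entries alongside the point map used for the coarse nullspace.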
1008 "MueLu: TentativePFactory_kokkos: for now works only with good maps "
1009 "(i.e. \"matching\" row and column maps)");
1018 LO fullBlockSize, blockID, stridingOffset, stridedBlockSize;
1024 auto procWinner = aggregates->
GetProcWinner()->getDeviceLocalView(Xpetra::Access::ReadOnly);
1025 auto vertex2AggId = aggregates->
GetVertex2AggId()->getDeviceLocalView(Xpetra::Access::ReadOnly);
1028 int myPID = aggregates->
GetMap()->getComm()->getRank();
1033 typedef typename Aggregates::aggregates_sizes_type::non_const_type AggSizeType;
1034 AggSizeType aggDofSizes;
1040 aggDofSizes = AggSizeType(
"agg_dof_sizes", numAggregates + 1);
1042 Kokkos::deep_copy(Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(1), numAggregates + 1)), aggSizes);
1048 ReduceMaxFunctor<LO, decltype(aggDofSizes)> reduceMax(aggDofSizes);
1049 Kokkos::parallel_reduce(
"MueLu:TentativePF:Build:max_agg_size",
range_type(0, aggDofSizes.extent(0)), reduceMax, maxAggSize);
1053 Kokkos::parallel_scan(
1054 "MueLu:TentativePF:Build:aggregate_sizes:stage1_scan",
range_type(0, numAggregates + 1),
1055 KOKKOS_LAMBDA(
const LO i,
LO& update,
const bool& final_pass) {
1056 update += aggDofSizes(i);
1058 aggDofSizes(i) = update;
1063 Kokkos::View<LO*, DeviceType> aggToRowMapLO(Kokkos::ViewAllocateWithoutInitializing(
"aggtorow_map_LO"), numFineBlockRows);
1067 AggSizeType aggOffsets(Kokkos::ViewAllocateWithoutInitializing(
"aggOffsets"), numAggregates);
1068 Kokkos::deep_copy(aggOffsets, Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(0), numAggregates)));
1070 Kokkos::parallel_for(
1071 "MueLu:TentativePF:Build:createAgg2RowMap",
range_type(0, vertex2AggId.extent(0)),
1072 KOKKOS_LAMBDA(
const LO lnode) {
1073 if (procWinner(lnode, 0) == myPID) {
1075 auto aggID = vertex2AggId(lnode, 0);
1077 auto offset = Kokkos::atomic_fetch_add(&aggOffsets(aggID), stridedBlockSize);
1081 for (
LO k = 0; k < stridedBlockSize; k++)
1082 aggToRowMapLO(offset + k) = lnode * stridedBlockSize + k;
1089 coarseNullspace = MultiVectorFactory::Build(coarsePointMap, NSDim);
1092 auto fineNS = fineNullspace->getDeviceLocalView(Xpetra::Access::ReadWrite);
1093 auto coarseNS = coarseNullspace->getDeviceLocalView(Xpetra::Access::OverwriteAll);
1096 typedef typename local_matrix_type::row_map_type::non_const_type rows_type;
1097 typedef typename local_matrix_type::index_type::non_const_type cols_type;
1101 typedef Kokkos::View<int[10], DeviceType> status_type;
1102 status_type status(
"status");
1108 GetOStream(
Runtime1) <<
"TentativePFactory : bypassing local QR phase" << std::endl;
1114 rows_type ia(Kokkos::ViewAllocateWithoutInitializing(
"BlockGraph_rowptr"), numFineBlockRows + 1);
1115 cols_type ja(Kokkos::ViewAllocateWithoutInitializing(
"BlockGraph_colind"), numFineBlockRows);
  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:graph_init", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO j) {
        // ...
        if (j == (LO)numFineBlockRows - 1)
          ia[numFineBlockRows] = numFineBlockRows;
      // ...

  // ...
  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:fillGraph", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        for (LO j = 0; j < aggSize; j++) {
          // ...
          const LO localRow     = aggToRowMapLO[aggDofSizes[agg] + j];
          const size_t rowStart = ia[localRow];
          ja[rowStart]          = offset;
        // ...

  // ...
  rows_type i_temp(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_rowptr"), numFineBlockRows + 1);
  Kokkos::parallel_scan(
      "MueLu:TentativePF:BlockCrs:compress_rows", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO i, LO& upd, const bool& final) {
        // ...
        for (auto j = ia[i]; j < ia[i + 1]; j++)
          if (ja[j] != INVALID)
            // ...
        if (final && i == (LO)numFineBlockRows - 1)
          i_temp[numFineBlockRows] = upd;
      // ...

  cols_type j_temp(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_colind"), nnz);

  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:compress_cols", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO i) {
        size_t rowStart = i_temp[i];
        // ...
        for (auto j = ia[i]; j < ia[i + 1]; j++)
          if (ja[j] != INVALID) {
            j_temp[rowStart + lnnz] = ja[j];
            // ...

  // ...
  RCP<CrsGraph> BlockGraph = CrsGraphFactory::Build(rowMap, coarseBlockMap, ia, ja);

  // ...
  if (pL.isSublist("matrixmatrix: kernel params"))
    FCparams = rcp(new ParameterList(pL.sublist("matrixmatrix: kernel params")));
  // ...
  FCparams->set("compute global constants", FCparams->get("compute global constants", false));
  std::string levelIDs = toString(levelID);
  FCparams->set("Timer Label", std::string("MueLu::TentativeP-") + levelIDs);

  // ...
  BlockGraph->expertStaticFillComplete(coarseBlockMap, rowMap, dummy_i, dummy_e, FCparams);

  // ...
  if (P_tpetra.is_null())
    throw std::runtime_error("BuildPUncoupled: Matrix factory did not return a Tpetra::BlockCrsMatrix");
  // ...
  auto values     = P_tpetra->getTpetra_BlockCrsMatrix()->getValuesDeviceNonConst();
  const LO stride = NSDim * NSDim;
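
  // Fill the BlockCrs values: each block row owns one NSDim x NSDim block, stored contiguously
  // (row-major, hence stride = NSDim * NSDim) in `values`, and it is filled directly with the
  // fine nullspace entries of the corresponding point rows; there is no local QR in this path.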
  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:main_loop_noqr", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        for (LO j = 0; j < aggSize; j++) {
          LO localBlockRow = aggToRowMapLO(aggRows(agg) + j);
          LO rowStart      = localBlockRow * stride;
          for (LO r = 0; r < (LO)NSDim; r++) {
            LO localPointRow = localBlockRow * NSDim + r;
            for (LO c = 0; c < (LO)NSDim; c++) {
              values[rowStart + r * NSDim + c] = fineNSRandom(localPointRow, c);
            // ...

  // ...
  for (LO j = 0; j < (LO)NSDim; j++)
    // ...

  Ptentative = P_wrap;

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
// ...
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
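// isGoodMap: a "good" (matching) pair of maps means the row map's GIDs appear, in the same
// order, as the leading entries of the column map; the kernel below compares them entry by
// entry on device and reduces the number of mismatches.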
  auto rowLocalMap = rowMap.getLocalMap();
  auto colLocalMap = colMap.getLocalMap();

  const size_t numRows = rowLocalMap.getLocalNumElements();
  const size_t numCols = colLocalMap.getLocalNumElements();

  if (numCols < numRows)
    // ...

  Kokkos::parallel_reduce(
      "MueLu:TentativePF:isGoodMap", range_type(0, numRows),
      KOKKOS_LAMBDA(const LO i, size_t& diff) {
        diff += (rowLocalMap.getGlobalElement(i) != colLocalMap.getGlobalElement(i));
      // ...

  return (numDiff == 0);

#define MUELU_TENTATIVEPFACTORY_KOKKOS_SHORT
#endif  // MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP