#ifndef MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP
#define MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP

#include "Kokkos_UnorderedMap.hpp"

#include "MueLu_Aggregates.hpp"
#include "MueLu_AmalgamationInfo.hpp"
#include "MueLu_AmalgamationFactory.hpp"

#include "MueLu_PerfUtils.hpp"
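// Reduction functor for Kokkos::parallel_reduce: computes the maximum entry of the
// view it wraps (used below to find the size of the largest aggregate).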
template <class LocalOrdinal, class View>
class ReduceMaxFunctor {
  ReduceMaxFunctor(View view)
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...

  KOKKOS_INLINE_FUNCTION
  // ...
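// Team-level functor performing one local QR decomposition per aggregate: the
// aggregate's rows of the fine nullspace form a dense m-by-n block whose R factor
// becomes the coarse nullspace and whose Q factor provides the entries of the
// tentative prolongator. When doQRStep is false, the fine nullspace values are
// copied into the prolongator unchanged.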
template <class LOType, class GOType, class SCType, class DeviceType, class NspType, class aggRowsType,
          class maxAggDofSizeType, class agg2RowMapLOType, class statusType, class rowsType, class rowsAuxType,
          class colsAuxType, class valsAuxType>
class LocalQRDecompFunctor {
  typedef typename DeviceType::execution_space execution_space;
#if KOKKOS_VERSION >= 40799
  typedef typename KokkosKernels::ArithTraits<SC>::val_type impl_SC;
#else
  typedef typename Kokkos::ArithTraits<SC>::val_type impl_SC;
#endif
#if KOKKOS_VERSION >= 40799
  typedef KokkosKernels::ArithTraits<impl_SC> impl_ATS;
#else
  typedef Kokkos::ArithTraits<impl_SC> impl_ATS;
#endif
  typedef typename impl_ATS::magnitudeType Magnitude;

  typedef Kokkos::View<impl_SC**, typename execution_space::scratch_memory_space, Kokkos::MemoryUnmanaged> shared_matrix;
  typedef Kokkos::View<impl_SC*, typename execution_space::scratch_memory_space, Kokkos::MemoryUnmanaged> shared_vector;

  // ...

  LocalQRDecompFunctor(NspType fineNS_, NspType coarseNS_, aggRowsType aggRows_, maxAggDofSizeType maxAggDofSize_,
                       agg2RowMapLOType agg2RowMapLO_, statusType statusAtomic_, rowsType rows_, rowsAuxType rowsAux_,
                       colsAuxType colsAux_, valsAuxType valsAux_, bool doQRStep_)
  // ...
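  // One team per aggregate; the reduction argument nnz accumulates the number of
  // entries the team stages in the auxiliary CRS arrays (colsAux/valsAux).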
  KOKKOS_INLINE_FUNCTION
  void operator()(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread, size_t& nnz) const {
    auto agg = thread.league_rank();
    // ...

    const impl_SC one  = impl_ATS::one();
    const impl_SC two  = one + one;
    const impl_SC zero = impl_ATS::zero();
    const auto zeroM   = impl_ATS::magnitude(zero);

    // ...

    if (doQRStep) {
      // Gather this aggregate's rows of the fine nullspace into team scratch (r is m-by-n).
      shared_matrix r(thread.team_shmem(), m, n);
      for (int j = 0; j < n; j++)
        for (int k = 0; k < m; k++)
          // ...

      // Debug print of the local nullspace block.
      for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
          printf(" %5.3lf ", r(i, j));
        // ...
      // Orthogonal factor, also in team scratch (m-by-m).
      shared_matrix q(thread.team_shmem(), m, m);

      bool isSingular = false;

      // Initialize the (transposed) orthogonal factor qt.
      for (int i = 0; i < m; i++) {
        for (int j = 0; j < m; j++)
          // ...
      // Householder QR: for column k, build a reflector v from r(k:m, k), then apply
      // I - 2*v*v^T to the trailing columns of r and to qt.
      for (int k = 0; k < n; k++) {
        // s = squared norm of the column entries below the diagonal
        Magnitude s = zeroM, norm, norm_x;
        for (int i = k + 1; i < m; i++)
          s += pow(impl_ATS::magnitude(r(i, k)), 2);
        norm = sqrt(pow(impl_ATS::magnitude(r(k, k)), 2) + s);

        // ...

        r(k, k) -= norm * one;

        norm_x = sqrt(pow(impl_ATS::magnitude(r(k, k)), 2) + s);
        if (norm_x == zeroM) {
          // Zero column: restore the diagonal entry and flag the block as singular.
          // ...
          r(k, k) = norm * one;
          // ...
        }

        // Normalize the reflector.
        for (int i = k; i < m; i++)
          // ...

        // Apply the reflector to the remaining columns of r.
        for (int j = k + 1; j < n; j++) {
          // ...
          for (int i = k; i < m; i++)
            si += r(i, k) * r(i, j);
          for (int i = k; i < m; i++)
            r(i, j) -= two * si * r(i, k);
        }

        // Apply the reflector to qt.
        for (int j = k; j < m; j++) {
          // ...
          for (int i = k; i < m; i++)
            si += r(i, k) * qt(i, j);
          for (int i = k; i < m; i++)
            qt(i, j) -= two * si * r(i, k);
        }

        // Restore the diagonal entry of R and clear the rest of column k.
        r(k, k) = norm * one;
        for (int i = k + 1; i < m; i++)
          // ...
      // Transpose qt in place to obtain Q.
      for (int i = 0; i < m; i++)
        for (int j = 0; j < i; j++) {
          impl_SC tmp = qt(i, j);
          // ...
        }

      // The upper triangle of r (the R factor) becomes this aggregate's block of the coarse nullspace.
      for (int j = 0; j < n; j++)
        for (int k = 0; k <= j; k++)
          // ...

      // ... (special handling when the aggregate has fewer rows than nullspace columns)

      for (int j = 0; j < n; j++)
        for (int k = 0; k < n; k++)
          // ...
            coarseNS(offset + k, j) = (k == j ? one : zero);

      for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
          q(i, j) = (j == i ? one : zero);
      // Stage the rows of P for this aggregate: row j gets the nonzeros of q(j, :).
      for (int j = 0; j < m; j++) {
        // ...
        size_t rowStart = rowsAux(localRow);
        // ...
        for (int k = 0; k < n; k++) {
          // skip zeros
          if (q(j, k) != zero) {
            colsAux(rowStart + lnnz) = offset + k;
            valsAux(rowStart + lnnz) = q(j, k);
            // ...
          }
        }
        rows(localRow + 1) = lnnz;
        // ...
      // Debug print of the local factors.
      for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
          // ...

      for (int i = 0; i < aggSize; i++) {
        for (int j = 0; j < aggSize; j++)
          printf(" %5.3lf ", q(i, j));
        // ...
    } else {
      // No-QR path: each row of P simply receives the corresponding row of the fine nullspace.
      for (int j = 0; j < m; j++) {
        // ...
        size_t rowStart = rowsAux(localRow);
        // ...
        for (int k = 0; k < n; k++) {
          const impl_SC qr_jk = fineNS(localRow, k);
          // skip zeros
          // ...
          colsAux(rowStart + lnnz) = offset + k;
          valsAux(rowStart + lnnz) = qr_jk;
          // ...
        }
        rows(localRow + 1) = lnnz;
        // ...
      }

      // Coarse nullspace entries for this aggregate.
      for (int j = 0; j < n; j++)
        // ...
  size_t team_shmem_size(int) const {
    // ...
    return shared_matrix::shmem_size(m, n) +
           shared_matrix::shmem_size(m, m);
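  // The scratch request above covers the two team-shared arrays allocated in
  // operator(): the nullspace block r (m-by-n) and the orthogonal factor q (m-by-m),
  // where m is the (maximum) aggregate size and n the number of nullspace vectors.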
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
RCP<const ParameterList> TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::GetValidParameterList() const {
  RCP<ParameterList> validParamList = rcp(new ParameterList());

#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
  // ...
#undef SET_VALID_ENTRY

  // ...
  validParamList->set<RCP<const FactoryBase>>("Scaled Nullspace", Teuchos::null, "Generating factory of the scaled nullspace");
  validParamList->set<RCP<const FactoryBase>>("UnAmalgamationInfo", Teuchos::null, "Generating factory of UnAmalgamationInfo");
  // ...
  validParamList->set<RCP<const FactoryBase>>("Node Comm", Teuchos::null, "Generating factory of the node level communicator");

  ParameterList norecurse;
  norecurse.disableRecursiveValidation();
  validParamList->set<ParameterList>("matrixmatrix: kernel params", norecurse, "MatrixMatrix kernel parameters");

  return validParamList;
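// Caller-side sketch (hypothetical, for illustration only): the entries validated
// above are the ones a user would typically set on this factory, e.g.
//
//   Teuchos::ParameterList tentParams;
//   tentParams.set("tentative: calculate qr", true);              // enable the local QR step
//   tentParams.set("tentative: build coarse coordinates", true);  // also coarsen coordinates
//   tentParams.set("Nullspace name", "Nullspace");                // which level variable holds the nullspace
//
// The list is then handed to the TentativePFactory_kokkos instance through its
// ParameterListAcceptor interface (SetParameterList) before the factory is used
// in a FactoryManager.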
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::DeclareInput(Level& fineLevel, Level& coarseLevel) const {
  const ParameterList& pL = GetParameterList();

  std::string nspName = "Nullspace";
  if (pL.isParameter("Nullspace name")) nspName = pL.get<std::string>("Nullspace name");

  Input(fineLevel, "A");
  Input(fineLevel, "Aggregates");
  Input(fineLevel, nspName);
  Input(fineLevel, "UnAmalgamationInfo");
  Input(fineLevel, "CoarseMap");

  // Request coordinates when coarse coordinates are to be built (or have already been transferred).
  if (/* ... */
      pL.get<bool>("tentative: build coarse coordinates")) {
    bTransferCoordinates_ = true;
    Input(fineLevel, "Coordinates");
  } else if (bTransferCoordinates_) {
    Input(fineLevel, "Coordinates");
  }
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& fineLevel, Level& coarseLevel) const {
  return BuildP(fineLevel, coarseLevel);
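// BuildP: main driver. Pulls A, the aggregates, the fine nullspace and the coarse map
// from the fine level, optionally builds coarse coordinates by averaging the fine
// coordinates over each aggregate, dispatches to BuildPuncoupled /
// BuildPuncoupledBlockCrs / BuildPcoupled, and publishes "P", "Nullspace" and
// (optionally) "Coordinates" on the coarse level.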
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::BuildP(Level& fineLevel, Level& coarseLevel) const {
  const ParameterList& pL = GetParameterList();

  std::string nspName = "Nullspace";
  if (pL.isParameter("Nullspace name")) nspName = pL.get<std::string>("Nullspace name");

  auto A             = Get<RCP<Matrix>>(fineLevel, "A");
  auto aggregates    = Get<RCP<Aggregates>>(fineLevel, "Aggregates");
  auto amalgInfo     = Get<RCP<AmalgamationInfo>>(fineLevel, "UnAmalgamationInfo");
  auto fineNullspace = Get<RCP<MultiVector>>(fineLevel, nspName);
  auto coarseMap     = Get<RCP<const Map>>(fineLevel, "CoarseMap");

  if (bTransferCoordinates_) {
    fineCoords = Get<RCP<RealValuedMultiVector>>(fineLevel, "Coordinates");
  }

  // No aggregates at all: publish an empty P and bail out.
  if (aggregates->GetNumGlobalAggregatesComputeIfNeeded() == 0) {
    Ptentative = Teuchos::null;
    Set(coarseLevel, "P", Ptentative);
    // ...
  }
  if (bTransferCoordinates_) {
    // Build the coarse coordinates: one point per aggregate.
    // ...
    if (rcp_dynamic_cast<const StridedMap>(coarseMap) != Teuchos::null)
      blkSize = rcp_dynamic_cast<const StridedMap>(coarseMap)->getFixedBlockSize();
    // ...
      coarseCoordMap = coarseMap;
    // ...
    coarseCoords = RealValuedMultiVectorFactory::Build(coarseCoordMap, fineCoords->getNumVectors());

    auto uniqueMap = fineCoords->getMap();
    // If aggregates cross processor boundaries, first import the ghosted fine coordinates.
    if (aggregates->AggregatesCrossProcessors()) {
      auto nonUniqueMap = aggregates->GetMap();
      auto importer     = ImportFactory::Build(uniqueMap, nonUniqueMap);
      ghostedCoords     = RealValuedMultiVectorFactory::Build(nonUniqueMap, fineCoords->getNumVectors());
      // ...
    }

    auto aggGraph = aggregates->GetGraph();
    auto numAggs  = aggGraph.numRows();

    auto fineCoordsView   = fineCoords->getLocalViewDevice(Xpetra::Access::ReadOnly);
    auto coarseCoordsView = coarseCoords->getLocalViewDevice(Xpetra::Access::OverwriteAll);

    const auto dim = fineCoords->getNumVectors();

    // Coarse coordinate = arithmetic mean of the fine coordinates in the aggregate.
    for (size_t j = 0; j < dim; j++) {
      Kokkos::parallel_for(
          "MueLu::TentativeP::BuildCoords", Kokkos::RangePolicy<local_ordinal_type, execution_space>(0, numAggs),
          KOKKOS_LAMBDA(const LO i) {
            auto aggregate = aggGraph.rowConst(i);

            coordinate_type sum = 0.0;
            for (size_t colID = 0; colID < static_cast<size_t>(aggregate.length); colID++)
              sum += fineCoordsRandomView(aggregate(colID), j);

            coarseCoordsView(i, j) = sum / aggregate.length;
  // Dispatch: purely local construction when aggregates do not cross processor
  // boundaries, otherwise the coupled variant.
  if (!aggregates->AggregatesCrossProcessors()) {
    // ...
      BuildPuncoupledBlockCrs(coarseLevel, A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace,
                              // ...
      BuildPuncoupled(coarseLevel, A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace, coarseLevel.GetLevelID());
  // ...
    BuildPcoupled(A, aggregates, amalgInfo, fineNullspace, coarseMap, Ptentative, coarseNullspace);

  // ...
  if (A->IsView("stridedMaps") == true)
    Ptentative->CreateView("stridedMaps", A->getRowMap("stridedMaps"), coarseMap);
  else
    Ptentative->CreateView("stridedMaps", Ptentative->getRangeMap(), coarseMap);

  if (bTransferCoordinates_) {
    Set(coarseLevel, "Coordinates", coarseCoords);
  }

  // ...
  if (fineLevel.IsAvailable("Node Comm")) {
    // ...
    Set<RCP<const Teuchos::Comm<int>>>(coarseLevel, "Node Comm", nodeComm);
  }

  Set(coarseLevel, "Nullspace", coarseNullspace);
  Set(coarseLevel, "P", Ptentative);

  // ...
    params->set("printLoadBalancingInfo", true);
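// BuildPuncoupled: construction of the tentative prolongator when aggregates do not
// cross processor boundaries. All work is local: aggregate sizes are turned into row
// offsets, each aggregate's slice of the fine nullspace is (optionally) QR-factorized,
// and the result is assembled into a local CRS matrix.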
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::BuildPuncoupled(Level& coarseLevel, RCP<Matrix> A, RCP<Aggregates> aggregates,
                                                                                           RCP<AmalgamationInfo> amalgInfo, RCP<MultiVector> fineNullspace,
                                                                                           RCP<const Map> coarseMap, RCP<Matrix>& Ptentative,
                                                                                           RCP<MultiVector>& coarseNullspace, const int levelID) const {
  auto rowMap = A->getRowMap();
  auto colMap = A->getColMap();

  const size_t numRows = rowMap->getLocalNumElements();
  const size_t NSDim   = fineNullspace->getNumVectors();

#if KOKKOS_VERSION >= 40799
  typedef KokkosKernels::ArithTraits<SC> ATS;
#else
  typedef Kokkos::ArithTraits<SC> ATS;
#endif
  using impl_SC = typename ATS::val_type;
#if KOKKOS_VERSION >= 40799
  using impl_ATS = KokkosKernels::ArithTraits<impl_SC>;
#else
  using impl_ATS = Kokkos::ArithTraits<impl_SC>;
#endif

  const impl_SC zero = impl_ATS::zero(), one = impl_ATS::one();

  // ...
  auto aggRows = aggGraph.row_map;
  auto aggCols = aggGraph.entries;

  // The current implementation requires "matching" row and column maps.
  // ...
  goodMap = isGoodMap(*rowMap, *colMap);
  TEUCHOS_TEST_FOR_EXCEPTION(!goodMap, Exceptions::RuntimeError,
                             "MueLu: TentativePFactory_kokkos: for now works only with good maps "
                             "(i.e. \"matching\" row and column maps)");

  // Striding (block) information from the amalgamation info.
  LO fullBlockSize, blockID, stridingOffset, stridedBlockSize;
  // ...

  auto procWinner   = aggregates->GetProcWinner()->getLocalViewDevice(Xpetra::Access::ReadOnly);
  auto vertex2AggId = aggregates->GetVertex2AggId()->getLocalViewDevice(Xpetra::Access::ReadOnly);
  // ...
  int myPID = aggregates->GetMap()->getComm()->getRank();

  // aggDofSizes(i+1) holds the number of DoFs in aggregate i; entry 0 stays zero so
  // that a prefix scan turns the array into row offsets.
  typedef typename Aggregates::aggregates_sizes_type::non_const_type AggSizeType;
  AggSizeType aggDofSizes;

  if (stridedBlockSize == 1) {
    // One DoF per node: reuse the aggregate sizes computed by the Aggregates class.
    aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);

    auto sizesConst = aggregates->ComputeAggregateSizes();
    Kokkos::deep_copy(Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(1), numAggregates + 1)), sizesConst);
  } else {
    // Several DoFs per node: count, for every aggregate, the DoF GIDs actually present
    // in the column map.
    aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);

    auto nodeMap = aggregates->GetMap()->getLocalMap();
    auto dofMap  = colMap->getLocalMap();

    Kokkos::parallel_for(
        "MueLu:TentativePF:Build:compute_agg_sizes", range_type(0, numAggregates),
        KOKKOS_LAMBDA(const LO agg) {
          auto aggRowView = aggGraph.rowConst(agg);
          // ...
          for (LO colID = 0; colID < aggRowView.length; colID++) {
            GO nodeGID = nodeMap.getGlobalElement(aggRowView(colID));

            for (LO k = 0; k < stridedBlockSize; k++) {
              GO dofGID = (nodeGID - indexBase) * fullBlockSize + k + indexBase + globalOffset + stridingOffset;

              if (dofMap.getLocalElement(dofGID) != INVALID)
                // ...
            }
          }
          aggDofSizes(agg + 1) = size;
        });
  }

  // Find the size of the largest aggregate (bounds the dense QR block and its scratch space).
  ReduceMaxFunctor<LO, decltype(aggDofSizes)> reduceMax(aggDofSizes);
  Kokkos::parallel_reduce("MueLu:TentativePF:Build:max_agg_size", range_type(0, aggDofSizes.extent(0)), reduceMax, maxAggSize);

  // Prefix-scan the sizes in place: aggDofSizes(i) becomes the first row (DoF) of aggregate i.
  Kokkos::parallel_scan(
      "MueLu:TentativePF:Build:aggregate_sizes:stage1_scan", range_type(0, numAggregates + 1),
      KOKKOS_LAMBDA(const LO i, LO& update, const bool& final_pass) {
        update += aggDofSizes(i);
        // ...
          aggDofSizes(i) = update;
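  // Example: for aggregate sizes (2, 3, 1) the view starts as [0, 2, 3, 1] and the
  // scan above turns it in place into the offsets [0, 2, 5, 6].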
  // Map each position inside an aggregate to the local row (DoF) id it owns.
  Kokkos::View<LO*, DeviceType> agg2RowMapLO(Kokkos::ViewAllocateWithoutInitializing("agg2row_map_LO"), numRows);

  // Working copy of the aggregate offsets; atomically bumped as rows are handed out.
  AggSizeType aggOffsets(Kokkos::ViewAllocateWithoutInitializing("aggOffsets"), numAggregates);
  Kokkos::deep_copy(aggOffsets, Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(0), numAggregates)));

  Kokkos::parallel_for(
      "MueLu:TentativePF:Build:createAgg2RowMap", range_type(0, vertex2AggId.extent(0)),
      KOKKOS_LAMBDA(const LO lnode) {
        if (procWinner(lnode, 0) == myPID) {
          // Node owned by this rank: reserve stridedBlockSize consecutive slots in its aggregate.
          auto aggID  = vertex2AggId(lnode, 0);
          auto offset = Kokkos::atomic_fetch_add(&aggOffsets(aggID), stridedBlockSize);

          for (LO k = 0; k < stridedBlockSize; k++)
            agg2RowMapLO(offset + k) = lnode * stridedBlockSize + k;
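  // Example: with stridedBlockSize = 2, a node lnode = 7 owned by this rank and
  // assigned to aggregate 3 claims the next two free slots of aggregate 3 and stores
  // the local rows 14 and 15 there.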
  coarseNullspace = MultiVectorFactory::Build(coarseMap, NSDim);

  auto fineNS   = fineNullspace->getLocalViewDevice(Xpetra::Access::ReadWrite);
  auto coarseNS = coarseNullspace->getLocalViewDevice(Xpetra::Access::OverwriteAll);

  // Device-side CRS arrays of the tentative prolongator.
  typedef typename local_matrix_type::row_map_type::non_const_type rows_type;
  typedef typename local_matrix_type::index_type::non_const_type cols_type;
  typedef typename local_matrix_type::values_type::non_const_type vals_type;

  // Error flags set inside the kernels and checked on the host afterwards.
  typedef Kokkos::View<int[10], DeviceType> status_type;
  status_type status("status");

  // ...
  const bool& doQRStep = pL.get<bool>("tentative: calculate qr");
  if (!doQRStep) {
    GetOStream(Runtime1) << "TentativePFactory : bypassing local QR phase" << std::endl;
    // ...
      GetOStream(Warnings0) << "TentativePFactory : for nontrivial nullspace, this may degrade performance" << std::endl;
  }

  size_t nnzEstimate = numRows * NSDim;
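  // The "Aux" arrays below are sized for the worst case of NSDim entries per row
  // (nnzEstimate = numRows * NSDim). The kernels fill them and record per-row counts
  // in rows(row + 1); once the true number of nonzeros is known, the data is
  // compressed into the final rows/cols/vals arrays.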
  rows_type rowsAux(Kokkos::ViewAllocateWithoutInitializing("Ptent_aux_rows"), numRows + 1);
  cols_type colsAux(Kokkos::ViewAllocateWithoutInitializing("Ptent_aux_cols"), nnzEstimate);
  vals_type valsAux("Ptent_aux_vals", nnzEstimate);
  rows_type rows("Ptent_rows", numRows + 1);
  // Initialize the auxiliary CRS arrays.
  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildPuncoupled:for1", range_type(0, numRows + 1),
      KOKKOS_LAMBDA(const LO row) {
        // ...

  Kokkos::parallel_for(
      "MueLu:TentativePF:BuildUncoupled:for2", range_type(0, nnzEstimate),
      KOKKOS_LAMBDA(const LO j) {
        // ...
  // Special case of a single nullspace vector: the local QR reduces to normalizing
  // that vector over each aggregate, so no dense factorization is needed.
  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);

  // ...
      Kokkos::parallel_for(
          "MueLu:TentativePF:BuildUncoupled:main_loop", policy,
          KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
            auto agg = thread.league_rank();
            // ...
            auto norm = impl_ATS::magnitude(zero);
            // ...
            for (decltype(aggSize) k = 0; k < aggSize; k++) {
              // ...
              norm += dnorm * dnorm;
            }
            // ...

            // Second pass over the aggregate: one prolongator entry per row.
            for (decltype(aggSize) k = 0; k < aggSize; k++) {
              // ...
              rows(localRow + 1) = 1;
            }
            // ...

      // Check the error flags on the host.
      typename status_type::host_mirror_type statusHost = Kokkos::create_mirror_view(status);
      Kokkos::deep_copy(statusHost, status);
      for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
        // ...
          std::ostringstream oss;
          oss << "MueLu::TentativePFactory::MakeTentative: ";
          // ...
            case 0: oss << "!goodMap is not implemented"; break;
            case 1: oss << "fine level NS part has a zero column"; break;
          // ...
      // Same loop, but without the normalization, when the QR step is bypassed.
      Kokkos::parallel_for(
          "MueLu:TentativePF:BuildUncoupled:main_loop_noqr", policy,
          KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
            auto agg = thread.league_rank();
            // ...
            for (decltype(aggSize) k = 0; k < aggSize; k++) {
              // ...
              rows(localRow + 1) = 1;
            }
            // ...

    // Total number of nonzeros is the sum of the per-row counts.
    Kokkos::parallel_reduce(
        "MueLu:TentativeP:CountNNZ", range_type(0, numRows + 1),
        KOKKOS_LAMBDA(const LO i, size_t& nnz_count) {
          nnz_count += rows(i);
        // ...

    // General case: one dense Householder QR per aggregate (see LocalQRDecompFunctor above).
    const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
    LocalQRDecompFunctor<LocalOrdinal, GlobalOrdinal, Scalar, DeviceType, decltype(fineNSRandom),
                         decltype(aggDofSizes), decltype(maxAggSize), decltype(agg2RowMapLO),
                         decltype(statusAtomic), decltype(rows), decltype(rowsAux), decltype(colsAux),
                         decltype(valsAux)>
        localQRFunctor(fineNSRandom, coarseNS, aggDofSizes, maxAggSize, agg2RowMapLO, statusAtomic,
                       rows, rowsAux, colsAux, valsAux, doQRStep);
    Kokkos::parallel_reduce("MueLu:TentativePF:BuildUncoupled:main_qr_loop", policy, localQRFunctor, nnz);
    typename status_type::host_mirror_type statusHost = Kokkos::create_mirror_view(status);
    Kokkos::deep_copy(statusHost, status);
    for (decltype(statusHost.size()) i = 0; i < statusHost.size(); i++)
      // ...
        std::ostringstream oss;
        oss << "MueLu::TentativePFactory::MakeTentative: ";
        // ...
          case 0: oss << "!goodMap is not implemented"; break;
          case 1: oss << "fine level NS part has a zero column"; break;
        // ...
  // Compress the auxiliary arrays if some reserved slots went unused.
  if (nnz != nnzEstimate) {
    // ...

    // Stage 1: scan the per-row counts into proper row offsets.
    Kokkos::parallel_scan(
        "MueLu:TentativePF:Build:compress_rows", range_type(0, numRows + 1),
        KOKKOS_LAMBDA(const LO i, LO& upd, const bool& final) {
          // ...

    cols = cols_type("Ptent_cols", nnz);
    vals = vals_type("Ptent_vals", nnz);

    // Stage 2: gather the used entries of colsAux/valsAux into the compressed arrays.
    Kokkos::parallel_for(
        "MueLu:TentativePF:Build:compress_cols_vals", range_type(0, numRows),
        KOKKOS_LAMBDA(const LO i) {
          // ...
            cols(rowStart + lnnz) = colsAux(j);
            vals(rowStart + lnnz) = valsAux(j);
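  // Example: with NSDim = 3, a row that received only two entries uses the first two
  // of its three reserved slots starting at rowsAux(i); the scan over rows() yields
  // the packed offset at which those two entries land in cols/vals.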
  GetOStream(Runtime1) << "TentativePFactory : aggregates do not cross process boundaries" << std::endl;

  // Assemble the local CRS matrix and wrap it into an Xpetra matrix.
  local_matrix_type lclMatrix = local_matrix_type("A", numRows, coarseMap->getLocalNumElements(), nnz, vals, rows, cols);

  // ...
  if (pL.isSublist("matrixmatrix: kernel params"))
    // ...

  FCparams->set("compute global constants", FCparams->get("compute global constants", false));
  FCparams->set("Timer Label", std::string("MueLu::TentativeP-") + toString(levelID));

  auto PtentCrs = CrsMatrixFactory::Build(lclMatrix, rowMap, coarseMap, coarseMap, A->getDomainMap());
  Ptentative    = rcp(new CrsMatrixWrap(PtentCrs));
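// BuildPuncoupledBlockCrs: variant of the uncoupled build that produces a
// Tpetra::BlockCrsMatrix. The local QR is always bypassed: a block graph with one
// entry per fine block row is built, and each NSDim-by-NSDim block is filled directly
// with the corresponding rows of the fine nullspace.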
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::BuildPuncoupledBlockCrs(Level& coarseLevel, RCP<Matrix> A, RCP<Aggregates> aggregates,
                                                                                                   RCP<AmalgamationInfo> amalgInfo, RCP<MultiVector> fineNullspace,
                                                                                                   RCP<const Map> coarseMap, RCP<Matrix>& Ptentative,
                                                                                                   RCP<MultiVector>& coarseNullspace, const int levelID) const {
  // ...
  const size_t numFineBlockRows = rowMap->getLocalNumElements();

#if KOKKOS_VERSION >= 40799
  typedef KokkosKernels::ArithTraits<SC> ATS;
#else
  typedef Kokkos::ArithTraits<SC> ATS;
#endif
  using impl_SC = typename ATS::val_type;
#if KOKKOS_VERSION >= 40799
  using impl_ATS = KokkosKernels::ArithTraits<impl_SC>;
#else
  using impl_ATS = Kokkos::ArithTraits<impl_SC>;
#endif
  const impl_SC one = impl_ATS::one();

  const size_t NSDim = fineNullspace->getNumVectors();

  // ...
  auto aggRows = aggGraph.row_map;
  auto aggCols = aggGraph.entries;

  // Build the coarse block map: one block row per coarse node, NSDim point DoFs per block.
  const size_t numCoarseBlockRows = coarsePointMap->getLocalNumElements() / NSDim;
  RCP<const Map> coarseBlockMap   = MapFactory::Build(coarsePointMap->lib(),
                                                      // ...
                                                      coarsePointMap->getIndexBase(),
                                                      coarsePointMap->getComm());

  // ... (row/column map compatibility check)
                             "MueLu: TentativePFactory_kokkos: for now works only with good maps "
                             "(i.e. \"matching\" row and column maps)");

  LO fullBlockSize, blockID, stridingOffset, stridedBlockSize;
  // ...

  auto procWinner   = aggregates->GetProcWinner()->getLocalViewDevice(Xpetra::Access::ReadOnly);
  auto vertex2AggId = aggregates->GetVertex2AggId()->getLocalViewDevice(Xpetra::Access::ReadOnly);
  // ...
  int myPID = aggregates->GetMap()->getComm()->getRank();

  // Number of DoFs per aggregate, with a leading zero so that a scan yields offsets.
  typedef typename Aggregates::aggregates_sizes_type::non_const_type AggSizeType;
  AggSizeType aggDofSizes;
  // ...
  aggDofSizes = AggSizeType("agg_dof_sizes", numAggregates + 1);

  auto aggSizes = aggregates->ComputeAggregateSizes();
  Kokkos::deep_copy(Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(1), numAggregates + 1)), aggSizes);
  // Find the size of the largest aggregate.
  ReduceMaxFunctor<LO, decltype(aggDofSizes)> reduceMax(aggDofSizes);
  Kokkos::parallel_reduce("MueLu:TentativePF:Build:max_agg_size", range_type(0, aggDofSizes.extent(0)), reduceMax, maxAggSize);

  // Prefix-scan the aggregate sizes into offsets (same pattern as in BuildPuncoupled).
  Kokkos::parallel_scan(
      "MueLu:TentativePF:Build:aggregate_sizes:stage1_scan", range_type(0, numAggregates + 1),
      KOKKOS_LAMBDA(const LO i, LO& update, const bool& final_pass) {
        update += aggDofSizes(i);
        // ...
          aggDofSizes(i) = update;
      // ...

  // Map positions within aggregates to local (block) row ids.
  Kokkos::View<LO*, DeviceType> aggToRowMapLO(Kokkos::ViewAllocateWithoutInitializing("aggtorow_map_LO"), numFineBlockRows);

  AggSizeType aggOffsets(Kokkos::ViewAllocateWithoutInitializing("aggOffsets"), numAggregates);
  Kokkos::deep_copy(aggOffsets, Kokkos::subview(aggDofSizes, Kokkos::make_pair(static_cast<size_t>(0), numAggregates)));

  Kokkos::parallel_for(
      "MueLu:TentativePF:Build:createAgg2RowMap", range_type(0, vertex2AggId.extent(0)),
      KOKKOS_LAMBDA(const LO lnode) {
        if (procWinner(lnode, 0) == myPID) {
          auto aggID  = vertex2AggId(lnode, 0);
          auto offset = Kokkos::atomic_fetch_add(&aggOffsets(aggID), stridedBlockSize);

          for (LO k = 0; k < stridedBlockSize; k++)
            aggToRowMapLO(offset + k) = lnode * stridedBlockSize + k;
      // ...

  // The coarse nullspace lives on the coarse point map.
  coarseNullspace = MultiVectorFactory::Build(coarsePointMap, NSDim);

  auto fineNS   = fineNullspace->getLocalViewDevice(Xpetra::Access::ReadWrite);
  auto coarseNS = coarseNullspace->getLocalViewDevice(Xpetra::Access::OverwriteAll);

  typedef typename local_matrix_type::row_map_type::non_const_type rows_type;
  typedef typename local_matrix_type::index_type::non_const_type cols_type;

  // Error flags, checked on the host after the kernels.
  typedef Kokkos::View<int[10], DeviceType> status_type;
  status_type status("status");
  // The BlockCrs path never performs the local QR.
  GetOStream(Runtime1) << "TentativePFactory : bypassing local QR phase" << std::endl;

  // Block graph of P: one reserved slot per fine block row.
  rows_type ia(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_rowptr"), numFineBlockRows + 1);
  cols_type ja(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_colind"), numFineBlockRows);

  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:graph_init", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO j) {
        // ...
        if (j == (LO)numFineBlockRows - 1)
          ia[numFineBlockRows] = numFineBlockRows;
      // ...

  // For each aggregate, point every one of its block rows at the aggregate's coarse block column.
  const Kokkos::TeamPolicy<execution_space> policy(numAggregates, 1);
  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:fillGraph", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        for (LO j = 0; j < aggSize; j++) {
          // ...
          const LO localRow     = aggToRowMapLO[aggDofSizes[agg] + j];
          const size_t rowStart = ia[localRow];
          ja[rowStart]          = offset;
      // ...

  // Compress the graph: count the valid entries per row ...
  rows_type i_temp(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_rowptr"), numFineBlockRows + 1);

  Kokkos::parallel_scan(
      "MueLu:TentativePF:BlockCrs:compress_rows", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO i, LO& upd, const bool& final) {
        // ...
        for (auto j = ia[i]; j < ia[i + 1]; j++)
          if (ja[j] != INVALID)
            // ...
        if (final && i == (LO)numFineBlockRows - 1)
          i_temp[numFineBlockRows] = upd;
  // ... and gather them into the compressed column array.
  cols_type j_temp(Kokkos::ViewAllocateWithoutInitializing("BlockGraph_colind"), nnz);

  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:compress_cols", range_type(0, numFineBlockRows),
      KOKKOS_LAMBDA(const LO i) {
        size_t rowStart = i_temp[i];
        // ...
        for (auto j = ia[i]; j < ia[i + 1]; j++)
          if (ja[j] != INVALID) {
            j_temp[rowStart + lnnz] = ja[j];
            // ...

  RCP<CrsGraph> BlockGraph = CrsGraphFactory::Build(rowMap, coarseBlockMap, ia, ja);

  // Parameters for the fill-complete of the block graph.
  // ...
  if (pL.isSublist("matrixmatrix: kernel params"))
    FCparams = rcp(new ParameterList(pL.sublist("matrixmatrix: kernel params")));
  // ...
  FCparams->set("compute global constants", FCparams->get("compute global constants", false));
  std::string levelIDs = toString(levelID);
  FCparams->set("Timer Label", std::string("MueLu::TentativeP-") + levelIDs);

  // ...
  BlockGraph->expertStaticFillComplete(coarseBlockMap, rowMap, dummy_i, dummy_e, FCparams);

  // ...
  if (P_tpetra.is_null()) throw std::runtime_error("BuildPUncoupled: Matrix factory did not return a Tpetra::BlockCrsMatrix");
  // Fill the BlockCrs values: every block row gets an NSDim x NSDim block holding the
  // corresponding rows of the fine nullspace.
  auto values     = P_tpetra->getTpetra_BlockCrsMatrix()->getValuesDeviceNonConst();
  const LO stride = NSDim * NSDim;

  Kokkos::parallel_for(
      "MueLu:TentativePF:BlockCrs:main_loop_noqr", policy,
      KOKKOS_LAMBDA(const typename Kokkos::TeamPolicy<execution_space>::member_type& thread) {
        auto agg = thread.league_rank();
        // ...
        for (LO j = 0; j < aggSize; j++) {
          LO localBlockRow = aggToRowMapLO(aggRows(agg) + j);
          LO rowStart      = localBlockRow * stride;
          for (LO r = 0; r < (LO)NSDim; r++) {
            LO localPointRow = localBlockRow * NSDim + r;
            for (LO c = 0; c < (LO)NSDim; c++) {
              values[rowStart + r * NSDim + c] = fineNSRandom(localPointRow, c);
            }
          }
        }

        // Coarse nullspace entries for this aggregate.
        for (LO j = 0; j < (LO)NSDim; j++)
          // ...

  Ptentative = P_wrap;
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::BuildPcoupled(RCP<Matrix> A, RCP<Aggregates> aggregates, RCP<AmalgamationInfo> amalgInfo,
                                                                                         RCP<MultiVector> fineNullspace, RCP<const Map> coarseMap,
                                                                                         RCP<Matrix>& Ptentative, RCP<MultiVector>& coarseNullspace) const {
  // ...
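// isGoodMap: returns true when the column map starts with the row map, i.e. the first
// numRows local column GIDs coincide with the row GIDs, so local row indices can be
// used directly as column indices.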
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
bool TentativePFactory_kokkos<Scalar, LocalOrdinal, GlobalOrdinal, Node>::isGoodMap(const Map& rowMap, const Map& colMap) const {
  auto rowLocalMap = rowMap.getLocalMap();
  auto colLocalMap = colMap.getLocalMap();

  const size_t numRows = rowLocalMap.getLocalNumElements();
  const size_t numCols = colLocalMap.getLocalNumElements();

  if (numCols < numRows)
    return false;

  // Count the local rows whose GID differs between the row map and the column map.
  size_t numDiff = 0;
  Kokkos::parallel_reduce(
      "MueLu:TentativePF:isGoodMap", range_type(0, numRows),
      KOKKOS_LAMBDA(const LO i, size_t& diff) {
        diff += (rowLocalMap.getGlobalElement(i) != colLocalMap.getGlobalElement(i));
      },
      numDiff);

  return (numDiff == 0);
#define MUELU_TENTATIVEPFACTORY_KOKKOS_SHORT
#endif  // MUELU_TENTATIVEPFACTORY_KOKKOS_DEF_HPP