#include "Teuchos_StandardParameterEntryValidators.hpp"
  if (sendType == DISTRIBUTOR_ISEND) {
    return "Isend";
  }
  else if (sendType == DISTRIBUTOR_SEND) {
    return "Send";
  }
  else if (sendType == DISTRIBUTOR_ALLTOALL) {
    return "Alltoall";
  }
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  else if (sendType == DISTRIBUTOR_MPIADVANCE_ALLTOALL) {
    return "MpiAdvanceAlltoall";
  }
  else if (sendType == DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV) {
    return "MpiAdvanceNbralltoallv";
  }
#endif
  else {
    TEUCHOS_TEST_FOR_EXCEPTION(
      true, std::invalid_argument, "Invalid "
      "EDistributorSendType enum value " << sendType << ".");
  }
  case Details::DISTRIBUTOR_NOT_INITIALIZED:
    return "Not initialized yet";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS:
    return "By createFromSends";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_RECVS:
    return "By createFromRecvs";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS_N_RECVS:
    return "By createFromSendsAndRecvs";
  case Details::DISTRIBUTOR_INITIALIZED_BY_REVERSE:
    return "By createReverseDistributor";
  case Details::DISTRIBUTOR_INITIALIZED_BY_COPY:
    return "By copy constructor";
DistributorPlan::DistributorPlan(Teuchos::RCP<const Teuchos::Comm<int>> comm)
  : comm_(comm),
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
    mpixComm_(Teuchos::null),
#endif
    howInitialized_(DISTRIBUTOR_NOT_INITIALIZED),
    reversePlan_(Teuchos::null),
    sendType_(DISTRIBUTOR_SEND),
    sendMessageToSelf_(false),
    numSendsToOtherProcs_(0),
    maxSendLength_(0),
    numReceives_(0),
    totalReceiveLength_(0)
{ }
DistributorPlan::DistributorPlan(const DistributorPlan& otherPlan)
  : comm_(otherPlan.comm_),
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
    mpixComm_(otherPlan.mpixComm_),
#endif
    howInitialized_(DISTRIBUTOR_INITIALIZED_BY_COPY),
    reversePlan_(otherPlan.reversePlan_),
    sendType_(otherPlan.sendType_),
    sendMessageToSelf_(otherPlan.sendMessageToSelf_),
    numSendsToOtherProcs_(otherPlan.numSendsToOtherProcs_),
    procIdsToSendTo_(otherPlan.procIdsToSendTo_),
    startsTo_(otherPlan.startsTo_),
    lengthsTo_(otherPlan.lengthsTo_),
    maxSendLength_(otherPlan.maxSendLength_),
    indicesTo_(otherPlan.indicesTo_),
    numReceives_(otherPlan.numReceives_),
    totalReceiveLength_(otherPlan.totalReceiveLength_),
    lengthsFrom_(otherPlan.lengthsFrom_),
    procsFrom_(otherPlan.procsFrom_),
    startsFrom_(otherPlan.startsFrom_),
    indicesFrom_(otherPlan.indicesFrom_)
{ }
size_t DistributorPlan::createFromSends(const Teuchos::ArrayView<const int>& exportProcIDs) {
  using Teuchos::outArg;
  using Teuchos::REDUCE_MAX;
  using Teuchos::reduceAll;

  const char rawPrefix[] = "Tpetra::DistributorPlan::createFromSends";

  const size_t numExports = exportProcIDs.size();
  const int myProcID = comm_->getRank();
  const int numProcs = comm_->getSize();

  // Check whether any process was given an invalid destination process ID.
  int badID = -1;
  for (size_t i = 0; i < numExports; ++i) {
    const int exportID = exportProcIDs[i];
    if (exportID >= numProcs || exportID < 0) {
      badID = myProcID;
      break;
    }
  }
  int gbl_badID;
  reduceAll<int, int> (*comm_, REDUCE_MAX, badID, outArg (gbl_badID));
  TEUCHOS_TEST_FOR_EXCEPTION
    (gbl_badID >= 0, std::runtime_error, rawPrefix << "Proc "
     << gbl_badID << ", perhaps among other processes, got a bad "
     "send process ID.");
  Teuchos::Array<size_t> starts (numProcs + 1, 0);

  // Tally exports per destination process and decide whether a send
  // buffer is needed (i.e., whether exports are not already grouped
  // by destination process).
  size_t numActive = 0;
  int needSendBuff = 0;

  for (size_t i = 0; i < numExports; ++i) {
    const int exportID = exportProcIDs[i];
    if (exportID >= 0) {
      ++starts[exportID];
      if (needSendBuff == 0 && starts[exportID] > 1 &&
          exportID != exportProcIDs[i-1]) {
        needSendBuff = 1;
      }
      ++numActive;
    }
  }

#if defined(HAVE_TPETRA_PRINT_EFFICIENCY_WARNINGS)
  int global_needSendBuff;
  reduceAll<int, int> (*comm_, REDUCE_MAX, needSendBuff,
                       outArg (global_needSendBuff));
  TPETRA_EFFICIENCY_WARNING(
    global_needSendBuff != 0,
    "::createFromSends: Grouping export IDs together by process rank often "
    "improves performance.");
#endif

  if (starts[myProcID] != 0) {
    sendMessageToSelf_ = true;
  }
  else {
    sendMessageToSelf_ = false;
  }

  if (! needSendBuff) {
    // Exports are already grouped by process; no send buffer needed.
    numSendsToOtherProcs_ = 0;
    // Count the number of distinct destination processes.
    for (int i = 0; i < numProcs; ++i) {
      if (starts[i]) {
        ++numSendsToOtherProcs_;
      }
    }

    // An empty indicesTo_ signals that the data are already blocked by process.
    indicesTo_.resize(0);

    procIdsToSendTo_.assign(numSendsToOtherProcs_,0);
    startsTo_.assign(numSendsToOtherProcs_,0);
    lengthsTo_.assign(numSendsToOtherProcs_,0);

    // Locate the first export going to each destination process.
    size_t procIndex = 0;
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      while (exportProcIDs[procIndex] < 0) {
        ++procIndex; // skip entries marked "do not send"
      }
      startsTo_[i] = procIndex;
      int procID = exportProcIDs[procIndex];
      procIdsToSendTo_[i] = procID;
      procIndex += starts[procID];
    }

    // Sort the destination process IDs, permuting the starts the same way.
    if (numSendsToOtherProcs_ > 0) {
      sort2(procIdsToSendTo_.begin(), procIdsToSendTo_.end(), startsTo_.begin());
    }

    // Record send lengths and the longest send to another process.
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      int procID = procIdsToSendTo_[i];
      lengthsTo_[i] = starts[procID];
      if ((procID != myProcID) && (lengthsTo_[i] > maxSendLength_)) {
        maxSendLength_ = lengthsTo_[i];
      }
    }
  }
  else {
    // Exports are not grouped by process; build indicesTo_ (the slow path).
    if (starts[0] == 0 ) {
      numSendsToOtherProcs_ = 0;
    }
    else {
      numSendsToOtherProcs_ = 1;
    }
    // Count nonzero entries while turning starts into a running sum.
    for (Teuchos::Array<size_t>::iterator i=starts.begin()+1, im1=starts.begin();
         i != starts.end(); ++i) {
      if (*i != 0) ++numSendsToOtherProcs_;
      *i += *im1;
      im1 = i;
    }
    // Shift the sums so that starts[i] counts exports to processes 0..i-1.
    for (Teuchos::Array<size_t>::reverse_iterator ip1=starts.rbegin(), i=starts.rbegin()+1;
         i != starts.rend(); ++i) {
      *ip1 = *i;
      ip1 = i;
    }
    starts[0] = 0;

    indicesTo_.resize(numActive);
    for (size_t i = 0; i < numExports; ++i) {
      if (exportProcIDs[i] >= 0) {
        indicesTo_[starts[exportProcIDs[i]]] = i;
        ++starts[exportProcIDs[i]];
      }
    }
    // Restore starts to offsets into indicesTo_, one entry per process.
    for (int proc = numProcs-1; proc != 0; --proc) {
      starts[proc] = starts[proc-1];
    }
    starts[0] = 0;
    starts[numProcs] = numActive;
    procIdsToSendTo_.resize(numSendsToOtherProcs_);
    startsTo_.resize(numSendsToOtherProcs_);
    lengthsTo_.resize(numSendsToOtherProcs_);

    // For each destination process, record the offset, length, and rank of the send.
    size_t snd = 0;
    for (int proc = 0; proc < numProcs; ++proc ) {
      if (starts[proc+1] != starts[proc]) {
        lengthsTo_[snd] = starts[proc+1] - starts[proc];
        startsTo_[snd] = starts[proc];
        if ((proc != myProcID) && (lengthsTo_[snd] > maxSendLength_)) {
          maxSendLength_ = lengthsTo_[snd];
        }
        procIdsToSendTo_[snd] = proc;
        ++snd;
      }
    }
  }

  if (sendMessageToSelf_) {
    --numSendsToOtherProcs_;
  }

  // Invert the map to determine what this process receives, and how much.
  computeReceives();

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  initializeMpiAdvance();
#endif

  howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS;

  return totalReceiveLength_;
}
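
// Usage sketch for createFromSends (the helper name and the export pattern
// below are hypothetical; the classes and calls appear in this file).  Each
// entry of the export list names the rank that receives the corresponding
// packet, and the return value is the number of packets this rank will
// receive.
static size_t examplePlanFromSends (const Teuchos::RCP<const Teuchos::Comm<int>>& comm)
{
  DistributorPlan plan (comm);

  // Send two packets to rank 0 and one to the right neighbor.  Keeping
  // exports grouped by destination rank avoids the efficiency warning above.
  Teuchos::Array<int> exportProcIDs;
  exportProcIDs.push_back (0);
  exportProcIDs.push_back (0);
  exportProcIDs.push_back ((comm->getRank () + 1) % comm->getSize ());

  // Collective: every process in comm must call createFromSends.
  return plan.createFromSends (exportProcIDs ());
}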
void DistributorPlan::createFromRecvs(const Teuchos::ArrayView<const int>& remoteProcIDs)
{
  createFromSends(remoteProcIDs);
  *this = *getReversePlan();
  howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_RECVS;
}
void DistributorPlan::createFromSendsAndRecvs(const Teuchos::ArrayView<const int>& exportProcIDs,
                                              const Teuchos::ArrayView<const int>& remoteProcIDs)
{
  howInitialized_ = Tpetra::Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS_N_RECVS;

  int myProcID = comm_->getRank ();
  int numProcs = comm_->getSize();

  const size_t numExportIDs = exportProcIDs.size();
  Teuchos::Array<size_t> starts (numProcs + 1, 0);

  size_t numActive = 0;
  int needSendBuff = 0;

  for( size_t i = 0; i < numExportIDs; i++ ) {
    if( needSendBuff==0 && i && (exportProcIDs[i] < exportProcIDs[i-1]) )
      needSendBuff = 1;
    if( exportProcIDs[i] >= 0 ) {
      ++starts[ exportProcIDs[i] ];
      ++numActive;
    }
  }

  sendMessageToSelf_ = ( starts[myProcID] != 0 ) ? 1 : 0;

  numSendsToOtherProcs_ = 0;

  if (needSendBuff) {
    // Exports are not grouped by process; build indicesTo_ (the slow path).
    if (starts[0] == 0 ) {
      numSendsToOtherProcs_ = 0;
    }
    else {
      numSendsToOtherProcs_ = 1;
    }
    // Count nonzero entries while turning starts into a running sum.
    for (Teuchos::Array<size_t>::iterator i=starts.begin()+1, im1=starts.begin();
         i != starts.end(); ++i) {
      if (*i != 0) ++numSendsToOtherProcs_;
      *i += *im1;
      im1 = i;
    }
    // Shift the sums so that starts[i] counts exports to processes 0..i-1.
    for (Teuchos::Array<size_t>::reverse_iterator ip1=starts.rbegin(), i=starts.rbegin()+1;
         i != starts.rend(); ++i) {
      *ip1 = *i;
      ip1 = i;
    }
    starts[0] = 0;

    indicesTo_.resize(numActive);
    for (size_t i = 0; i < numExportIDs; ++i) {
      if (exportProcIDs[i] >= 0) {
        indicesTo_[starts[exportProcIDs[i]]] = i;
        ++starts[exportProcIDs[i]];
      }
    }
    for (int proc = numProcs-1; proc != 0; --proc) {
      starts[proc] = starts[proc-1];
    }
    starts[0] = 0;
    starts[numProcs] = numActive;
    procIdsToSendTo_.resize(numSendsToOtherProcs_);
    startsTo_.resize(numSendsToOtherProcs_);
    lengthsTo_.resize(numSendsToOtherProcs_);

    // For each destination process, record the offset, length, and rank of the send.
    size_t snd = 0;
    for (int proc = 0; proc < numProcs; ++proc ) {
      if (starts[proc+1] != starts[proc]) {
        lengthsTo_[snd] = starts[proc+1] - starts[proc];
        startsTo_[snd] = starts[proc];
        if ((proc != myProcID) && (lengthsTo_[snd] > maxSendLength_)) {
          maxSendLength_ = lengthsTo_[snd];
        }
        procIdsToSendTo_[snd] = proc;
        ++snd;
      }
    }
  }
  else {
    // Exports are already grouped by process (the fast path).
    numSendsToOtherProcs_ = 0;
    // Count the number of distinct destination processes.
    for (int i = 0; i < numProcs; ++i) {
      if (starts[i]) {
        ++numSendsToOtherProcs_;
      }
    }

    // An empty indicesTo_ signals that the data are already blocked by process.
    indicesTo_.resize(0);

    procIdsToSendTo_.assign(numSendsToOtherProcs_,0);
    startsTo_.assign(numSendsToOtherProcs_,0);
    lengthsTo_.assign(numSendsToOtherProcs_,0);

    // Locate the first export going to each destination process.
    size_t procIndex = 0;
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      while (exportProcIDs[procIndex] < 0) {
        ++procIndex; // skip entries marked "do not send"
      }
      startsTo_[i] = procIndex;
      int procID = exportProcIDs[procIndex];
      procIdsToSendTo_[i] = procID;
      procIndex += starts[procID];
    }

    if (numSendsToOtherProcs_ > 0) {
      sort2(procIdsToSendTo_.begin(), procIdsToSendTo_.end(), startsTo_.begin());
    }

    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      int procID = procIdsToSendTo_[i];
      lengthsTo_[i] = starts[procID];
      if ((procID != myProcID) && (lengthsTo_[i] > maxSendLength_)) {
        maxSendLength_ = lengthsTo_[i];
      }
    }
  }

  numSendsToOtherProcs_ -= sendMessageToSelf_;
  std::vector<int> recv_list;
  recv_list.reserve(numSendsToOtherProcs_);

  // Collect the distinct source ranks; remoteProcIDs must be sorted by rank.
  int last_pid = -2;
  for(int i=0; i<remoteProcIDs.size(); i++) {
    if(remoteProcIDs[i]>last_pid) {
      recv_list.push_back(remoteProcIDs[i]);
      last_pid = remoteProcIDs[i];
    }
    else if (remoteProcIDs[i]<last_pid)
      throw std::runtime_error("Tpetra::Distributor::createFromSendsAndRecvs "
                               "expected RemotePIDs to be in sorted order");
  }
  numReceives_ = recv_list.size();

  if(numReceives_) {
    procsFrom_.assign(numReceives_,0);
    lengthsFrom_.assign(numReceives_,0);
    indicesFrom_.assign(numReceives_,0);
    startsFrom_.assign(numReceives_,0);
  }

  for(size_t i=0,j=0; i<numReceives_; ++i) {
    size_t jlast = j;
    procsFrom_[i] = recv_list[i];
    startsFrom_[i] = j;
    for( ; j<(size_t)remoteProcIDs.size() &&
           remoteProcIDs[jlast]==remoteProcIDs[j] ; j++){;}
    lengthsFrom_[i] = j-jlast;
  }
  totalReceiveLength_ = remoteProcIDs.size();
  indicesFrom_.clear ();
  numReceives_ -= sendMessageToSelf_;

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  initializeMpiAdvance();
#endif
}
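
// Illustrative sketch (hypothetical helper): with createFromSendsAndRecvs the
// caller supplies both sides of the communication pattern, and the check above
// requires remoteProcIDs to be sorted by rank.  Here every rank exchanges one
// packet with each of its neighbors on a periodic ring.
static void examplePlanFromSendsAndRecvs (const Teuchos::RCP<const Teuchos::Comm<int>>& comm)
{
  const int numProcs = comm->getSize ();
  const int myRank = comm->getRank ();
  const int left  = (myRank + numProcs - 1) % numProcs;
  const int right = (myRank + 1) % numProcs;

  Teuchos::Array<int> exportProcIDs;
  exportProcIDs.push_back (left);
  exportProcIDs.push_back (right);

  // The receives mirror the sends; list them in ascending rank order.
  Teuchos::Array<int> remoteProcIDs;
  remoteProcIDs.push_back (left <= right ? left : right);
  remoteProcIDs.push_back (left <= right ? right : left);

  DistributorPlan plan (comm);
  plan.createFromSendsAndRecvs (exportProcIDs (), remoteProcIDs ());
}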
Teuchos::RCP<DistributorPlan> DistributorPlan::getReversePlan() const {
  if (reversePlan_.is_null()) createReversePlan();
  return reversePlan_;
}

void DistributorPlan::createReversePlan() const
{
  reversePlan_ = Teuchos::rcp(new DistributorPlan(comm_));
  reversePlan_->howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_REVERSE;
  reversePlan_->sendType_ = sendType_;

  // The total length of this plan's sends is the total length of the
  // reverse plan's receives.
  size_t totalSendLength =
    std::accumulate(lengthsTo_.begin(), lengthsTo_.end(), 0);

  // The longest receive from another process becomes the reverse plan's
  // longest send to another process.
  size_t maxReceiveLength = 0;
  const int myProcID = comm_->getRank();
  for (size_t i=0; i < numReceives_; ++i) {
    if (procsFrom_[i] != myProcID) {
      if (lengthsFrom_[i] > maxReceiveLength) {
        maxReceiveLength = lengthsFrom_[i];
      }
    }
  }

  // The reverse plan is the same pattern with sends and receives swapped.
  reversePlan_->sendMessageToSelf_ = sendMessageToSelf_;
  reversePlan_->numSendsToOtherProcs_ = numReceives_;
  reversePlan_->procIdsToSendTo_ = procsFrom_;
  reversePlan_->startsTo_ = startsFrom_;
  reversePlan_->lengthsTo_ = lengthsFrom_;
  reversePlan_->maxSendLength_ = maxReceiveLength;
  reversePlan_->indicesTo_ = indicesFrom_;
  reversePlan_->numReceives_ = numSendsToOtherProcs_;
  reversePlan_->totalReceiveLength_ = totalSendLength;
  reversePlan_->lengthsFrom_ = lengthsTo_;
  reversePlan_->procsFrom_ = procIdsToSendTo_;
  reversePlan_->startsFrom_ = startsTo_;
  reversePlan_->indicesFrom_ = indicesTo_;

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  reversePlan_->initializeMpiAdvance();
#endif
}
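
// Sketch of how the reverse plan is used (the helper name is hypothetical;
// everything else appears in this file).  The reverse plan describes the same
// pattern with senders and receivers exchanged; it is created lazily, cached,
// and is what createFromRecvs() above copies.
static Teuchos::RCP<DistributorPlan>
exampleReverseOfPlanFromSends (const Teuchos::RCP<const Teuchos::Comm<int>>& comm,
                               const Teuchos::ArrayView<const int>& exportProcIDs)
{
  DistributorPlan forward (comm);
  forward.createFromSends (exportProcIDs);
  // The reverse plan sends to the processes the forward plan receives from.
  return forward.getReversePlan ();
}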
void DistributorPlan::computeReceives()
{
  using Teuchos::Array;
  using Teuchos::ArrayRCP;
  using Teuchos::as;
  using Teuchos::CommStatus;
  using Teuchos::CommRequest;
  using Teuchos::ireceive;
  using Teuchos::RCP;
  using Teuchos::REDUCE_SUM;
  using Teuchos::receive;
  using Teuchos::reduce;
  using Teuchos::scatter;
  using Teuchos::send;
  using Teuchos::waitAll;

  const int myRank = comm_->getRank();
  const int numProcs = comm_->getSize();

  const int mpiTag = DEFAULT_MPI_TAG;

  // toProcsFromMe[i] == 1 if and only if this process sends a message
  // to process i in the communication described by this plan.
  Array<int> toProcsFromMe (numProcs, 0);
#ifdef HAVE_TPETRA_DEBUG
  bool counting_error = false;
#endif // HAVE_TPETRA_DEBUG
  for (size_t i = 0; i < (numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0)); ++i) {
#ifdef HAVE_TPETRA_DEBUG
    if (toProcsFromMe[procIdsToSendTo_[i]] != 0) {
      counting_error = true;
    }
#endif // HAVE_TPETRA_DEBUG
    toProcsFromMe[procIdsToSendTo_[i]] = 1;
  }
#ifdef HAVE_TPETRA_DEBUG
  SHARED_TEST_FOR_EXCEPTION(counting_error, std::logic_error,
    "Tpetra::Distributor::computeReceives: There was an error on at least "
    "one process in counting the number of messages sent by that process to "
    "the other processes.  Please report this bug to the Tpetra developers.",
    *comm_);
#endif // HAVE_TPETRA_DEBUG
  // Each process sends at most one message to any other process, so the
  // element-wise sum of toProcsFromMe over all processes gives, for each
  // rank p, the number of messages that rank p will receive.  Reduce onto
  // a root process, then scatter one entry back to each process.
  const int root = 0; // rank of the root process of the reduction
  Array<int> numRecvsOnEachProc; // only needed on the root process
  if (myRank == root) {
    numRecvsOnEachProc.resize (numProcs);
  }
  int numReceivesAsInt = 0;
  reduce<int, int> (toProcsFromMe.getRawPtr (),
                    numRecvsOnEachProc.getRawPtr (),
                    numProcs, REDUCE_SUM, root, *comm_);
  scatter<int, int> (numRecvsOnEachProc.getRawPtr (), 1,
                     &numReceivesAsInt, 1, root, *comm_);
  numReceives_ = static_cast<size_t> (numReceivesAsInt);

  // Now that numReceives_ is known, allocate the receive bookkeeping arrays.
  lengthsFrom_.assign (numReceives_, 0);
  procsFrom_.assign (numReceives_, 0);
  // Number of receives to post, not counting the self-message (if any).
  const size_t actualNumReceives = numReceives_ - (sendMessageToSelf_ ? 1 : 0);

  // Post the nonblocking receives of the message lengths first, then do
  // the blocking sends, so the sends have somewhere to land.
  Array<RCP<CommRequest<int> > > requests (actualNumReceives);
  Array<ArrayRCP<size_t> > lengthsFromBuffers (actualNumReceives);
  Array<RCP<CommStatus<int> > > statuses (actualNumReceives);

  // We don't yet know which processes will send to this process, so
  // receive from any source.
#ifdef HAVE_MPI
  const int anySourceProc = MPI_ANY_SOURCE;
#else
  const int anySourceProc = -1;
#endif

  for (size_t i = 0; i < actualNumReceives; ++i) {
    lengthsFromBuffers[i].resize (1);
    lengthsFromBuffers[i][0] = as<size_t> (0);
    requests[i] = ireceive<int, size_t> (lengthsFromBuffers[i], anySourceProc,
                                         mpiTag, *comm_);
  }

  // Send the message lengths.  The self-message (if any) needs no
  // communication; just fill in the receive-side arrays directly.
  for (size_t i = 0; i < numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0); ++i) {
    if (procIdsToSendTo_[i] != myRank) {
      const size_t* const lengthsTo_i = &lengthsTo_[i];
      send<int, size_t> (lengthsTo_i, 1, as<int> (procIdsToSendTo_[i]), mpiTag, *comm_);
    }
    else {
      lengthsFrom_[numReceives_-1] = lengthsTo_[i];
      procsFrom_[numReceives_-1] = myRank;
    }
  }

  // Wait on the receives, then record each message's length and source rank.
  waitAll (*comm_, requests (), statuses ());
  for (size_t i = 0; i < actualNumReceives; ++i) {
    lengthsFrom_[i] = *lengthsFromBuffers[i];
    procsFrom_[i] = statuses[i]->getSourceRank ();
  }

  // Sort the source process IDs, applying the same permutation to the lengths.
  sort2 (procsFrom_.begin(), procsFrom_.end(), lengthsFrom_.begin());

  totalReceiveLength_ =
    std::accumulate (lengthsFrom_.begin (), lengthsFrom_.end (), 0);
  indicesFrom_.clear ();

  startsFrom_.clear ();
  startsFrom_.reserve (numReceives_);
  for (size_t i = 0, j = 0; i < numReceives_; ++i) {
    startsFrom_.push_back(j);
    j += lengthsFrom_[i];
  }

  // Don't count the self-message among the receives from other processes.
  if (sendMessageToSelf_) {
    --numReceives_;
  }
}
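
// Sketch of the counting idea used in computeReceives (hypothetical helper,
// using only the Teuchos calls that appear above): each rank marks the ranks
// it sends to, a sum-reduction onto a root rank adds those marks element-wise,
// and a scatter hands rank p entry p, i.e. the number of messages p receives.
static int exampleCountReceives (const Teuchos::Comm<int>& comm,
                                 const Teuchos::ArrayView<const int>& sendToRanks)
{
  const int numProcs = comm.getSize ();
  Teuchos::Array<int> toProcsFromMe (numProcs, 0);
  for (const int dest : sendToRanks) {
    toProcsFromMe[dest] = 1; // at most one message per destination
  }

  const int root = 0;
  Teuchos::Array<int> numRecvsOnEachProc;
  if (comm.getRank () == root) {
    numRecvsOnEachProc.resize (numProcs);
  }
  int numReceives = 0;
  Teuchos::reduce<int, int> (toProcsFromMe.getRawPtr (),
                             numRecvsOnEachProc.getRawPtr (),
                             numProcs, Teuchos::REDUCE_SUM, root, comm);
  Teuchos::scatter<int, int> (numRecvsOnEachProc.getRawPtr (), 1,
                              &numReceives, 1, root, comm);
  return numReceives;
}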
void DistributorPlan::setParameterList(const Teuchos::RCP<Teuchos::ParameterList>& plist)
{
  using Teuchos::FancyOStream;
  using Teuchos::getIntegralValue;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::RCP;

  if (! plist.is_null()) {
    RCP<const ParameterList> validParams = getValidParameters ();
    plist->validateParametersAndSetDefaults (*validParams);

    const Details::EDistributorSendType sendType =
      getIntegralValue<Details::EDistributorSendType> (*plist, "Send type");

    sendType_ = sendType;

    // Keep a pointer to the given list, as ParameterListAcceptor requires.
    this->setMyParamList (plist);
  }
}

Teuchos::Array<std::string> distributorSendTypes()
{
  Teuchos::Array<std::string> sendTypes;
  sendTypes.push_back ("Isend");
  sendTypes.push_back ("Send");
  sendTypes.push_back ("Alltoall");
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  sendTypes.push_back ("MpiAdvanceAlltoall");
  sendTypes.push_back ("MpiAdvanceNbralltoallv");
#endif
  return sendTypes;
}
Teuchos::RCP<const Teuchos::ParameterList>
DistributorPlan::getValidParameters() const
{
  using Teuchos::Array;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::RCP;
  using Teuchos::setStringToIntegralParameter;

  Array<std::string> sendTypes = distributorSendTypes ();
  const std::string defaultSendType ("Send");
  Array<Details::EDistributorSendType> sendTypeEnums;
  sendTypeEnums.push_back (Details::DISTRIBUTOR_ISEND);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_SEND);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_ALLTOALL);
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  sendTypeEnums.push_back (Details::DISTRIBUTOR_MPIADVANCE_ALLTOALL);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV);
#endif

  RCP<ParameterList> plist = parameterList ("Tpetra::Distributor");

  setStringToIntegralParameter<Details::EDistributorSendType> ("Send type",
    defaultSendType, "When using MPI, the variant of send to use in "
    "do[Reverse]Posts()", sendTypes(), sendTypeEnums(), plist.getRawPtr());
  plist->set ("Timer Label", "", "Label for Time Monitor output");

  return Teuchos::rcp_const_cast<const ParameterList> (plist);
}
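
// Configuration sketch (hypothetical helper): the valid parameters defined
// above are "Send type" (one of the strings in distributorSendTypes()) and
// "Timer Label"; anything else is rejected when setParameterList validates.
static void exampleSetSendType (DistributorPlan& plan)
{
  Teuchos::RCP<Teuchos::ParameterList> plist =
    Teuchos::parameterList ("Tpetra::Distributor");
  plist->set ("Send type", std::string ("Alltoall"));
  plan.setParameterList (plist);
}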
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)

// Deallocator for MPIX_Comm, for use with Teuchos::RCP.
struct MpixCommDeallocator {
  void free(MPIX_Comm **comm) const {
    MPIX_Comm_free(*comm);
  }
};

void DistributorPlan::initializeMpiAdvance() {
  // The plan must not already have an MPI Advance communicator.
  TEUCHOS_ASSERT(mpixComm_.is_null());

  // Get the raw MPI_Comm out of the Teuchos communicator.
  Teuchos::RCP<const Teuchos::MpiComm<int> > mpiComm = Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm_);
  Teuchos::RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawComm = mpiComm->getRawMpiComm();

  int err = 0;
  if (sendType_ == DISTRIBUTOR_MPIADVANCE_ALLTOALL) {
    MPIX_Comm **mpixComm = new(MPIX_Comm*);
    err = MPIX_Comm_init(mpixComm, (*rawComm)());
    mpixComm_ = Teuchos::RCP(mpixComm,
                             MpixCommDeallocator(),
                             true);
  }
  else if (sendType_ == DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV) {
    // Describe the communication pattern as a distributed graph, so that
    // MPI Advance can build a neighborhood communicator for it.
    int numRecvs = (int)(numReceives_ + (sendMessageToSelf_ ? 1 : 0));
    int *sourceRanks = procsFrom_.data();
    const int *sourceWeights = MPI_UNWEIGHTED;
    int numSends = (int)(numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0));
    int *destRanks = procIdsToSendTo_.data();
    const int *destWeights = MPI_UNWEIGHTED;

    MPIX_Comm **mpixComm = new(MPIX_Comm*);
    err = MPIX_Dist_graph_create_adjacent((*rawComm)(), numRecvs, sourceRanks, sourceWeights, numSends, destRanks, destWeights, MPI_INFO_NULL, false, mpixComm);
    mpixComm_ = Teuchos::RCP(mpixComm,
                             MpixCommDeallocator(),
                             true);
  }

  TEUCHOS_ASSERT(err == 0);
}
#endif // defined(HAVE_TPETRACORE_MPI_ADVANCE)