#include "Tpetra_Details_DistributorPlan.hpp"

#include "Teuchos_StandardParameterEntryValidators.hpp"
#include "Tpetra_Details_Behavior.hpp"
#include "Tpetra_Util.hpp"
#include <numeric>

namespace Tpetra {
namespace Details {
std::string
DistributorSendTypeEnumToString (EDistributorSendType sendType) {
  if (sendType == DISTRIBUTOR_ISEND) {
    return "Isend";
  }
  else if (sendType == DISTRIBUTOR_SEND) {
    return "Send";
  }
  else if (sendType == DISTRIBUTOR_ALLTOALL) {
    return "Alltoall";
  }
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  else if (sendType == DISTRIBUTOR_MPIADVANCE_ALLTOALL) {
    return "MpiAdvanceAlltoall";
  }
  else if (sendType == DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV) {
    return "MpiAdvanceNbralltoallv";
  }
#endif
  else {
    TEUCHOS_TEST_FOR_EXCEPTION(
      true, std::invalid_argument,
      "Invalid EDistributorSendType enum value " << sendType << ".");
  }
}
std::string
DistributorHowInitializedEnumToString (EDistributorHowInitialized how) {
  switch (how) {
  case Details::DISTRIBUTOR_NOT_INITIALIZED:
    return "Not initialized yet";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS:
    return "By createFromSends";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_RECVS:
    return "By createFromRecvs";
  case Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS_N_RECVS:
    return "By createFromSendsAndRecvs";
  case Details::DISTRIBUTOR_INITIALIZED_BY_REVERSE:
    return "By createReverseDistributor";
  case Details::DISTRIBUTOR_INITIALIZED_BY_COPY:
    return "By copy constructor";
  default:
    return "Invalid";
  }
}
DistributorPlan::DistributorPlan (Teuchos::RCP<const Teuchos::Comm<int>> comm)
  : comm_(comm),
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
    mpixComm_(Teuchos::null),
#endif
    howInitialized_(DISTRIBUTOR_NOT_INITIALIZED),
    reversePlan_(Teuchos::null),
    sendType_(DISTRIBUTOR_SEND),
    sendMessageToSelf_(false),
    numSendsToOtherProcs_(0),
    maxSendLength_(0),
    numReceives_(0),
    totalReceiveLength_(0)
{ }
DistributorPlan::DistributorPlan (const DistributorPlan& otherPlan)
  : comm_(otherPlan.comm_),
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
    mpixComm_(otherPlan.mpixComm_),
#endif
    howInitialized_(DISTRIBUTOR_INITIALIZED_BY_COPY),
    reversePlan_(otherPlan.reversePlan_),
    sendType_(otherPlan.sendType_),
    sendMessageToSelf_(otherPlan.sendMessageToSelf_),
    numSendsToOtherProcs_(otherPlan.numSendsToOtherProcs_),
    procIdsToSendTo_(otherPlan.procIdsToSendTo_),
    startsTo_(otherPlan.startsTo_),
    lengthsTo_(otherPlan.lengthsTo_),
    maxSendLength_(otherPlan.maxSendLength_),
    indicesTo_(otherPlan.indicesTo_),
    numReceives_(otherPlan.numReceives_),
    totalReceiveLength_(otherPlan.totalReceiveLength_),
    lengthsFrom_(otherPlan.lengthsFrom_),
    procsFrom_(otherPlan.procsFrom_),
    startsFrom_(otherPlan.startsFrom_),
    indicesFrom_(otherPlan.indicesFrom_)
{ }
size_t DistributorPlan::createFromSends (const Teuchos::ArrayView<const int>& exportProcIDs) {
  using Teuchos::outArg;
  using Teuchos::REDUCE_MAX;
  using Teuchos::reduceAll;

  const char rawPrefix[] = "Tpetra::DistributorPlan::createFromSends";

  const size_t numExports = exportProcIDs.size();
  const int myProcID = comm_->getRank();
  const int numProcs = comm_->getSize();

  // Check that every export process ID is valid; if any process saw a bad
  // ID, report the offending rank globally and throw on all processes.
  int badID = -1;
  for (size_t i = 0; i < numExports; ++i) {
    const int exportID = exportProcIDs[i];
    if (exportID >= numProcs || exportID < 0) {
      badID = myProcID;
      break;
    }
  }
  int gbl_badID;
  reduceAll<int, int> (*comm_, REDUCE_MAX, badID, outArg (gbl_badID));
  TEUCHOS_TEST_FOR_EXCEPTION
    (gbl_badID >= 0, std::runtime_error, rawPrefix << "Proc "
     << gbl_badID << ", perhaps among other processes, got a bad "
     "send process ID.");
  // starts[p] will count the number of exports headed for process p.
  Teuchos::Array<size_t> starts (numProcs + 1, 0);
  // numActive counts the exports with a valid (nonnegative) process ID.
  size_t numActive = 0;
  int needSendBuff = 0;

  for (size_t i = 0; i < numExports; ++i) {
    const int exportID = exportProcIDs[i];
    if (exportID >= 0) {
      ++starts[exportID];
      // If the same destination shows up noncontiguously, the data are not
      // grouped by process and a send buffer will be needed.
      if (needSendBuff == 0 && starts[exportID] > 1 &&
          exportID != exportProcIDs[i-1]) {
        needSendBuff = 1;
      }
      ++numActive;
    }
  }

#if defined(HAVE_TPETRA_PRINT_EFFICIENCY_WARNINGS)
  {
    int global_needSendBuff;
    reduceAll<int, int> (*comm_, REDUCE_MAX, needSendBuff,
                         outArg (global_needSendBuff));
    TPETRA_EFFICIENCY_WARNING(
      global_needSendBuff != 0,
      "::createFromSends: Grouping export IDs together by process rank often "
      "improves performance.");
  }
#endif

  // Determine whether this process sends (a) message(s) to itself.
  if (starts[myProcID] != 0) {
    sendMessageToSelf_ = true;
  }
  else {
    sendMessageToSelf_ = false;
  }
  if (! needSendBuff) {
    // Data are already grouped by process; no send buffer or indicesTo_ needed.
    numSendsToOtherProcs_ = 0;
    // Count the number of distinct processes to which we send (including self).
    for (int i = 0; i < numProcs; ++i) {
      if (starts[i]) {
        ++numSendsToOtherProcs_;
      }
    }

    // An empty indicesTo_ is used later as a flag for the contiguous case.
    indicesTo_.resize(0);

    procIdsToSendTo_.assign(numSendsToOtherProcs_, 0);
    startsTo_.assign(numSendsToOtherProcs_, 0);
    lengthsTo_.assign(numSendsToOtherProcs_, 0);

    // Record, for each send, its offset into the export list and its
    // destination process ID.
    {
      size_t procIndex = 0;
      for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
        while (exportProcIDs[procIndex] < 0) {
          ++procIndex; // skip negative (ignored) process IDs
        }
        startsTo_[i] = procIndex;
        int procID = exportProcIDs[procIndex];
        procIdsToSendTo_[i] = procID;
        procIndex += starts[procID];
      }
    }

    // Sort the offsets and process IDs together, ascending by process ID.
    if (numSendsToOtherProcs_ > 0) {
      sort2(procIdsToSendTo_.begin(), procIdsToSendTo_.end(), startsTo_.begin());
    }

    // Compute the send lengths and the maximum send length to another process.
    maxSendLength_ = 0;
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      int procID = procIdsToSendTo_[i];
      lengthsTo_[i] = starts[procID];
      if ((procID != myProcID) && (lengthsTo_[i] > maxSendLength_)) {
        maxSendLength_ = lengthsTo_[i];
      }
    }
  }
  else {
    // Data are not grouped by process; a send buffer and indicesTo_ are needed.
    // This loop starts at starts[1], so check starts[0] explicitly.
    if (starts[0] == 0) {
      numSendsToOtherProcs_ = 0;
    }
    else {
      numSendsToOtherProcs_ = 1;
    }
    // Turn starts into a running sum: starts[i] = exports to procs 0 through i.
    for (Teuchos::Array<size_t>::iterator i = starts.begin()+1,
           im1 = starts.begin();
         i != starts.end(); ++i)
    {
      if (*i != 0) ++numSendsToOtherProcs_;
      *i += *im1;
      im1 = i;
    }
    // Shift the sum right by one: starts[i] = exports to procs 0 through i-1.
    for (Teuchos::Array<size_t>::reverse_iterator ip1 = starts.rbegin(),
           i = starts.rbegin()+1;
         i != starts.rend(); ++i)
    {
      *ip1 = *i;
      ip1 = i;
    }
    starts[0] = 0;

    indicesTo_.resize(numActive);
    // indicesTo_ maps each export to its slot in the send buffer (grouped by proc).
    for (size_t i = 0; i < numExports; ++i) {
      if (exportProcIDs[i] >= 0) {
        indicesTo_[starts[exportProcIDs[i]]] = i;
        ++starts[exportProcIDs[i]];
      }
    }
    // Restore starts to offsets: starts[i] = exports to procs 0 through i-1.
    for (int proc = numProcs-1; proc != 0; --proc) {
      starts[proc] = starts[proc-1];
    }
    starts.front() = 0;
    starts[numProcs] = numActive;
    procIdsToSendTo_.resize(numSendsToOtherProcs_);
    startsTo_.resize(numSendsToOtherProcs_);
    lengthsTo_.resize(numSendsToOtherProcs_);
    // Fill in the per-send offsets, lengths, and destination process IDs.
    size_t snd = 0;
    maxSendLength_ = 0;
    for (int proc = 0; proc < numProcs; ++proc) {
      if (starts[proc+1] != starts[proc]) {
        lengthsTo_[snd] = starts[proc+1] - starts[proc];
        startsTo_[snd] = starts[proc];
        if ((proc != myProcID) && (lengthsTo_[snd] > maxSendLength_)) {
          maxSendLength_ = lengthsTo_[snd];
        }
        procIdsToSendTo_[snd] = proc;
        ++snd;
      }
    }
    if (sendMessageToSelf_) {
      --numSendsToOtherProcs_;
    }
  }

  // Invert the map to figure out what this process receives and how much.
  computeReceives();

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  initializeMpiAdvance();
#endif

  howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS;

  return totalReceiveLength_;
}
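
// Usage sketch (illustrative only, not part of the original file).  On each
// rank, exportProcIDs names the destination rank of every entry this rank
// will send; the return value is the number of entries this rank will
// receive.  The comm and array below are placeholders.
//
//   Tpetra::Details::DistributorPlan plan (comm);
//   Teuchos::Array<int> exportProcIDs;          // e.g. {1, 1, 2} on rank 0
//   const size_t numImports = plan.createFromSends (exportProcIDs ());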
void DistributorPlan::createFromRecvs (const Teuchos::ArrayView<const int>& remoteProcIDs)
{
  createFromSends(remoteProcIDs);
  *this = *getReversePlan();
  howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_RECVS;
}
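
// createFromRecvs builds the plan from the receive side: it first treats the
// remote process IDs as if they were sends, then adopts the reverse of that
// plan, so this plan's receive pattern matches the caller's remoteProcIDs.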
void DistributorPlan::createFromSendsAndRecvs (const Teuchos::ArrayView<const int>& exportProcIDs,
                                               const Teuchos::ArrayView<const int>& remoteProcIDs)
{
  // The caller supplies both the send side and the receive side of the
  // pattern, so no communication is needed to set up the plan.
  howInitialized_ = Tpetra::Details::DISTRIBUTOR_INITIALIZED_BY_CREATE_FROM_SENDS_N_RECVS;

  int myProcID = comm_->getRank ();
  int numProcs = comm_->getSize();

  const size_t numExportIDs = exportProcIDs.size();
  Teuchos::Array<size_t> starts (numProcs + 1, 0);

  size_t numActive = 0;
  int needSendBuff = 0;

  for (size_t i = 0; i < numExportIDs; i++) {
    if (needSendBuff == 0 && i && (exportProcIDs[i] < exportProcIDs[i-1])) {
      needSendBuff = 1;
    }
    if (exportProcIDs[i] >= 0) {
      ++starts[exportProcIDs[i]];
      ++numActive;
    }
  }

  sendMessageToSelf_ = (starts[myProcID] != 0) ? 1 : 0;

  numSendsToOtherProcs_ = 0;

  if (needSendBuff) {
    // Exports are not grouped by process; build indicesTo_ (the send-buffer map).
    if (starts[0] == 0) {
      numSendsToOtherProcs_ = 0;
    }
    else {
      numSendsToOtherProcs_ = 1;
    }
    // Turn starts into a running sum, then shift it right by one so that
    // starts[i] holds the number of exports to procs 0 through i-1.
    for (Teuchos::Array<size_t>::iterator i = starts.begin()+1,
           im1 = starts.begin();
         i != starts.end(); ++i)
    {
      if (*i != 0) ++numSendsToOtherProcs_;
      *i += *im1;
      im1 = i;
    }
    for (Teuchos::Array<size_t>::reverse_iterator ip1 = starts.rbegin(),
           i = starts.rbegin()+1;
         i != starts.rend(); ++i)
    {
      *ip1 = *i;
      ip1 = i;
    }
    starts[0] = 0;

    indicesTo_.resize(numActive);
    // indicesTo_ maps each export to its slot in the send buffer.
    for (size_t i = 0; i < numExportIDs; ++i) {
      if (exportProcIDs[i] >= 0) {
        indicesTo_[starts[exportProcIDs[i]]] = i;
        ++starts[exportProcIDs[i]];
      }
    }
    for (int proc = numProcs-1; proc != 0; --proc) {
      starts[proc] = starts[proc-1];
    }
    starts.front() = 0;
    starts[numProcs] = numActive;
    procIdsToSendTo_.resize(numSendsToOtherProcs_);
    startsTo_.resize(numSendsToOtherProcs_);
    lengthsTo_.resize(numSendsToOtherProcs_);
    // Fill in the per-send offsets, lengths, and destination process IDs.
    size_t snd = 0;
    maxSendLength_ = 0;
    for (int proc = 0; proc < numProcs; ++proc) {
      if (starts[proc+1] != starts[proc]) {
        lengthsTo_[snd] = starts[proc+1] - starts[proc];
        startsTo_[snd] = starts[proc];
        if ((proc != myProcID) && (lengthsTo_[snd] > maxSendLength_)) {
          maxSendLength_ = lengthsTo_[snd];
        }
        procIdsToSendTo_[snd] = proc;
        ++snd;
      }
    }
  }
  else {
    // Exports are already grouped by process; no send buffer or indicesTo_ needed.
    numSendsToOtherProcs_ = 0;
    // Count the number of distinct processes to which we send (including self).
    for (int i = 0; i < numProcs; ++i) {
      if (starts[i]) {
        ++numSendsToOtherProcs_;
      }
    }

    // An empty indicesTo_ is used later as a flag for the contiguous case.
    indicesTo_.resize(0);

    procIdsToSendTo_.assign(numSendsToOtherProcs_, 0);
    startsTo_.assign(numSendsToOtherProcs_, 0);
    lengthsTo_.assign(numSendsToOtherProcs_, 0);

    // Record, for each send, its offset into the export list and its
    // destination process ID.
    size_t procIndex = 0;
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      while (exportProcIDs[procIndex] < 0) {
        ++procIndex; // skip negative (ignored) process IDs
      }
      startsTo_[i] = procIndex;
      int procID = exportProcIDs[procIndex];
      procIdsToSendTo_[i] = procID;
      procIndex += starts[procID];
    }

    // Sort the offsets and process IDs together, ascending by process ID.
    if (numSendsToOtherProcs_ > 0) {
      sort2(procIdsToSendTo_.begin(), procIdsToSendTo_.end(), startsTo_.begin());
    }

    // Compute the send lengths and the maximum send length to another process.
    maxSendLength_ = 0;
    for (size_t i = 0; i < numSendsToOtherProcs_; ++i) {
      int procID = procIdsToSendTo_[i];
      lengthsTo_[i] = starts[procID];
      if ((procID != myProcID) && (lengthsTo_[i] > maxSendLength_)) {
        maxSendLength_ = lengthsTo_[i];
      }
    }
  }
  // Invert the send pattern into the receive pattern using the caller's
  // (sorted) remote process IDs, instead of communicating.
  numSendsToOtherProcs_ -= sendMessageToSelf_;
  std::vector<int> recv_list;
  recv_list.reserve(numSendsToOtherProcs_); // initial guess for the size

  int last_pid = -1; // ranks are nonnegative, so any negative sentinel works
  for (int i = 0; i < remoteProcIDs.size(); i++) {
    if (remoteProcIDs[i] > last_pid) {
      recv_list.push_back(remoteProcIDs[i]);
      last_pid = remoteProcIDs[i];
    }
    else if (remoteProcIDs[i] < last_pid)
      throw std::runtime_error("Tpetra::Distributor::createFromSendsAndRecvs expected RemotePIDs to be in sorted order");
  }
  numReceives_ = recv_list.size();
  procsFrom_.assign(numReceives_, 0);
  lengthsFrom_.assign(numReceives_, 0);
  indicesFrom_.assign(numReceives_, 0);
  startsFrom_.assign(numReceives_, 0);

  for (size_t i = 0, j = 0; i < numReceives_; ++i) {
    const size_t jlast = j;
    procsFrom_[i] = recv_list[i];
    startsFrom_[i] = j;
    // Advance j past the run of entries coming from this process.
    for ( ; j < (size_t)remoteProcIDs.size() &&
            remoteProcIDs[jlast] == remoteProcIDs[j]; j++) {;}
    lengthsFrom_[i] = j - jlast;
  }
  totalReceiveLength_ = remoteProcIDs.size();
  indicesFrom_.clear ();
  numReceives_ -= sendMessageToSelf_;

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  initializeMpiAdvance();
#endif
}
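
// Usage sketch (illustrative only, not part of the original file; plan,
// exports, and remotes are placeholders).  Both sides of the pattern are
// supplied, and the remote IDs must be sorted ascending or the function
// throws (see the runtime_error above).
//
//   Teuchos::Array<int> exports;  // destination rank of each exported entry
//   Teuchos::Array<int> remotes;  // owning rank of each remote entry, sorted ascending
//   plan.createFromSendsAndRecvs (exports (), remotes ());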
Teuchos::RCP<DistributorPlan> DistributorPlan::getReversePlan() const {
  if (reversePlan_.is_null()) createReversePlan();
  return reversePlan_;
}
void DistributorPlan::createReversePlan() const
{
  reversePlan_ = Teuchos::rcp(new DistributorPlan(comm_));
  reversePlan_->howInitialized_ = Details::DISTRIBUTOR_INITIALIZED_BY_REVERSE;
  reversePlan_->sendType_ = sendType_;

  // The total send length of this plan is the total receive length of the
  // reverse plan.
  size_t totalSendLength =
    std::accumulate(lengthsTo_.begin(), lengthsTo_.end(), 0);

  // The maximum receive length of this plan (ignoring the self-message) is
  // the maximum send length of the reverse plan.
  size_t maxReceiveLength = 0;
  const int myProcID = comm_->getRank();
  for (size_t i = 0; i < numReceives_; ++i) {
    if (procsFrom_[i] != myProcID) {
      if (lengthsFrom_[i] > maxReceiveLength) {
        maxReceiveLength = lengthsFrom_[i];
      }
    }
  }

  reversePlan_->sendMessageToSelf_ = sendMessageToSelf_;
  reversePlan_->numSendsToOtherProcs_ = numReceives_;
  reversePlan_->procIdsToSendTo_ = procsFrom_;
  reversePlan_->startsTo_ = startsFrom_;
  reversePlan_->lengthsTo_ = lengthsFrom_;
  reversePlan_->maxSendLength_ = maxReceiveLength;
  reversePlan_->indicesTo_ = indicesFrom_;
  reversePlan_->numReceives_ = numSendsToOtherProcs_;
  reversePlan_->totalReceiveLength_ = totalSendLength;
  reversePlan_->lengthsFrom_ = lengthsTo_;
  reversePlan_->procsFrom_ = procIdsToSendTo_;
  reversePlan_->startsFrom_ = startsTo_;
  reversePlan_->indicesFrom_ = indicesTo_;

#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  reversePlan_->initializeMpiAdvance();
#endif
}
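
// Usage sketch (illustrative, with a placeholder plan object): the reverse
// plan swaps the roles of sender and receiver and is built lazily on first
// request, then cached in reversePlan_.
//
//   Teuchos::RCP<DistributorPlan> reverse = plan.getReversePlan ();
//   // Sends of `plan` become receives of `reverse`, and vice versa.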
void DistributorPlan::computeReceives()
{
  using Teuchos::Array;
  using Teuchos::ArrayRCP;
  using Teuchos::as;
  using Teuchos::CommStatus;
  using Teuchos::CommRequest;
  using Teuchos::ireceive;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::REDUCE_SUM;
  using Teuchos::receive;
  using Teuchos::reduce;
  using Teuchos::scatter;
  using Teuchos::send;
  using Teuchos::waitAll;

  const int myRank = comm_->getRank();
  const int numProcs = comm_->getSize();

  const int mpiTag = DEFAULT_MPI_TAG;

  // toProcsFromMe[p] == 1 if and only if this process sends to process p.
  Array<int> toProcsFromMe (numProcs, 0);
#ifdef HAVE_TPETRA_DEBUG
  bool counting_error = false;
#endif // HAVE_TPETRA_DEBUG
  for (size_t i = 0; i < (numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0)); ++i) {
#ifdef HAVE_TPETRA_DEBUG
    if (toProcsFromMe[procIdsToSendTo_[i]] != 0) {
      counting_error = true;
    }
#endif // HAVE_TPETRA_DEBUG
    toProcsFromMe[procIdsToSendTo_[i]] = 1;
  }
#ifdef HAVE_TPETRA_DEBUG
  SHARED_TEST_FOR_EXCEPTION(counting_error, std::logic_error,
    "Tpetra::Distributor::computeReceives: There was an error on at least "
    "one process in counting the number of messages sent by that process to "
    "the other processes.  Please report this bug to the Tpetra developers.",
    *comm_);
#endif // HAVE_TPETRA_DEBUG
  // Let the root process count, for every process, how many processes send
  // to it; then scatter each count back to its owner.
  const int root = 0;
  Array<int> numRecvsOnEachProc;
  if (myRank == root) {
    numRecvsOnEachProc.resize (numProcs);
  }
  int numReceivesAsInt = 0;
  reduce<int, int> (toProcsFromMe.getRawPtr (),
                    numRecvsOnEachProc.getRawPtr (),
                    numProcs, REDUCE_SUM, root, *comm_);
  scatter<int, int> (numRecvsOnEachProc.getRawPtr (), 1,
                     &numReceivesAsInt, 1, root, *comm_);
  numReceives_ = static_cast<size_t> (numReceivesAsInt);

  // We now know how many messages this process will receive; learn their
  // lengths and source ranks by exchanging one size_t with each partner.
  lengthsFrom_.assign (numReceives_, 0);
  procsFrom_.assign (numReceives_, 0);

  const size_t actualNumReceives = numReceives_ - (sendMessageToSelf_ ? 1 : 0);

  Array<RCP<CommRequest<int> > > requests (actualNumReceives);
  Array<ArrayRCP<size_t> > lengthsFromBuffers (actualNumReceives);
  Array<RCP<CommStatus<int> > > statuses (actualNumReceives);

#ifdef HAVE_MPI
  const int anySourceProc = MPI_ANY_SOURCE;
#else
  const int anySourceProc = -1;
#endif

  // Post a nonblocking receive for the length of each incoming message.
  for (size_t i = 0; i < actualNumReceives; ++i) {
    lengthsFromBuffers[i].resize (1);
    lengthsFromBuffers[i][0] = as<size_t> (0);
    requests[i] = ireceive<int, size_t> (lengthsFromBuffers[i], anySourceProc,
                                         mpiTag, *comm_);
  }

  // Tell each destination process how many entries it will receive from us.
  // The self-message length is recorded directly, without communication.
  for (size_t i = 0; i < numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0); ++i) {
    if (procIdsToSendTo_[i] != myRank) {
      const size_t* const lengthsTo_i = &lengthsTo_[i];
      send<int, size_t> (lengthsTo_i, 1, as<int> (procIdsToSendTo_[i]), mpiTag, *comm_);
    }
    else {
      lengthsFrom_[numReceives_-1] = lengthsTo_[i];
      procsFrom_[numReceives_-1] = myRank;
    }
  }
  // Wait for the posted receives, then record each message's length and
  // actual source rank.
  waitAll (*comm_, requests (), statuses ());
  for (size_t i = 0; i < actualNumReceives; ++i) {
    lengthsFrom_[i] = *lengthsFromBuffers[i];
    procsFrom_[i] = statuses[i]->getSourceRank ();
  }

  // Sort the source ranks (carrying the lengths along with them).
  sort2 (procsFrom_.begin(), procsFrom_.end(), lengthsFrom_.begin());

  totalReceiveLength_ =
    std::accumulate (lengthsFrom_.begin (), lengthsFrom_.end (), 0);
  indicesFrom_.clear ();

  startsFrom_.clear ();
  startsFrom_.reserve (numReceives_);
  for (size_t i = 0, j = 0; i < numReceives_; ++i) {
    startsFrom_.push_back(j);
    j += lengthsFrom_[i];
  }

  // Exclude the self-message from numReceives_, to match the send side.
  if (sendMessageToSelf_) {
    --numReceives_;
  }
}
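
// In short, computeReceives() proceeds in three steps: (1) mark which ranks
// this process sends to in a 0/1 vector; (2) reduce that vector to rank 0 and
// scatter the sums, so each rank learns how many messages it will receive;
// (3) exchange message lengths point to point (ireceive/send) and sort the
// results by source rank.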
void DistributorPlan::setParameterList (const Teuchos::RCP<Teuchos::ParameterList>& plist)
{
  using Teuchos::FancyOStream;
  using Teuchos::getIntegralValue;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::RCP;

  if (! plist.is_null()) {
    RCP<const ParameterList> validParams = getValidParameters ();
    plist->validateParametersAndSetDefaults (*validParams);

    const Details::EDistributorSendType sendType =
      getIntegralValue<Details::EDistributorSendType> (*plist, "Send type");

    sendType_ = sendType;

    // Keep the list around for later queries by ParameterListAcceptor methods.
    this->setMyParamList (plist);
  }
}
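
// Usage sketch (illustrative only, not part of the original file; plan is a
// placeholder): selecting the MPI send variant through a ParameterList.
// Parameter names are those registered in getValidParameters() below.
//
//   Teuchos::RCP<Teuchos::ParameterList> params =
//     Teuchos::parameterList ("Tpetra::Distributor");
//   params->set ("Send type", std::string ("Isend"));
//   plan.setParameterList (params);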
Teuchos::Array<std::string> distributorSendTypes ()
{
  Teuchos::Array<std::string> sendTypes;
  sendTypes.push_back ("Isend");
  sendTypes.push_back ("Send");
  sendTypes.push_back ("Alltoall");
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  sendTypes.push_back ("MpiAdvanceAlltoall");
  sendTypes.push_back ("MpiAdvanceNbralltoallv");
#endif
  return sendTypes;
}
Teuchos::RCP<const Teuchos::ParameterList>
DistributorPlan::getValidParameters() const
{
  using Teuchos::Array;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::RCP;
  using Teuchos::setStringToIntegralParameter;

  Array<std::string> sendTypes = distributorSendTypes ();
  const std::string defaultSendType ("Send");
  Array<Details::EDistributorSendType> sendTypeEnums;
  sendTypeEnums.push_back (Details::DISTRIBUTOR_ISEND);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_SEND);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_ALLTOALL);
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)
  sendTypeEnums.push_back (Details::DISTRIBUTOR_MPIADVANCE_ALLTOALL);
  sendTypeEnums.push_back (Details::DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV);
#endif

  RCP<ParameterList> plist = parameterList ("Tpetra::Distributor");

  setStringToIntegralParameter<Details::EDistributorSendType> ("Send type",
    defaultSendType, "When using MPI, the variant of send to use in "
    "do[Reverse]Posts()", sendTypes(), sendTypeEnums(), plist.getRawPtr());
  plist->set ("Timer Label", "", "Label for Time Monitor output");

  return Teuchos::rcp_const_cast<const ParameterList> (plist);
}
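
// Usage sketch (illustrative only, not part of the original file; plan is a
// placeholder): the valid list can be printed to see the accepted "Send type"
// values and the default "Timer Label".
//
//   Teuchos::RCP<const Teuchos::ParameterList> valid = plan.getValidParameters ();
//   std::cout << *valid << std::endl;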
#if defined(HAVE_TPETRACORE_MPI_ADVANCE)

// Custom deallocator so a Teuchos::RCP can manage the lifetime of an MPIX_Comm.
struct MpixCommDeallocator {
  void free(MPIX_Comm **comm) const {
    MPIX_Comm_free(*comm);
  }
};

void DistributorPlan::initializeMpiAdvance() {
  // The MPIX communicator must not have been created yet.
  TEUCHOS_ASSERT(mpixComm_.is_null());

  Teuchos::RCP<const Teuchos::MpiComm<int> > mpiComm =
    Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm_);
  Teuchos::RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawComm = mpiComm->getRawMpiComm();

  int err = 0;
  if (sendType_ == DISTRIBUTOR_MPIADVANCE_ALLTOALL) {
    MPIX_Comm **mpixComm = new(MPIX_Comm*);
    err = MPIX_Comm_init(mpixComm, (*rawComm)());
    mpixComm_ = Teuchos::RCP(mpixComm,
                             MpixCommDeallocator(),
                             true /*take ownership*/);
  }
  else if (sendType_ == DISTRIBUTOR_MPIADVANCE_NBRALLTOALLV) {
    // Describe the communication graph: who we receive from and who we send to.
    int numRecvs = (int)(numReceives_ + (sendMessageToSelf_ ? 1 : 0));
    int *sourceRanks = procsFrom_.data();
    const int *sourceWeights = MPI_UNWEIGHTED;
    int numSends = (int)(numSendsToOtherProcs_ + (sendMessageToSelf_ ? 1 : 0));
    int *destRanks = procIdsToSendTo_.data();
    const int *destWeights = MPI_UNWEIGHTED;

    MPIX_Comm **mpixComm = new(MPIX_Comm*);
    err = MPIX_Dist_graph_create_adjacent((*rawComm)(), numRecvs, sourceRanks, sourceWeights,
                                          numSends, destRanks, destWeights, MPI_INFO_NULL,
                                          false, mpixComm);
    mpixComm_ = Teuchos::RCP(mpixComm,
                             MpixCommDeallocator(),
                             true /*take ownership*/);
  }

  TEUCHOS_ASSERT(err == 0);
}
#endif // HAVE_TPETRACORE_MPI_ADVANCE

} // namespace Details
} // namespace Tpetra
// Referenced Tpetra declarations (descriptions from the corresponding headers):
//   EDistributorHowInitialized
//     Enum indicating how and whether a Distributor was initialized.
//   static bool debug()
//     Whether Tpetra is in debug mode.
//   std::string DistributorSendTypeEnumToString(EDistributorSendType sendType)
//     Convert an EDistributorSendType enum value to a string.
//   #define TPETRA_EFFICIENCY_WARNING(throw_exception_test, msg)
//     Print or throw an efficiency warning.
//   void sort2(const IT1 &first1, const IT1 &last1, const IT2 &first2, const bool stableSort=false)
//     Sort the first array, and apply the resulting permutation to the second array.
//   Teuchos::Array<std::string> distributorSendTypes()
//     Valid values for Distributor's "Send type" parameter.
//   std::string DistributorHowInitializedEnumToString(EDistributorHowInitialized how)
//     Convert an EDistributorHowInitialized enum value to a string.
//   #define SHARED_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg, comm)
//     Test for exception, with reduction over the given communicator.
//   EDistributorSendType
//     The type of MPI send that Distributor should use.
//   Tpetra_Util.hpp: Stand-alone utility functions and macros.
//   Tpetra_Details_Behavior.hpp: Declaration of Tpetra::Details::Behavior, a class that describes Tpetra's behavior.