61 if (outputRootRank >= 0) {
69 (void) MPI_Comm_size(globalMpiComm, &size);
70 (void) MPI_Comm_rank(globalMpiComm, &rank);
75 "ERROR: num subDomainProcs " << subDomainProcs <<
76 " must be strictly positive." << std::endl);
79 size % subDomainProcs != 0,
81 "ERROR: num subDomainProcs "<< subDomainProcs <<
82 " does not divide into num total procs " << size << std::endl);
88 MPI_Comm split_MPI_Comm;
89 MPI_Comm time_split_MPI_Comm;
95 &time_split_MPI_Comm);
105 if (numTimeSteps_ > 0)
106 *out <<
"Processor " << rank <<
" is on subdomain " <<
subDomainRank
108 <<
" time steps, starting with "
111 *out <<
"Processor " << rank <<
" is on subdomain " <<
subDomainRank
116 if (outputRootRank >= 0) {
140 MPI_Comm time_split_MPI_Comm;
141 int rank = EpetraMpiComm_.
MyPID();
142 (void) MPI_Comm_split(EpetraMpiComm_.
Comm(), rank, rank,
143 &time_split_MPI_Comm);
184 if (subDomainRank < remainder) {
int firstTimeStepOnDomain
#define TEUCHOS_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg)
void ResetNumTimeSteps(int numTimeSteps)
Reset the total number of time steps, allowing the time steps per domain to be set later than at construction.
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
Epetra_MpiComm * timeComm
MultiMpiComm(MPI_Comm globalComm, int subDomainProcs, int numTimeSteps_=-1, const Teuchos::EVerbosityLevel verbLevel=Teuchos::VERB_DEFAULT)
MultiMpiComm constructor.
basic_FancyOStream & setOutputToRootOnly(const int rootRank)
int getOutputToRootOnly() const
virtual ~MultiMpiComm()
Destructor.
Epetra_MpiComm(MPI_Comm comm)