58   size_indices_from_(0),
 
   74   total_recv_length_(0),
 
   93   size_indices_to_(Distributor.size_indices_to_),
 
   97   size_indices_from_(Distributor.size_indices_from_),
 
  107   indices_from_ptr_(0),
 
  108   nrecvs_(Distributor.nrecvs_),
 
  109   nsends_(Distributor.nsends_),
 
  110   nexports_(Distributor.nexports_),
 
  111   self_msg_(Distributor.self_msg_),
 
  112   max_send_length_(Distributor.max_send_length_),
 
  113   total_recv_length_(Distributor.total_recv_length_),
 
  114   tag_(Distributor.tag_),
 
  115   epComm_(Distributor.epComm_),
 
  116   comm_(Distributor.comm_),
 
  122   comm_plan_reverse_(0)
 
  222                                             const int * ExportPIDs,
 
  228   MPI_Comm_rank( comm_, &my_proc );

  231   MPI_Comm_size( comm_, &nprocs );
 
  253            const int * RemoteGIDs,
 
  254                  const int * RemotePIDs,
 
  261   MPI_Comm_rank( comm_, &my_proc );

  264   MPI_Comm_size( comm_, &nprocs );
 
  267          ExportGIDs, ExportPIDs, my_proc) );
 
  269   int testNumRemoteIDs;
 
  271            Deterministic, testNumRemoteIDs ) );
 
  281 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES 
  283            const long long * RemoteGIDs,
 
  284                  const int * RemotePIDs,
 
  287            long long *& ExportGIDs,
 
  291   MPI_Comm_rank( comm_, &my_proc );

  294   MPI_Comm_size( comm_, &nprocs );
 
  297          ExportGIDs, ExportPIDs, my_proc) );
 
  299   int testNumRemoteIDs;
 
  301            Deterministic, testNumRemoteIDs ) );
 
  314                 const int * ExportPIDs,
 
  315                 const int & NumRemoteIDs,
 
  316                 const int * RemoteGIDs,
 
  317                 const int * RemotePIDs,
 
  325   MPI_Comm_rank( comm_, &my_proc );

  327   MPI_Comm_size( comm_, &nprocs );
 
  337 #ifndef EPETRA_NO_64BIT_GLOBAL_INDICES 
  339                  const int * ExportPIDs,
 
  340                  const int & NumRemoteIDs,
 
  341                  const long long * RemoteGIDs,
 
  342                  const int * RemotePIDs,
 
  350   MPI_Comm_rank( comm_, &my_proc );

  352   MPI_Comm_size( comm_, &nprocs );
 
  371              const int & NumExportIDs,
 
  372              const int * ExportPIDs)
 
  382   int * starts = new int[ nprocs + 1 ];
 
  383   for( i = 0; i < nprocs; i++ )
 
  387   bool no_send_buff = true;
 
  388   int numDeadIndices = 0; 
 
  390   for( i = 0; i < NumExportIDs; i++ )
 
  392     if( no_send_buff && i && (ExportPIDs[i] < ExportPIDs[i-1]) )
 
  393       no_send_buff = false;
 
  394     if( ExportPIDs[i] >= 0 )
 
  396       ++starts[ ExportPIDs[i] ];
 
  399     else numDeadIndices++; 
 
  402   self_msg_ = ( starts[my_proc] != 0 ) ? 1 : 0;
 
  408     for( i = 0; i < nprocs; ++i )
 
  418     int index = numDeadIndices;  
 
  423       proc = ExportPIDs[index];
 
  425       index += starts[proc];
 
  443     if( starts[0] != 0 ) nsends_ = 1;
 
  445     for( i = 1; i < nprocs; i++ )
 
  447       if( starts[i] != 0 ) ++nsends_;
 
  448       starts[i] += starts[i-1];
 
  451     for( i = nprocs-1; i != 0; i-- )
 
  452       starts[i] = starts[i-1];
 
  461     for( i = 0; i < NumExportIDs; i++ )
 
  462     if( ExportPIDs[i] >= 0 )
 
  465       ++starts[ ExportPIDs[i] ];
 
  470     for( i = nprocs-1; i != 0; i-- )
 
  471       starts[i] = starts[i-1];
 
  473     starts[nprocs] = nactive;
 
  484     for( i = 0; i < nprocs; i++ )
 
  485       if( starts[i+1] != starts[i] )
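
The excerpt above (source lines 382-485) builds the send plan by key-indexed counting: it tallies how many export IDs go to each processor in starts, derives nsends_ and self_msg_ from the nonzero counts, and converts the counts into offsets with a running sum so each destination's entries can be packed contiguously. A minimal standalone sketch of that counting-and-offset pattern, with hypothetical names and negative PIDs skipped as "dead" entries the way the code above skips them:

  #include <cstddef>
  #include <vector>

  // Hypothetical sketch of the counting/offset pattern used above: given one
  // destination rank per export (negative = discarded), produce per-rank
  // counts, the number of distinct destinations, and each rank's first slot
  // in a packed send buffer.
  struct SendPlanSketch {
    std::vector<int> counts;   // counts[p]  = number of exports going to rank p
    std::vector<int> offsets;  // offsets[p] = first packed slot for rank p
    int numSends;              // number of ranks actually sent to
  };

  SendPlanSketch BuildSendPlan(int nprocs, const std::vector<int>& exportPIDs)
  {
    SendPlanSketch plan;
    plan.counts.assign(nprocs, 0);
    plan.offsets.assign(nprocs + 1, 0);
    plan.numSends = 0;

    for (std::size_t i = 0; i < exportPIDs.size(); ++i)
      if (exportPIDs[i] >= 0)
        ++plan.counts[exportPIDs[i]];      // negative ("dead") PIDs are skipped

    for (int p = 0; p < nprocs; ++p) {
      if (plan.counts[p] != 0) ++plan.numSends;
      plan.offsets[p + 1] = plan.offsets[p] + plan.counts[p];  // exclusive prefix sum
    }
    return plan;
  }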
 
  505              const int * RemotePIDs)
 
  511   std::vector<int> recv_list;
 
  515   for(i=0; i<NumRemoteIDs; i++) {
 
  516     if(RemotePIDs[i]>last_pid) {
 
  517       recv_list.push_back(RemotePIDs[i]);
 
  518       last_pid = RemotePIDs[i];
 
  520     else if (RemotePIDs[i]<last_pid)
 
  521       throw std::runtime_error("Epetra_MpiDistributor::CreateRecvStructures_ expected RemotePIDs to be in sorted order");
 
  537     for( ; j<NumRemoteIDs && RemotePIDs[jlast]==RemotePIDs[j]  ; j++){;}
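
CreateRecvStructures_ (source lines 505-537 above) assumes RemotePIDs is sorted by sending processor: it walks the list once, records each new PID, and measures the length of each run, throwing if the order ever decreases. A minimal sketch of that run-length scan, using hypothetical names rather than the class members:

  #include <cstddef>
  #include <stdexcept>
  #include <vector>

  // Hypothetical sketch: compress a sorted list of sending ranks into
  // (rank, run length) pairs, rejecting unsorted input the way
  // CreateRecvStructures_ does above.
  void CompressRecvList(const std::vector<int>& remotePIDs,
                        std::vector<int>& procsFrom,
                        std::vector<int>& lengthsFrom)
  {
    int lastPID = -1;
    for (std::size_t i = 0; i < remotePIDs.size(); ++i) {
      if (remotePIDs[i] > lastPID) {        // first entry from a new sender
        procsFrom.push_back(remotePIDs[i]);
        lengthsFrom.push_back(1);
        lastPID = remotePIDs[i];
      } else if (remotePIDs[i] == lastPID) {
        ++lengthsFrom.back();               // same sender, extend the run
      } else {
        throw std::runtime_error("RemotePIDs must be sorted by sending rank");
      }
    }
  }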
 
  555   int * msg_count = new int[ nprocs ];

  556   int * counts = new int[ nprocs ];
 
  561   for( i = 0; i < nprocs; i++ )
 
  570 #if defined(REDUCE_SCATTER_BUG) 
  572   MPI_Reduce(msg_count, counts, nprocs, MPI_INT, MPI_SUM, 0, comm_);

  573   MPI_Scatter(counts, 1, MPI_INT, &nrecvs_, 1, MPI_INT, 0, comm_);

  575   MPI_Reduce_scatter( msg_count, &nrecvs_, counts, MPI_INT, MPI_SUM, comm_ );
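
The block at source lines 555-575 is how each rank learns how many messages it will receive: it marks every destination rank with a 1 in msg_count and lets MPI_Reduce_scatter sum those flags column-wise, so each rank gets back exactly the number of ranks that will send to it (with an MPI_Reduce plus MPI_Scatter fallback when REDUCE_SCATTER_BUG is defined). A standalone sketch of the same idea with hypothetical names:

  #include <cstddef>
  #include <mpi.h>
  #include <vector>

  // Hypothetical sketch: each rank flags the ranks it will send to, and a
  // reduce-scatter sums the flags column-wise so every rank learns how many
  // messages to expect.
  int CountIncomingMessages(MPI_Comm comm, const std::vector<int>& destRanks)
  {
    int nprocs;
    MPI_Comm_size(comm, &nprocs);

    std::vector<int> msgCount(nprocs, 0);   // msgCount[p] = 1 if we send to rank p
    for (std::size_t i = 0; i < destRanks.size(); ++i)
      msgCount[destRanks[i]] = 1;

    std::vector<int> recvCounts(nprocs, 1); // each rank gets one summed entry back
    int numRecvs = 0;
    MPI_Reduce_scatter(&msgCount[0], &numRecvs, &recvCounts[0],
                       MPI_INT, MPI_SUM, comm);
    return numRecvs;                        // number of ranks that will send to us
  }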
 
  590 #ifndef NEW_COMM_PATTERN 
  608   MPI_Barrier( comm_ );
 
  620   MPI_Barrier( comm_ );
 
  668   MPI_Barrier( comm_ );
 
  677 template<typename id_type>
 
  679         const id_type *& import_ids,
 
  680         const int *& import_procs,
 
  682         id_type *& export_ids,
 
  690   int * import_objs = 0;
 
  691   char * c_export_objs = 0;
 
  692   const int pack_size = (1 + sizeof(id_type)/sizeof(int));
 
  694   if( num_imports > 0 )
 
  696     proc_list = new int[ num_imports ];

  697     import_objs = new int[ num_imports * pack_size];
 
  699     for( i = 0; i < num_imports; i++ )
 
  701       proc_list[i] = import_procs[i];
 
  703       *(id_type*)(import_objs + pack_size*i) = import_ids[i];
 
  704       *(import_objs + pack_size*i + (pack_size-1)) = my_proc;
 
  709              true, num_exports) );
 
  710   if( num_exports > 0 )
 
  713     export_ids = new id_type[ num_exports ];

  714     export_procs = new int[ num_exports ];
 
  722   int len_c_export_objs = 0;
 
  724             pack_size * (int)sizeof( int ),

  727   int * export_objs = reinterpret_cast<int *>(c_export_objs);
 
  729   for( i = 0; i < num_exports; i++ ) {
 
  730     export_ids[i] = *(id_type*)(export_objs + pack_size*i);
 
  731     export_procs[i] = *(export_objs + pack_size*i + (pack_size-1));
 
  734   if( proc_list != 0 ) delete [] proc_list;

  735   if( import_objs != 0 ) delete [] import_objs;

  736   if( len_c_export_objs != 0 ) delete [] c_export_objs;
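
ComputeSends_ (source lines 677-736) inverts an import list: each imported ID is packed together with the requesting rank into a flat int buffer, pack_size ints per record (the ID, which may span more than one int for 64-bit GIDs, followed by the origin rank in the last slot), shipped to its owner through a temporary distributor, and unpacked into export_ids/export_procs. A sketch of just that packing layout, with hypothetical helper names and memcpy used instead of the pointer casts above to sidestep aliasing concerns:

  #include <cstring>
  #include <vector>

  // Hypothetical sketch of the record layout used by ComputeSends_: each
  // record is pack_size ints -- the ID (possibly wider than int) in the
  // leading slots, and the requesting rank in the last slot.
  template <typename id_type>
  void PackImportRecord(std::vector<int>& buffer, id_type id, int myRank)
  {
    const int packSize = 1 + (int)(sizeof(id_type) / sizeof(int));
    std::vector<int> record(packSize, 0);
    std::memcpy(&record[0], &id, sizeof(id_type));   // ID fills the leading ints
    record[packSize - 1] = myRank;                   // origin rank rides in the last int
    buffer.insert(buffer.end(), record.begin(), record.end());
  }

  template <typename id_type>
  void UnpackImportRecord(const int* record, id_type& id, int& originRank)
  {
    const int packSize = 1 + (int)(sizeof(id_type) / sizeof(int));
    std::memcpy(&id, record, sizeof(id_type));
    originRank = record[packSize - 1];
  }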
 
  745                                int & len_import_objs,
 
  746                                char *& import_objs )
 
  763                                       int & len_import_objs,
 
  764                                       char *& import_objs )
 
  767          len_import_objs, import_objs) );
 
  783                                     int & len_import_objs,
 
  784             char *& import_objs )
 
  789   int self_recv_address = 0;
 
  791   MPI_Comm_rank( comm_, &my_proc );

  795     if( import_objs!=0 ) {delete [] import_objs; import_objs = 0;}

  797     if (len_import_objs>0) import_objs = new char[len_import_objs];
 
  798     for( i=0; i<len_import_objs; ++i ) import_objs[i]=0;
 
  808       MPI_Irecv( &(import_objs[j]),
 
  816       self_recv_address = j;
 
  821 #ifndef EPETRA_NO_READY_SEND_IN_DO_POSTS 
  831   MPI_Barrier( comm_ );
 
  832 #endif // EPETRA_NO_READY_SEND_IN_DO_POSTS 
  838   while( proc_index < nblocks && procs_to_[proc_index] < my_proc )
 
  840   if( proc_index == nblocks ) proc_index = 0;
 
  842   int self_num = 0, self_index = 0;
 
  847     for( i = 0; i < nblocks; ++i )
 
  850       if( p > (nblocks-1) ) p -= nblocks;
 
  854 #ifndef EPETRA_NO_READY_SEND_IN_DO_POSTS 
  855         MPI_Rsend( &export_objs[starts_to_[p]*obj_size],
 
  862         MPI_Send( &export_objs[starts_to_[p]*obj_size],
 
  868 #endif // EPETRA_NO_READY_SEND_IN_DO_POSTS 
  876       memcpy( &import_objs[self_recv_address],
 
  890     for( i = 0; i < nblocks; i++ )
 
  893       if( p > (nblocks-1) ) p -= nblocks;
 
  906 #ifndef EPETRA_NO_READY_SEND_IN_DO_POSTS 
  908                    lengths_to_[p] * obj_size,
 
  914       lengths_to_[p] * obj_size,
 
  918 #endif // EPETRA_NO_READY_SEND_IN_DO_POSTS 
  930         memcpy( &(import_objs[self_recv_address]),
 
  934         self_recv_address += obj_size;
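
The DoPosts excerpts above follow a standard MPI posting pattern: every expected receive is posted with MPI_Irecv, a barrier guarantees the receives are in place, sends then go out with MPI_Rsend (or MPI_Send when EPETRA_NO_READY_SEND_IN_DO_POSTS is defined), and the message a rank addresses to itself is copied with memcpy rather than sent through MPI. A self-contained sketch of that pattern with hypothetical buffers and names, not the class's actual members:

  #include <cstddef>
  #include <cstring>
  #include <mpi.h>
  #include <vector>

  // Hypothetical sketch of the post pattern above: post all receives, use a
  // barrier to guarantee they are in place, then send with MPI_Rsend (safe
  // only because the matching receives are already posted).  The self-message
  // is copied locally instead of going through MPI.
  void PostExchange(MPI_Comm comm, int tag,
                    const std::vector<int>& recvProcs, const std::vector<int>& recvLens,
                    const std::vector<int>& sendProcs, const std::vector<int>& sendLens,
                    const std::vector<const char*>& sendBufs,
                    std::vector<std::vector<char> >& recvBufs)
  {
    int myRank;
    MPI_Comm_rank(comm, &myRank);

    std::vector<MPI_Request> requests;
    recvBufs.resize(recvProcs.size());
    for (std::size_t i = 0; i < recvProcs.size(); ++i) {
      recvBufs[i].assign(recvLens[i], 0);
      if (recvProcs[i] == myRank || recvLens[i] == 0)
        continue;                             // self-message is copied locally below
      requests.push_back(MPI_REQUEST_NULL);
      MPI_Irecv(&recvBufs[i][0], recvLens[i], MPI_CHAR,
                recvProcs[i], tag, comm, &requests.back());
    }

    MPI_Barrier(comm);                        // every rank has now posted its receives

    for (std::size_t i = 0; i < sendProcs.size(); ++i) {
      if (sendLens[i] == 0) continue;
      if (sendProcs[i] == myRank) {           // local copy instead of an MPI message
        for (std::size_t j = 0; j < recvProcs.size(); ++j)
          if (recvProcs[j] == myRank)
            std::memcpy(&recvBufs[j][0], sendBufs[i], sendLens[i]);
      } else {
        MPI_Rsend(const_cast<char*>(sendBufs[i]), sendLens[i], MPI_CHAR,
                  sendProcs[i], tag, comm);
      }
    }

    if (!requests.empty())
      MPI_Waitall((int)requests.size(), &requests[0], MPI_STATUSES_IGNORE);
  }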
 
  951                                            int & len_import_objs,
 
  952                                            char *& import_objs )
 
  988   MPI_Comm_rank (comm_, &my_proc);

  990   MPI_Comm_size( comm_, &nprocs );

  997       match = match && (sizes_[i]==sizes[i]);
 
  998     int matched = match?1:0;
 
 1000     MPI_Allreduce( &matched, &match_count, 1, MPI_INT, MPI_SUM, comm_ );
 
 1001     if( match_count == nprocs )
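
The check at source lines 997-1001 decides globally whether the cached per-object sizes still match what every rank passed in: each rank reduces its local comparison to a 0/1 flag, an MPI_Allreduce with MPI_SUM counts the matches, and the cached plan is reused only if the count equals the number of ranks. A minimal sketch of that agreement test with hypothetical names:

  #include <mpi.h>

  // Hypothetical sketch: decide globally whether every rank's local check
  // passed, by summing 0/1 flags and comparing against the communicator size.
  bool AllRanksAgree(MPI_Comm comm, bool localOk)
  {
    int nprocs;
    MPI_Comm_size(comm, &nprocs);
    int flag = localOk ? 1 : 0;
    int total = 0;
    MPI_Allreduce(&flag, &total, 1, MPI_INT, MPI_SUM, comm);
    return total == nprocs;   // true only if every rank reported a match
  }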
 
 1051     if (index!=0) {delete [] index; index = 0;}

 1052     if (sort_val!=0) {delete [] sort_val; sort_val = 0;}
 
 1058     if( nexports_ ) offset = new int[nexports_];

 1083     if (offset!=0) {delete [] offset; offset = 0;}
 
 1087   int self_index_to = -1;
 
 1091 #ifndef EPETRA_NEW_COMM_PATTERN 
 1134   MPI_Barrier( comm_ );
 
 1169                                int & len_import_objs,
 
 1170                                char *& import_objs )
 
 1173         len_import_objs, import_objs) );
 
 1183                                       int & len_import_objs,
 
 1184                                       char *& import_objs )
 
 1187          len_import_objs, import_objs) );
 
 1197                                     int & len_import_objs,
 
 1198                                     char *& import_objs )
 
 1205   MPI_Barrier( comm_ );
 
 1210   int self_recv_address = 0;
 
 1212   MPI_Comm_rank( comm_, &my_proc );
 
 1216     if( import_objs!=0 ) {delete [] import_objs; import_objs = 0;}

 1218     if (len_import_objs>0) import_objs = new char[len_import_objs];

 1236   MPI_Barrier( comm_ );

 1242   while( proc_index < nblocks && procs_to_[proc_index] < my_proc )
 
 1244   if( proc_index == nblocks ) proc_index = 0;
 
 1251     for( i = 0; i < nblocks; ++i )
 
 1254       if( p > (nblocks-1) ) p -= nblocks;
 
 1268       memcpy( &import_objs[self_recv_address],
 
 1286     for( i=0; i<nblocks; ++i )
 
 1289       if( p > (nblocks-1) ) p -= nblocks;
 
 1317         memcpy( &import_objs[self_recv_address],
 
 1318                 &export_objs[jj*obj_size],
 
 1333                                            int & len_import_objs,
 
 1334                    char *& import_objs )
 
 1353   int myRank = 0, numProcs = 1;
 
 1354   MPI_Comm_rank (comm_, &myRank);

 1355   MPI_Comm_size (comm_, &numProcs);

 1358     os << "Epetra_MpiDistributor (implements Epetra_Distributor)" << std::endl;
 
 1364   for (int p = 0; p < numProcs; ++p) {

 1366       os << "[Node " << p << " of " << numProcs << "]" << std::endl;

 1367       os << " selfMessage: " << self_msg_ << std::endl;

 1368       os << " numSends: " << nsends_ << std::endl;

 1370       os << " imagesTo: [";

 1371       for (int i = 0; i < nsends_; ++i) {
 
 1373   if (i < nsends_ - 1) {
 
 1377       os << "]" << std::endl;

 1379       os << " lengthsTo: [";

 1380       for (int i = 0; i < nsends_; ++i) {
 
 1382   if (i < nsends_ - 1) {
 
 1386       os << "]" << std::endl;

 1390       os << " startsTo: ";

 1392   os << "(NULL)" << std::endl;

 1395   for (int i = 0; i < nsends_; ++i) {
 
 1397     if (i < nsends_ - 1) {
 
 1401   os << "]" << std::endl;

 1404       os << " indicesTo: ";

 1406   os << "(NULL)" << std::endl;

 1410   for (int i = 0; i < nsends_; ++i) {
 
 1414     k += lengths_to_[i];
 
 1416   os << "]" << std::endl;

 1419       os << " numReceives: " << nrecvs_ << std::endl;

 1422       os << " lengthsFrom: [";

 1423       for (int i = 0; i < nrecvs_; ++i) {
 
 1425   if (i < nrecvs_ - 1) {
 
 1429       os << "]" << std::endl;

 1431       os << " startsFrom: [";

 1432       for (int i = 0; i < nrecvs_; ++i) {
 
 1434   if (i < nrecvs_ - 1) {
 
 1438       os << "]" << std::endl;

 1440       os << " imagesFrom: [";

 1441       for (int i = 0; i < nrecvs_; ++i) {
 
 1443   if (i < nrecvs_ - 1) {
 
 1447       os << "]" << std::endl;

 1469     MPI_Barrier (comm_);

 1470     MPI_Barrier (comm_);

 1471     MPI_Barrier (comm_);
 
 1488     if (nvals <= 1) return 0;
 
 1494     for (i = 0; i < nvals; i++)
 
 1495        if (n < vals_sort[i]) n = vals_sort[i];
 
 1496     int *pos = new int [n+2];
 
 1497     for (i = 0; i < n+2; i++) pos[i] = 0;
 
 1500     int *copy_sort  = new int [nvals];

 1501     int *copy_other = new int [nvals];
 
 1502     for (i = 0; i < nvals; i++)
 
 1504       copy_sort[i]  = vals_sort[i];
 
 1505       copy_other[i] = vals_other[i];
 
 1510     for (i = 0; i < nvals; i++) p[copy_sort[i]]++;
 
 1513     for (i = 1; i < n; i++) p[i] += p[i-1];
 
 1517     for (i = 0; i < nvals; i++)
 
 1519       vals_sort  [p[copy_sort [i]]]   = copy_sort[i];
 
 1520       vals_other [p[copy_sort [i]]++] = copy_other[i];
 
 1523     delete [] copy_sort;
 
 1524     delete [] copy_other;
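
Sort_ints_ (source lines 1488-1524) is a counting sort over non-negative integer keys that reorders a companion value array in the same pass: find the maximum key, count occurrences, prefix-sum the counts into starting positions, then scatter the copied pairs back. A compact sketch of the same key-indexed counting idea, using std::vector instead of raw new[]/delete[]:

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  // Hypothetical sketch of the key-indexed counting sort above: reorder
  // 'keys' (non-negative ints) into ascending order and carry 'values' along.
  void CountingSortPairs(std::vector<int>& keys, std::vector<int>& values)
  {
    if (keys.size() <= 1) return;
    const int maxKey = *std::max_element(keys.begin(), keys.end());

    std::vector<int> start(maxKey + 2, 0);            // start[k+1] counts key k
    for (std::size_t i = 0; i < keys.size(); ++i) ++start[keys[i] + 1];
    for (int k = 0; k <= maxKey; ++k) start[k + 1] += start[k];  // first slot of each key

    std::vector<int> keyCopy(keys), valCopy(values);
    for (std::size_t i = 0; i < keyCopy.size(); ++i) {
      const int dest = start[keyCopy[i]]++;           // next free slot for this key
      keys[dest]   = keyCopy[i];
      values[dest] = valCopy[i];
    }
  }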
 
 1535   bool throw_error = true;

 1537     throw ReportError("Epetra_MpiDistributor::operator= not supported.",-1);

 1548   MPI_Comm_rank( comm_, &my_proc );
 
 1551     int total_send_length = 0;
 
 1555     int max_recv_length = 0;
 
 1556     for( i = 0; i < nrecvs_; i++ )
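
The reverse-distributor excerpt at source lines 1548-1556 sizes the reverse plan from the forward one: the reverse plan's total receive length is the forward plan's total send length, and its largest single send is the forward plan's largest single receive. A tiny sketch of those two reductions with hypothetical names:

  #include <algorithm>
  #include <numeric>
  #include <vector>

  // Hypothetical sketch: the reverse plan receives everything the forward
  // plan sends, and its biggest send is the forward plan's biggest receive.
  void SizeReversePlan(const std::vector<int>& lengthsTo,
                       const std::vector<int>& lengthsFrom,
                       int& reverseTotalRecvLength,
                       int& reverseMaxSendLength)
  {
    reverseTotalRecvLength = std::accumulate(lengthsTo.begin(), lengthsTo.end(), 0);
    reverseMaxSendLength = lengthsFrom.empty() ? 0 :
        *std::max_element(lengthsFrom.begin(), lengthsFrom.end());
  }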
 
int CreateSendStructures_(int my_proc, int nprocs, const int &NumExportIDs, const int *ExportPIDs)
int ComputeSends_(int num_imports, const id_type *&import_ids, const int *&import_procs, int &num_exports, id_type *&export_ids, int *&export_procs, int my_proc)
int ComputeRecvs_(int my_proc, int nprocs)
Epetra_MpiDistributor & operator=(const Epetra_MpiDistributor &src)
MPI_Comm GetMpiComm() const 
Get the MPI Communicator (identical to the Comm() method; used when we know we are using MPI). 
int DoPosts(char *export_objs, int obj_size, int &len_import_objs, char *&import_objs)
Post buffer of export objects (can do other local work before executing Waits) 
void CreateReverseDistributor()
Epetra_Distributor: The Epetra Gather/Scatter Setup Base Class. 
int CreateFromRecvs(const int &NumRemoteIDs, const int *RemoteGIDs, const int *RemotePIDs, bool Deterministic, int &NumExportIDs, int *&ExportGIDs, int *&ExportPIDs)
Create a communication plan from receive list. 
int NumReceives() const 
The number of procs from which we will receive data. 
int DoReverseWaits()
Wait on a reverse set of posts. 
#define EPETRA_CHK_ERR(a)
int CreateFromSendsAndRecvs(const int &NumExportIDs, const int *ExportPIDs, const int &NumRemoteIDs, const int *RemoteGIDs, const int *RemotePIDs, bool Deterministic)
Create a communication plan from send list and a recv list. 
MPI implementation of Epetra_Distributor. 
Epetra_MpiComm: The Epetra MPI Communication Class. 
int CreateRecvStructures_(const int &NumRemoteIDs, const int *RemotePIDs)
int NumSends() const 
The number of procs to which we will send data. 
Epetra_Object: The base Epetra class. 
virtual ~Epetra_MpiDistributor()
Destructor (declared virtual for memory safety). 
int CreateFromSends(const int &NumExportIDs, const int *ExportPIDs, bool Deterministic, int &NumRemoteIDs)
Create a communication plan from send list. 
MPI_Comm Comm() const 
Extract MPI Communicator from a Epetra_MpiComm object. 
int DoWaits()
Wait on a set of posts. 
int DoReversePosts(char *export_objs, int obj_size, int &len_import_objs, char *&import_objs)
Do reverse post of buffer of export objects (can do other local work before executing Waits) ...
Epetra_Distributor * ReverseClone()
Create and extract the reverse version of the distributor. 
int GetMpiTag() const 
Acquire an MPI tag from the Epetra range of 24050-24099, increment tag. 
void Print(std::ostream &os) const 
Epetra_MpiDistributor * comm_plan_reverse_
int Sort_ints_(int *vals, int *other, int nvals)
int Do(char *export_objs, int obj_size, int &len_import_objs, char *&import_objs)
Execute plan on buffer of export objects in a single step. 
Epetra_MpiDistributor(const Epetra_MpiComm &Comm)
Default constructor. 
virtual int ReportError(const std::string Message, int ErrorCode) const 
Error reporting method. 
int DoReverse(char *export_objs, int obj_size, int &len_import_objs, char *&import_objs)
Execute reverse of plan on buffer of export objects in a single step. 
const Epetra_MpiComm * epComm_
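
Putting the pieces of the reference list above together, a typical forward exchange constructs the distributor from an Epetra_MpiComm, plans the communication with CreateFromSends, and then moves packed bytes with Do. A minimal usage sketch built only from the signatures listed above (the buffer names and the double payload are illustrative; error checking is omitted):

  #include "Epetra_MpiComm.h"
  #include "Epetra_MpiDistributor.h"

  // Minimal usage sketch.  Assumes MPI has already been initialized and
  // 'comm' wraps the communicator in use.
  void ExampleExchange(const Epetra_MpiComm& comm,
                       int numExports, const int* exportPIDs,
                       double* exportValues)
  {
    Epetra_MpiDistributor distor(comm);

    // Plan the exchange: exportValues[i] is destined for rank exportPIDs[i].
    int numRemoteIDs = 0;
    distor.CreateFromSends(numExports, exportPIDs, true, numRemoteIDs);

    // Execute the plan on a packed byte buffer, one double per export entry.
    int lenImport = 0;
    char* importBuf = 0;   // allocated inside Do (see the new[] at source line 797)
    distor.Do(reinterpret_cast<char*>(exportValues),
              (int) sizeof(double), lenImport, importBuf);

    // importBuf now holds numRemoteIDs doubles received from the other ranks.
    const double* importValues = reinterpret_cast<const double*>(importBuf);
    (void) importValues;

    delete [] importBuf;   // assumption: the caller releases the import buffer
  }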