This example shows how to use geometric partitioning methods with multivectors: Isorropia partitions the coordinates stored in an Epetra_MultiVector and redistributes them across processes.
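Coordinates are read from an input file (simple.coords by default, settable with --f), given weights, partitioned with Zoltan's recursive coordinate bisection (RCB) by default or recursive inertial bisection (RIB) when --rib is passed, and then redistributed; the weight balance across processes is printed before and after partitioning. With --v the coordinates and weights are also displayed.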
#include <sys/types.h>
#include <unistd.h>   // getpid(), sleep()
#include <cstdlib>    // getenv(), exit()
#include <iostream>
#include <string>

#define PRINTLIMIT 5000

#ifdef HAVE_EPETRA
#include <Epetra_Import.h>
#ifdef HAVE_MPI
#include <Epetra_MpiComm.h>
#else
#include <Epetra_SerialComm.h>
#endif
#include <Epetra_MultiVector.h>
#include <Epetra_Vector.h>

#include <Isorropia_EpetraPartitioner.hpp>
#include <Isorropia_EpetraRedistributor.hpp>

#include <Teuchos_CommandLineProcessor.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Teuchos_RCP.hpp>

// Test utilities shipped with Isorropia (the ispatest library): reading
// a coordinate file into a multivector, creating weights, printing
// multivectors, and computing balance.
#include <ispatest_epetra_utils.hpp>
#include <ispatest_lbeval_utils.hpp>

#ifdef _MSC_VER
#include "winprocess.h"
#endif
int main(int argc, char** argv) {
  int fail = 0, dim=0;  
  int localProc = 0;
  int numProcs = 1;
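
  // Use an MPI communicator when built with MPI, a serial one otherwise.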
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  const Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  const Epetra_SerialComm Comm;
#endif
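
  // When DEBUGME is set in the environment, print this process's pid and
  // pause briefly so a debugger can be attached.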
  if (getenv("DEBUGME")){
    std::cerr << localProc << " gdb test_rcb.exe " << getpid() << std::endl;
    sleep(15);
  }
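
  // =============================================================
  // Parse the command-line options.
  // =============================================================
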
  Teuchos::CommandLineProcessor clp(false,true);
  std::string *inputFile = new std::string("simple.coords");
  bool verbose = false;
  bool rib = false;
  clp.setOption( "f", inputFile, "Name of coordinate input file");
  clp.setOption( "v", "q", &verbose,
                "Display coordinates and weights before and after partitioning.");
  clp.setOption( "rib", "rcb", &rib, "Run RIB instead of RCB");
  Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return =
    clp.parse(argc,argv);
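
  // Exit cleanly if --help was printed or if parsing failed.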
  if( parse_return == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED){
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
  }
  if( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 1;
  }

  // =============================================================
  // Read the coordinates from the input file into an
  // Epetra_MultiVector, distributed across the processes.
  // (file2multivector is an ispatest utility; it is assumed here
  // to be the reader this example relies on.)
  // =============================================================

  Epetra_MultiVector *mv = ispatest::file2multivector(Comm, *inputFile);

  if (!mv || ((dim = mv->NumVectors()) < 1)){
    if (localProc == 0)
      std::cerr << "Invalid input file " << *inputFile << std::endl;
    exit(1);
  }
  int vsize = mv->GlobalLength();
  if (verbose){
    if (vsize < PRINTLIMIT){
      // Display the coordinates (printMultiVector is an ispatest utility).
      ispatest::printMultiVector(*mv, std::cout, "Coordinates", PRINTLIMIT);
    }
    else{
      if (localProc == 0){
        std::cerr << "--v requested, but input file is larger than " << PRINTLIMIT << " coordinates." << std::endl;
        std::cerr << "Partitioning will be performed but will not be displayed." << std::endl;
      }
      // Turn the display off on every process, not just process 0, so
      // the collective print calls below stay consistent.
      verbose = false;
    }
  }

  // =============================================================
  // Create one weight per coordinate.  (makeWeights and unitWeights
  // are ispatest utilities; with unit weights the partitioner simply
  // balances the number of coordinates per process.)
  // =============================================================

  Epetra_MultiVector *wgts = ispatest::makeWeights(mv->Map(), &ispatest::unitWeights);

  if (!wgts || ((dim = wgts->NumVectors()) != 1)){
    if (localProc == 0)
      std::cerr << "Can't create weights" << std::endl;
    exit(1);
  }
  if (verbose){
    ispatest::printMultiVector(*wgts, std::cout, "Weights", PRINTLIMIT);
  }
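
  // =============================================================
  // Build a parameter list selecting the Zoltan partitioning method.
  // =============================================================
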
  Teuchos::ParameterList params;

  if (rib){
    // Ask for Zoltan's Recursive Inertial Bisection method.
    params.set("Partitioning Method", "RIB");
  }
  else{
    // Ask for Zoltan's Recursive Coordinate Bisection method.  RCB is
    // the default geometric method, so this is also what runs when no
    // parameters are set at all.
    params.set("Partitioning Method", "RCB");
  }

  // =============================================================
  // Create the partitioner; by default its constructor computes the
  // new partitioning.  Then create a Redistributor that can move
  // Epetra objects to that new distribution.
  // =============================================================

  Teuchos::RCP<const Epetra_MultiVector> mv_rcp = Teuchos::rcp(mv);
  Teuchos::RCP<const Epetra_MultiVector> wgts_rcp = Teuchos::rcp(wgts);

  Teuchos::RCP<Isorropia::Epetra::Partitioner> part =
    Teuchos::rcp(new Isorropia::Epetra::Partitioner(mv_rcp, wgts_rcp, params));

  Isorropia::Epetra::Redistributor rd(part);

  // Move the coordinates to their new processes.
  Teuchos::RCP<Epetra_MultiVector> new_mv = rd.redistribute(*mv);
 
  if (verbose){
    ispatest::printMultiVector(*new_mv, std::cout, "New coordinates", PRINTLIMIT);
  }
  
  // Move the weights to match the new coordinate distribution.
  Teuchos::RCP<Epetra_MultiVector> new_wgts = rd.redistribute(*wgts);
 
  if (verbose){
    ispatest::printMultiVector(*new_wgts, std::cout, "New weights", PRINTLIMIT);
  }

  // =============================================================
  // Compute the weight balance across processes, both before and
  // after partitioning.
  // =============================================================

  double min1, min2, max1, max2, avg1, avg2;
#if 1
  // Perfect balance means each process holds 1/numProcs of the total weight.
  double goal = 1.0 / (double)numProcs;

  // compute_balance (an ispatest utility) fills in the min/max/avg balance.
  Epetra_Vector * &w1 = (*wgts)(0);
  ispatest::compute_balance(*w1, goal, min1, max1, avg1);

  Epetra_Vector * &new_w1 = (*new_wgts)(0);
  ispatest::compute_balance(*new_w1, goal, min2, max2, avg2);
#else
  // Disabled alternative: compare the imbalance before and after in one
  // call.  Note that "costs" (presumably an Isorropia::Epetra::CostDescriber)
  // and "importer" are not constructed in this example, so this branch does
  // not compile as-is.
  std::vector<double> min(2), max(2), avg(2);
  Teuchos::RCP<Epetra_Vector> wgts_copy =
    Teuchos::rcp(new Epetra_Vector(Copy, wgts->Map(), (*wgts)[0]));
  costs.compareBeforeAndAfterImbalance(*mv, importer, min, max, avg);
  min1 = min[0]; max1 = max[0]; avg1 = avg[0];
  min2 = min[1]; max2 = max[1]; avg2 = avg[1];
#endif
  if (localProc == 0){
    std::cout << "Balance before partitioning: min " ;
    std::cout << min1 << " max " << max1 << " average " << avg1 << std::endl;
  
    std::cout << "Balance after partitioning:  min ";
    std::cout << min2 << " max " << max2 << " average " << avg2 << std::endl;
  }
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return fail;
}
#else   // !HAVE_EPETRA
// Provide a stub main so the example still builds without Epetra.
int main(int argc, char** argv) {
  std::cout << "This example requires Epetra." << std::endl;
  return 0;
}
#endif   // HAVE_EPETRA
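
To run it, build with Epetra enabled (and MPI for a parallel run) and launch the executable, for example: mpiexec -np 4 test_rcb.exe --f=simple.coords --v --rib. The executable name depends on your build (test_rcb.exe follows the debugging hint printed in the code); --f, --v, and --rib are the options registered above.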