This example shows how to use Isorropia's graph partitioning methods with edge weights, which are supplied to the partitioner through an Isorropia::Epetra::CostDescriber.
#include <Isorropia_ConfigDefs.hpp>
#include <Isorropia_Epetra.hpp>
#include <Isorropia_EpetraCostDescriber.hpp>
#include <Isorropia_EpetraPartitioner.hpp>
#include <Isorropia_EpetraRedistributor.hpp>

#include <Teuchos_ParameterList.hpp>
#include <Teuchos_RCP.hpp>

// Load-balance evaluation utilities from Isorropia's test-support
// library (provides ispatest::compute_graph_metrics, used below).
#include <ispatest_lbeval_utils.hpp>

#include <iostream>
#include <vector>
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef HAVE_EPETRA
#ifdef HAVE_MPI
#include <Epetra_MpiComm.h>
#else
#include <Epetra_SerialComm.h>
#endif
#include <Epetra_Map.h>
#include <Epetra_Vector.h>
#include <Epetra_CrsMatrix.h>
#include <Epetra_FECrsMatrix.h>
#endif
int main(int argc, char** argv) {
#if defined(HAVE_MPI) && defined(HAVE_EPETRA)
  int numProcs = 1;
  int localProc = 0;
  
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  if (numProcs != 3) {
    std::cout << "num-procs="<<numProcs<<". This program can only "
      << "run on 3 procs. Exiting."<<std::endl;
    MPI_Finalize();
    return(0);
  }
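  // Set up a small "mesh": four 4-node quadrilateral elements
  // arranged in a 2x2 grid, 9 nodes in all, distributed over the
  // 3 processors (3 nodes each).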
  int nodesPerElem = 4;
  int global_n = 9;
  
  std::vector<int> mynodes(3);
  if (localProc == 0) {
    mynodes[0] = 0; mynodes[1] = 3; mynodes[2] = 8;
  }
  if (localProc == 1) {
    mynodes[0] = 1; mynodes[1] = 2; mynodes[2] = 7;
  }
  if (localProc == 2) {
    mynodes[0] = 4; mynodes[1] = 5; mynodes[2] = 6;
  }
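  // Build the map describing which of the 9 graph vertices (mesh
  // nodes) each processor owns, and create an empty matrix whose
  // sparsity pattern will represent the mesh connectivity.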
  Epetra_MpiComm comm(MPI_COMM_WORLD);
  Epetra_Map origmap(global_n, 3, &mynodes[0], 0, comm);
  Teuchos::RCP<Epetra_FECrsMatrix> matrix =
    Teuchos::rcp(new Epetra_FECrsMatrix(Copy, origmap, 0));
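  // An Epetra_FECrsMatrix is used so that contributions to rows
  // owned by other processors can be assembled.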

  // Assemble element connectivity into the matrix. The value 2.0 is
  // arbitrary; only the resulting sparsity pattern (the graph) and
  // the edge weights set below matter for partitioning.
  std::vector<int> indices(nodesPerElem);
  std::vector<double> coefs(nodesPerElem*nodesPerElem,2.0);
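  // Each processor inserts the connectivity for the element(s) it
  // owns; rows for shared nodes receive contributions from more
  // than one element.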
  if (localProc == 0) {
    // Element 0: nodes 0, 1, 2, 3.
    indices[0] = 0; indices[1] = 1; indices[2] = 2; indices[3] = 3;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
    // Element 1: nodes 1, 4, 5, 2.
    indices[0] = 1; indices[1] = 4; indices[2] = 5; indices[3] = 2;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else if (localProc == 1) {
    // Element 2: nodes 3, 2, 7, 8.
    indices[0] = 3; indices[1] = 2; indices[2] = 7; indices[3] = 8;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else {
    // Element 3: nodes 2, 5, 6, 7.
    indices[0] = 2; indices[1] = 5; indices[2] = 6; indices[3] = 7;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
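  // GlobalAssemble() communicates contributions made to rows owned
  // by other processors, then calls FillComplete() on the matrix.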
  int err = matrix->GlobalAssemble();
  if (err != 0) {
    std::cout << "err="<<err<<" returned from matrix->GlobalAssemble()"
      << std::endl;
  }

  // Set the parameters that select the partitioning method.
  Teuchos::ParameterList paramlist;
#ifdef HAVE_ISORROPIA_ZOLTAN
  // When Zoltan is available, request graph partitioning and ask
  // Isorropia to print Zoltan's partition-quality metrics.
  paramlist.set("PARTITIONING METHOD", "GRAPH");
  paramlist.set("PRINT ZOLTAN METRICS", "2");
  Teuchos::ParameterList& sublist = paramlist.sublist("Zoltan");
  sublist.set("GRAPH_PACKAGE", "PHG"); 
  sublist.set("EDGE_WEIGHT_DIM", "1"); 
#else
  // Zoltan is not available; Isorropia will use its simple built-in
  // linear partitioner, and no further parameters are needed.
#endif
  Teuchos::RCP<Isorropia::Epetra::CostDescriber> costs =
  
  
  Teuchos::RCP<Epetra_FECrsMatrix> ge_weights =
    Teuchos::rcp(new Epetra_FECrsMatrix(*matrix));
  Teuchos::RCP<Epetra_CrsMatrix> crs_ge_weights;
  crs_ge_weights = ge_weights;
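  // (Epetra_FECrsMatrix derives from Epetra_CrsMatrix, so the RCP
  // assignment above is an implicit upcast.)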
  // Give every edge an initial weight of 1.0.
  crs_ge_weights->PutScalar(1.0);

  // Now put a much larger weight on the edges (0,1), (1,4), (2,3),
  // (2,5), (6,7) and (7,8). Each processor sets the entries in the
  // rows it owns, so both directions of each undirected edge get
  // the large weight.
  double weight = 500.0;
  if (localProc == 0) {
    // Row 0: weight of edge (0,1).
    indices[0] = 1;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(0, 1, &coefs[0], &indices[0]);
    // Row 3: weight of edge (3,2).
    indices[0] = 2;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(3, 1, &coefs[0], &indices[0]);
    // Row 8: weight of edge (8,7).
    indices[0] = 7;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(8, 1, &coefs[0], &indices[0]);
  }
  if (localProc == 1) {
    // Row 1: weights of edges (1,0) and (1,4).
    indices[0] = 0; indices[1] = 4;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(1, 2, &coefs[0], &indices[0]);
    // Row 2: weights of edges (2,3) and (2,5).
    indices[0] = 3; indices[1] = 5;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(2, 2, &coefs[0], &indices[0]);
    // Row 7: weights of edges (7,6) and (7,8).
    indices[0] = 6; indices[1] = 8;
    coefs[0] = weight; coefs[1] = weight;
    crs_ge_weights->ReplaceGlobalValues(7, 2, &coefs[0], &indices[0]);
  }
  if (localProc == 2) {
    // Row 4: weight of edge (4,1).
    indices[0] = 1;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(4, 1, &coefs[0], &indices[0]);
    // Row 5: weight of edge (5,2).
    indices[0] = 2;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(5, 1, &coefs[0], &indices[0]);
    // Row 6: weight of edge (6,7).
    indices[0] = 7;
    coefs[0] = weight;
    crs_ge_weights->ReplaceGlobalValues(6, 1, &coefs[0], &indices[0]);
  }

  // Hand the completed edge-weight matrix to the CostDescriber.
  costs->setGraphEdgeWeights(crs_ge_weights);
  Teuchos::RCP<const Epetra_RowMatrix> rowmatrix;
  rowmatrix = matrix;
  
  
  Teuchos::RCP<Isorropia::Epetra::Partitioner> partitioner =
  
  
  Teuchos::RCP<Epetra_CrsMatrix> bal_matrix;
  
  
  if (localProc == 0) {
    std::cout << " calling Isorropia::Epetra::Redistributor::redistribute..."
        << std::endl;
  }
  try {
    // Migrate the connectivity matrix to the new distribution.
    bal_matrix = rd.redistribute(*rowmatrix);
  }
  catch(std::exception& exc) {
    std::cout << "linsys example: Isorropia::Epetra::Redistributor threw "
         << "exception '" << exc.what() << "' on proc "
         << localProc << std::endl;
    MPI_Finalize();
    return(-1);
  }

  // Compute balance and cut metrics for the original and the new
  // distributions.
  double bal0, bal1, cutn0, cutn1, cutl0, cutl1, cutWgt0, cutWgt1;
  int numCuts0, numCuts1;
#if 1
  // Use the graph metric routines from Isorropia's ispatest
  // test-support utilities.
  double goalWeight = 1.0 / (double)numProcs;

  // Metrics for the original distribution.
  ispatest::compute_graph_metrics(*rowmatrix, *costs, goalWeight,
                     bal0, numCuts0, cutWgt0, cutn0, cutl0);

  // The edge weights must be redistributed the same way as the
  // matrix before evaluating the new distribution.
  Teuchos::RCP<Epetra_CrsMatrix> new_weights =
    rd.redistribute(*crs_ge_weights);

  Isorropia::Epetra::CostDescriber new_costs;
  new_costs.setGraphEdgeWeights(new_weights);

  // Metrics for the new distribution.
  ispatest::compute_graph_metrics(*bal_matrix, new_costs, goalWeight,
                     bal1, numCuts1, cutWgt1, cutn1, cutl1);
#else
  std::vector<double> bal(2), cutwgt(2), cutn(2), cutl(2);
  std::vector<int >ncuts(2);
  costs->compareBeforeAndAfterGraph(*rowmatrix, *bal_matrix, importer,
             bal, ncuts, cutwgt, cutn, cutl);
  bal0 = bal[0]; cutn0 = cutn[0]; cutl0 = cutl[0]; cutWgt0 = cutwgt[0]; numCuts0 = ncuts[0];
  bal1 = bal[1]; cutn1 = cutn[1]; cutl1 = cutl[1]; cutWgt1 = cutwgt[1]; numCuts1 = ncuts[1];
#endif
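  // Relinquish ownership so the matrix is not destroyed at the end
  // of main(), after MPI_Finalize() has been called.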
  bal_matrix.release();
  if (localProc == 0){
    std::cout << "Before partitioning: Number of cuts " << numCuts0 << " Cut weight " << cutWgt0 << std::endl;
    std::cout << "                     Balance " << bal0 << " cutN " << cutn0 << " cutL " << cutl0;
    std::cout << std::endl;
    std::cout << "After partitioning:  Number of cuts " << numCuts1 << " Cut weight " << cutWgt1 << std::endl;
    std::cout << "                     Balance " << bal1 << " cutN " << cutn1 << " cutL " << cutl1;
    std::cout << std::endl;
  }
  MPI_Finalize();
#else
  std::cout << "part_redist: must have both MPI and EPETRA. Make sure Trilinos "
    << "is configured with --enable-mpi and --enable-epetra." << std::endl;
#endif
  return(0);
}