This example shows how to use Isorropia's hypergraph partitioning method with user-supplied hyperedge weights. A banded Epetra_CrsMatrix is created with an even row distribution, hyperedge weights are registered through a CostDescriber, the matrix is repartitioned and redistributed, and the balance and cut metrics are reported before and after.
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef HAVE_EPETRA
#ifdef HAVE_MPI
#include <Epetra_MpiComm.h>
#else
#include <Epetra_SerialComm.h>
#endif
#include <Epetra_Map.h>
#include <Epetra_Vector.h>
#include <Epetra_CrsMatrix.h>
#include <Epetra_LinearProblem.h>
#endif

// Teuchos and Isorropia headers used below.  ispatest_lbeval_utils.hpp comes
// from Isorropia's test-support (ispatest) utilities and provides
// compute_hypergraph_metrics.
#include <Teuchos_RCP.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Isorropia_Epetra.hpp>
#include <Isorropia_EpetraCostDescriber.hpp>
#include <Isorropia_EpetraPartitioner.hpp>
#include <Isorropia_EpetraRedistributor.hpp>
#include <ispatest_lbeval_utils.hpp>

#include <iostream>
#include <vector>
#include <stdexcept>
#ifdef HAVE_EPETRA
Teuchos::RCP<const Epetra_RowMatrix>
create_epetra_rowmatrix(int numProcs,
int localProc,
int local_n);
#endif
int main(int argc, char** argv) {
#if defined(HAVE_MPI) && defined(HAVE_EPETRA)
int numProcs = 1;
int localProc = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
int local_n = 1200;
Teuchos::RCP<const Epetra_RowMatrix> rowmatrix;
try {
rowmatrix = create_epetra_rowmatrix(numProcs, localProc, local_n);
}
catch(std::exception& exc) {
std::cout << "vert_weights example: create_epetra_rowmatrix threw"
<< " exception '" << exc.what() << "' on proc "
<< localProc << std::endl;
MPI_Finalize();
return(-1);
}
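// Select the partitioning method.  With Zoltan enabled, setting
// "PARTITIONING METHOD" to "HYPERGRAPH" requests Zoltan's hypergraph
// partitioner.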
Teuchos::ParameterList paramlist;
#ifdef HAVE_ISORROPIA_ZOLTAN
paramlist.set("PARTITIONING METHOD", "HYPERGRAPH");
#else
// Without Zoltan, Isorropia falls back to its built-in linear partitioner;
// no method parameter is needed in that case.
#endif
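// Create one weight per hyperedge, stored in a vector on the matrix's row
// map: the hyperedge with global ID g gets weight g+1, so higher-numbered
// hyperedges are more expensive to cut.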
Teuchos::RCP<Epetra_Vector> hge_weights =
Teuchos::rcp(new Epetra_Vector(rowmatrix->RowMatrixRowMap()));
double* vals = hge_weights->Values();
const Epetra_BlockMap& map = rowmatrix->RowMatrixRowMap();
int num = map.NumMyElements();
for(int i=0; i<num; ++i) {
vals[i] = 1.0*(map.GID(i)+1);
}
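// A CostDescriber carries the hyperedge weights to the partitioner.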
Teuchos::RCP<Isorropia::Epetra::CostDescriber> costs =
Teuchos::rcp(new Isorropia::Epetra::CostDescriber);
costs->setHypergraphEdgeWeights(hge_weights);
// Compute the partition, then build a Redistributor that applies it.
Teuchos::RCP<Isorropia::Epetra::Partitioner> partitioner =
Teuchos::rcp(new Isorropia::Epetra::Partitioner(rowmatrix, costs, paramlist));
Isorropia::Epetra::Redistributor rd(partitioner);
Teuchos::RCP<Epetra_CrsMatrix> bal_matrix;
if (localProc == 0) {
std::cout << " calling Isorropia::Epetra::Redistributor::redistribute..."
<< std::endl;
}
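// Redistribute the matrix; bal_matrix is a new Epetra_CrsMatrix whose rows
// follow the partition computed above.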
try {
bal_matrix = rd.redistribute(*rowmatrix);
}
catch(std::exception& exc) {
std::cout << "linsys example: Isorropia::Epetra::Redistributor threw "
<< "exception '" << exc.what() << "' on proc "
<< localProc << std::endl;
MPI_Finalize();
return(-1);
}
double bal0, bal1, cutn0, cutn1, cutl0, cutl1;
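// bal0/bal1 are load-balance ratios; cutn/cutl are the weighted hypergraph
// cut metrics (cut-net and connectivity-1) before and after repartitioning.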
#if 1
double goalWeight = 1.0 / (double)numProcs;
// compute_hypergraph_metrics (ispatest utility) fills the balance and cut
// values for the given matrix and costs.
ispatest::compute_hypergraph_metrics(*rowmatrix, *costs, goalWeight,
bal0, cutn0, cutl0);
// Move the weights to the new layout, then compute the "after" metrics.
Teuchos::RCP<Epetra_Vector> new_weights = rd.redistribute(*hge_weights);
Isorropia::Epetra::CostDescriber new_costs;
new_costs.setHypergraphEdgeWeights(new_weights);
ispatest::compute_hypergraph_metrics(*bal_matrix, new_costs, goalWeight, bal1, cutn1, cutl1);
#else
std::vector<double> bal(2), cutn(2), cutl(2);
Epetra_Import &importer = rd.get_importer();
costs->compareBeforeAndAfterHypergraph(*rowmatrix, *bal_matrix, importer,
bal, cutn, cutl);
bal0 = bal[0]; cutn0 = cutn[0]; cutl0 = cutl[0];
bal1 = bal[1]; cutn1 = cutn[1]; cutl1 = cutl[1];
#endif
if (localProc == 0){
std::cout << "Before partitioning: ";
std::cout << "Balance " << bal0 << " cutN " << cutn0 << " cutL " << cutl0;
std::cout << std::endl;
std::cout << "After partitioning: ";
std::cout << "Balance " << bal1 << " cutN " << cutn1 << " cutL " << cutl1;
std::cout << std::endl;
}
MPI_Finalize();
#else
std::cout << "vert_weights: must have both MPI and EPETRA. Make sure "
<< "Trilinos is configured with --enable-mpi and --enable-epetra."
<< std::endl;
#endif
return(0);
}
#if defined(HAVE_MPI) && defined(HAVE_EPETRA)
Teuchos::RCP<const Epetra_RowMatrix>
create_epetra_rowmatrix(int numProcs,
int localProc,
int local_n)
{
if (localProc == 0) {
std::cout << " creating Epetra_CrsMatrix with even row-distribution..."
<< std::endl;
}
Epetra_MpiComm comm(MPI_COMM_WORLD);
int global_num_rows = numProcs*local_n;
Epetra_Map rowmap(global_num_rows, local_n, 0, comm);
int nnz_per_row = 9;
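// Each row couples to up to four neighbors on either side plus the diagonal,
// giving a banded matrix with at most 9 nonzeros per row.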
Teuchos::RCP<Epetra_CrsMatrix> matrix =
Teuchos::rcp(new Epetra_CrsMatrix(Copy, rowmap, nnz_per_row));
double negOne = -1.0;
double posFour = 4.0;  // value placed on the diagonal
for (int i=0; i<local_n; i++) {
int GlobalRow = matrix->GRID(i);
int RowLess1 = GlobalRow - 1;
int RowPlus1 = GlobalRow + 1;
int RowLess2 = GlobalRow - 2;
int RowPlus2 = GlobalRow + 2;
int RowLess3 = GlobalRow - 3;
int RowPlus3 = GlobalRow + 3;
int RowLess4 = GlobalRow - 4;
int RowPlus4 = GlobalRow + 4;
if (RowLess4>=0) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowLess4);
}
if (RowLess3>=0) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowLess3);
}
if (RowLess2>=0) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowLess2);
}
if (RowLess1>=0) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowLess1);
}
if (RowPlus1<global_num_rows) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowPlus1);
}
if (RowPlus2<global_num_rows) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowPlus2);
}
if (RowPlus3<global_num_rows) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowPlus3);
}
if (RowPlus4<global_num_rows) {
matrix->InsertGlobalValues(GlobalRow, 1, &negOne, &RowPlus4);
}
matrix->InsertGlobalValues(GlobalRow, 1, &posFour, &GlobalRow);
}
int err = matrix->FillComplete();
if (err != 0) {
throw std::runtime_error("create_epetra_rowmatrix: FillComplete returned a nonzero error code");
}
return(matrix);
}
#endif //HAVE_MPI && HAVE_EPETRA