TaskMappingProblemTest.cpp
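// Test driver for Zoltan2::MappingProblem. It builds a brick of
// nx * ny * nz communicating tasks, wraps the task graph and the task
// coordinates in Zoltan2 input adapters, and exercises task mapping both
// with a distributed input adapter and with a serially replicated one.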
#include <string>

#include <Teuchos_RCP.hpp>
#include <Teuchos_Array.hpp>
#include <Teuchos_ParameterList.hpp>
#include "Teuchos_XMLParameterListHelpers.hpp"

#include "Tpetra_MultiVector.hpp"
#include <Tpetra_CrsGraph.hpp>
#include <Tpetra_Map.hpp>

#include <Zoltan2_XpetraCrsGraphAdapter.hpp>
#include <Zoltan2_XpetraMultiVectorAdapter.hpp>
#include <Zoltan2_TimerManager.hpp>
#include <Zoltan2_PartitioningProblem.hpp>
#include <Zoltan2_MappingProblem.hpp>
#include <Zoltan2_MappingSolution.hpp>
#include <Zoltan2_EvaluatePartition.hpp>
#include <Zoltan2_EvaluateMapping.hpp>
#include <Zoltan2_TestHelpers.hpp>

typedef Tpetra::CrsGraph<zlno_t, zgno_t, znode_t> mytest_tcrsGraph_t;
typedef Tpetra::MultiVector<zscalar_t, zlno_t, zgno_t, znode_t> mytest_tMVector_t;
typedef Zoltan2::XpetraCrsGraphAdapter<mytest_tcrsGraph_t, mytest_tMVector_t> mytest_adapter_t;
typedef Tpetra::Map<>::node_type mytest_znode_t;
typedef Tpetra::Map<zlno_t, zgno_t, mytest_znode_t> mytest_map_t;
typedef mytest_adapter_t::part_t mytest_part_t;

enum MappingScenarios{
  TwoPhase,
  SinglePhaseElementsInProcessInSamePartition,
  SinglePhaseElementsAreOnePartition
};

enum MappingInputDistribution{
  Distributed,
  Serial
};

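// Build the distributed task communication graph for an nx * ny * nz grid of
// tasks. Task i sits at grid position (x, y, z) = (i % nx, (i / nx) % ny,
// i / (nx * ny)) and is connected to its face neighbors, so each row of the
// graph has at most six entries. On return, partCenters holds the task
// coordinates and myTasks the number of locally owned tasks.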
RCP<mytest_tcrsGraph_t> create_tpetra_input_matrix(
    int nx, int ny, int nz,
    int numProcs, Teuchos::RCP<const Teuchos::Comm<int> > tcomm,
    RCP<Zoltan2::Environment> env, zscalar_t ** &partCenters, zgno_t &myTasks)
{
  int rank = tcomm->getRank();
  using namespace Teuchos;

  int coordDim = 3;
  zgno_t numGlobalTasks = nx*ny*nz;

  // Block distribution of tasks: each rank owns numGlobalTasks/numProcs
  // tasks, and the first numGlobalTasks%numProcs ranks own one extra.
  myTasks = numGlobalTasks / numProcs;
  zgno_t taskLeftOver = numGlobalTasks % numProcs;
  if (zgno_t(rank) < taskLeftOver) ++myTasks;

  zgno_t myTaskBegin = (numGlobalTasks / numProcs) * rank;
  myTaskBegin += (taskLeftOver < zgno_t(rank) ? taskLeftOver : rank);
  zgno_t myTaskEnd = myTaskBegin + myTasks;

  partCenters = new zscalar_t * [coordDim];
  for (int i = 0; i < coordDim; ++i){
    partCenters[i] = new zscalar_t[myTasks];
  }

  // CRS arrays for the local rows of the task communication graph.
  // Each task has at most six neighbors, one per face of its grid cell.
  zgno_t *task_communication_xadj_ = new zgno_t[myTasks + 1];
  zgno_t *task_communication_adj_ = new zgno_t[myTasks * 6];

  env->timerStart(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");
  zlno_t prevNCount = 0;
  task_communication_xadj_[0] = 0;
  for (zgno_t i = myTaskBegin; i < myTaskEnd; ++i) {
    // Decode the task's (x, y, z) position in the nx * ny * nz brick.
    int x = i % nx;
    int y = (i / nx) % ny;
    int z = (i / nx) / ny;
    partCenters[0][i - myTaskBegin] = x;
    partCenters[1][i - myTaskBegin] = y;
    partCenters[2][i - myTaskBegin] = z;

    // Add an edge to each face neighbor that exists in the grid.
    if (x > 0){
      task_communication_adj_[prevNCount++] = i - 1;
    }
    if (x < nx - 1){
      task_communication_adj_[prevNCount++] = i + 1;
    }
    if (y > 0){
      task_communication_adj_[prevNCount++] = i - nx;
    }
    if (y < ny - 1){
      task_communication_adj_[prevNCount++] = i + nx;
    }
    if (z > 0){
      task_communication_adj_[prevNCount++] = i - nx * ny;
    }
    if (z < nz - 1){
      task_communication_adj_[prevNCount++] = i + nx * ny;
    }
    task_communication_xadj_[i + 1 - myTaskBegin] = prevNCount;
  }

  env->timerStop(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");

  RCP<const mytest_map_t> map = rcp(new mytest_map_t(numGlobalTasks, myTasks, 0, tcomm));

  RCP<mytest_tcrsGraph_t> TpetraCrsGraph(new mytest_tcrsGraph_t(map, 0));

  env->timerStart(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");

  for (zgno_t lclRow = 0; lclRow < myTasks; ++lclRow) {
    const zgno_t gblRow = map->getGlobalElement(lclRow);
    zgno_t begin = task_communication_xadj_[lclRow];
    zgno_t end = task_communication_xadj_[lclRow + 1];
    const ArrayView<const zgno_t> indices(task_communication_adj_ + begin, end - begin);
    TpetraCrsGraph->insertGlobalIndices(gblRow, indices);
  }
  TpetraCrsGraph->fillComplete();

  delete [] task_communication_xadj_;
  delete [] task_communication_adj_;

  env->timerStop(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");
  return TpetraCrsGraph;
}

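// Wrap the per-rank coordinate arrays in an XpetraMultiVectorAdapter so the
// geometric mapping algorithm can read the task centers through the standard
// Zoltan2 coordinate-input interface.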
RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > create_multi_vector_adapter(
    RCP<const mytest_map_t> map,
    zscalar_t **partCenters, zgno_t myTasks){

  const int coord_dim = 3;
  Teuchos::Array<Teuchos::ArrayView<const zscalar_t> > coordView(coord_dim);

  if (myTasks > 0){
    Teuchos::ArrayView<const zscalar_t> a(partCenters[0], myTasks);
    coordView[0] = a;
    Teuchos::ArrayView<const zscalar_t> b(partCenters[1], myTasks);
    coordView[1] = b;
    Teuchos::ArrayView<const zscalar_t> c(partCenters[2], myTasks);
    coordView[2] = c;
  }
  else {
    // Empty views for ranks that own no tasks.
    Teuchos::ArrayView<const zscalar_t> a;
    coordView[0] = a;
    coordView[1] = a;
    coordView[2] = a;
  }
  RCP<mytest_tMVector_t> coords(new mytest_tMVector_t(map, coordView.view(0, coord_dim), coord_dim));
  RCP<const mytest_tMVector_t> const_coords = rcp_const_cast<const mytest_tMVector_t>(coords);
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > adapter(
      new Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t>(const_coords));
  return adapter;
}

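// Scenario 1: the input adapter is distributed across all ranks. Builds the
// graph and coordinate adapters, then creates, solves, and evaluates the
// three mapping-problem variants described inline below.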
void test_distributed_input_adapter(
    int nx, int ny, int nz, Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm)
{
  Teuchos::RCP<const Teuchos::Comm<int> > tcomm = global_tcomm;
  mytest_part_t numProcs = tcomm->getSize();
  Teuchos::ParameterList distributed_problemParams;
  // Create the mapping problem parameters.
  distributed_problemParams.set("Machine_Optimization_Level", 10);
  distributed_problemParams.set("mapping_algorithm", "geometric");
  distributed_problemParams.set("distributed_input_adapter", true);
  distributed_problemParams.set("mj_enable_rcb", true);
  distributed_problemParams.set("algorithm", "multijagged");
  distributed_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env(new Zoltan2::Environment(distributed_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(new Zoltan2::TimerManager(global_tcomm, &std::cout, Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  zscalar_t **partCenters;
  zgno_t myTasks;
  // Create the Tpetra input graph.
  RCP<mytest_tcrsGraph_t> distributed_tpetra_graph =
      create_tpetra_input_matrix(nx, ny, nz, numProcs,
                                 tcomm, env, partCenters,
                                 myTasks);
  RCP<const mytest_map_t> distributed_map = distributed_tpetra_graph->getMap();
  global_tcomm->barrier();

  // Create the input adapter from the Tpetra graph.
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph =
      rcp_const_cast<const mytest_tcrsGraph_t>(distributed_tpetra_graph);
  RCP<mytest_adapter_t> ia(new mytest_adapter_t(const_tpetra_graph));

  // Create the multivector adapter for the coordinates and attach it.
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > distributed_adapter =
      create_multi_vector_adapter(distributed_map, partCenters, myTasks);
  ia->setCoordinateInput(distributed_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();

  // There are three ways to create a mapping problem.
  // First, run a partitioning algorithm on the input adapter, then run task
  // mapping on the result of that partitioning.
  // Second, run the mapping algorithm directly. It assumes that the tasks
  // within the same processor are in the same partition, as if they had been
  // migrated there by a partitioning.
  // Third, create your own partitioning solution without running a
  // partitioning algorithm. This option can make task mapping perform the
  // partitioning as well: create a solution in which each element is a part
  // by itself, and the mapping algorithm will map each of these parts to a
  // processor, effectively partitioning in the process.

  // FIRST CREATE A PARTITIONING PROBLEM.
  Zoltan2::PartitioningProblem<mytest_adapter_t> partitioning_problem(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm);
  partitioning_problem.solve();
  Zoltan2::PartitioningSolution<mytest_adapter_t> partition_solution =
      partitioning_problem.getSolution();

  // FOR THE SECOND CASE WE DON'T NEED a solution.

  // FOR the third case we create our own solution and assign a unique part to
  // each element: each element gets its global id as its part number.
  Zoltan2::PartitioningSolution<mytest_adapter_t> single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView<const zgno_t> gids = distributed_map->getNodeElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i){
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);

  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Mapping problem for the first case: provide the partitioning solution
  // computed by MJ.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_1(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm, &partition_solution);

  // Mapping problem for the second case: no solution is provided, so mapping
  // assumes the elements on each processor form one part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_2(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm);

  // Mapping problem for the third case: provide a solution in which every
  // element belongs to its own unique part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_3(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Solve the mapping problems.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");
  distributed_map_problem_1.solve(true);
  distributed_map_problem_2.solve(true);
  distributed_map_problem_3.solve(true);
  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  // Get the solutions.
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln1 = distributed_map_problem_1.getSolution();
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln2 = distributed_map_problem_2.getSolution();
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 = distributed_map_problem_3.getSolution();

  timer->printAndResetToZero();

  typedef Zoltan2::EvaluateMapping<mytest_adapter_t> quality_t;

  RCP<quality_t> metricObject_1 = rcp(new quality_t(ia.getRawPtr(),
      &distributed_problemParams, global_tcomm, msoln1,
      distributed_map_problem_1.getMachine().getRawPtr()));
  RCP<quality_t> metricObject_2 = rcp(new quality_t(ia.getRawPtr(),
      &distributed_problemParams, global_tcomm, msoln2,
      distributed_map_problem_2.getMachine().getRawPtr()));
  RCP<quality_t> metricObject_3 = rcp(new quality_t(ia.getRawPtr(),
      &distributed_problemParams, global_tcomm, msoln3,
      distributed_map_problem_3.getMachine().getRawPtr()));

  if (global_tcomm->getRank() == 0){
    std::cout << "METRICS FOR THE FIRST CASE - TWO PHASE MAPPING" << std::endl;
    metricObject_1->printMetrics(std::cout);
    std::cout << "METRICS FOR THE SECOND CASE - TWO PHASE MAPPING - INITIAL ASSIGNMENTS ARE ASSUMED TO BE A PART" << std::endl;
    metricObject_2->printMetrics(std::cout);
    std::cout << "METRICS FOR THE THIRD CASE - ONE PHASE MAPPING - EACH ELEMENT IS ASSUMED TO BE IN A UNIQUE PART AT THE BEGINNING" << std::endl;
    metricObject_3->printMetrics(std::cout);
  }

  for (int i = 0; i < 3; i++) delete [] partCenters[i];
  delete [] partCenters;
}

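// Scenario 2: the input is replicated; every rank owns the whole task graph.
// Only single-phase mapping (each element its own part) is meaningful here,
// as the comments below explain.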
void test_serial_input_adapter(
    int nx, int ny, int nz, Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm)
{
  // In this case every processor has the entire input.
  Teuchos::RCP<const Teuchos::Comm<int> > serial_comm =
      Teuchos::createSerialComm<int>();

  // For the input creation, let each processor think it is the only processor.
  mytest_part_t numProcs = serial_comm->getSize();
  Teuchos::ParameterList serial_problemParams;
  // Create the mapping problem parameters.
  serial_problemParams.set("Machine_Optimization_Level", 10);
  serial_problemParams.set("mapping_algorithm", "geometric");
  serial_problemParams.set("distributed_input_adapter", false);
  serial_problemParams.set("algorithm", "multijagged");
  serial_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env(new Zoltan2::Environment(serial_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(new Zoltan2::TimerManager(global_tcomm, &std::cout, Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  zscalar_t **partCenters;
  zgno_t myTasks;
  // Create the Tpetra input graph, replicated on the serial comm.
  RCP<mytest_tcrsGraph_t> serial_tpetra_graph =
      create_tpetra_input_matrix(nx, ny, nz, numProcs, serial_comm, env, partCenters, myTasks);
  RCP<const mytest_map_t> serial_map = serial_tpetra_graph->getMap();
  global_tcomm->barrier();

  // Create the input adapter from the Tpetra graph.
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph =
      rcp_const_cast<const mytest_tcrsGraph_t>(serial_tpetra_graph);
  RCP<mytest_adapter_t> ia(new mytest_adapter_t(const_tpetra_graph));

  // Create the multivector adapter for the coordinates and attach it.
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > serial_adapter =
      create_multi_vector_adapter(serial_map, partCenters, myTasks);
  ia->setCoordinateInput(serial_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();

  // NOW it only makes sense to map the tasks serially. This is the case for
  // applications that already have the whole graph on every process and only
  // want to do the mapping; it saves time in the mapping algorithm.

  // The first case from the distributed test does not make sense here and
  // would be erroneous: Zoltan2 partitioning algorithms require distributed
  // input. One could still do it in two phases, but would have to make sure
  // that the distributed and serial input adapters match correctly.

  // The second case also does not make sense and would be erroneous: all
  // elements are on the same process, so they would be assumed to be in the
  // same part, which would result in only a single part.

  // If the input adapter is not distributed, we are only interested in the
  // third case. Each element is its own unique part at the beginning of the
  // mapping.

  // For the third case we create our own solution and assign a unique part to
  // each element: each element gets its global id as its part number.
  // Global ids equal local ids here because each processor owns the whole input.
  Zoltan2::PartitioningSolution<mytest_adapter_t> single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView<const zgno_t> gids = serial_map->getNodeElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i){
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);

  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Create a mapping problem for the third case, with a solution in which
  // every element belongs to its own unique part. Even though the input is
  // not distributed, we still provide global_tcomm because the processors
  // will calculate different mappings and the best one will be chosen.
  Zoltan2::MappingProblem<mytest_adapter_t> serial_map_problem(
      ia.getRawPtr(), &serial_problemParams, global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Solve the mapping problem.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");
  serial_map_problem.solve(true);
  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  // Get the solution.
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 = serial_map_problem.getSolution();

  timer->printAndResetToZero();

  typedef Zoltan2::EvaluateMapping<mytest_adapter_t> quality_t;

  // The input is not distributed in this case. The metric object is given the
  // serial comm so that it can calculate the correct metrics without global
  // communication.
  RCP<quality_t> metricObject_3 = rcp(new quality_t(ia.getRawPtr(),
      &serial_problemParams, serial_comm, msoln3,
      serial_map_problem.getMachine().getRawPtr()));

  if (global_tcomm->getRank() == 0){
    std::cout << "METRICS FOR THE SERIAL CASE - ONE PHASE MAPPING - EACH ELEMENT IS ASSUMED TO BE IN A UNIQUE PART AT THE BEGINNING" << std::endl;
    metricObject_3->printMetrics(std::cout);
  }

  for (int i = 0; i < 3; i++) delete [] partCenters[i];
  delete [] partCenters;
}

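// Entry point. Grid dimensions are read from optional command-line pairs
// "NX <n> NY <n> NZ <n>" and default to 16 x 16 x 16. An illustrative
// invocation (the executable name depends on the build system):
//
//   mpirun -np 4 ./TaskMappingProblemTest.exe NX 8 NY 8 NZ 8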
int main(int narg, char *arg[]){

  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm = Tpetra::getDefaultComm();

  int nx = 16, ny = 16, nz = 16;
  for (int i = 1; i < narg; ++i) {
    if (0 == strcasecmp(arg[i], "NX")) {
      nx = atoi(arg[++i]);
    }
    else if (0 == strcasecmp(arg[i], "NY")) {
      ny = atoi(arg[++i]);
    }
    else if (0 == strcasecmp(arg[i], "NZ")) {
      nz = atoi(arg[++i]);
    }
    else {
      std::cerr << "Unrecognized command line argument #" << i << ": " << arg[i] << std::endl;
      return 1;
    }
  }

  try{
    test_distributed_input_adapter(nx, ny, nz, global_tcomm);
    test_serial_input_adapter(nx, ny, nz, global_tcomm);

#if 0
    // Disabled debugging output: write each rank's mapped task coordinates to
    // coords<rank>.txt and emit a gnuplot script to visualize them.
    {
      part_t my_parts = 0, *my_result_parts;
      //const part_t *local_element_to_rank = msoln1->getPartListView();

      std::cout << "me:" << global_tcomm->getRank() << " my_parts:" << my_parts
                << " myTasks:" << myTasks << std::endl;
      if (global_tcomm->getRank() == 0) {
        FILE *f2 = fopen("plot.gnuplot", "w");
        for (int i = 0; i < global_tcomm->getSize(); ++i){
          char str[20];
          sprintf(str, "coords%d.txt", i);
          if (i == 0){
            fprintf(f2, "splot \"%s\"\n", str);
          }
          else {
            fprintf(f2, "replot \"%s\"\n", str);
          }
        }
        fprintf(f2, "pause -1\n");
        fclose(f2);
      }
      char str[20];
      int myrank = global_tcomm->getRank();
      sprintf(str, "coords%d.txt", myrank);
      FILE *coord_files = fopen(str, "w");

      for (int j = 0; j < my_parts; ++j){
        int findex = my_result_parts[j];
        std::cout << "findex " << findex << std::endl;
        fprintf(coord_files, "%lf %lf %lf\n",
                partCenters[0][findex], partCenters[1][findex], partCenters[2][findex]);
      }
      fclose(coord_files);
    }
#endif

    if (global_tcomm->getRank() == 0){
      std::cout << "PASS" << std::endl;
    }
  }
  catch(std::string &s){
    std::cerr << s << std::endl;
  }
  catch(char *s){
    std::cerr << s << std::endl;
  }
}