TaskMappingProblemTest.cpp

#include <string>

#include <Teuchos_RCP.hpp>
#include <Teuchos_Array.hpp>
#include <Teuchos_ParameterList.hpp>
#include "Teuchos_XMLParameterListHelpers.hpp"

#include "Tpetra_MultiVector.hpp"
#include <Tpetra_CrsGraph.hpp>
#include <Tpetra_Map.hpp>

#include <Zoltan2_TestHelpers.hpp>
#include <Zoltan2_XpetraCrsGraphAdapter.hpp>
#include <Zoltan2_XpetraMultiVectorAdapter.hpp>
#include <Zoltan2_TimerManager.hpp>
#include <Zoltan2_PartitioningProblem.hpp>
#include <Zoltan2_MappingProblem.hpp>
#include <Zoltan2_MappingSolution.hpp>
#include <Zoltan2_EvaluatePartition.hpp>
#include <Zoltan2_EvaluateMapping.hpp>

typedef Tpetra::CrsGraph<zlno_t, zgno_t, znode_t> mytest_tcrsGraph_t;
typedef Tpetra::MultiVector<zscalar_t, zlno_t, zgno_t, znode_t>
  mytest_tMVector_t;
typedef Zoltan2::XpetraCrsGraphAdapter<mytest_tcrsGraph_t, mytest_tMVector_t>
  mytest_adapter_t;
typedef Tpetra::Map<>::node_type mytest_znode_t;
typedef Tpetra::Map<zlno_t, zgno_t, mytest_znode_t> mytest_map_t;
typedef mytest_adapter_t::part_t mytest_part_t;

enum MappingScenarios {
  TwoPhase,
  SinglePhaseElementsInProcessInSamePartition,
  SinglePhaseElementsAreOnePartition
};

enum MappingInputDistributution {
  Distributed,
  Serial
};

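// Builds a communication graph for an nx x ny x nz grid of tasks in which
// each task talks to its (up to six) face neighbors, block-distributed over
// numProcs ranks. On return, partCenters holds the (x, y, z) coordinates of
// the locally owned tasks and myTasks their count; the caller owns and must
// free the partCenters arrays.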
RCP<mytest_tcrsGraph_t> create_tpetra_input_matrix(
    int nx, int ny, int nz,
    int numProcs, Teuchos::RCP<const Teuchos::Comm<int> > tcomm,
    RCP<Zoltan2::Environment> env, zscalar_t ** &partCenters,
    zgno_t &myTasks) {

  int rank = tcomm->getRank();
  using namespace Teuchos;

  int coordDim = 3;
  zgno_t numGlobalTasks = nx*ny*nz;

  // Block-distribute the tasks: the first (numGlobalTasks % numProcs) ranks
  // own one extra task each.
  myTasks = numGlobalTasks / numProcs;
  zgno_t taskLeftOver = numGlobalTasks % numProcs;
  if (zgno_t(rank) < taskLeftOver) ++myTasks;

  zgno_t myTaskBegin = (numGlobalTasks / numProcs) * rank;
  myTaskBegin += (taskLeftOver < zgno_t(rank) ? taskLeftOver : rank);
  zgno_t myTaskEnd = myTaskBegin + myTasks;

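  // For example, 100 tasks on 8 ranks: ranks 0-3 own 13 tasks each and
  // ranks 4-7 own 12, with rank r's block starting at 12 * r + min(4, r).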
  partCenters = new zscalar_t * [coordDim];
  for (int i = 0; i < coordDim; ++i) {
    partCenters[i] = new zscalar_t[myTasks];
  }
  // CSR-style arrays for the task communication graph; each task has at
  // most six neighbors.
  zgno_t *task_communication_xadj_ = new zgno_t [myTasks + 1];
  zgno_t *task_communication_adj_ = new zgno_t [myTasks * 6];

  env->timerStart(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");
  zlno_t prevNCount = 0;
  task_communication_xadj_[0] = 0;
  for (zgno_t i = myTaskBegin; i < myTaskEnd; ++i) {
    int x = i % nx;
    int y = (i / (nx)) % ny;
    int z = (i / (nx)) / ny;
    partCenters[0][i - myTaskBegin] = x;
    partCenters[1][i - myTaskBegin] = y;
    partCenters[2][i - myTaskBegin] = z;

    if (x > 0) {
      task_communication_adj_[prevNCount++] = i - 1;
    }
    if (x < nx - 1) {
      task_communication_adj_[prevNCount++] = i + 1;
    }
    if (y > 0) {
      task_communication_adj_[prevNCount++] = i - nx;
    }
    if (y < ny - 1) {
      task_communication_adj_[prevNCount++] = i + nx;
    }
    if (z > 0) {
      task_communication_adj_[prevNCount++] = i - nx * ny;
    }
    if (z < nz - 1) {
      task_communication_adj_[prevNCount++] = i + nx * ny;
    }

    task_communication_xadj_[i + 1 - myTaskBegin] = prevNCount;
  }

  env->timerStop(Zoltan2::MACRO_TIMERS, "TaskGraphCreate");

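  // The two arrays form a CSR-style adjacency list. For example, with
  // nx = ny = nz = 2, task 0 sits at (0, 0, 0) and has neighbors 1 (+x),
  // 2 (+y), and 4 (+z), so task_communication_xadj_ begins {0, 3, ...} and
  // task_communication_adj_ begins {1, 2, 4, ...}.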
  RCP<const mytest_map_t> map = rcp(new mytest_map_t(numGlobalTasks,
                                                     myTasks, 0, tcomm));

  Teuchos::Array<size_t> adjPerTask(myTasks);
  for (zgno_t lclRow = 0; lclRow < myTasks; ++lclRow)
    adjPerTask[lclRow] = task_communication_xadj_[lclRow + 1]
                       - task_communication_xadj_[lclRow];
  RCP<mytest_tcrsGraph_t> TpetraCrsGraph(new mytest_tcrsGraph_t(map,
                                                                adjPerTask()));

  env->timerStart(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");

  for (zgno_t lclRow = 0; lclRow < myTasks; ++lclRow) {
    const zgno_t gblRow = map->getGlobalElement(lclRow);
    zgno_t begin = task_communication_xadj_[lclRow];
    zgno_t end = task_communication_xadj_[lclRow + 1];
    const ArrayView<const zgno_t> indices(task_communication_adj_ + begin,
                                          end - begin);
    TpetraCrsGraph->insertGlobalIndices(gblRow, indices);
  }
  TpetraCrsGraph->fillComplete();

  delete [] task_communication_xadj_;
  delete [] task_communication_adj_;

  env->timerStop(Zoltan2::MACRO_TIMERS, "TpetraGraphCreate");
  return TpetraCrsGraph;
}
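

// Wraps the per-dimension coordinate arrays in a Tpetra::MultiVector over
// the given map and returns an XpetraMultiVectorAdapter around it, for use
// as coordinate input alongside the graph adapter.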
RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> >
create_multi_vector_adapter(RCP<const mytest_map_t> map,
                            zscalar_t **partCenters,
                            zgno_t myTasks) {

  const int coord_dim = 3;
  Teuchos::Array<Teuchos::ArrayView<const zscalar_t> > coordView(coord_dim);

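  // Ranks that own no rows use default-constructed (empty) views rather
  // than wrapping the raw pointers.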
  if (myTasks > 0) {
    Teuchos::ArrayView<const zscalar_t> a(partCenters[0], myTasks);
    coordView[0] = a;
    Teuchos::ArrayView<const zscalar_t> b(partCenters[1], myTasks);
    coordView[1] = b;
    Teuchos::ArrayView<const zscalar_t> c(partCenters[2], myTasks);
    coordView[2] = c;
  }
  else {
    Teuchos::ArrayView<const zscalar_t> a;
    coordView[0] = a;
    coordView[1] = a;
    coordView[2] = a;
  }

  // Create the coordinate multivector and wrap it in an adapter.
  RCP<mytest_tMVector_t> coords(
      new mytest_tMVector_t(map, coordView.view(0, coord_dim), coord_dim));
  RCP<const mytest_tMVector_t> const_coords =
      rcp_const_cast<const mytest_tMVector_t>(coords);
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > adapter(
      new Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t>(const_coords));
  return adapter;
}
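

// Exercises task mapping with a distributed input adapter: builds the
// distributed task graph and coordinates, then creates and solves mapping
// problems for the three cases described below.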
void test_distributed_input_adapter(
    int nx, int ny, int nz,
    Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm)
{
  Teuchos::RCP<const Teuchos::Comm<int> > tcomm = global_tcomm;

  mytest_part_t numProcs = tcomm->getSize();
  Teuchos::ParameterList distributed_problemParams;

  // Create mapping problem parameters.
  distributed_problemParams.set("Machine_Optimization_Level", 10);
  distributed_problemParams.set("mapping_algorithm", "geometric");
  distributed_problemParams.set("distributed_input_adapter", true);
  distributed_problemParams.set("mj_enable_rcb", true);
  distributed_problemParams.set("algorithm", "multijagged");
  distributed_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env(
      new Zoltan2::Environment(distributed_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(
      new Zoltan2::TimerManager(global_tcomm, &std::cout,
                                Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  //------------------CREATE DISTRIBUTED INPUT ADAPTER--------------------//
  zscalar_t **partCenters;
  zgno_t myTasks;
  // Create tpetra input graph.
  RCP<mytest_tcrsGraph_t> distributed_tpetra_graph =
      create_tpetra_input_matrix(nx, ny, nz, numProcs,
                                 tcomm, env, partCenters,
                                 myTasks);
  RCP<const mytest_map_t> distributed_map =
      distributed_tpetra_graph->getMap();
  global_tcomm->barrier();

  // Create input adapter from tpetra graph.
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph =
      rcp_const_cast<const mytest_tcrsGraph_t>(distributed_tpetra_graph);
  RCP<mytest_adapter_t> ia(new mytest_adapter_t(const_tpetra_graph));

  // Create multivector for coordinates.
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> >
      distributed_adapter = create_multi_vector_adapter(distributed_map,
                                                        partCenters, myTasks);
  ia->setCoordinateInput(distributed_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();
  //---------------DISTRIBUTED INPUT ADAPTER IS CREATED-------------------//


  // Now we have three ways to create a mapping problem.
  //
  // First, run a partitioning algorithm on the input adapter, then run task
  // mapping on the result of this partitioning.
  //
  // Second, run the mapping algorithm directly. The mapping algorithm will
  // assume that the tasks within the same processor are in the same
  // partition, as if they had been migrated as the result of a partitioning.
  //
  // Third, create your own partitioning solution without running a
  // partitioning algorithm. This option can be used to make the task
  // mapping perform the partitioning as well. That is, create a
  // partitioning solution where each element is a part of its own; the
  // task mapping algorithm will then map each of these tasks to a
  // processor and, as a result, perform the partitioning as well.

  // First create a partitioning problem.
  Zoltan2::PartitioningProblem<mytest_adapter_t> partitioning_problem(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm);

  partitioning_problem.solve();

  Zoltan2::PartitioningSolution<mytest_adapter_t> partition_solution =
      partitioning_problem.getSolution();

  // For the second case, we do not need a solution.

  // For the third case we create our own solution and assign a unique part
  // to each element. Basically each element gets its global id as its part
  // number.
  Zoltan2::MappingSolution<mytest_adapter_t>
      single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView<const zgno_t> gids =
      distributed_map->getLocalElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i) {
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);

  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Create the mapping problem for the first case; provide the partitioning
  // solution computed by MJ.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_1(
      ia.getRawPtr(), &distributed_problemParams,
      global_tcomm, &partition_solution);

  // Create the mapping problem for the second case. We don't provide a
  // solution here; mapping assumes that the elements in the current
  // processor are attached together and are in the same part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_2(
      ia.getRawPtr(), &distributed_problemParams, global_tcomm);

  // Create the mapping problem for the third case. We provide a solution in
  // which each element belongs to a unique part.
  Zoltan2::MappingProblem<mytest_adapter_t> distributed_map_problem_3(
      ia.getRawPtr(), &distributed_problemParams,
      global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Solve the mapping problems.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");

  distributed_map_problem_1.solve(true);

  distributed_map_problem_2.solve(true);

  distributed_map_problem_3.solve(true);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  // Get the solutions.
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln1 =
      distributed_map_problem_1.getSolution();

  Zoltan2::MappingSolution<mytest_adapter_t> *msoln2 =
      distributed_map_problem_2.getSolution();

  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 =
      distributed_map_problem_3.getSolution();

  timer->printAndResetToZero();

  typedef Zoltan2::EvaluateMapping<mytest_adapter_t> quality_t;

  RCP<quality_t> metricObject_1 =
      rcp(new quality_t(ia.getRawPtr(), &distributed_problemParams,
                        global_tcomm, msoln1,
                        distributed_map_problem_1.getMachine().getRawPtr()));

  RCP<quality_t> metricObject_2 =
      rcp(new quality_t(ia.getRawPtr(), &distributed_problemParams,
                        global_tcomm, msoln2,
                        distributed_map_problem_2.getMachine().getRawPtr()));

  RCP<quality_t> metricObject_3 =
      rcp(new quality_t(ia.getRawPtr(), &distributed_problemParams,
                        global_tcomm, msoln3,
                        distributed_map_problem_3.getMachine().getRawPtr()));

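  // The metric objects compute their metrics over the comm they are given;
  // only rank 0 prints them below.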
  if (global_tcomm->getRank() == 0) {
    std::cout << "METRICS FOR THE FIRST CASE - TWO PHASE MAPPING"
              << std::endl;
    metricObject_1->printMetrics(std::cout);
    std::cout << "METRICS FOR THE SECOND CASE - TWO PHASE MAPPING"
              << " - INITIAL ASSIGNMENT ARE ASSUMED TO BE A PART" << std::endl;
    metricObject_2->printMetrics(std::cout);
    std::cout << "METRICS FOR THE THIRD CASE - ONE PHASE MAPPING"
              << " - EACH ELEMENT IS ASSUMED TO BE IN UNIQUE PART AT THE BEGINNING"
              << std::endl;
    metricObject_3->printMetrics(std::cout);
  }

  for (int i = 0; i < 3; i++)
    delete [] partCenters[i];
  delete [] partCenters;
}
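

// Exercises task mapping when every rank has a copy of the whole input:
// the graph is built on a serial comm and only the single-phase mapping
// case (case three above) is run.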
void test_serial_input_adapter(
    int nx, int ny, int nz,
    Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm)
{
  // All processors have all of the input in this case.
  Teuchos::RCP<const Teuchos::Comm<int> > serial_comm =
      Teuchos::createSerialComm<int>();

  // For the input creation, let each processor think that it is the only
  // processor.
  mytest_part_t numProcs = serial_comm->getSize();
  Teuchos::ParameterList serial_problemParams;
  // Create mapping problem parameters.
  serial_problemParams.set("Machine_Optimization_Level", 10);
  serial_problemParams.set("mapping_algorithm", "geometric");
  serial_problemParams.set("distributed_input_adapter", false);
  serial_problemParams.set("algorithm", "multijagged");
  serial_problemParams.set("num_global_parts", numProcs);

  RCP<Zoltan2::Environment> env(
      new Zoltan2::Environment(serial_problemParams, global_tcomm));
  RCP<Zoltan2::TimerManager> timer(
      new Zoltan2::TimerManager(global_tcomm, &std::cout,
                                Zoltan2::MACRO_TIMERS));
  env->setTimer(timer);

  //-------------------CREATE SERIAL INPUT ADAPTER-------------------------//
  zscalar_t **partCenters;
  zgno_t myTasks;
  // Create tpetra input graph.
  RCP<mytest_tcrsGraph_t> serial_tpetra_graph =
      create_tpetra_input_matrix(nx, ny, nz, numProcs, serial_comm,
                                 env, partCenters, myTasks);
  RCP<const mytest_map_t> serial_map = serial_tpetra_graph->getMap();
  global_tcomm->barrier();

  // Create input adapter from tpetra graph.
  env->timerStart(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  RCP<const mytest_tcrsGraph_t> const_tpetra_graph =
      rcp_const_cast<const mytest_tcrsGraph_t>(serial_tpetra_graph);
  RCP<mytest_adapter_t> ia(new mytest_adapter_t(const_tpetra_graph));

  // Create multivector for coordinates.
  RCP<Zoltan2::XpetraMultiVectorAdapter<mytest_tMVector_t> > serial_adapter =
      create_multi_vector_adapter(serial_map, partCenters, myTasks);
  ia->setCoordinateInput(serial_adapter.getRawPtr());
  env->timerStop(Zoltan2::MACRO_TIMERS, "AdapterCreate");
  global_tcomm->barrier();
  //------------------SERIAL INPUT ADAPTER IS CREATED----------------------//

  // Now it only makes sense to map the tasks serially. This is the case for
  // applications that already have the whole graph in every process and
  // only want to do the mapping. Basically, it saves the time the mapping
  // algorithm would otherwise spend redistributing the input.

  // The first case from the distributed test does not really make sense
  // here and would be erroneous: Zoltan2 partitioning algorithms require
  // distributed input. One could still do it in two phases, but would need
  // to make sure that the distributed and serial input adapters match
  // correctly.

  // The second case does not make sense either and would also be erroneous:
  // all elements are within the same node, so they should not be assumed to
  // be in the same part, since that would result in only a single part.

  // If the input adapter is not distributed, we are only interested in the
  // third case: each element is its own unique part at the beginning of the
  // mapping.

  // For the third case we create our own solution and assign a unique part
  // to each element. Basically each element gets its global id as its part
  // number. Global ids are the same as local ids here because each
  // processor owns the whole input.
  Zoltan2::MappingSolution<mytest_adapter_t>
      single_phase_mapping_solution(env, global_tcomm, 0);
  Teuchos::ArrayView<const zgno_t> gids = serial_map->getLocalElementList();

  ArrayRCP<int> initial_part_ids(myTasks);
  for (zgno_t i = 0; i < myTasks; ++i) {
    initial_part_ids[i] = gids[i];
  }
  single_phase_mapping_solution.setParts(initial_part_ids);

  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Create");
  // Create a mapping problem for the third case. We provide a solution in
  // which each element belongs to a unique part. Even though the input is
  // not distributed, we still provide global_tcomm, because the processors
  // will each calculate a different mapping and the best one will be chosen.
  Zoltan2::MappingProblem<mytest_adapter_t> serial_map_problem(
      ia.getRawPtr(), &serial_problemParams,
      global_tcomm, &single_phase_mapping_solution);

  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Create");

  // Solve the mapping problem.
  env->timerStart(Zoltan2::MACRO_TIMERS, "Problem Solve");
  serial_map_problem.solve(true);
  env->timerStop(Zoltan2::MACRO_TIMERS, "Problem Solve");

  // Get the solution.
  Zoltan2::MappingSolution<mytest_adapter_t> *msoln3 =
      serial_map_problem.getSolution();

  timer->printAndResetToZero();

  typedef Zoltan2::EvaluateMapping<mytest_adapter_t> quality_t;

  // The input is not distributed in this case. The metric object should be
  // given the serial comm so that it can calculate the correct metrics
  // without global communication.
  RCP<quality_t> metricObject_3 =
      rcp(new quality_t(ia.getRawPtr(),
                        &serial_problemParams, serial_comm, msoln3,
                        serial_map_problem.getMachine().getRawPtr()));

  if (global_tcomm->getRank() == 0) {
    std::cout << "METRICS FOR THE SERIAL CASE - ONE PHASE MAPPING "
              << "- EACH ELEMENT IS ASSUMED TO BE IN UNIQUE PART AT THE BEGINNING"
              << std::endl;
    metricObject_3->printMetrics(std::cout);
  }

  for (int i = 0; i < 3; i++)
    delete [] partCenters[i];
  delete [] partCenters;
}
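

// Usage: TaskMappingProblemTest [NX <n>] [NY <n>] [NZ <n>]
// Runs the distributed and the serial input adapter tests on an
// nx x ny x nz task grid (default 16 x 16 x 16) and prints PASS on rank 0
// on success.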
int main(int narg, char *arg[]) {

  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > global_tcomm =
      Tpetra::getDefaultComm();

  int nx = 16, ny = 16, nz = 16;
  for (int i = 1; i < narg; ++i) {
    if (0 == strcasecmp(arg[i], "NX")) {
      nx = atoi(arg[++i]);
    }
    else if (0 == strcasecmp(arg[i], "NY")) {
      ny = atoi(arg[++i]);
    }
    else if (0 == strcasecmp(arg[i], "NZ")) {
      nz = atoi(arg[++i]);
    }
    else {
      std::cerr << "Unrecognized command line argument #"
                << i << ": " << arg[i] << std::endl;
      return 1;
    }
  }

  try {
    test_distributed_input_adapter(nx, ny, nz, global_tcomm);
    test_serial_input_adapter(nx, ny, nz, global_tcomm);

#if 0
    // Debugging output: dump the mapped coordinates per rank and write a
    // gnuplot script to visualize them. Disabled because it relies on
    // variables that are local to the test functions above.
    {
      part_t my_parts = 0, *my_result_parts;
      //const part_t *local_element_to_rank = msoln1->getPartListView();

      std::cout << "me:" << global_tcomm->getRank()
                << " my_parts:" << my_parts
                << " myTasks:" << myTasks << std::endl;
      if (global_tcomm->getRank() == 0) {
        FILE *f2 = fopen("plot.gnuplot", "w");
        for (int i = 0; i < global_tcomm->getSize(); ++i) {
          char str[32];
          sprintf(str, "coords%d.txt", i);
          if (i == 0) {
            fprintf(f2, "splot \"%s\"\n", str);
          }
          else {
            fprintf(f2, "replot \"%s\"\n", str);
          }
        }
        fprintf(f2, "pause -1\n");
        fclose(f2);
      }
      char str[32];
      int myrank = global_tcomm->getRank();
      sprintf(str, "coords%d.txt", myrank);
      FILE *coord_files = fopen(str, "w");

      for (int j = 0; j < my_parts; ++j) {
        int findex = my_result_parts[j];
        std::cout << "findex " << findex << std::endl;
        fprintf(coord_files, "%lf %lf %lf\n",
                partCenters[0][findex],
                partCenters[1][findex],
                partCenters[2][findex]);
      }
      fclose(coord_files);
    }
#endif

    if (global_tcomm->getRank() == 0) {
      std::cout << "PASS" << std::endl;
    }
  }
  catch (std::string &s) {
    std::cerr << s << std::endl;
  }
  catch (char *s) {
    std::cerr << s << std::endl;
  }

  return 0;
}