Zoltan2_MatrixPartitioningSolution.hpp
// @HEADER
// *****************************************************************************
// Zoltan2: A package of combinatorial algorithms for scientific computing
//
// Copyright 2012 NTESS and the Zoltan2 contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

/*! \file Zoltan2_MatrixPartitioningSolution.hpp
    \brief Defines the MatrixPartitioningSolution class.
*/

#ifndef _ZOLTAN2_MATRIXPARTITIONINGSOLUTION_HPP_
#define _ZOLTAN2_MATRIXPARTITIONINGSOLUTION_HPP_

// namespace Zoltan2 {
// template <typename Adapter>
// class PartitioningSolution;
// }

// Headers needed by the declarations below; the remaining includes stay
// commented out until they are actually used.
#include <Zoltan2_Environment.hpp>
#include <Zoltan2_Solution.hpp>
#include <Zoltan2_Algorithm.hpp>
// #include <Zoltan2_GreedyMWM.hpp>
// #include <Zoltan2_CoordinatePartitioningGraph.hpp>
// #include <cmath>
// #include <algorithm>
// #include <vector>
// #include <limits>


namespace Zoltan2 {

/*! \brief A solution to a matrix partitioning problem.
*/
template <typename Adapter>
class MatrixPartitioningSolution : public Solution
{
public:

#ifndef DOXYGEN_SHOULD_SKIP_THIS
  typedef typename Adapter::gno_t gno_t;
  typedef typename Adapter::scalar_t scalar_t;
  typedef typename Adapter::lno_t lno_t;
  typedef typename Adapter::part_t part_t;
  typedef typename Adapter::user_t user_t;
#endif

  /*! \brief Constructor when part sizes are not supplied.
   *
   *    \param env the environment for the application
   *    \param comm the communicator for the problem associated with this solution
   *    \param algorithm the algorithm that created this solution
   */
  MatrixPartitioningSolution(const RCP<const Environment> &env,
                             const RCP<const Comm<int> > &comm,
                             const RCP<Algorithm<Adapter> > &algorithm = Teuchos::null);


  // Information that the algorithm may wish to query.

  // Method used by the algorithm to set results.

  /*! \brief The algorithm uses setIDLists to set the solution.
   *
   *    \param rowIDs    the global row IDs assigned to this process
   *    \param colIDs    the global column IDs assigned to this process
   *    \param domainIDs the domain vector IDs assigned to this process
   *    \param rangeIDs  the range vector IDs assigned to this process
   */
  void setIDLists(ArrayRCP<part_t> &rowIDs, ArrayRCP<part_t> &colIDs,
                  ArrayRCP<part_t> &domainIDs, ArrayRCP<part_t> &rangeIDs);


  // TODO: Figure out if I want to do this
  //
  // /*! \brief Remap a new partition for maximum overlap with an input partition.
  //  *
  //  * Assumptions for this version:
  //  * input part assignment == processor rank for every local object.
  //  * assuming nGlobalParts <= num ranks
  //  * TODO: Write a version that takes the input part number as input;
  //  *       this change requires input parts in adapters to be provided in
  //  *       the Adapter.
  //  * TODO: For repartitioning, compare to old remapping results; see Zoltan1.
  //  */

  // void RemapParts();

  // ////////////////////////////////////////////////////////////////////
  // /* Return the weight of objects staying with a given remap.
  //  * If remap is NULL, compute weight of objects staying with given partition
  //  */
  // long measure_stays(part_t *remap, int *idx, part_t *adj, long *wgt,
  //                    part_t nrhs, part_t nlhs)
  // {
  //   long staying = 0;
  //   for (part_t i = 0; i < nrhs; i++) {
  //     part_t k = (remap ? remap[i] : i);
  //     for (part_t j = idx[k]; j < idx[k+1]; j++) {
  //       if (i == (adj[j]-nlhs)) {
  //         staying += wgt[j];
  //         break;
  //       }
  //     }
  //   }
  //   return staying;
  // }

  // Results that may be queried by the user, by migration methods,
  // or by metric calculation methods.
  // We return raw pointers so users don't have to learn about our
  // pointer wrappers.

  /*! \brief Return the communicator associated with the solution.
   */
  inline const RCP<const Comm<int> > &getCommunicator() const { return comm_; }

  /*! \brief Return the environment associated with the solution.
   */
  inline const RCP<const Environment> &getEnvironment() const { return env_; }

  /*! \brief Returns list of global row IDs of the nonzeros owned by this process.
   */
  const gno_t *getRowIdsView() const
  {
    if (rowIDs_.size() > 0)
      return rowIDs_.getRawPtr();
    else
      return NULL;
  }

  /*! \brief Returns list of global column IDs of the nonzeros owned by this process.
   */
  const gno_t *getColIdsView() const
  {
    if (colIDs_.size() > 0)
      return colIDs_.getRawPtr();
    else
      return NULL;
  }


  // /*! \brief Returns the process list corresponding to the global ID list.
  //     \return The return value is a NULL pointer if part IDs are
  //             synonymous with process IDs.
  //  */
  // const int *getProcListView() const {
  //   if (procs_.size() > 0) return procs_.getRawPtr();
  //   else return NULL;
  // }


  // /*! \brief Get the processes containing a part.
  //  *  \param partId a part number from 0 to one less than the global number
  //  *         of parts.
  //  *  \param procMin on return will be set to minimum proc number
  //  *  \param procMax on return will be set to maximum proc number
  //  *
  //  *  Normally \c procMin and \c procMax are the same value and a part
  //  *  is assigned to one process.  But if there are more processes than
  //  *  parts, it's possible that a part will be divided across more than
  //  *  one process.
  //  */
  // void getProcsForPart(part_t partId, part_t &procMin, part_t &procMax) const
  // {
  //   env_->localInputAssertion(__FILE__, __LINE__, "invalid part id",
  //     partId >= 0 && partId < nGlobalParts_, BASIC_ASSERTION);
  //
  //   partToProcsMap(partId, procMin, procMax);
  // }

private:


  // void setPartDistribution();

  // void setPartSizes(ArrayView<ArrayRCP<part_t> > reqPartIds,
  //                   ArrayView<ArrayRCP<scalar_t> > reqPartSizes);

  // void computePartSizes(int widx, ArrayView<part_t> ids,
  //                       ArrayView<scalar_t> sizes);

  // void broadcastPartSizes(int widx);


  RCP<const Environment> env_;        // has application communicator
  const RCP<const Comm<int> > comm_;  // the problem communicator

  // part_t nGlobalParts_;  // target global number of parts

  // The algorithm sets these values upon completion.


  // There is a decision to be made about what to store in the solution.
  //
  // 1. One possibility is to store part numbers for each local id.  This is
  //    what PartitioningSolution implements, and it has the advantage that
  //    (unlike option 2) no extra communication is needed after each process
  //    assigns its local ids to parts.
  //
  // 2. Alternatively, each process can store the set of ids that it owns.
  //    For 1D partitioning this requires an additional communication step to
  //    place the ids on the correct processor, and it complicates the case
  //    where the number of parts != the number of processes.  This is similar
  //    to what is needed to build row or column Epetra/Tpetra maps.
  //
  // However, things are more complicated for 2D Cartesian partitioning (maybe
  // for all matrix partitioning).  In that case, we need to assign matrix
  // nonzeros to both a process row and a process column.  This means that
  // option #1 also needs extra communication to determine both of these,
  // which are required to determine the part assignment for a given nonzero
  // (or, alternatively, both the process row and column assignments for a 2D
  // block of matrix rows and columns, although this may complicate part
  // assignment).  This eliminates one of the advantages of method 1 over
  // method 2.
  //
  // For method 2 and 2D partitioning, each process stores the set of rowIDs
  // and columnIDs for which it owns the nonzeros.  Method 2 still has the
  // disadvantage that it becomes more complicated when #parts != #procs, but
  // it provides what is needed to build both the row and column maps.
  // For now, we are going with method #2; an illustrative sketch follows below.

  // Need a plan to handle #parts != #procs

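  // For illustration only (a hypothetical example, not anything computed by
  // this class): with a 2D Cartesian partition of an 8x8 matrix over a 2x2
  // process grid, the process in grid position (process row 0, process
  // column 1) owns the nonzeros in matrix rows 0-3 and matrix columns 4-7,
  // so it would store
  //
  //   rowIDs_    = {0, 1, 2, 3}   // matrix rows whose nonzeros it owns
  //   colIDs_    = {4, 5, 6, 7}   // matrix columns whose nonzeros it owns
  //   domainIDs_ = the domain vector entries assigned to it (a subset of 4-7)
  //   rangeIDs_  = the range vector entries assigned to it (a subset of 0-3)
  //
  // The exact domain/range vector distribution is up to the algorithm; the
  // values above only show the shape of the stored data.
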
  ArrayRCP<gno_t> rowIDs_;    // Row Ids assigned to this process
  ArrayRCP<gno_t> colIDs_;    // Col Ids assigned to this process

  ArrayRCP<gno_t> domainIDs_; // Domain vector Ids assigned to this process
  ArrayRCP<gno_t> rangeIDs_;  // Range vector Ids assigned to this process


  bool haveSolution_;


  // Algorithm used to compute the solution;
  // not sure if this is necessary.
  const RCP<Algorithm<Adapter> > algorithm_;
};

template <typename Adapter>
MatrixPartitioningSolution<Adapter>::MatrixPartitioningSolution(
    const RCP<const Environment> &env,
    const RCP<const Comm<int> > &comm, const RCP<Algorithm<Adapter> > &algorithm)
  : env_(env), comm_(comm), rowIDs_(), colIDs_(), haveSolution_(false),
    algorithm_(algorithm)
{
  env_->memory("After construction of solution");
}

template <typename Adapter>
void MatrixPartitioningSolution<Adapter>::setIDLists(
  ArrayRCP<part_t> &rowIDs, ArrayRCP<part_t> &colIDs,
  ArrayRCP<part_t> &domainIDs, ArrayRCP<part_t> &rangeIDs)
{
  env_->debug(DETAILED_STATUS, "Entering setIDLists");

  rowIDs_ = rowIDs;
  colIDs_ = colIDs;
  domainIDs_ = domainIDs;
  rangeIDs_ = rangeIDs;

  haveSolution_ = true;

  env_->memory("After Solution has processed algorithm's answer");
  env_->debug(DETAILED_STATUS, "Exiting setIDLists");
}




} // namespace Zoltan2

#endif
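
A minimal usage sketch, not part of the header above. It assumes a hypothetical adapter type MyAdapter whose part_t and gno_t are the same type, and it assumes the caller already holds the environment, communicator, and algorithm objects that the enclosing partitioning problem normally creates; none of those objects are defined in this file. The sketch only shows the intended calling pattern: the algorithm hands its computed ID lists to the solution with setIDLists(), and users later read them back through the raw views.

#include <Teuchos_RCP.hpp>
#include <Teuchos_ArrayRCP.hpp>
#include <Zoltan2_MatrixPartitioningSolution.hpp>

// MyAdapter is a placeholder for any Zoltan2 input adapter type.
typedef Zoltan2::MatrixPartitioningSolution<MyAdapter> solution_t;
typedef MyAdapter::part_t part_t;
typedef MyAdapter::gno_t gno_t;

void exampleUse(const Teuchos::RCP<const Zoltan2::Environment> &env,
                const Teuchos::RCP<const Teuchos::Comm<int> > &comm,
                const Teuchos::RCP<Zoltan2::Algorithm<MyAdapter> > &algorithm)
{
  // Create an empty solution; the algorithm is expected to fill it in.
  solution_t soln(env, comm, algorithm);

  // The algorithm builds the four ID lists it computed for this process.
  // They are left empty here; a real algorithm would resize and fill them.
  Teuchos::ArrayRCP<part_t> rowIDs, colIDs, domainIDs, rangeIDs;
  soln.setIDLists(rowIDs, colIDs, domainIDs, rangeIDs);

  // Users, migration methods, or metric methods read the raw views;
  // both calls return NULL when the corresponding list is empty.
  const gno_t *rows = soln.getRowIdsView();
  const gno_t *cols = soln.getColIdsView();
  (void)rows; (void)cols;
}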