49 #ifndef ZOLTAN2_GRAPHICMETRICVALUESUTILITY_HPP
50 #define ZOLTAN2_GRAPHICMETRICVALUESUTILITY_HPP
55 #include <zoltan_dd.h>
// globalWeightedByPart(): given a local part assignment, computes the global
// weighted edge cuts, message counts and (when a machine model is supplied)
// hop-count metrics per part, filling the `metrics` array.
// NOTE(review): this listing is a lossy extraction -- the number embedded at
// the start of most lines is the ORIGINAL file's line number, and the gaps in
// that numbering mean many statements (including the function signature and
// most closing braces) are missing here.  Comments below describe only what
// the visible fragments show.
90 template <
typename Adapter,
92 MachineRepresentation<
typename Adapter::scalar_t,
// Visible parameter fragments: environment, communicator, part-assignment
// view, output array of global sums, a flag defaulting to true, and an
// optional machine model (Teuchos::null => no hop metrics).
95 const RCP<const Environment> &env,
96 const RCP<
const Comm<int> > &comm,
98 const ArrayView<const typename Adapter::part_t> &parts,
101 ArrayRCP<typename Adapter::scalar_t> &globalSums,
102 bool bMessages =
true,
103 const RCP <const MachineRep> machine = Teuchos::null)
// Hop metrics are computed only when a machine model was supplied.
110 bool bHops = (machine != Teuchos::null);
// Local shorthands for the adapter's index/ID/offset/weight types.
117 typedef typename Adapter::lno_t lno_t;
118 typedef typename Adapter::gno_t gno_t;
119 typedef typename Adapter::offset_t offset_t;
120 typedef typename Adapter::scalar_t scalar_t;
// Pull local edge count plus vertex/edge lists (CSR-style: edgeIds indexed
// by offsets) and their weights from the graph model.
126 lno_t localNumEdges = graph->getLocalNumEdges();
128 ArrayView<const gno_t> Ids;
129 ArrayView<t_input_t> v_wghts;
130 graph->getVertexList(Ids, v_wghts);
135 ArrayView<const gno_t> edgeIds;
136 ArrayView<const offset_t> offsets;
137 ArrayView<t_input_t> e_wgts;
138 graph->getEdgeList(edgeIds, offsets, e_wgts);
141 std::vector <scalar_t> edge_weights;
142 int numWeightPerEdge = graph->getNumWeightsPerEdge();
// Metric-slot count depends on bHops and on the number of edge weights
// (numWeightPerEdge * 2 per weight when hop metrics are on).
// NOTE(review): the other ternary operands sit on lines missing from this
// extraction.
144 int numMetrics = bHops ?
148 if (numWeightPerEdge) numMetrics += bHops ?
149 numWeightPerEdge * 2 :
// Append one fresh metric object per slot; `next` remembers where this
// function's metrics start within the caller's array.
153 auto next = metrics.size();
154 for (
auto n = 0; n < numMetrics; ++n) {
155 addNewMetric<gm_t, scalar_t>(env, metrics);
// e_parts[j] will hold the owning part of edge j's remote endpoint.
158 std::vector <part_t> e_parts (localNumEdges);
160 std::vector<part_t> local_parts;
// Multi-rank path: publish each vertex's part through a distributed
// directory, then look up the part of every edge endpoint.
162 #ifdef HAVE_ZOLTAN2_MPI
163 if (comm->getSize() > 1) {
164 const bool bUseLocalIDs =
false;
167 directory_t directory(comm, bUseLocalIDs, debug_level);
// Ranks with no local vertices still issue update/find (with NULLs) so the
// collective directory operations match across the communicator.
168 if (localNumVertices)
169 directory.update(localNumVertices, &Ids[0], NULL, &parts[0],
170 NULL, directory_t::Update_Mode::Replace);
172 directory.update(localNumVertices, NULL, NULL, NULL,
173 NULL, directory_t::Update_Mode::Replace);
176 directory.find(localNumEdges, &edgeIds[0], NULL, &e_parts[0],
179 directory.find(localNumEdges, NULL, NULL, NULL,
// Serial path: map each global vertex ID to its local index, then read the
// part of each edge endpoint directly from the local `parts` array.
184 std::map<gno_t,lno_t> global_id_to_local_index;
189 for (lno_t i = 0; i < localNumVertices; ++i){
192 global_id_to_local_index[Ids[i]] = i;
195 for (lno_t i = 0; i < localNumEdges; ++i){
196 gno_t ei = edgeIds[i];
198 part_t p = parts[global_id_to_local_index[ei]];
203 RCP<const Teuchos::Comm<int> > tcomm = comm;
206 const bool bUseLocalIDs =
false;
// part_info (nested struct fragment): per-target-part accumulator of edge
// weight.  operator+= merges weights so directory AggregateAdd can combine
// entries; ordering/equality compare by target_part only.
214 part_info() : sum_weights(0) {
221 const part_info & operator+=(
const part_info & src) {
222 sum_weights += src.sum_weights;
228 bool operator>(
const part_info & src) {
240 return (target_part > src.target_part);
251 bool operator==(
const part_info & src) {
254 return (target_part == src.target_part);
258 scalar_t sum_weights;
// Bucket local vertices by part via an intrusive linked list:
// part_begins[p] heads part p's vertex chain, part_nexts[v] links to the
// next vertex of the same part (-1 terminates).
262 std::vector <lno_t> part_begins(numParts, -1);
263 std::vector <lno_t> part_nexts(localNumVertices, -1);
267 for (lno_t i = 0; i < localNumVertices; ++i){
268 part_t ap = parts[i];
269 part_nexts[i] = part_begins[ap];
// Main metric loop: weight_index == -1 computes the unweighted (edge-count)
// metrics; 0..numWeightPerEdge-1 compute one set per edge weight.
273 for (
int weight_index = -1; weight_index < numWeightPerEdge ; ++weight_index) {
274 std::vector<part_t> part_data(numParts);
275 std::vector<std::vector<part_info>> user_data(numParts);
276 int count_total_entries = 0;
// Scratch arrays reused per source part: neighbor parts discovered so far
// and the weight accumulated toward each.
278 std::vector <part_t> part_neighbors(numParts);
279 std::vector <scalar_t> part_neighbor_weights(numParts, 0);
280 std::vector <scalar_t> part_neighbor_weights_ordered(numParts);
// For every part i, walk its vertices (linked list above) and their edges,
// accumulating weight per neighbor part.
283 for (part_t i = 0; i < numParts; ++i) {
284 part_t num_neighbor_parts = 0;
285 lno_t v = part_begins[i];
289 for (offset_t j = offsets[v]; j < offsets[v+1]; ++j){
292 part_t ep = e_parts[j];
// Weighted pass reads the edge weight; the unweighted (-1) pass
// presumably leaves ew at a default set on a missing line -- TODO confirm.
298 if (weight_index > -1){
299 ew = e_wgts[weight_index][j];
// Weight still ~0 means this neighbor part has not been seen for part i
// (the scratch array is re-zeroed below after each source part).
302 if (part_neighbor_weights[ep] < 0.00001){
303 part_neighbors[num_neighbor_parts++] = ep;
305 part_neighbor_weights[ep] += ew;
// Compact accumulated weights into discovery order and zero the scratch
// slots for the next source part.
311 for (lno_t j = 0; j < num_neighbor_parts; ++j) {
312 part_t neighbor_part = part_neighbors[j];
313 part_neighbor_weights_ordered[j] = part_neighbor_weights[neighbor_part];
314 part_neighbor_weights[neighbor_part] = 0;
// Record part i's neighbor list as one directory entry (gid = part id,
// user data = vector of part_info).
317 if (num_neighbor_parts > 0) {
323 part_data[count_total_entries] = i;
324 std::vector<part_info> & add_user_data = user_data[count_total_entries];
325 ++count_total_entries;
326 add_user_data.resize(num_neighbor_parts);
327 for(
int n = 0; n < num_neighbor_parts; ++n) {
328 part_info & add_data = add_user_data[n];
329 add_data.target_part = part_neighbors[n];
330 add_data.sum_weights = part_neighbor_weights_ordered[n];
// Local accumulators for the cut/message metrics and, when bHops, the
// hop-count metrics.
335 scalar_t max_edge_cut = 0;
336 scalar_t total_edge_cut = 0;
337 part_t max_message = 0;
338 part_t total_message = 0;
340 part_t total_hop_count = 0;
341 scalar_t total_weighted_hop_count = 0;
342 part_t max_hop_count = 0;
343 scalar_t max_weighted_hop_count = 0;
// Lazily build the list of parts handled on this rank (filled once, reused
// on subsequent weight_index iterations).
348 if(local_parts.size() == 0) {
349 local_parts.resize(numParts);
350 for(
size_t n = 0; n < local_parts.size(); ++n) {
355 std::vector<std::vector<part_info>> find_user_data;
// Serial-communicator detection (fragment) -- presumably selects between
// the directory path and the direct-copy path below; TODO confirm against
// the full source.
364 (
dynamic_cast<const Teuchos::SerialComm<int>*
>(&(*comm)) != NULL);
// Distributed path: AggregateAdd merges part_info entries across ranks
// (via part_info::operator+=), then fetch merged data for the parts this
// rank manages.
369 directory_t directory(comm, bUseLocalIDs, debug_level);
372 directory.update(count_total_entries, &part_data[0], NULL, &user_data[0],
373 NULL, directory_t::Update_Mode::AggregateAdd);
376 directory.get_locally_managed_gids(local_parts);
379 find_user_data.resize(local_parts.size());
382 directory.find(local_parts.size(), &local_parts[0], NULL,
383 &find_user_data[0], NULL, NULL,
false);
// Serial path: the locally aggregated data is already complete.
386 find_user_data = user_data;
// Tally per-part results: every entry whose target part differs from the
// local part contributes its weight to the edge cut (and, per visible
// fragments, feeds the hop metrics when a machine model exists).
389 for(
size_t n = 0; n < local_parts.size(); ++n) {
390 scalar_t part_edge_cut = 0;
391 part_t part_messages = 0;
392 const std::vector<part_info> & data = find_user_data[n];
393 for(
size_t q = 0; q < data.size(); ++q) {
394 const part_t local_part = local_parts[n];
395 const part_info & info = data[q];
396 if (info.target_part != local_part) {
397 part_edge_cut += info.sum_weights;
// Hop metrics: the machine model supplies the hop distance between the two
// parts; weighted hops scale that distance by the cut weight.
401 typename MachineRep::machine_pcoord_t hop_count = 0;
402 machine->getHopCount(local_part, info.target_part, hop_count);
404 part_t hop_counts = hop_count;
405 scalar_t weighted_hop_counts = hop_count * info.sum_weights;
407 total_hop_count += hop_counts;
408 total_weighted_hop_count += weighted_hop_counts;
410 if (hop_counts > max_hop_count ){
411 max_hop_count = hop_counts;
413 if (weighted_hop_counts > max_weighted_hop_count ){
414 max_weighted_hop_count = weighted_hop_counts;
// Fold this part's cut/message totals into the rank-local aggregates.
420 if(part_edge_cut > max_edge_cut) {
421 max_edge_cut = part_edge_cut;
423 total_edge_cut += part_edge_cut;
425 if (part_messages > max_message){
426 max_message = part_messages;
428 total_message += part_messages;
// Global reductions over the communicator: REDUCE_MAX for the per-part
// maxima, REDUCE_SUM for the totals.
432 scalar_t g_max_edge_cut = 0;
433 scalar_t g_total_edge_cut = 0;
434 part_t g_max_message = 0;
435 part_t g_total_message = 0;
437 part_t g_total_hop_count = 0;
438 scalar_t g_total_weighted_hop_count = 0;
439 part_t g_max_hop_count = 0;
440 scalar_t g_max_weighted_hop_count = 0;
443 Teuchos::reduceAll<int, scalar_t>(*comm,Teuchos::REDUCE_MAX,1,&max_edge_cut,&g_max_edge_cut);
444 Teuchos::reduceAll<int, part_t>(*comm,Teuchos::REDUCE_MAX,1,&max_message,&g_max_message);
446 Teuchos::reduceAll<int, scalar_t>(*comm,Teuchos::REDUCE_SUM,1,&total_edge_cut,&g_total_edge_cut);
447 Teuchos::reduceAll<int, part_t>(*comm,Teuchos::REDUCE_SUM,1,&total_message,&g_total_message);
450 Teuchos::reduceAll<int, part_t>(*comm,Teuchos::REDUCE_MAX,1,&max_hop_count,&g_max_hop_count);
451 Teuchos::reduceAll<int, scalar_t>(*comm,Teuchos::REDUCE_MAX,1,&max_weighted_hop_count,&g_max_weighted_hop_count);
453 Teuchos::reduceAll<int, part_t>(*comm,Teuchos::REDUCE_SUM,1,&total_hop_count,&g_total_hop_count);
454 Teuchos::reduceAll<int, scalar_t>(*comm,Teuchos::REDUCE_SUM,1,&total_weighted_hop_count,&g_total_weighted_hop_count);
// Publish results into the metric objects: the cut metric is named
// "edge cuts" for the unweighted pass, "weight N" for weighted passes,
// each carrying the global maximum and global sum.
459 if (weight_index == -1){
460 metrics[next]->setName(
"edge cuts");
463 std::ostringstream oss;
464 oss <<
"weight " << weight_index;
465 metrics[next]->setName( oss.str());
467 metrics[next]->setMetricValue(
"global maximum", g_max_edge_cut);
468 metrics[next]->setMetricValue(
"global sum", g_total_edge_cut);
// Message counts are recorded only for the unweighted pass.
470 if (weight_index == -1){
471 metrics[next]->setName(
"message");
472 metrics[next]->setMetricValue(
"global maximum", g_max_message);
473 metrics[next]->setMetricValue(
"global sum", g_total_message);
// Hop metrics: "hops (No Weight)" for the unweighted pass, "weighted hopsN"
// per weight index otherwise.
478 if (weight_index == -1){
479 metrics[next]->setName(
"hops (No Weight)");
480 metrics[next]->setMetricValue(
"global maximum", g_max_hop_count);
481 metrics[next]->setMetricValue(
"global sum", g_total_hop_count);
485 std::ostringstream oss;
486 oss <<
"weighted hops" << weight_index;
487 metrics[next]->setName( oss.str());
488 metrics[next]->setMetricValue(
"global maximum", g_max_weighted_hop_count);
489 metrics[next]->setMetricValue(
"global sum", g_total_weighted_hop_count);
// printGraphMetricsHeader: writes "Graph Metrics: (N parts)" to `os` and,
// when the achieved part count differs from the target, also prints the
// target count.
// NOTE(review): the signature line is missing from this extraction; the
// name comes from the summary text at the bottom of this file.
501 template <
typename scalar_t,
typename part_t>
504 os <<
"Graph Metrics: (" << numParts <<
" parts)";
506 if (targetNumParts != numParts) {
507 os <<
"Target number of parts is: " << targetNumParts << std::endl;
// printGraphMetrics (list overload): prints the header, then one
// printLine() per metric in infoList.
// NOTE(review): the signature line is missing from this extraction.
514 template <
typename scalar_t,
typename part_t>
517 printGraphMetricsHeader<scalar_t, part_t>(os, targetNumParts, numParts);
518 for (
int i=0; i < infoList.size(); i++) {
520 infoList[i]->printLine(os);
// printGraphMetrics (single-metric overload): prints the header followed by
// one printLine() for the given metric.
// NOTE(review): the signature line is missing from this extraction.
528 template <
typename scalar_t,
typename part_t>
531 printGraphMetricsHeader<scalar_t, part_t>(os, targetNumParts, numParts);
532 metricValue->printLine(os);
Time an algorithm (or other entity) as a whole.
static void printHeader(std::ostream &os)
Print a standard header.
void printGraphMetricsHeader(std::ostream &os, part_t targetNumParts, part_t numParts)
Print out header info for graph metrics.
sub-steps, each method's entry and exit
SparseMatrixAdapter_t::part_t part_t
void globalWeightedByPart(const RCP< const Environment > &env, const RCP< const Comm< int > > &comm, const RCP< const GraphModel< typename Adapter::base_adapter_t > > &graph, const ArrayView< const typename Adapter::part_t > &parts, typename Adapter::part_t &numParts, ArrayRCP< RCP< BaseClassMetrics< typename Adapter::scalar_t > > > &metrics, ArrayRCP< typename Adapter::scalar_t > &globalSums, bool bMessages=true, const RCP< const MachineRep > machine=Teuchos::null)
Given the local partitioning, compute the global weighted cuts in each part.
void printGraphMetrics(std::ostream &os, part_t targetNumParts, part_t numParts, const ArrayView< RCP< BaseClassMetrics< scalar_t > > > &infoList)
Print out list of graph metrics.
size_t getLocalNumVertices() const
Returns the number vertices on this process.
Traits class to handle conversions between gno_t/lno_t and TPL data types (e.g., ParMETIS's idx_t...
GraphModel defines the interface required for graph models.
#define Z2_THROW_OUTSIDE_ERROR(env)
Throw an error returned from outside the Zoltan2 library.
#define METRICS_UNSET_STRING