Panzer_DOFManager.cpp
1 // @HEADER
2 // *****************************************************************************
3 // Panzer: A partial differential equation assembly
4 // engine for strongly coupled complex multiphysics systems
5 //
6 // Copyright 2011 NTESS and the Panzer contributors.
7 // SPDX-License-Identifier: BSD-3-Clause
8 // *****************************************************************************
9 // @HEADER
10 
11 #ifndef PANZER_DOF_MANAGER2_IMPL_HPP
12 #define PANZER_DOF_MANAGER2_IMPL_HPP
13 
14 #include <map>
15 #include <set>
16 
17 #include <mpi.h>
18 
19 #include "PanzerDofMgr_config.hpp"
20 #include "Panzer_FieldPattern.hpp"
23 #include "Panzer_ConnManager.hpp"
25 #include "Panzer_DOFManager.hpp"
29 
30 #include "Teuchos_RCP.hpp"
31 #include "Teuchos_Array.hpp"
32 #include "Teuchos_ArrayView.hpp"
33 
34 #include "Tpetra_Map.hpp"
35 #include "Tpetra_Export.hpp"
36 #include "Tpetra_Vector.hpp"
37 #include "Tpetra_MultiVector.hpp"
38 
39 #include <unordered_set> // a hash table
40 
41 /*
42 #define HAVE_ZOLTAN2
43 #ifdef HAVE_ZOLTAN2
44 #include "Zoltan2_XpetraCrsGraphInput.hpp"
45 #include "Zoltan2_OrderingProblem.hpp"
46 #endif
47 */
48 
49 namespace panzer {
50 
52 namespace {
53 template <typename LocalOrdinal,typename GlobalOrdinal>
54 class HashTieBreak : public Tpetra::Details::TieBreak<LocalOrdinal,GlobalOrdinal> {
55  const unsigned int seed_;
56 
57 public:
58  HashTieBreak(const unsigned int seed = (2654435761U))
59  : seed_(seed) { }
60 
61  virtual std::size_t selectedIndex(GlobalOrdinal GID,
62  const std::vector<std::pair<int,LocalOrdinal> > & pid_and_lid) const
63  {
64  // this is Epetra's hash/Tpetra's default hash: See Tpetra HashTable
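  // Fold the 64-bit GID down to a 32-bit key, XOR it with the seed, and pick one
  // of the candidate (pid,lid) pairs by taking the result modulo the candidate count.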
65  int intkey = (int) ((GID & 0x000000007fffffffLL) +
66  ((GID & 0x7fffffff80000000LL) >> 31));
67  return std::size_t((seed_ ^ intkey) % pid_and_lid.size());
68  }
69 };
70 
71 }
72 
74 namespace {
75 template <typename LocalOrdinal,typename GlobalOrdinal>
76 class GreedyTieBreak : public Tpetra::Details::TieBreak<LocalOrdinal,GlobalOrdinal> {
77 
78 public:
79  GreedyTieBreak() { }
80 
81  virtual bool mayHaveSideEffects() const {
82  return true;
83  }
84 
85  virtual std::size_t selectedIndex(GlobalOrdinal /* GID */,
86  const std::vector<std::pair<int,LocalOrdinal> > & pid_and_lid) const
87  {
88  // always choose index of pair with smallest pid
89  const std::size_t numLids = pid_and_lid.size();
90  std::size_t idx = 0;
91  int minpid = pid_and_lid[0].first;
92  std::size_t minidx = 0;
93  for (idx = 0; idx < numLids; ++idx) {
94  if (pid_and_lid[idx].first < minpid) {
95  minpid = pid_and_lid[idx].first;
96  minidx = idx;
97  }
98  }
99  return minidx;
100  }
101 };
102 
103 }
104 
108 
109 using Teuchos::RCP;
110 using Teuchos::rcp;
111 using Teuchos::ArrayRCP;
112 using Teuchos::Array;
113 using Teuchos::ArrayView;
114 
116 DOFManager::DOFManager()
117  : numFields_(0),buildConnectivityRun_(false),requireOrientations_(false), useTieBreak_(false), useNeighbors_(false)
118 { }
119 
121 DOFManager::DOFManager(const Teuchos::RCP<ConnManager> & connMngr,MPI_Comm mpiComm)
122  : numFields_(0),buildConnectivityRun_(false),requireOrientations_(false), useTieBreak_(false), useNeighbors_(false)
123 {
124  setConnManager(connMngr,mpiComm);
125 }
126 
128 void DOFManager::setConnManager(const Teuchos::RCP<ConnManager> & connMngr, MPI_Comm mpiComm)
129 {
130  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
131  "DOFManager::setConnManager: setConnManager cannot be called after "
132  "buildGlobalUnknowns has been called");
133  connMngr_ = connMngr;
134  //We acquire the block ordering from the connmanager.
135  connMngr_->getElementBlockIds(blockOrder_);
136  for (size_t i = 0; i < blockOrder_.size(); ++i) {
137  blockNameToID_.insert(std::map<std::string,int>::value_type(blockOrder_[i],i));
138  //We must also initialize vectors for FP associations.
139  }
140  blockToAssociatedFP_.resize(blockOrder_.size());
141  communicator_ = Teuchos::rcp(new Teuchos::MpiComm<int>(Teuchos::opaqueWrapper(mpiComm)));
142 }
143 
145 //Adds a field to be used in creating the Global Numbering
146 //Returns the index for the field pattern
147 int DOFManager::addField(const std::string & str, const Teuchos::RCP<const FieldPattern> & pattern,
148  const panzer::FieldType& type)
149 {
150  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
151  "DOFManager::addField: addField cannot be called after "
152  "buildGlobalUnknowns has been called");
153 
154  fieldPatterns_.push_back(pattern);
155  fieldTypes_.push_back(type);
156  fieldNameToAID_.insert(std::map<std::string,int>::value_type(str, numFields_));
157 
158  //The default values for IDs are the sequential order they are added in.
159  fieldStringOrder_.push_back(str);
160  fieldAIDOrder_.push_back(numFields_);
161 
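  // With no block argument, this overload associates the new field with every
  // element block known to the ConnManager.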
162  for(std::size_t i=0;i<blockOrder_.size();i++) {
163  blockToAssociatedFP_[i].push_back(numFields_);
164  }
165 
166  ++numFields_;
167  return numFields_-1;
168 }
169 
171 int DOFManager::addField(const std::string & blockID, const std::string & str, const Teuchos::RCP<const FieldPattern> & pattern,
172  const panzer::FieldType& type)
173 {
174  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
175  "DOFManager::addField: addField cannot be called after "
176  "buildGlobalUnknowns has been called");
177  TEUCHOS_TEST_FOR_EXCEPTION((connMngr_==Teuchos::null),std::logic_error,
178  "DOFManager::addField: you must add a ConnManager before "
179  "you can associate a FP with a given block.");
180  bool found=false;
181  size_t blocknum=0;
182  while(!found && blocknum<blockOrder_.size()){
183  if(blockOrder_[blocknum]==blockID){
184  found=true;
185  break;
186  }
187  blocknum++;
188  }
189  TEUCHOS_TEST_FOR_EXCEPTION(!found,std::logic_error, "DOFManager::addField: Invalid block name.");
190 
191  //This will be different if the FieldPattern is already present.
192  //We need to check for that.
193  found=false;
194  std::map<std::string,int>::const_iterator fpIter = fieldNameToAID_.find(str);
195  if(fpIter!=fieldNameToAID_.end())
196  found=true;
197 
198  if(!found){
199  fieldPatterns_.push_back(pattern);
200  fieldTypes_.push_back(type);
201  fieldNameToAID_.insert(std::map<std::string,int>::value_type(str, numFields_));
202  //The default values for IDs are the sequential order they are added in.
203  fieldStringOrder_.push_back(str);
204  fieldAIDOrder_.push_back(numFields_);
205 
206  //This is going to be associated with blocknum.
207  blockToAssociatedFP_[blocknum].push_back(numFields_);
208  ++numFields_;
209  return numFields_-1;
210  }
211  else{
212  blockToAssociatedFP_[blocknum].push_back(fpIter->second);
213  return numFields_;
214  }
215 }
216 
218 Teuchos::RCP<const FieldPattern> DOFManager::getFieldPattern(const std::string & name) const
219 {
220  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(name);
221  if(fitr==fieldNameToAID_.end())
222  return Teuchos::null;
223 
224  if(fitr->second<int(fieldPatterns_.size()))
225  return fieldPatterns_[fitr->second];
226 
227  return Teuchos::null;
228 }
229 
231 Teuchos::RCP<const FieldPattern> DOFManager::getFieldPattern(const std::string & blockId, const std::string & fieldName) const
232 {
233  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(fieldName);
234  if(fitr==fieldNameToAID_.end())
235  return Teuchos::null;
236 
237  bool found=false;
238  size_t blocknum=0;
239  while(!found && blocknum<blockOrder_.size()){
240  if(blockOrder_[blocknum]==blockId){
241  found=true;
242  break;
243  }
244  blocknum++;
245  }
246 
247  if(!found)
248  return Teuchos::null;
249 
250  std::vector<int>::const_iterator itr = std::find(blockToAssociatedFP_[blocknum].begin(),
251  blockToAssociatedFP_[blocknum].end(),fitr->second);
252  if(itr!=blockToAssociatedFP_[blocknum].end()) {
253  if(fitr->second<int(fieldPatterns_.size()))
254  return fieldPatterns_[fitr->second];
255  }
256 
257  return Teuchos::null;
258 }
259 
261 void DOFManager::getOwnedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
262 {
263  indices = owned_;
264 }
265 
267 void DOFManager::getGhostedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
268 {
269  indices = ghosted_;
270 }
271 
273 void DOFManager::getOwnedAndGhostedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
274 {
275  using std::size_t;
276  indices.resize(owned_.size() + ghosted_.size());
277  for (size_t i(0); i < owned_.size(); ++i)
278  indices[i] = owned_[i];
279  for (size_t i(0); i < ghosted_.size(); ++i)
280  indices[owned_.size() + i] = ghosted_[i];
281 }
282 
284 void DOFManager::getElementGIDsAsInt(panzer::LocalOrdinal localElementID, std::vector<int>& gids, const std::string& /* blockIdHint */) const
285 {
286  const auto & element_ids = elementGIDs_[localElementID];
287  gids.resize(element_ids.size());
288  for (std::size_t i=0; i < gids.size(); ++i)
289  gids[i] = element_ids[i];
290 }
291 
293 void DOFManager::getOwnedIndicesAsInt(std::vector<int>& indices) const
294 {
295  indices.resize(owned_.size());
296  for (std::size_t i=0; i < owned_.size(); ++i)
297  indices[i] = owned_[i];
298 }
299 
301 void DOFManager::getGhostedIndicesAsInt(std::vector<int>& indices) const
302 {
303  indices.resize(ghosted_.size());
304  for (std::size_t i=0; i < ghosted_.size(); ++i)
305  indices[i] = ghosted_[i];
306 }
307 
309 void DOFManager::getOwnedAndGhostedIndicesAsInt(std::vector<int>& indices) const
310 {
311  indices.resize(owned_.size() + ghosted_.size());
312  for (std::size_t i=0; i < owned_.size(); ++i)
313  indices[i] = owned_[i];
314  for (std::size_t i=0; i < ghosted_.size(); ++i)
315  indices[owned_.size() + i] = ghosted_[i];
316 }
317 
319 int DOFManager::getNumOwned() const
320 {
321  return owned_.size();
322 }
323 
325 int DOFManager::getNumGhosted() const
326 {
327  return ghosted_.size();
328 }
329 
331 int DOFManager::getNumOwnedAndGhosted() const
332 {
333  return owned_.size() + ghosted_.size();
334 }
335 
337 int DOFManager::getNumFields() const
338 {
339  return numFields_;
340 }
341 
343 const std::vector<int> &
344 DOFManager::getGIDFieldOffsets(const std::string & blockID, int fieldNum) const
345 {
346  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error, "DOFManager::getGIDFieldOffsets: cannot be called before "
347  "buildGlobalUnknowns has been called");
348  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockID);
349  if(bitr==blockNameToID_.end())
350  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::getGIDFieldOffsets: invalid block name");
351  int bid=bitr->second;
352  if(fa_fps_[bid]!=Teuchos::null)
353  return fa_fps_[bid]->localOffsets(fieldNum);
354 
355  static const std::vector<int> empty;
356  return empty;
357 }
358 
360 const PHX::View<const int*>
361 DOFManager::getGIDFieldOffsetsKokkos(const std::string & blockID, int fieldNum) const
362 {
363  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error, "DOFManager::getGIDFieldOffsets: cannot be called before "
364  "buildGlobalUnknowns has been called");
365  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockID);
366  if(bitr==blockNameToID_.end()) {
367  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::getGIDFieldOffsetsKokkos: invalid block name");
368  }
369 
370  int bid=bitr->second;
371  if(fa_fps_[bid]!=Teuchos::null)
372  return fa_fps_[bid]->localOffsetsKokkos(fieldNum);
373 
374  static const PHX::View<int*> empty("panzer::DOFManager::getGIDFieldOffsetsKokkos() empty",0);
375  return empty;
376 }
377 
379 const PHX::ViewOfViews<1,PHX::View<const int*>>
380 DOFManager::getGIDFieldOffsetsKokkos(const std::string & blockID, const std::vector<int> & fieldNums) const
381 {
382  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error, "DOFManager::getGIDFieldOffsets: cannot be called before "
383  "buildGlobalUnknowns has been called");
384  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockID);
385  if(bitr==blockNameToID_.end()) {
386  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::getGIDFieldOffsetsKokkos: invalid block name");
387  }
388 
389  PHX::ViewOfViews<1,PHX::View<const int*>> vov("panzer::getGIDFieldOffsetsKokkos vector version",fieldNums.size());
390  vov.disableSafetyCheck(); // It's going to be moved/copied
391 
392  int bid=bitr->second;
393 
394  for (size_t i=0; i < fieldNums.size(); ++i) {
395  if(fa_fps_[bid]!=Teuchos::null) {
396  vov.addView(fa_fps_[bid]->localOffsetsKokkos(fieldNums[i]),i);
397  }
398  else {
399  TEUCHOS_TEST_FOR_EXCEPTION(true,std::runtime_error,"panzer::DOFManager::getGIDFieldOffsetsKokkos() - no field pattern exists in block "
400  << blockID << " for field number " << fieldNums[i] << "!");
401  // vov.addView(PHX::View<int*>("panzer::DOFManager::getGIDFieldOffsetsKokkos() empty",0),i);
402  }
403  }
404 
405  vov.syncHostToDevice();
406 
407  return vov;
408 }
409 
411 void DOFManager::getElementGIDs(panzer::LocalOrdinal localElementID, std::vector<panzer::GlobalOrdinal>& gids, const std::string& /* blockIdHint */) const
412 {
413  gids = elementGIDs_[localElementID];
414 }
415 
417 void DOFManager::buildGlobalUnknowns()
418 {
419  /* STEPS.
420  * 1. Build GA_FP and all block's FA_FP's and place into respective data structures.
421  */
422  if(requireOrientations_) {
423  fieldPatterns_.push_back(Teuchos::rcp(new NodalFieldPattern(fieldPatterns_[0]->getCellTopology())));
424  fieldTypes_.push_back(FieldType::CG);
425  }
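  // Appending the nodal pattern guarantees the aggregate geometric pattern
  // contains the nodes, which the orientation computation relies on (see the
  // safety check in the FieldPattern overload of buildGlobalUnknowns below).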
426 
427  TEUCHOS_ASSERT(fieldPatterns_.size() == fieldTypes_.size());
428  std::vector<std::pair<FieldType,Teuchos::RCP<const FieldPattern>>> tmp;
429  for (std::size_t i=0; i < fieldPatterns_.size(); ++i)
430  tmp.push_back(std::make_pair(fieldTypes_[i],fieldPatterns_[i]));
431 
432  Teuchos::RCP<GeometricAggFieldPattern> aggFieldPattern = Teuchos::rcp(new GeometricAggFieldPattern(tmp));
433 
434  connMngr_->buildConnectivity(*aggFieldPattern);
435 
436  // using new geometric pattern, build global unknowns
437  this->buildGlobalUnknowns(aggFieldPattern);
438 }
439 
441 void DOFManager::buildGlobalUnknowns(const Teuchos::RCP<const FieldPattern> & geomPattern)
442 {
443  // some typedefs
444  typedef panzer::TpetraNodeType Node;
445  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
446 
447  typedef Tpetra::Import<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Import;
448 
449  //the GIDs are of type panzer::GlobalOrdinal.
450  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
451 
452  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns",buildGlobalUnknowns);
453 
454  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
455  out.setOutputToRootOnly(-1);
456  out.setShowProcRank(true);
457 
458  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
459  "DOFManager::buildGlobalUnknowns: buildGlobalUnknowns cannot be called again "
460  "after buildGlobalUnknowns has been called");
461 
462  // this is a safety check to make sure that nodes are included
463  // in the geometric field pattern when orientations are required
464  if(requireOrientations_) {
465  std::size_t sz = geomPattern->getSubcellIndices(0,0).size();
466 
467  TEUCHOS_TEST_FOR_EXCEPTION(sz==0,std::logic_error,
468  "DOFManager::buildGlobalUnknowns requires a geometric pattern including "
469  "the nodes when orientations are needed!");
470  }
471 
472  /* STEPS.
473  * 1. Build all block's FA_FP's and place into respective data structures.
474  */
475  ga_fp_ = geomPattern;
476 
477  // given a set of elements over each element block build an overlap
478  // map that will provide the required element entities for the
479  // set of elements requested.
480  ElementBlockAccess ownedAccess(true,connMngr_);
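  // 'true' selects the owned elements; the neighbor (ghosted) elements are
  // handled separately below with ElementBlockAccess(false,...) when
  // useNeighbors_ is enabled.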
481 
482  // INPUT: To the algorithm in the GUN paper
483  RCP<MultiVector> tagged_overlap_mv = this->buildTaggedMultiVector(ownedAccess);
484  RCP<const Map> overlap_map = tagged_overlap_mv->getMap();
485 
486  RCP<MultiVector> overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlap_map,(size_t)numFields_);
487 
488  // call the GUN paper algorithm
489  auto non_overlap_pair = this->buildGlobalUnknowns_GUN(*tagged_overlap_mv,*overlap_mv);
490  RCP<MultiVector> non_overlap_mv = non_overlap_pair.first;
491  RCP<MultiVector> tagged_non_overlap_mv = non_overlap_pair.second;
492  RCP<const Map> non_overlap_map = non_overlap_mv->getMap();
493 
494  /* 14. Cross reference local element connectivity and overlap map to
495  * create final GID vectors.
496  */
497 
498  // this bit of code takes the uniquely assigned GIDs and spreads them
499  // out for processing by local element ID
500  this->fillGIDsFromOverlappedMV(ownedAccess,elementGIDs_,*overlap_map,*overlap_mv);
501 
502  // if neighbor unknowns are required, then make sure they are included
503  // in the elementGIDs_
504  if (useNeighbors_) { // enabling this turns on GID construction for
505  // neighbor processors
506  ElementBlockAccess neighborAccess(false,connMngr_);
507  RCP<const Map> overlap_map_neighbor =
508  this->buildOverlapMapFromElements(neighborAccess);
509 
510  // Export e(overlap_map_neighbor,non_overlap_map);
511  Import imp_neighbor(non_overlap_map,overlap_map_neighbor);
512 
513  Teuchos::RCP<MultiVector> overlap_mv_neighbor =
514  Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlap_map_neighbor, (size_t)numFields_);
515 
516  // get all neighbor information
517  overlap_mv_neighbor->doImport(*non_overlap_mv, imp_neighbor,
518  Tpetra::REPLACE);
519 
520  this->fillGIDsFromOverlappedMV(neighborAccess, elementGIDs_,
521  *overlap_map_neighbor, *overlap_mv_neighbor);
522  }
523 
525  // this is where the code is modified to artificially induce GIDs
526  // over 2 Billion unknowns
528  #if 0
529  {
530  panzer::GlobalOrdinal offset = 0xFFFFFFFFLL;
531 
532  for (size_t b = 0; b < blockOrder_.size(); ++b) {
533  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
534  for (std::size_t l = 0; l < myElements.size(); ++l) {
535  std::vector<panzer::GlobalOrdinal> & localGIDs = elementGIDs_[myElements[l]];
536  for(std::size_t c=0;c<localGIDs.size();c++)
537  localGIDs[c] += offset;
538  }
539  }
540 
541  Teuchos::ArrayRCP<panzer::GlobalOrdinal> nvals = non_overlap_mv->get1dViewNonConst();
542  for (int j = 0; j < nvals.size(); ++j)
543  nvals[j] += offset;
544  }
545  #endif
546 
547  // build owned vector
548  {
549  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_owned_vector",build_owned_vector);
550 
551  typedef std::unordered_set<panzer::GlobalOrdinal> HashTable;
552  HashTable isOwned, remainingOwned;
553 
554  // owned_ is made up of the owned IDs. Note: this doesn't work for high order
555  auto nvals = non_overlap_mv->getLocalViewHost(Tpetra::Access::ReadOnly);
556  auto tagged_vals = tagged_non_overlap_mv->getLocalViewHost(Tpetra::Access::ReadOnly);
557  TEUCHOS_ASSERT(nvals.size()==tagged_vals.size());
558  for (size_t i = 0; i < nvals.extent(1); ++i) {
559  for (size_t j = 0; j < nvals.extent(0); ++j) {
560  if (nvals(j,i) != -1) {
561  for(panzer::GlobalOrdinal offset=0;offset<tagged_vals(j,i);++offset)
562  isOwned.insert(nvals(j,i)+offset);
563  }
564  else {
565  // sanity check
566  TEUCHOS_ASSERT(tagged_vals(j,i)==0);
567  }
568  }
569  }
570  remainingOwned = isOwned;
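  // remainingOwned tracks owned GIDs not yet encountered while sweeping the
  // local elements; whatever is left over is appended to owned_ at the end.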
571 
572  HashTable hashTable; // use to detect if global ID has been added to owned_
573  for (size_t b = 0; b < blockOrder_.size(); ++b) {
574 
575  if(fa_fps_[b]==Teuchos::null)
576  continue;
577 
578  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
579 
580  for (size_t l = 0; l < myElements.size(); ++l) {
581  const std::vector<panzer::GlobalOrdinal> & localOrdering = elementGIDs_[myElements[l]];
582 
583  // add "novel" global ids that are also owned to owned array
584  for(std::size_t i=0;i<localOrdering.size();i++) {
585  // don't try to add if ID is not owned
586  if(isOwned.find(localOrdering[i])==isOwned.end())
587  continue;
588 
589  // only add a global ID if it has not been added
590  std::pair<typename HashTable::iterator,bool> insertResult = hashTable.insert(localOrdering[i]);
591  if(insertResult.second) {
592  owned_.push_back(localOrdering[i]);
593  remainingOwned.erase(localOrdering[i]);
594  }
595  }
596  }
597  }
598 
599  // add any other owned GIDs that were not already included.
600  // these are ones that are owned locally but not required by any
601  // element owned by this processor (uggh!)
602  for(typename HashTable::const_iterator itr=remainingOwned.begin();itr!=remainingOwned.end();itr++)
603  owned_.push_back(*itr);
604 
605  if(owned_.size()!=isOwned.size()) {
606  out << "I'm about to hang because of unknown numbering failure ... sorry! (line = " << __LINE__ << ")" << std::endl;
607  TEUCHOS_TEST_FOR_EXCEPTION(owned_.size()!=isOwned.size(),std::logic_error,
608  "DOFManager::buildGlobalUnknowns: Failure because not all owned unknowns have been accounted for.");
609  }
610 
611  }
612 
613  // Build the ghosted_ array. The old simple way led to slow Jacobian
614  // assembly; the new way speeds up Jacobian assembly.
615  {
616  // Loop over all the elements and do a greedy ordering of local values over
617  // the elements for building ghosted_. Hopefully this gives a better
618  // layout for an element-ordered assembly.
619  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_ghosted_array",BGA);
620 
621  // Use a hash table to detect if global IDs have been added to owned_.
622  typedef std::unordered_set<panzer::GlobalOrdinal> HashTable;
623  HashTable hashTable;
624  for (std::size_t i = 0; i < owned_.size(); i++)
625  hashTable.insert(owned_[i]);
626 
627  // Here we construct an accessor vector, such that we first process
628  // everything in the current element block, optionally followed by
629  // everything in the neighbor element block.
630  std::vector<ElementBlockAccess> blockAccessVec;
631  blockAccessVec.push_back(ElementBlockAccess(true,connMngr_));
632  if(useNeighbors_)
633  blockAccessVec.push_back(ElementBlockAccess(false,connMngr_));
634  for (std::size_t a = 0; a < blockAccessVec.size(); ++a)
635  {
636  // Get the access type (owned or neighbor).
637  const ElementBlockAccess& access = blockAccessVec[a];
638  for (size_t b = 0; b < blockOrder_.size(); ++b)
639  {
640  if (fa_fps_[b] == Teuchos::null)
641  continue;
642  const std::vector<panzer::LocalOrdinal>& myElements =
643  access.getElementBlock(blockOrder_[b]);
644  for (size_t l = 0; l < myElements.size(); ++l)
645  {
646  const std::vector<panzer::GlobalOrdinal>& localOrdering = elementGIDs_[myElements[l]];
647 
648  // Add "novel" global IDs into the ghosted_ vector.
649  for (std::size_t i = 0; i < localOrdering.size(); ++i)
650  {
651  std::pair<typename HashTable::iterator, bool> insertResult =
652  hashTable.insert(localOrdering[i]);
653 
654  // If the insertion succeeds, then this is "novel" to the owned_
655  // and ghosted_ vectors, so include it in ghosted_.
656  if(insertResult.second)
657  ghosted_.push_back(localOrdering[i]);
658  }
659  }
660  }
661  }
662  }
663 
664  buildConnectivityRun_ = true;
665 
666  // build orientations if required
667  if(requireOrientations_) {
668  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_orientation",BO);
669  this->buildUnknownsOrientation();
670  }
671 
672  // allocate the local IDs
673  if (useNeighbors_) {
674  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_local_ids_from_owned_and_ghosted",BLOFOG);
675  this->buildLocalIdsFromOwnedAndGhostedElements();
676  }
677  else {
678  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_local_ids",BLI);
679  this->buildLocalIds();
680  }
681 }
682 
684 std::pair<Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >,
685  Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> > >
686 DOFManager::buildGlobalUnknowns_GUN(const Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & tagged_overlap_mv,
687  Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & overlap_mv) const
688 {
689  // some typedefs
690  typedef panzer::TpetraNodeType Node;
691  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
692 
693  typedef Tpetra::Export<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Export;
694  typedef Tpetra::Import<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Import;
695 
696  //the GIDs are of type panzer::GlobalOrdinal.
697  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
698 
699  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN",BGU_GUN);
700 
701  // LINE 2: In the GUN paper
702  RCP<const Map> overlap_map = tagged_overlap_mv.getMap();
703 
704  /* 6. Create a OneToOne map from the overlap map.
705  */
706 
707  // LINE 4: In the GUN paper
708 
709  RCP<const Map> non_overlap_map;
710  if(!useTieBreak_) {
711  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_04 createOneToOne",GUN04);
712 
713  GreedyTieBreak<panzer::LocalOrdinal,panzer::GlobalOrdinal> greedy_tie_break;
714  non_overlap_map = Tpetra::createOneToOne<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node>(overlap_map, greedy_tie_break);
715  }
716  else {
717  // use a hash tie break to get better load balancing from create one to one
718  // Aug. 4, 2016...this is a bad idea and doesn't work
719  HashTieBreak<panzer::LocalOrdinal,panzer::GlobalOrdinal> tie_break;
720  non_overlap_map = Tpetra::createOneToOne<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node>(overlap_map,tie_break);
721  }
722 
723  /* 7. Create a non-overlapped multivector from OneToOne map.
724  */
725 
726  // LINE 5: In the GUN paper
727 
728  Teuchos::RCP<MultiVector> tagged_non_overlap_mv;
729  {
730  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_05 alloc_unique_mv",GUN05);
731 
732  tagged_non_overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(non_overlap_map,(size_t)numFields_);
733  }
734 
735  /* 8. Create an export between the two maps.
736  */
737 
738  // LINE 6: In the GUN paper
739  RCP<Export> exp;
740  RCP<Import> imp;
741  RCP<MultiVector> non_overlap_mv;
742  {
743  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_06 export",GUN06);
744 
745  exp = rcp(new Export(overlap_map,non_overlap_map));
746 
747  /* 9. Export data using ABSMAX.
748  */
749  tagged_non_overlap_mv->doExport(tagged_overlap_mv,*exp,Tpetra::ABSMAX);
750 
751  // copy the tagged one, so as to preserve the tagged MV so we can overwrite
752  // the non_overlap_mv
753  non_overlap_mv = rcp(new MultiVector(*tagged_non_overlap_mv,Teuchos::Copy));
754  }
755 
756  /* 10. Compute the local sum using Kokkos.
757  */
758 
759  // LINES 7-9: In the GUN paper
760 
761  panzer::GlobalOrdinal localsum=0;
762  {
763  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_07-09 local_count",GUN07_09);
764  auto values = non_overlap_mv->getLocalViewDevice(Tpetra::Access::ReadOnly);
766  }
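  // At this point localsum holds the total number of DOFs this process will
  // assign (i.e. own), computed by summing the non-overlapped tag counts.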
767 
768  /* 11. Create a map using local sums to generate final GIDs.
769  */
770 
771  // LINE 10: In the GUN paper
772 
773  panzer::GlobalOrdinal myOffset = -1;
774  {
775  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_10 prefix_sum",GUN_10);
776 
777  // do a prefix sum
778  panzer::GlobalOrdinal scanResult = 0;
779  Teuchos::scan<int, panzer::GlobalOrdinal> (*getComm(), Teuchos::REDUCE_SUM, static_cast<panzer::GlobalOrdinal> (localsum), Teuchos::outArg (scanResult));
780  myOffset = scanResult - localsum;
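  // scanResult is the inclusive prefix sum across ranks, so subtracting localsum
  // gives the exclusive sum: the first GID value this process hands out.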
781  }
782 
783  // LINE 11 and 12: In the GUN paper, these steps are eliminated because
784  // the non_overlap_mv is reused
785 
786  /* 12. Iterate through the non-overlapping MV and assign GIDs to
787  * the necessary points. (Assign a -1 elsewhere.)
788  */
789 
790  // LINES 13-21: In the GUN paper
791 
792  {
793  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_13-21 gid_assignment",GUN13_21);
794  int which_id=0;
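  // Each nonzero entry currently holds the DOF count tagged on that
  // (entity,field) pair; replace it with the first GID of a contiguous block of
  // 'ndof' GIDs, advancing which_id so blocks never overlap.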
795  auto editnonoverlap = non_overlap_mv->getLocalViewHost(Tpetra::Access::ReadWrite);
796  for(size_t i=0; i<non_overlap_mv->getLocalLength(); ++i){
797  for(int j=0; j<numFields_; ++j){
798  if(editnonoverlap(i,j)!=0){
799  // editnonoverlap[j][i]=myOffset+which_id;
800  int ndof = Teuchos::as<int>(editnonoverlap(i,j));
801  editnonoverlap(i,j)=myOffset+which_id;
802  which_id+=ndof;
803  }
804  else{
805  editnonoverlap(i,j)=-1;
806  }
807 
808  }
809  }
810  }
811 
812  // LINE 22: In the GUN paper. This was performed above, where the overlap_mv is
813  // reused (abused) to handle the input tagging.
814 
815  /* 13. Import data back to the overlap MV using REPLACE.
816  */
817 
818  // LINE 23: In the GUN paper
819 
820  {
821  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_23 final_import",GUN23);
822 
823  // use exporter to save on communication setup costs
824  overlap_mv.doImport(*non_overlap_mv,*exp,Tpetra::REPLACE);
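  // Calling doImport with an Export object runs the existing communication plan
  // in reverse, avoiding the cost of building a separate Import.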
825  }
826 
827  //std::cout << Teuchos::describe(*non_overlap_mv,Teuchos::VERB_EXTREME) << std::endl;
828 
829  // return non_overlap_mv;
830  return std::make_pair(non_overlap_mv,tagged_non_overlap_mv);
831 }
832 
834 Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
835 DOFManager::buildTaggedMultiVector(const ElementBlockAccess & ownedAccess)
836 {
837  // some typedefs
838  typedef panzer::TpetraNodeType Node;
839  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
840  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
841 
842  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector",BTMV);
843 
844  //We will iterate through all of the blocks, building a FieldAggPattern for
845  //each of them.
846 
847  for (size_t b = 0; b < blockOrder_.size(); ++b) {
848  std::vector<std::tuple< int, panzer::FieldType, RCP<const panzer::FieldPattern> > > faConstruct;
849  //The ID is going to be the AID, and then everything will work.
850  //The ID should not be the AID, it should be the ID it has in the ordering.
851 
852  for (size_t i = 0; i < fieldAIDOrder_.size(); ++i) {
853  int looking = fieldAIDOrder_[i];
854 
855  //Check if in b's fp list
856  std::vector<int>::const_iterator reu = std::find(blockToAssociatedFP_[b].begin(), blockToAssociatedFP_[b].end(), looking);
857  if(!(reu==blockToAssociatedFP_[b].end())){
858  faConstruct.push_back(std::make_tuple(i, fieldTypes_[fieldAIDOrder_[i]], fieldPatterns_[fieldAIDOrder_[i]]));
859  }
860 
861  }
862 
863  if(faConstruct.size()>0) {
864  fa_fps_.push_back(rcp(new FieldAggPattern(faConstruct, ga_fp_)));
865 
866  // how many global IDs are in this element block?
867  int gidsInBlock = fa_fps_[fa_fps_.size()-1]->numberIds();
868  elementBlockGIDCount_.push_back(gidsInBlock);
869  }
870  else {
871  fa_fps_.push_back(Teuchos::null);
872  elementBlockGIDCount_.push_back(0);
873  }
874  }
875 
876  RCP<const Map> overlapmap = this->buildOverlapMapFromElements(ownedAccess);
877 
878  // LINE 22: In the GUN paper...the overlap_mv is reused for the tagged multivector.
879  // This is a bit of a practical abuse of the algorithm presented in the paper.
880 
881  Teuchos::RCP<MultiVector> overlap_mv;
882  {
883  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector::allocate_tagged_multivector",ATMV);
884 
885  overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlapmap,(size_t)numFields_);
886  overlap_mv->putScalar(0); // if tpetra is not initialized with zeros
887  }
888 
889  /* 5. Iterate through all local elements again, checking with the FP
890  * information. Mark up the overlap map accordingly.
891  */
892 
893  {
894  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector::fill_tagged_multivector",FTMV);
895 
896  // temporary working vector to fill each row in tagged array
897  std::vector<int> working(overlap_mv->getNumVectors());
898  auto edittwoview_host = overlap_mv->getLocalViewHost(Tpetra::Access::ReadWrite);
899  for (size_t b = 0; b < blockOrder_.size(); ++b) {
900  // there has to be a field pattern associated with the block
901  if(fa_fps_[b]==Teuchos::null)
902  continue;
903 
904  const std::vector<panzer::LocalOrdinal> & numFields= fa_fps_[b]->numFieldsPerId();
905  const std::vector<panzer::LocalOrdinal> & fieldIds= fa_fps_[b]->fieldIds();
906  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
907  for (size_t l = 0; l < myElements.size(); ++l) {
908  auto connSize = connMngr_->getConnectivitySize(myElements[l]);
909  const auto * elmtConn = connMngr_->getConnectivity(myElements[l]);
910  int offset=0;
911  for (int c = 0; c < connSize; ++c) {
912  size_t lid = overlapmap->getLocalElement(elmtConn[c]);
913  for(std::size_t i=0;i<working.size();i++)
914  working[i] = 0;
915  for (int n = 0; n < numFields[c]; ++n) {
916  int whichField = fieldIds[offset];
917  //Row will be lid. column will be whichField.
918  //Shove onto local ordering
919  working[whichField]++;
920  offset++;
921  }
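  // Record the per-field counts for this entity, keeping the maximum with
  // anything already tagged there; entities shared by several elements (or
  // blocks) therefore keep the largest per-field DOF count.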
922  for(std::size_t i=0;i<working.size();i++) {
923  auto current = edittwoview_host(lid,i);
924  edittwoview_host(lid,i) = (current > working[i]) ? current : working[i];
925  }
926 
927  }
928  }
929  }
930  // // verbose output for inspecting overlap_mv
931  // for(int i=0;i<overlap_mv->getLocalLength(); i++) {
932  // for(int j=0;j<overlap_mv->getNumVectors() ; j++)
933  // std::cout << edittwoview[j][i] << " ";
934  // std::cout << std::endl;
935  // }
936  }
937 
938  return overlap_mv;
939 }
940 
942 int DOFManager::getFieldNum(const std::string & string) const
943 {
944  int ind=0;
945  bool found=false;
946  while(!found && (size_t)ind<fieldStringOrder_.size()){
947  if(fieldStringOrder_[ind]==string)
948  found=true;
949  else
950  ind++;
951  }
952 
953  if(found)
954  return ind;
955 
956  // didn't find anything...return -1
957  return -1;
958 }
959 
961 void DOFManager::getFieldOrder(std::vector<std::string> & fieldOrder) const
962 {
963  fieldOrder.resize(fieldStringOrder_.size());
964  for (size_t i = 0; i < fieldStringOrder_.size(); ++i)
965  fieldOrder[i]=fieldStringOrder_[i];
966 }
967 
969 bool DOFManager::fieldInBlock(const std::string & field, const std::string & block) const
970 {
971  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(field);
972  if(fitr==fieldNameToAID_.end()) {
973  std::stringstream ss;
974  printFieldInformation(ss);
975  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid field name. DOF information is:\n"+ss.str());
976  }
977  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(block);
978  if(bitr==blockNameToID_.end()) {
979  std::stringstream ss;
980  printFieldInformation(ss);
981  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name. DOF information is:\n"+ss.str());
982  }
983  int fid=fitr->second;
984  int bid=bitr->second;
985 
986  bool found=false;
987  for (size_t i = 0; i < blockToAssociatedFP_[bid].size(); ++i) {
988  if(blockToAssociatedFP_[bid][i]==fid){
989  found=true;
990  break;
991  }
992  }
993 
994  return found;
995 }
996 
998 const std::vector<int> & DOFManager::getBlockFieldNumbers(const std::string & blockId) const
999 {
1000  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error,"DOFManager::getBlockFieldNumbers: BuildConnectivity must be run first.");
1001 
1002  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1003  if(bitr==blockNameToID_.end())
1004  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::getBlockFieldNumbers: invalid block name");
1005  int bid=bitr->second;
1006 
1007  // there has to be a field pattern associated with the block
1008  if(fa_fps_[bid]!=Teuchos::null)
1009  return fa_fps_[bid]->fieldIds();
1010 
1011  // nothing to return
1012  static std::vector<int> empty;
1013  return empty;
1014 }
1015 
1017 const std::pair<std::vector<int>,std::vector<int> > &
1018 DOFManager::getGIDFieldOffsets_closure(const std::string & blockId, int fieldNum, int subcellDim,int subcellId) const
1019 {
1020  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error,"DOFManager::getGIDFieldOffsets_closure: BuildConnectivity must be run first.");
1021  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1022  if(bitr==blockNameToID_.end())
1023  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error, "DOFManager::getGIDFieldOffsets_closure: invalid block name.");
1024 
1025  // there has to be a field pattern associated with the block
1026  if(fa_fps_[bitr->second]!=Teuchos::null)
1027  return fa_fps_[bitr->second]->localOffsets_closure(fieldNum, subcellDim, subcellId);
1028 
1029  static std::pair<std::vector<int>,std::vector<int> > empty;
1030  return empty;
1031 }
1032 
1034 void DOFManager::ownedIndices(const std::vector<panzer::GlobalOrdinal> & indices,std::vector<bool> & isOwned) const
1035 {
1036  //Resizes the isOwned array.
1037  if(indices.size()!=isOwned.size())
1038  isOwned.resize(indices.size(),false);
1039  typename std::vector<panzer::GlobalOrdinal>::const_iterator endOf = owned_.end();
1040  for (std::size_t i = 0; i < indices.size(); ++i) {
1041  isOwned[i] = ( std::find(owned_.begin(), owned_.end(), indices[i])!=endOf );
1042  }
1043 }
1044 
1046 void DOFManager::setFieldOrder(const std::vector<std::string> & fieldOrder)
1047 {
1048  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
1049  "DOFManager::setFieldOrder: setFieldOrder cannot be called after "
1050  "buildGlobalUnknowns has been called");
1051  if(validFieldOrder(fieldOrder)){
1052  fieldStringOrder_=fieldOrder;
1053  //We also need to reassign the fieldAIDOrder_.
1054  for (size_t i = 0; i < fieldStringOrder_.size(); ++i) {
1055  fieldAIDOrder_[i] = fieldNameToAID_[fieldStringOrder_[i]];
1056  }
1057  }
1058  else {
1059  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::setFieldOrder: Invalid Field Ordering!");
1060  }
1061 }
1062 
1064 bool DOFManager::validFieldOrder(const std::vector<std::string> & proposed_fieldOrder)
1065 {
1066  if(fieldStringOrder_.size()!=proposed_fieldOrder.size())
1067  return false;
1068  //I'm using a not very efficient way of doing this, but there should never
1069  //be that many fields, so it isn't that big of a deal.
1070  for (size_t i = 0; i < fieldStringOrder_.size(); ++i) {
1071  bool found=false;
1072  for (size_t j = 0; j < proposed_fieldOrder.size(); ++j) {
1073  if(fieldStringOrder_[i]==proposed_fieldOrder[j])
1074  found=true;
1075  }
1076  if(!found)
1077  return false;
1078  }
1079  return true;
1080 }
1081 
1083 const std::string & DOFManager::getFieldString(int num) const
1084 {
1085  //A reverse lookup through fieldStringOrder_.
1086  if(num>=(int)fieldStringOrder_.size())
1087  TEUCHOS_TEST_FOR_EXCEPTION(true, std::logic_error, "DOFManager::getFieldString: invalid number");
1088  return fieldStringOrder_[num];
1089 }
1090 
1092 // Everything associated with orientation is not yet built, but this
1093 // is the method as found in Panzer_DOFManager_impl.hpp. There are
1094 // going to need to be some substantial changes to the code as it applies
1095 // to this DOFManager, but the basic ideas and format should be similar.
1096 //
1097 void DOFManager::buildUnknownsOrientation()
1098 {
1099  orientation_.clear(); // clean up previous work
1100 
1101  std::vector<std::string> elementBlockIds;
1102  connMngr_->getElementBlockIds(elementBlockIds);
1103 
1104  // figure out how many total elements are owned by this processor (why is this so hard!)
1105  std::size_t myElementCount = 0;
1106  for(std::vector<std::string>::const_iterator blockItr=elementBlockIds.begin(); blockItr!=elementBlockIds.end();++blockItr)
1107  myElementCount += connMngr_->getElementBlock(*blockItr).size();
1108 
1109  // allocate for each block
1110  orientation_.resize(myElementCount);
1111 
1112  // loop over all element blocks
1113  for(std::vector<std::string>::const_iterator blockItr=elementBlockIds.begin();
1114  blockItr!=elementBlockIds.end();++blockItr) {
1115  const std::string & blockName = *blockItr;
1116 
1117  // this block has no unknowns (or elements)
1118  std::map<std::string,int>::const_iterator fap = blockNameToID_.find(blockName);
1119  if(fap==blockNameToID_.end()) {
1120  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::buildUnknownsOrientation: invalid block name");
1121  }
1122 
1123  int bid=fap->second;
1124 
1125  if(fa_fps_[bid]==Teuchos::null)
1126  continue;
1127 
1128  // grab field patterns, will be necessary to compute orientations
1129  const FieldPattern & fieldPattern = *fa_fps_[bid];
1130 
1131  //Should be ga_fp_ (geometric aggregate field pattern)
1132  std::vector<std::pair<int,int> > topEdgeIndices;
1133  orientation_helpers::computePatternEdgeIndices(*ga_fp_,topEdgeIndices);
1134 
1135  // grab face orientations if 3D
1136  std::vector<std::vector<int> > topFaceIndices;
1137  if(ga_fp_->getDimension()==3)
1138  orientation_helpers::computePatternFaceIndices(*ga_fp_,topFaceIndices);
1139 
1140  //How many GIDs are associated with a particular element block
1141  const std::vector<panzer::LocalOrdinal> & elmts = connMngr_->getElementBlock(blockName);
1142  for(std::size_t e=0;e<elmts.size();e++) {
1143  // this is the vector of orientations to fill: initialize it correctly
1144  std::vector<signed char> & eOrientation = orientation_[elmts[e]];
1145 
1146  // This resize should match fieldPattern.numberIds();
1147  // computeCellEdgeOrientations asserts exactly that when it is called.
1148  // There should be no reason to make it any more complicated.
1149  eOrientation.resize(fieldPattern.numberIds());
1150  for(std::size_t s=0;s<eOrientation.size();s++)
1151  eOrientation[s] = 1; // put in 1 by default
1152 
1153  // get geometry ids
1154  auto connSz = connMngr_->getConnectivitySize(elmts[e]);
1155  const panzer::GlobalOrdinal * connPtr = connMngr_->getConnectivity(elmts[e]);
1156  const std::vector<panzer::GlobalOrdinal> connectivity(connPtr,connPtr+connSz);
1157 
1158  orientation_helpers::computeCellEdgeOrientations(topEdgeIndices, connectivity, fieldPattern, eOrientation);
1159 
1160  // compute face orientations in 3D
1161  if(ga_fp_->getDimension()==3)
1162  orientation_helpers::computeCellFaceOrientations(topFaceIndices, connectivity, fieldPattern, eOrientation);
1163  }
1164  }
1165 }
1166 
1168 void DOFManager::getElementOrientation(panzer::LocalOrdinal localElmtId,std::vector<double> & gidsOrientation) const
1169 {
1170  TEUCHOS_TEST_FOR_EXCEPTION(orientation_.size()==0,std::logic_error,
1171  "DOFManager::getElementOrientations: Orientations were not constructed!");
1172 
1173  const std::vector<signed char> & local_o = orientation_[localElmtId];
1174  gidsOrientation.resize(local_o.size());
1175  for(std::size_t i=0;i<local_o.size();i++) {
1176  gidsOrientation[i] = double(local_o[i]);
1177  }
1178 }
1179 
1180 Teuchos::RCP<ConnManager> DOFManager::resetIndices()
1181 {
1182  Teuchos::RCP<ConnManager> connMngr = connMngr_;
1183 
1184  connMngr_ = Teuchos::null;
1185 
1186  // wipe out FEI objects
1187  orientation_.clear(); // clean up previous work
1188  fa_fps_.clear();
1189  elementGIDs_.clear();
1190  owned_.clear();
1191  ghosted_.clear();
1192  elementBlockGIDCount_.clear();
1193 
1194  return connMngr;
1195 }
1196 
1198 std::size_t DOFManager::blockIdToIndex(const std::string & blockId) const
1199 {
1200  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1201  if(bitr==blockNameToID_.end())
1202  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::blockIdToIndex: invalid block name");
1203  return bitr->second;
1204 }
1205 
1207 void DOFManager::printFieldInformation(std::ostream & os) const
1208 {
1209  os << "DOFManager Field Information: " << std::endl;
1210 
1211  // sanity check
1212  TEUCHOS_ASSERT(blockOrder_.size()==blockToAssociatedFP_.size());
1213 
1214  for(std::size_t i=0;i<blockOrder_.size();i++) {
1215  os << " Element Block = " << blockOrder_[i] << std::endl;
1216 
1217  // output field information
1218  const std::vector<int> & fieldIds = blockToAssociatedFP_[i];
1219  for(std::size_t f=0;f<fieldIds.size();f++)
1220  os << " \"" << getFieldString(fieldIds[f]) << "\" is field ID " << fieldIds[f] << std::endl;
1221  }
1222 }
1223 
1225 Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
1226 DOFManager::buildOverlapMapFromElements(const ElementBlockAccess & access) const
1227 {
1228  PANZER_FUNC_TIME_MONITOR("panzer::DOFManager::buildOverlapMapFromElements");
1229 
1230  /*
1231  * 2. Iterate through all local elements and create the overlapVector
1232  * of concerned elements.
1233  */
1234 
1235  std::set<panzer::GlobalOrdinal> overlapset;
1236  for (size_t i = 0; i < blockOrder_.size(); ++i) {
1237  const std::vector<panzer::LocalOrdinal> & myElements = access.getElementBlock(blockOrder_[i]);
1238  for (size_t e = 0; e < myElements.size(); ++e) {
1239  auto connSize = connMngr_->getConnectivitySize(myElements[e]);
1240  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[e]);
1241  for (int k = 0; k < connSize; ++k) {
1242  overlapset.insert(elmtConn[k]);
1243  }
1244  }
1245  }
1246 
1247  Array<panzer::GlobalOrdinal> overlapVector;
1248  for (typename std::set<panzer::GlobalOrdinal>::const_iterator itr = overlapset.begin(); itr!=overlapset.end(); ++itr) {
1249  overlapVector.push_back(*itr);
1250  }
1251 
1252  /* 3. Construct an overlap map from this structure.
1253  */
1254  return Tpetra::createNonContigMapWithNode<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType>(overlapVector,getComm());
1255 }
1256 
1258 void DOFManager::
1259 fillGIDsFromOverlappedMV(const ElementBlockAccess & access,
1260  std::vector<std::vector< panzer::GlobalOrdinal > > & elementGIDs,
1261  const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & overlapmap,
1262  const Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & const_overlap_mv) const
1263 {
1264  using Teuchos::ArrayRCP;
1265 
1266  //To generate elementGIDs we need to go through all of the local elements.
1267  auto overlap_mv = const_cast<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType>&>(const_overlap_mv);
1268  const auto twoview_host = overlap_mv.getLocalViewHost(Tpetra::Access::ReadOnly);
1269 
1270  //And for each of the entries in fa_fp.fieldIds we go to that column. To get the row,
1271  //we move from globalID to localID in the map and read the value from that local row.
1272  for (size_t b = 0; b < blockOrder_.size(); ++b) {
1273  const std::vector<panzer::LocalOrdinal> & myElements = access.getElementBlock(blockOrder_[b]);
1274 
1275  if(fa_fps_[b]==Teuchos::null) {
1276  // fill elements that are not used with empty vectors
1277  for (size_t l = 0; l < myElements.size(); ++l) {
1278  panzer::LocalOrdinal thisID=myElements[l];
1279  if(elementGIDs.size()<=(size_t)thisID)
1280  elementGIDs.resize(thisID+1);
1281  }
1282  continue;
1283  }
1284 
1285  const std::vector<int> & numFields= fa_fps_[b]->numFieldsPerId();
1286  const std::vector<int> & fieldIds= fa_fps_[b]->fieldIds();
1287  //
1288  //
1289  for (size_t l = 0; l < myElements.size(); ++l) {
1290  auto connSize = connMngr_->getConnectivitySize(myElements[l]);
1291  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[l]);
1292  std::vector<panzer::GlobalOrdinal> localOrdering;
1293  int offset=0;
1294  for (int c = 0; c < connSize; ++c) {
1295  size_t lid = overlapmap.getLocalElement(elmtConn[c]);
1296  std::vector<int> dofsPerField(numFields_,0);
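  // dofsPerField counts how many GIDs of each field have already been consumed
  // at this entity, so repeated DOFs of the same field receive consecutive GIDs.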
1297  for (int n = 0; n < numFields[c]; ++n) {
1298  int whichField = fieldIds[offset];
1299  offset++;
1300  //Row will be lid. column will be whichField.
1301  //Shove onto local ordering
1302  localOrdering.push_back(twoview_host(lid,whichField)+dofsPerField[whichField]);
1303 
1304  dofsPerField[whichField]++;
1305  }
1306  }
1307  panzer::LocalOrdinal thisID=myElements[l];
1308  if(elementGIDs.size()<=(size_t)thisID){
1309  elementGIDs.resize(thisID+1);
1310  }
1311  elementGIDs[thisID]=localOrdering;
1312  }
1313  }
1314 }
1315 
1316 void DOFManager::buildLocalIdsFromOwnedAndGhostedElements()
1317 {
1318  std::vector<std::vector<panzer::LocalOrdinal> > elementLIDs(elementGIDs_.size());
1319 
1320  std::vector<panzer::GlobalOrdinal> ownedAndGhosted;
1321  this->getOwnedAndGhostedIndices(ownedAndGhosted);
1322 
1323  // build global to local hash map (temporary and used only once)
1324  std::unordered_map<panzer::GlobalOrdinal,panzer::LocalOrdinal> hashMap;
1325  for(std::size_t i = 0; i < ownedAndGhosted.size(); ++i)
1326  hashMap[ownedAndGhosted[i]] = i;
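  // A GID's local ID is simply its position in the concatenated owned+ghosted list.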
1327 
1328  for (std::size_t i = 0; i < elementGIDs_.size(); ++i) {
1329  const std::vector<panzer::GlobalOrdinal>& gids = elementGIDs_[i];
1330  std::vector<panzer::LocalOrdinal>& lids = elementLIDs[i];
1331  lids.resize(gids.size());
1332  for (std::size_t g = 0; g < gids.size(); ++g)
1333  lids[g] = hashMap[gids[g]];
1334  }
1335 
1336  this->setLocalIds(elementLIDs);
1337 }
1338 
1339 /*
1340 template <typename panzer::LocalOrdinal,typename panzer::GlobalOrdinal>
1341 Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
1342 DOFManager<panzer::LocalOrdinal,panzer::GlobalOrdinal>::runLocalRCMReordering(const Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> > & map)
1343 {
1344  typedef panzer::TpetraNodeType Node;
1345  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
1346  typedef Tpetra::CrsGraph<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Graph;
1347 
1348  Teuchos::RCP<Graph> graph = Teuchos::rcp(new Graph(map,0));
1349 
1350  // build Crs Graph from the mesh
1351  for (size_t b = 0; b < blockOrder_.size(); ++b) {
1352  if(fa_fps_[b]==Teuchos::null)
1353  continue;
1354 
1355  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
1356  for (size_t l = 0; l < myElements.size(); ++l) {
1357  panzer::LocalOrdinal connSize = connMngr_->getConnectivitySize(myElements[l]);
1358  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[l]);
1359  for (int c = 0; c < connSize; ++c) {
1360  panzer::LocalOrdinal lid = map->getLocalElement(elmtConn[c]);
1361  if(Teuchos::OrdinalTraits<panzer::LocalOrdinal>::invalid()!=lid)
1362  continue;
1363 
1364  graph->insertGlobalIndices(elmtConn[c],Teuchos::arrayView(elmtConn,connSize));
1365  }
1366  }
1367  }
1368 
1369  graph->fillComplete();
1370 
1371  std::vector<panzer::GlobalOrdinal> newOrder(map->getLocalNumElements());
1372  {
1373  // graph is constructed, now run RCM using zoltan2
1374  typedef Zoltan2::XpetraCrsGraphInput<Graph> SparseGraphAdapter;
1375 
1376  Teuchos::ParameterList params;
1377  params.set("order_method", "rcm");
1378  SparseGraphAdapter adapter(graph);
1379 
1380  Zoltan2::OrderingProblem<SparseGraphAdapter> problem(&adapter, &params,MPI_COMM_SELF);
1381  problem.solve();
1382 
1383  // build a new global ording array using permutation
1384  Zoltan2::OrderingSolution<panzer::GlobalOrdinal,panzer::LocalOrdinal> * soln = problem.getSolution();
1385 
1386  size_t dummy;
1387  size_t checkLength = soln->getPermutationSize();
1388  panzer::LocalOrdinal * checkPerm = soln->getPermutation(&dummy);
1389 
1390  Teuchos::ArrayView<const panzer::GlobalOrdinal > oldOrder = map->getLocalElementList();
1391  TEUCHOS_ASSERT(checkLength==oldOrder.size());
1392  TEUCHOS_ASSERT(checkLength==newOrder.size());
1393 
1394  for(size_t i=0;i<checkLength;i++)
1395  newOrder[checkPerm[i]] = oldOrder[i];
1396  }
1397 
1398  return Tpetra::createNonContigMap<panzer::LocalOrdinal,panzer::GlobalOrdinal>(newOrder,communicator_);
1399 }
1400 */
1401 
1402 } /*panzer*/
1403 
1404 #endif