Panzer_DOFManager.cpp
1 // @HEADER
2 // ***********************************************************************
3 //
4 // Panzer: A partial differential equation assembly
5 // engine for strongly coupled complex multiphysics systems
6 // Copyright (2011) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact Roger P. Pawlowski (rppawlo@sandia.gov) and
39 // Eric C. Cyr (eccyr@sandia.gov)
40 // ***********************************************************************
41 // @HEADER
42 
43 #ifndef PANZER_DOF_MANAGER2_IMPL_HPP
44 #define PANZER_DOF_MANAGER2_IMPL_HPP
45 
46 #include <map>
47 #include <set>
48 
49 #include <mpi.h>
50 
51 #include "PanzerDofMgr_config.hpp"
52 #include "Panzer_FieldPattern.hpp"
53 #include "Panzer_FieldAggPattern.hpp"
54 #include "Panzer_GeometricAggFieldPattern.hpp"
55 #include "Panzer_ConnManager.hpp"
56 #include "Panzer_GlobalIndexer_Utilities.hpp"
57 #include "Panzer_DOFManager.hpp"
58 #include "Panzer_DOFManager_Functors.hpp"
59 #include "Panzer_NodalFieldPattern.hpp"
61 
62 #include "Teuchos_RCP.hpp"
63 #include "Teuchos_Array.hpp"
64 #include "Teuchos_ArrayView.hpp"
65 
66 #include "Tpetra_Map.hpp"
67 #include "Tpetra_Export.hpp"
68 #include "Tpetra_Vector.hpp"
69 #include "Tpetra_MultiVector.hpp"
70 
71 #include <unordered_set> // a hash table
72 
73 /*
74 #define HAVE_ZOLTAN2
75 #ifdef HAVE_ZOLTAN2
76 #include "Zoltan2_XpetraCrsGraphInput.hpp"
77 #include "Zoltan2_OrderingProblem.hpp"
78 #endif
79 */
80 
81 namespace panzer {
82 
84 namespace {
85 template <typename LocalOrdinal,typename GlobalOrdinal>
86 class HashTieBreak : public Tpetra::Details::TieBreak<LocalOrdinal,GlobalOrdinal> {
87  const unsigned int seed_;
88 
89 public:
90  HashTieBreak(const unsigned int seed = (2654435761U))
91  : seed_(seed) { }
92 
93  virtual std::size_t selectedIndex(GlobalOrdinal GID,
94  const std::vector<std::pair<int,LocalOrdinal> > & pid_and_lid) const
95  {
96  // this is Epetra's hash/Tpetra's default hash: See Tpetra HashTable
97  int intkey = (int) ((GID & 0x000000007fffffffLL) +
98  ((GID & 0x7fffffff80000000LL) >> 31));
99  return std::size_t((seed_ ^ intkey) % pid_and_lid.size());
100  }
101 };
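// Sketch of how this tie break is used (stated as an assumption for illustration):
// Tpetra::createOneToOne consults the TieBreak for a GID that several processes
// hold, passing the candidate (pid,lid) pairs. The owner chosen above is
// (seed_ ^ intkey) % pid_and_lid.size(), where intkey folds the 64-bit GID down
// to 32 bits, so ownership of shared GIDs is spread pseudo-randomly over the
// sharing processes rather than being tied to process rank.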
102 
103 }
104 
106 namespace {
107 template <typename LocalOrdinal,typename GlobalOrdinal>
108 class GreedyTieBreak : public Tpetra::Details::TieBreak<LocalOrdinal,GlobalOrdinal> {
109 
110 public:
111  GreedyTieBreak() { }
112 
113  virtual bool mayHaveSideEffects() const {
114  return true;
115  }
116 
117  virtual std::size_t selectedIndex(GlobalOrdinal /* GID */,
118  const std::vector<std::pair<int,LocalOrdinal> > & pid_and_lid) const
119  {
120  // always choose index of pair with smallest pid
121  const std::size_t numLids = pid_and_lid.size();
122  std::size_t idx = 0;
123  int minpid = pid_and_lid[0].first;
124  std::size_t minidx = 0;
125  for (idx = 0; idx < numLids; ++idx) {
126  if (pid_and_lid[idx].first < minpid) {
127  minpid = pid_and_lid[idx].first;
128  minidx = idx;
129  }
130  }
131  return minidx;
132  }
133 };
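// Note: unlike the HashTieBreak above, this tie break always hands a shared GID
// to the candidate with the smallest owning rank, so the resulting one-to-one
// map is deterministic and biased toward lower ranks. Returning true from
// mayHaveSideEffects() appears to tell Tpetra not to skip calls to
// selectedIndex() (hedged reading of the Tpetra::Details::TieBreak interface).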
134 
135 }
136 
140 
141 using Teuchos::RCP;
142 using Teuchos::rcp;
143 using Teuchos::ArrayRCP;
144 using Teuchos::Array;
145 using Teuchos::ArrayView;
146 
148 DOFManager::DOFManager()
149  : numFields_(0),buildConnectivityRun_(false),requireOrientations_(false), useTieBreak_(false), useNeighbors_(false)
150 { }
151 
153 DOFManager::DOFManager(const Teuchos::RCP<ConnManager> & connMngr,MPI_Comm mpiComm)
154  : numFields_(0),buildConnectivityRun_(false),requireOrientations_(false), useTieBreak_(false), useNeighbors_(false)
155 {
156  setConnManager(connMngr,mpiComm);
157 }
158 
160 void DOFManager::setConnManager(const Teuchos::RCP<ConnManager> & connMngr, MPI_Comm mpiComm)
161 {
162  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
163  "DOFManager::setConnManager: setConnManager cannot be called after "
164  "buildGlobalUnknowns has been called");
165  connMngr_ = connMngr;
166  //We acquire the block ordering from the connmanager.
167  connMngr_->getElementBlockIds(blockOrder_);
168  for (size_t i = 0; i < blockOrder_.size(); ++i) {
169  blockNameToID_.insert(std::map<std::string,int>::value_type(blockOrder_[i],i));
170  //We must also initialize vectors for FP associations.
171  }
172  blockToAssociatedFP_.resize(blockOrder_.size());
173  communicator_ = Teuchos::rcp(new Teuchos::MpiComm<int>(Teuchos::opaqueWrapper(mpiComm)));
174 }
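// Typical usage, sketched with hypothetical object names (connManager and
// fieldPattern are placeholders, not defined in this file):
//
//   panzer::DOFManager dofManager;
//   dofManager.setConnManager(connManager, MPI_COMM_WORLD); // mesh connectivity first
//   dofManager.addField("TEMPERATURE", fieldPattern);       // one or more fields
//   dofManager.buildGlobalUnknowns();                        // assign GIDs
//
// setConnManager must come first: it fixes the element block ordering that the
// field/field-pattern bookkeeping below relies on.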
175 
177 //Adds a field to be used in creating the Global Numbering
178 //Returns the index for the field pattern
179 int DOFManager::addField(const std::string & str, const Teuchos::RCP<const FieldPattern> & pattern,
180  const panzer::FieldType& type)
181 {
182  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
183  "DOFManager::addField: addField cannot be called after "
184  "buildGlobalUnknowns has been called");
185 
186  fieldPatterns_.push_back(pattern);
187  fieldTypes_.push_back(type);
188  fieldNameToAID_.insert(std::map<std::string,int>::value_type(str, numFields_));
189 
190  //The default values for IDs are the sequential order they are added in.
191  fieldStringOrder_.push_back(str);
192  fieldAIDOrder_.push_back(numFields_);
193 
194  for(std::size_t i=0;i<blockOrder_.size();i++) {
195  blockToAssociatedFP_[i].push_back(numFields_);
196  }
197 
198  ++numFields_;
199  return numFields_-1;
200 }
201 
203 int DOFManager::addField(const std::string & blockID, const std::string & str, const Teuchos::RCP<const FieldPattern> & pattern,
204  const panzer::FieldType& type)
205 {
206  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
207  "DOFManager::addField: addField cannot be called after "
208  "buildGlobalUnknowns has been called");
209  TEUCHOS_TEST_FOR_EXCEPTION((connMngr_==Teuchos::null),std::logic_error,
210  "DOFManager::addField: you must add a ConnManager before "
211  "you can associate a FP with a given block.");
212  bool found=false;
213  size_t blocknum=0;
214  while(!found && blocknum<blockOrder_.size()){
215  if(blockOrder_[blocknum]==blockID){
216  found=true;
217  break;
218  }
219  blocknum++;
220  }
221  TEUCHOS_TEST_FOR_EXCEPTION(!found,std::logic_error, "DOFManager::addField: Invalid block name.");
222 
223  //This will be different if the FieldPattern is already present.
224  //We need to check for that.
225  found=false;
226  std::map<std::string,int>::const_iterator fpIter = fieldNameToAID_.find(str);
227  if(fpIter!=fieldNameToAID_.end())
228  found=true;
229 
230  if(!found){
231  fieldPatterns_.push_back(pattern);
232  fieldTypes_.push_back(type);
233  fieldNameToAID_.insert(std::map<std::string,int>::value_type(str, numFields_));
234  //The default values for IDs are the sequential order they are added in.
235  fieldStringOrder_.push_back(str);
236  fieldAIDOrder_.push_back(numFields_);
237 
238  //This is going to be associated with blocknum.
239  blockToAssociatedFP_[blocknum].push_back(numFields_);
240  ++numFields_;
241  return numFields_-1;
242  }
243  else{
244  blockToAssociatedFP_[blocknum].push_back(fpIter->second);
245  return numFields_;
246  }
247 }
248 
250 Teuchos::RCP<const FieldPattern> DOFManager::getFieldPattern(const std::string & name) const
251 {
252  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(name);
253  if(fitr==fieldNameToAID_.end())
254  return Teuchos::null;
255 
256  if(fitr->second<int(fieldPatterns_.size()))
257  return fieldPatterns_[fitr->second];
258 
259  return Teuchos::null;
260 }
261 
263 Teuchos::RCP<const FieldPattern> DOFManager::getFieldPattern(const std::string & blockId, const std::string & fieldName) const
264 {
265  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(fieldName);
266  if(fitr==fieldNameToAID_.end())
267  return Teuchos::null;
268 
269  bool found=false;
270  size_t blocknum=0;
271  while(!found && blocknum<blockOrder_.size()){
272  if(blockOrder_[blocknum]==blockId){
273  found=true;
274  break;
275  }
276  blocknum++;
277  }
278 
279  if(!found)
280  return Teuchos::null;
281 
282  std::vector<int>::const_iterator itr = std::find(blockToAssociatedFP_[blocknum].begin(),
283  blockToAssociatedFP_[blocknum].end(),fitr->second);
284  if(itr!=blockToAssociatedFP_[blocknum].end()) {
285  if(fitr->second<int(fieldPatterns_.size()))
286  return fieldPatterns_[fitr->second];
287  }
288 
289  return Teuchos::null;
290 }
291 
293 void DOFManager::getOwnedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
294 {
295  indices = owned_;
296 }
297 
299 void DOFManager::getGhostedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
300 {
301  indices = ghosted_;
302 }
303 
305 void DOFManager::getOwnedAndGhostedIndices(std::vector<panzer::GlobalOrdinal>& indices) const
306 {
307  using std::size_t;
308  indices.resize(owned_.size() + ghosted_.size());
309  for (size_t i(0); i < owned_.size(); ++i)
310  indices[i] = owned_[i];
311  for (size_t i(0); i < ghosted_.size(); ++i)
312  indices[owned_.size() + i] = ghosted_[i];
313 }
314 
316 void DOFManager::getElementGIDsAsInt(panzer::LocalOrdinal localElementID, std::vector<int>& gids, const std::string& /* blockIdHint */) const
317 {
318  const auto & element_ids = elementGIDs_[localElementID];
319  gids.resize(element_ids.size());
320  for (std::size_t i=0; i < gids.size(); ++i)
321  gids[i] = element_ids[i];
322 }
323 
325 void DOFManager::getOwnedIndicesAsInt(std::vector<int>& indices) const
326 {
327  indices.resize(owned_.size());
328  for (std::size_t i=0; i < owned_.size(); ++i)
329  indices[i] = owned_[i];
330 }
331 
333 void DOFManager::getGhostedIndicesAsInt(std::vector<int>& indices) const
334 {
335  indices.resize(ghosted_.size());
336  for (std::size_t i=0; i < ghosted_.size(); ++i)
337  indices[i] = ghosted_[i];
338 }
339 
341 void DOFManager::getOwnedAndGhostedIndicesAsInt(std::vector<int>& indices) const
342 {
343  indices.resize(owned_.size() + ghosted_.size());
344  for (std::size_t i=0; i < owned_.size(); ++i)
345  indices[i] = owned_[i];
346  for (std::size_t i=0; i < ghosted_.size(); ++i)
347  indices[owned_.size() + i] = ghosted_[i];
348 }
349 
351 int DOFManager::getNumOwned() const
352 {
353  return owned_.size();
354 }
355 
357 int DOFManager::getNumGhosted() const
358 {
359  return ghosted_.size();
360 }
361 
363 int DOFManager::getNumOwnedAndGhosted() const
364 {
365  return owned_.size() + ghosted_.size();
366 }
367 
369 int DOFManager::getNumFields() const
370 {
371  return numFields_;
372 }
373 
375 const std::vector<int> &
376 DOFManager::getGIDFieldOffsets(const std::string & blockID, int fieldNum) const
377 {
378  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error, "DOFManager::getGIDFieldOffsets: cannot be called before "
379  "buildGlobalUnknowns has been called");
380  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockID);
381  if(bitr==blockNameToID_.end())
382  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name");
383  int bid=bitr->second;
384  if(fa_fps_[bid]!=Teuchos::null)
385  return fa_fps_[bid]->localOffsets(fieldNum);
386 
387  static const std::vector<int> empty;
388  return empty;
389 }
390 
392 const Kokkos::View<const int*,PHX::Device>
393 DOFManager::getGIDFieldOffsetsKokkos(const std::string & blockID, int fieldNum) const
394 {
395  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error, "DOFManager::getGIDFieldOffsets: cannot be called before "
396  "buildGlobalUnknowns has been called");
397  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockID);
398  if(bitr==blockNameToID_.end())
399  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name");
400  int bid=bitr->second;
401  if(fa_fps_[bid]!=Teuchos::null)
402  return fa_fps_[bid]->localOffsetsKokkos(fieldNum);
403 
404  static const Kokkos::View<int*,PHX::Device> empty("panzer::DOFManager::getGIDFieldOffsetsKokkos() empty",0);
405  return empty;
406 }
407 
409 void DOFManager::getElementGIDs(panzer::LocalOrdinal localElementID, std::vector<panzer::GlobalOrdinal>& gids, const std::string& /* blockIdHint */) const
410 {
411  gids = elementGIDs_[localElementID];
412 }
413 
415 void DOFManager::buildGlobalUnknowns()
416 {
417  /* STEPS.
418  * 1. Build GA_FP and all block's FA_FP's and place into respective data structures.
419  */
420  if(requireOrientations_) {
421  fieldPatterns_.push_back(Teuchos::rcp(new NodalFieldPattern(fieldPatterns_[0]->getCellTopology())));
422  fieldTypes_.push_back(FieldType::CG);
423  }
424 
425  TEUCHOS_ASSERT(fieldPatterns_.size() == fieldTypes_.size());
426  std::vector<std::pair<FieldType,Teuchos::RCP<const FieldPattern>>> tmp;
427  for (std::size_t i=0; i < fieldPatterns_.size(); ++i)
428  tmp.push_back(std::make_pair(fieldTypes_[i],fieldPatterns_[i]));
429 
430  Teuchos::RCP<GeometricAggFieldPattern> aggFieldPattern = Teuchos::rcp(new GeometricAggFieldPattern(tmp));
431 
432  connMngr_->buildConnectivity(*aggFieldPattern);
433 
434  // using new geometric pattern, build global unknowns
435  buildGlobalUnknowns(aggFieldPattern);
436 }
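// Note: the overload above only assembles the geometric aggregate field pattern
// (the union of every registered field pattern, plus a nodal pattern when
// orientations are required so that vertices are present), rebuilds the mesh
// connectivity from it, and then delegates to the main overload below.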
437 
439 void DOFManager::buildGlobalUnknowns(const Teuchos::RCP<const FieldPattern> & geomPattern)
440 {
441  // some typedefs
442  typedef panzer::TpetraNodeType Node;
443  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
444 
445  typedef Tpetra::Import<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Import;
446 
447  //the GIDs are of type panzer::GlobalOrdinal.
448  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
449 
450  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns",buildGlobalUnknowns);
451 
452  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
453  out.setOutputToRootOnly(-1);
454  out.setShowProcRank(true);
455 
456  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
457  "DOFManager::buildGlobalUnknowns: buildGlobalUnknowns cannot be called again "
458  "after buildGlobalUnknowns has been called");
459 
460  // this is a safety check to make sure that nodes are included
461  // in the geometric field pattern when orientations are required
462  if(requireOrientations_) {
463  std::size_t sz = geomPattern->getSubcellIndices(0,0).size();
464 
465  TEUCHOS_TEST_FOR_EXCEPTION(sz==0,std::logic_error,
466  "DOFManager::buildGlobalUnknowns requires a geometric pattern including "
467  "the nodes when orientations are needed!");
468  }
469 
470  /* STEPS.
471  * 1. Build all block's FA_FP's and place into respective data structures.
472  */
473  ga_fp_ = geomPattern;
474 
475  // given a set of elements over each element block build an overlap
476  // map that will provide the required element entities for the
477  // set of elements requested.
478  ElementBlockAccess ownedAccess(true,connMngr_);
479 
480  // INPUT: To the algorithm in the GUN paper
481  RCP<MultiVector> tagged_overlap_mv = buildTaggedMultiVector(ownedAccess);
482  RCP<const Map> overlap_map = tagged_overlap_mv->getMap();
483 
484  RCP<MultiVector> overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlap_map,(size_t)numFields_);
485 
486  // call the GUN paper algorithm
487  auto non_overlap_pair = buildGlobalUnknowns_GUN(*tagged_overlap_mv,*overlap_mv);
488  RCP<MultiVector> non_overlap_mv = non_overlap_pair.first;
489  RCP<MultiVector> tagged_non_overlap_mv = non_overlap_pair.second;
490  RCP<const Map> non_overlap_map = non_overlap_mv->getMap();
491 
492  /* 14. Cross reference local element connectivity and overlap map to
493  * create final GID vectors.
494  */
495 
496  // this bit of code takes the uniquely assigned GIDs and spreads them
497  // out for processing by local element ID
498  fillGIDsFromOverlappedMV(ownedAccess,elementGIDs_,*overlap_map,*overlap_mv);
499 
500  // if neighbor unknowns are required, then make sure they are included
501  // in the elementGIDs_
502  if (useNeighbors_) { // enabling this turns on GID construction for
503  // neighbor processors
504  ElementBlockAccess neighborAccess(false,connMngr_);
505  RCP<const Map> overlap_map_neighbor =
506  buildOverlapMapFromElements(neighborAccess);
507 
508  // Export e(overlap_map_neighbor,non_overlap_map);
509  Import imp_neighbor(non_overlap_map,overlap_map_neighbor);
510 
511  Teuchos::RCP<MultiVector> overlap_mv_neighbor =
512  Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlap_map_neighbor, (size_t)numFields_);
513 
514  // get all neighbor information
515  overlap_mv_neighbor->doImport(*non_overlap_mv, imp_neighbor,
516  Tpetra::REPLACE);
517 
518  fillGIDsFromOverlappedMV(neighborAccess, elementGIDs_,
519  *overlap_map_neighbor, *overlap_mv_neighbor);
520  }
521 
523  // this is where the code is modified to artificially induce GIDs
524  // over 2 Billion unknowns
526  #if 0
527  {
528  panzer::GlobalOrdinal offset = 0xFFFFFFFFLL;
529 
530  for (size_t b = 0; b < blockOrder_.size(); ++b) {
531  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
532  for (std::size_t l = 0; l < myElements.size(); ++l) {
533  std::vector<panzer::GlobalOrdinal> & localGIDs = elementGIDs_[myElements[l]];
534  for(std::size_t c=0;c<localGIDs.size();c++)
535  localGIDs[c] += offset;
536  }
537  }
538 
539  Teuchos::ArrayRCP<panzer::GlobalOrdinal> nvals = non_overlap_mv->get1dViewNonConst();
540  for (int j = 0; j < nvals.size(); ++j)
541  nvals[j] += offset;
542  }
543  #endif
544 
545  // build owned vector
546  {
547  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_owned_vector",build_owned_vector);
548 
549  typedef std::unordered_set<panzer::GlobalOrdinal> HashTable;
550  HashTable isOwned, remainingOwned;
551 
552  // owned_ is made up of owned_ids. Note: this doesn't work for high order
553  non_overlap_mv->sync_host();
554  tagged_non_overlap_mv->sync_host();
555  PHX::Device().fence();
556  auto nvals = non_overlap_mv->template getLocalView<PHX::Device>();
557  auto tagged_vals = tagged_non_overlap_mv->template getLocalView<PHX::Device>();
558  TEUCHOS_ASSERT(nvals.size()==tagged_vals.size());
559  for (size_t i = 0; i < nvals.extent(1); ++i) {
560  for (size_t j = 0; j < nvals.extent(0); ++j) {
561  if (nvals(j,i) != -1) {
562  for(panzer::GlobalOrdinal offset=0;offset<tagged_vals(j,i);++offset)
563  isOwned.insert(nvals(j,i)+offset);
564  }
565  else {
566  // sanity check
567  TEUCHOS_ASSERT(tagged_vals(j,i)==0);
568  }
569  }
570  }
571  remainingOwned = isOwned;
572 
573  HashTable hashTable; // use to detect if global ID has been added to owned_
574  for (size_t b = 0; b < blockOrder_.size(); ++b) {
575 
576  if(fa_fps_[b]==Teuchos::null)
577  continue;
578 
579  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
580 
581  for (size_t l = 0; l < myElements.size(); ++l) {
582  const std::vector<panzer::GlobalOrdinal> & localOrdering = elementGIDs_[myElements[l]];
583 
584  // add "novel" global ids that are also owned to owned array
585  for(std::size_t i=0;i<localOrdering.size();i++) {
586  // don't try to add if ID is not owned
587  if(isOwned.find(localOrdering[i])==isOwned.end())
588  continue;
589 
590  // only add a global ID if it has not been added
591  std::pair<typename HashTable::iterator,bool> insertResult = hashTable.insert(localOrdering[i]);
592  if(insertResult.second) {
593  owned_.push_back(localOrdering[i]);
594  remainingOwned.erase(localOrdering[i]);
595  }
596  }
597  }
598  }
599 
600  // add any other owned GIDs that were not already included.
601  // these are ones that are owned locally but not required by any
602  // element owned by this processor (uggh!)
603  for(typename HashTable::const_iterator itr=remainingOwned.begin();itr!=remainingOwned.end();itr++)
604  owned_.push_back(*itr);
605 
606  if(owned_.size()!=isOwned.size()) {
607  out << "I'm about to hang because of unknown numbering failure ... sorry! (line = " << __LINE__ << ")" << std::endl;
608  TEUCHOS_TEST_FOR_EXCEPTION(owned_.size()!=isOwned.size(),std::logic_error,
609  "DOFManager::buildGlobalUnknowns: Failure because not all owned unknowns have been accounted for.");
610  }
611 
612  }
613 
614  // Build the ghosted_ array. The old simple way led to slow Jacobian
615  // assembly; the new way speeds up Jacobian assembly.
616  {
617  // Loop over all the elements and do a greedy ordering of local values over
618  // the elements for building ghosted_. Hopefully this gives a better
619  // layout for an element-ordered assembly.
620  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_ghosted_array",BGA);
621 
622  // Use a hash table to detect if global IDs have been added to owned_.
623  typedef std::unordered_set<panzer::GlobalOrdinal> HashTable;
624  HashTable hashTable;
625  for (std::size_t i = 0; i < owned_.size(); i++)
626  hashTable.insert(owned_[i]);
627 
628  // Here we construct an accessor vector, such that we first process
629  // everything in the current element block, optionally followed by
630  // everything in the neighbor element block.
631  std::vector<ElementBlockAccess> blockAccessVec;
632  blockAccessVec.push_back(ElementBlockAccess(true,connMngr_));
633  if(useNeighbors_)
634  blockAccessVec.push_back(ElementBlockAccess(false,connMngr_));
635  for (std::size_t a = 0; a < blockAccessVec.size(); ++a)
636  {
637  // Get the access type (owned or neighbor).
638  const ElementBlockAccess& access = blockAccessVec[a];
639  for (size_t b = 0; b < blockOrder_.size(); ++b)
640  {
641  if (fa_fps_[b] == Teuchos::null)
642  continue;
643  const std::vector<panzer::LocalOrdinal>& myElements =
644  access.getElementBlock(blockOrder_[b]);
645  for (size_t l = 0; l < myElements.size(); ++l)
646  {
647  const std::vector<panzer::GlobalOrdinal>& localOrdering = elementGIDs_[myElements[l]];
648 
649  // Add "novel" global IDs into the ghosted_ vector.
650  for (std::size_t i = 0; i < localOrdering.size(); ++i)
651  {
652  std::pair<typename HashTable::iterator, bool> insertResult =
653  hashTable.insert(localOrdering[i]);
654 
655  // If the insertion succeeds, then this is "novel" to the owned_
656  // and ghosted_ vectors, so include it in ghosted_.
657  if(insertResult.second)
658  ghosted_.push_back(localOrdering[i]);
659  }
660  }
661  }
662  }
663  }
664 
665  buildConnectivityRun_ = true;
666 
667  // build orientations if required
668  if(requireOrientations_) {
669  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_orientation",BO);
670  this->buildUnknownsOrientation();
671  }
672 
673  // allocate the local IDs
674  if (useNeighbors_) {
675  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_local_ids_from_owned_and_ghosted",BLOFOG);
676  this->buildLocalIdsFromOwnedAndGhostedElements();
677  }
678  else {
679  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns::build_local_ids",BLI);
680  this->buildLocalIds();
681  }
682 }
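// After buildGlobalUnknowns returns, owned_, ghosted_, and elementGIDs_ are
// populated and the element-local IDs have been registered, so accessors such as
// getElementGIDs(), getOwnedIndices(), and getGhostedIndices() are usable.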
683 
685 std::pair<Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >,
686  Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> > >
687 DOFManager::buildGlobalUnknowns_GUN(const Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & tagged_overlap_mv,
688  Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & overlap_mv) const
689 {
690  // some typedefs
691  typedef panzer::TpetraNodeType Node;
692  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
693 
694  typedef Tpetra::Export<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Export;
695  typedef Tpetra::Import<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> Import;
696 
697  //the GIDs are of type panzer::GlobalOrdinal.
698  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
699 
700  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN",BGU_GUN);
701 
702  // LINE 2: In the GUN paper
703  RCP<const Map> overlap_map = tagged_overlap_mv.getMap();
704 
705  /* 6. Create a OneToOne map from the overlap map.
706  */
707 
708  // LINE 4: In the GUN paper
709 
710  RCP<const Map> non_overlap_map;
711  if(!useTieBreak_) {
712  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_04 createOneToOne",GUN04);
713 
714  GreedyTieBreak<panzer::LocalOrdinal,panzer::GlobalOrdinal> greedy_tie_break;
715  non_overlap_map = Tpetra::createOneToOne<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node>(overlap_map, greedy_tie_break);
716  }
717  else {
718  // use a hash tie break to get better load balancing from create one to one
719  // Aug. 4, 2016...this is a bad idea and doesn't work
720  HashTieBreak<panzer::LocalOrdinal,panzer::GlobalOrdinal> tie_break;
721  non_overlap_map = Tpetra::createOneToOne<panzer::LocalOrdinal,panzer::GlobalOrdinal,Node>(overlap_map,tie_break);
722  }
723 
724  /* 7. Create a non-overlapped multivector from OneToOne map.
725  */
726 
727  // LINE 5: In the GUN paper
728 
729  Teuchos::RCP<MultiVector> tagged_non_overlap_mv;
730  {
731  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_05 alloc_unique_mv",GUN05);
732 
733  tagged_non_overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(non_overlap_map,(size_t)numFields_);
734  }
735 
736  /* 8. Create an export between the two maps.
737  */
738 
739  // LINE 6: In the GUN paper
740  RCP<Export> exp;
741  RCP<Import> imp;
742  RCP<MultiVector> non_overlap_mv;
743  {
744  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_06 export",GUN06);
745 
746  exp = rcp(new Export(overlap_map,non_overlap_map));
747 
748  /* 9. Export data using ABSMAX.
749  */
750  tagged_non_overlap_mv->doExport(tagged_overlap_mv,*exp,Tpetra::ABSMAX);
751 
752  // copy the tagged one, so as to preserve the tagged MV so we can overwrite
753  // the non_overlap_mv
754  non_overlap_mv = rcp(new MultiVector(*tagged_non_overlap_mv,Teuchos::Copy));
755  }
756 
757  /* 10. Compute the local sum using Kokkos.
758  */
759 
760  // LINES 7-9: In the GUN paper
761 
762  panzer::GlobalOrdinal localsum=0;
763  {
764  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_07-09 local_count",GUN07_09);
765  auto values = non_overlap_mv->getLocalViewDevice();
766  auto mv_size = values.extent(0);
767  Kokkos::parallel_reduce(mv_size,panzer::dof_functors::SumRank2<panzer::GlobalOrdinal,decltype(values)>(values),localsum);
768  PHX::Device().fence();
769  }
770 
771  /* 11. Create a map using local sums to generate final GIDs.
772  */
773 
774  // LINE 10: In the GUN paper
775 
776  panzer::GlobalOrdinal myOffset = -1;
777  {
778  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_10 prefix_sum",GUN_10);
779 
780  // do a prefix sum
781  panzer::GlobalOrdinal scanResult = 0;
782  Teuchos::scan<int, panzer::GlobalOrdinal> (*getComm(), Teuchos::REDUCE_SUM, static_cast<panzer::GlobalOrdinal> (localsum), Teuchos::outArg (scanResult));
783  myOffset = scanResult - localsum;
784  }
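// Worked example of the scan (illustrative numbers only): with local sums
// {4,7,5} on ranks 0,1,2 the inclusive sum-scan returns {4,11,16}, so
// myOffset = scanResult - localsum gives {0,4,11}; each rank then numbers its
// owned DOFs contiguously starting at its own offset.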
785 
786  // LINE 11 and 12: In the GUN paper, these steps are eliminated because
787  // the non_overlap_mv is reused
788 
789  /* 12. Iterate through the non-overlapping MV and assign GIDs to
790  * the necessary points. (Assign a -1 elsewhere.)
791  */
792 
793  // LINES 13-21: In the GUN paper
794 
795  {
796  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_13-21 gid_assignment",GUN13_21);
797  int which_id=0;
798  if (non_overlap_mv->need_sync_host())
799  non_overlap_mv->sync_host();
800  auto editnonoverlap = non_overlap_mv->getLocalViewHost();
801  for(size_t i=0; i<non_overlap_mv->getLocalLength(); ++i){
802  for(int j=0; j<numFields_; ++j){
803  if(editnonoverlap(i,j)!=0){
804  // editnonoverlap[j][i]=myOffset+which_id;
805  int ndof = Teuchos::as<int>(editnonoverlap(i,j));
806  editnonoverlap(i,j)=myOffset+which_id;
807  which_id+=ndof;
808  }
809  else{
810  editnonoverlap(i,j)=-1;
811  }
812 
813  }
814  }
815  non_overlap_mv->modify_host();
816  non_overlap_mv->sync_device();
817  PHX::Device().fence();
818  }
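// Illustrative example: if a row of the non-overlapping vector carries the tags
// {2, 0, 1} (DOF counts per field) and myOffset is 10, the loop above rewrites
// it as {10, -1, 12}: field 0 receives GIDs 10 and 11, field 1 has no DOFs on
// this entity (marked -1), and field 2 receives GID 12.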
819 
820  // LINE 22: In the GUN paper. This was performed above, and the overlap_mv is
821  // abused to handle input tagging.
822 
823  /* 13. Import data back to the overlap MV using REPLACE.
824  */
825 
826  // LINE 23: In the GUN paper
827 
828  {
829  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildGlobalUnknowns_GUN::line_23 final_import",GUN23);
830 
831  // use exporter to save on communication setup costs
832  overlap_mv.doImport(*non_overlap_mv,*exp,Tpetra::REPLACE);
833  }
834 
835  //std::cout << Teuchos::describe(*non_overlap_mv,Teuchos::VERB_EXTREME) << std::endl;
836 
837  // return non_overlap_mv;
838  return std::make_pair(non_overlap_mv,tagged_non_overlap_mv);
839 }
840 
842 Teuchos::RCP<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
843 DOFManager::buildTaggedMultiVector(const ElementBlockAccess & ownedAccess)
844 {
845  // some typedefs
846  typedef panzer::TpetraNodeType Node;
847  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
848  typedef Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,Node> MultiVector;
849 
850  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector",BTMV);
851 
852  //We will iterate through all of the blocks, building a FieldAggPattern for
853  //each of them.
854 
855  for (size_t b = 0; b < blockOrder_.size(); ++b) {
856  std::vector<std::tuple< int, panzer::FieldType, RCP<const panzer::FieldPattern> > > faConstruct;
857  //The ID is going to be the AID, and then everything will work.
858  //The ID should not be the AID, it should be the ID it has in the ordering.
859 
860  for (size_t i = 0; i < fieldAIDOrder_.size(); ++i) {
861  int looking = fieldAIDOrder_[i];
862 
863  //Check if in b's fp list
864  std::vector<int>::const_iterator reu = std::find(blockToAssociatedFP_[b].begin(), blockToAssociatedFP_[b].end(), looking);
865  if(!(reu==blockToAssociatedFP_[b].end())){
866  faConstruct.push_back(std::make_tuple(i, fieldTypes_[fieldAIDOrder_[i]], fieldPatterns_[fieldAIDOrder_[i]]));
867  }
868 
869  }
870 
871  if(faConstruct.size()>0) {
872  fa_fps_.push_back(rcp(new FieldAggPattern(faConstruct, ga_fp_)));
873 
874  // how many global IDs are in this element block?
875  int gidsInBlock = fa_fps_[fa_fps_.size()-1]->numberIds();
876  elementBlockGIDCount_.push_back(gidsInBlock);
877  }
878  else {
879  fa_fps_.push_back(Teuchos::null);
880  elementBlockGIDCount_.push_back(0);
881  }
882  }
883 
884  RCP<const Map> overlapmap = buildOverlapMapFromElements(ownedAccess);
885  PHX::Device().fence();
886 
887  // LINE 22: In the GUN paper...the overlap_mv is reused for the tagged multivector.
888  // This is a bit of a practical abuse of the algorithm presented in the paper.
889 
890  Teuchos::RCP<MultiVector> overlap_mv;
891  {
892  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector::allocate_tagged_multivector",ATMV);
893 
894  overlap_mv = Tpetra::createMultiVector<panzer::GlobalOrdinal>(overlapmap,(size_t)numFields_);
895  overlap_mv->putScalar(0); // if tpetra is not initialized with zeros
896  PHX::Device().fence();
897  }
898 
899  /* 5. Iterate through all local elements again, checking with the FP
900  * information. Mark up the overlap map accordingly.
901  */
902 
903  {
904  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::DOFManager::buildTaggedMultiVector::fill_tagged_multivector",FTMV);
905 
906  // temporary working vector to fill each row in tagged array
907  std::vector<int> working(overlap_mv->getNumVectors());
908  overlap_mv->sync_host();
909  PHX::Device().fence();
910  auto edittwoview_host = overlap_mv->getLocalViewHost();
911  for (size_t b = 0; b < blockOrder_.size(); ++b) {
912  // there has to be a field pattern associated with the block
913  if(fa_fps_[b]==Teuchos::null)
914  continue;
915 
916  const std::vector<panzer::LocalOrdinal> & numFields= fa_fps_[b]->numFieldsPerId();
917  const std::vector<panzer::LocalOrdinal> & fieldIds= fa_fps_[b]->fieldIds();
918  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
919  for (size_t l = 0; l < myElements.size(); ++l) {
920  auto connSize = connMngr_->getConnectivitySize(myElements[l]);
921  const auto * elmtConn = connMngr_->getConnectivity(myElements[l]);
922  int offset=0;
923  for (int c = 0; c < connSize; ++c) {
924  size_t lid = overlapmap->getLocalElement(elmtConn[c]);
925 
926  for(std::size_t i=0;i<working.size();i++)
927  working[i] = 0;
928  for (int n = 0; n < numFields[c]; ++n) {
929  int whichField = fieldIds[offset];
930  //Row will be lid. column will be whichField.
931  //Shove onto local ordering
932  working[whichField]++;
933  offset++;
934  }
935  for(std::size_t i=0;i<working.size();i++) {
936  auto current = edittwoview_host(lid,i);
937  edittwoview_host(lid,i) = (current > working[i]) ? current : working[i];
938  }
939 
940  }
941  }
942  }
943  overlap_mv->modify_host();
944  overlap_mv->sync_device();
945  PHX::Device().fence();
946 
947  // // verbose output for inspecting overlap_mv
948  // for(int i=0;i<overlap_mv->getLocalLength(); i++) {
949  // for(int j=0;j<overlap_mv->getNumVectors() ; j++)
950  // std::cout << edittwoview[j][i] << " ";
951  // std::cout << std::endl;
952  // }
953  }
954 
955  return overlap_mv;
956 }
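// The returned "tagged" multivector is indexed by (geometric entity, field):
// entry (lid,f) holds the number of DOFs field f places on that entity, taken
// as the maximum over all local elements touching it. These counts are what the
// GUN algorithm above converts into contiguous GID ranges.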
957 
959 int DOFManager::getFieldNum(const std::string & string) const
960 {
961  int ind=0;
962  bool found=false;
963  while(!found && (size_t)ind<fieldStringOrder_.size()){
964  if(fieldStringOrder_[ind]==string)
965  found=true;
966  else
967  ind++;
968  }
969 
970  if(found)
971  return ind;
972 
973  // didn't find anything...return -1
974  return -1;
975 }
976 
978 void DOFManager::getFieldOrder(std::vector<std::string> & fieldOrder) const
979 {
980  fieldOrder.resize(fieldStringOrder_.size());
981  for (size_t i = 0; i < fieldStringOrder_.size(); ++i)
982  fieldOrder[i]=fieldStringOrder_[i];
983 }
984 
986 bool DOFManager::fieldInBlock(const std::string & field, const std::string & block) const
987 {
988  std::map<std::string,int>::const_iterator fitr = fieldNameToAID_.find(field);
989  if(fitr==fieldNameToAID_.end()) {
990  std::stringstream ss;
991  printFieldInformation(ss);
992  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid field name. DOF information is:\n"+ss.str());
993  }
994  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(block);
995  if(bitr==blockNameToID_.end()) {
996  std::stringstream ss;
997  printFieldInformation(ss);
998  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name. DOF information is:\n"+ss.str());
999  }
1000  int fid=fitr->second;
1001  int bid=bitr->second;
1002 
1003  bool found=false;
1004  for (size_t i = 0; i < blockToAssociatedFP_[bid].size(); ++i) {
1005  if(blockToAssociatedFP_[bid][i]==fid){
1006  found=true;
1007  break;
1008  }
1009  }
1010 
1011  return found;
1012 }
1013 
1015 const std::vector<int> & DOFManager::getBlockFieldNumbers(const std::string & blockId) const
1016 {
1017  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error,"DOFManager::getBlockFieldNumbers: BuildConnectivity must be run first.");
1018 
1019  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1020  if(bitr==blockNameToID_.end())
1021  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name");
1022  int bid=bitr->second;
1023 
1024  // there has to be a field pattern associated with the block
1025  if(fa_fps_[bid]!=Teuchos::null)
1026  return fa_fps_[bid]->fieldIds();
1027 
1028  // nothing to return
1029  static std::vector<int> empty;
1030  return empty;
1031 }
1032 
1034 const std::pair<std::vector<int>,std::vector<int> > &
1035 DOFManager::getGIDFieldOffsets_closure(const std::string & blockId, int fieldNum, int subcellDim,int subcellId) const
1036 {
1037  TEUCHOS_TEST_FOR_EXCEPTION(!buildConnectivityRun_,std::logic_error,"DOFManager::getGIDFieldOffsets_closure: BuildConnectivity must be run first.");
1038  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1039  if(bitr==blockNameToID_.end())
1040  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error, "DOFManager::getGIDFieldOffsets_closure: invalid block name.");
1041 
1042  // there has to be a field pattern associated with the block
1043  if(fa_fps_[bitr->second]!=Teuchos::null)
1044  return fa_fps_[bitr->second]->localOffsets_closure(fieldNum, subcellDim, subcellId);
1045 
1046  static std::pair<std::vector<int>,std::vector<int> > empty;
1047  return empty;
1048 }
1049 
1051 void DOFManager::ownedIndices(const std::vector<panzer::GlobalOrdinal> & indices,std::vector<bool> & isOwned) const
1052 {
1053  //Resizes the isOwned array.
1054  if(indices.size()!=isOwned.size())
1055  isOwned.resize(indices.size(),false);
1056  typename std::vector<panzer::GlobalOrdinal>::const_iterator endOf = owned_.end();
1057  for (std::size_t i = 0; i < indices.size(); ++i) {
1058  isOwned[i] = ( std::find(owned_.begin(), owned_.end(), indices[i])!=endOf );
1059  }
1060 }
1061 
1063 void DOFManager::setFieldOrder(const std::vector<std::string> & fieldOrder)
1064 {
1065  TEUCHOS_TEST_FOR_EXCEPTION(buildConnectivityRun_,std::logic_error,
1066  "DOFManager::setFieldOrder: setFieldOrder cannot be called after "
1067  "buildGlobalUnknowns has been called");
1068  if(validFieldOrder(fieldOrder)){
1069  fieldStringOrder_=fieldOrder;
1070  //We also need to reassign the fieldAIDOrder_.
1071  for (size_t i = 0; i < fieldStringOrder_.size(); ++i) {
1072  fieldAIDOrder_[i] = fieldNameToAID_[fieldStringOrder_[i]];
1073  }
1074  }
1075  else {
1076  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::setFieldOrder: Invalid Field Ordering!");
1077  }
1078 }
1079 
1081 bool DOFManager::validFieldOrder(const std::vector<std::string> & proposed_fieldOrder)
1082 {
1083  if(fieldStringOrder_.size()!=proposed_fieldOrder.size())
1084  return false;
1085  //I'm using a not very efficient way of doing this, but there should never
1086  //be that many fields, so it isn't that big of a deal.
1087  for (size_t i = 0; i < fieldStringOrder_.size(); ++i) {
1088  bool found=false;
1089  for (size_t j = 0; j < proposed_fieldOrder.size(); ++j) {
1090  if(fieldStringOrder_[i]==proposed_fieldOrder[j])
1091  found=true;
1092  }
1093  if(!found)
1094  return false;
1095  }
1096  return true;
1097 }
1098 
1100 const std::string & DOFManager::getFieldString(int num) const
1101 {
1102  //A reverse lookup through fieldStringOrder_.
1103  if(num>=(int)fieldStringOrder_.size())
1104  TEUCHOS_TEST_FOR_EXCEPTION(true, std::logic_error, "DOFManager::getFieldString: invalid number");
1105  return fieldStringOrder_[num];
1106 }
1107 
1109 // Everything associated with orientation is not yet built, but this
1110 // is the method as found in Panzer_DOFManager_impl.hpp. There are
1111 // going to need to be some substantial changes to the code as it applies
1112 // to this DOFManager, but the basic ideas and format should be similar.
1113 //
1114 void DOFManager::buildUnknownsOrientation()
1115 {
1116  orientation_.clear(); // clean up previous work
1117 
1118  std::vector<std::string> elementBlockIds;
1119  connMngr_->getElementBlockIds(elementBlockIds);
1120 
1121  // figure out how many total elements are owned by this processor (why is this so hard!)
1122  std::size_t myElementCount = 0;
1123  for(std::vector<std::string>::const_iterator blockItr=elementBlockIds.begin(); blockItr!=elementBlockIds.end();++blockItr)
1124  myElementCount += connMngr_->getElementBlock(*blockItr).size();
1125 
1126  // allocate for each block
1127  orientation_.resize(myElementCount);
1128 
1129  // loop over all element blocks
1130  for(std::vector<std::string>::const_iterator blockItr=elementBlockIds.begin();
1131  blockItr!=elementBlockIds.end();++blockItr) {
1132  const std::string & blockName = *blockItr;
1133 
1134  // this block has no unknowns (or elements)
1135  std::map<std::string,int>::const_iterator fap = blockNameToID_.find(blockName);
1136  if(fap==blockNameToID_.end()) {
1137  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::buildUnknownsOrientation: invalid block name");
1138  }
1139 
1140  int bid=fap->second;
1141 
1142  if(fa_fps_[bid]==Teuchos::null)
1143  continue;
1144 
1145  // grab field patterns, will be necessary to compute orientations
1146  const FieldPattern & fieldPattern = *fa_fps_[bid];
1147 
1148  //Should be ga_fp_ (geometric aggregate field pattern)
1149  std::vector<std::pair<int,int> > topEdgeIndices;
1150  orientation_helpers::computePatternEdgeIndices(*ga_fp_,topEdgeIndices);
1151 
1152  // grab face orientations if 3D
1153  std::vector<std::vector<int> > topFaceIndices;
1154  if(ga_fp_->getDimension()==3)
1155  orientation_helpers::computePatternFaceIndices(*ga_fp_,topFaceIndices);
1156 
1157  //How many GIDs are associated with a particular element block
1158  const std::vector<panzer::LocalOrdinal> & elmts = connMngr_->getElementBlock(blockName);
1159  for(std::size_t e=0;e<elmts.size();e++) {
1160  // this is the vector of orientations to fill: initialize it correctly
1161  std::vector<signed char> & eOrientation = orientation_[elmts[e]];
1162 
1163  // This resize seems to be the same as fieldPattern.numberIds().
1164  // When the edge orientations are computed below, that is asserted.
1165  // There should be no reason to make it any more complicated.
1166  eOrientation.resize(fieldPattern.numberIds());
1167  for(std::size_t s=0;s<eOrientation.size();s++)
1168  eOrientation[s] = 1; // put in 1 by default
1169 
1170  // get geometry ids
1171  auto connSz = connMngr_->getConnectivitySize(elmts[e]);
1172  const panzer::GlobalOrdinal * connPtr = connMngr_->getConnectivity(elmts[e]);
1173  const std::vector<panzer::GlobalOrdinal> connectivity(connPtr,connPtr+connSz);
1174 
1175  orientation_helpers::computeCellEdgeOrientations(topEdgeIndices, connectivity, fieldPattern, eOrientation);
1176 
1177  // compute face orientations in 3D
1178  if(ga_fp_->getDimension()==3)
1179  orientation_helpers::computeCellFaceOrientations(topFaceIndices, connectivity, fieldPattern, eOrientation);
1180  }
1181  }
1182 }
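// Roughly speaking, each entry of orientation_ holds +1 or -1 per element GID:
// the orientation helpers flip the default +1 whenever the element's local edge
// (or face, in 3D) runs opposite to the globally agreed direction, which is what
// edge- and face-based field patterns need for conforming assembly.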
1183 
1185 void DOFManager::getElementOrientation(panzer::LocalOrdinal localElmtId,std::vector<double> & gidsOrientation) const
1186 {
1187  TEUCHOS_TEST_FOR_EXCEPTION(orientation_.size()==0,std::logic_error,
1188  "DOFManager::getElementOrientations: Orientations were not constructed!");
1189 
1190  const std::vector<signed char> & local_o = orientation_[localElmtId];
1191  gidsOrientation.resize(local_o.size());
1192  for(std::size_t i=0;i<local_o.size();i++) {
1193  gidsOrientation[i] = double(local_o[i]);
1194  }
1195 }
1196 
1197 Teuchos::RCP<ConnManager> DOFManager::resetIndices()
1198 {
1199  Teuchos::RCP<ConnManager> connMngr = connMngr_;
1200 
1201  connMngr_ = Teuchos::null;
1202 
1203  // wipe out FEI objects
1204  orientation_.clear(); // clean up previous work
1205  fa_fps_.clear();
1206  elementGIDs_.clear();
1207  owned_.clear();
1208  ghosted_.clear();
1209  elementBlockGIDCount_.clear();
1210 
1211  return connMngr;
1212 }
1213 
1215 std::size_t DOFManager::blockIdToIndex(const std::string & blockId) const
1216 {
1217  std::map<std::string,int>::const_iterator bitr = blockNameToID_.find(blockId);
1218  if(bitr==blockNameToID_.end())
1219  TEUCHOS_TEST_FOR_EXCEPTION(true,std::logic_error,"DOFManager::fieldInBlock: invalid block name");
1220  return bitr->second;
1221 }
1222 
1224 void DOFManager::printFieldInformation(std::ostream & os) const
1225 {
1226  os << "DOFManager Field Information: " << std::endl;
1227 
1228  // sanity check
1229  TEUCHOS_ASSERT(blockOrder_.size()==blockToAssociatedFP_.size());
1230 
1231  for(std::size_t i=0;i<blockOrder_.size();i++) {
1232  os << " Element Block = " << blockOrder_[i] << std::endl;
1233 
1234  // output field information
1235  const std::vector<int> & fieldIds = blockToAssociatedFP_[i];
1236  for(std::size_t f=0;f<fieldIds.size();f++)
1237  os << " \"" << getFieldString(fieldIds[f]) << "\" is field ID " << fieldIds[f] << std::endl;
1238  }
1239 }
1240 
1242 Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
1243 DOFManager::buildOverlapMapFromElements(const ElementBlockAccess & access) const
1244 {
1245  PANZER_FUNC_TIME_MONITOR("panzer::DOFManager::builderOverlapMapFromElements");
1246 
1247  /*
1248  * 2. Iterate through all local elements and create the overlapVector
1249  * of concerned elements.
1250  */
1251 
1252  std::set<panzer::GlobalOrdinal> overlapset;
1253  for (size_t i = 0; i < blockOrder_.size(); ++i) {
1254  const std::vector<panzer::LocalOrdinal> & myElements = access.getElementBlock(blockOrder_[i]);
1255  for (size_t e = 0; e < myElements.size(); ++e) {
1256  auto connSize = connMngr_->getConnectivitySize(myElements[e]);
1257  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[e]);
1258  for (int k = 0; k < connSize; ++k) {
1259  overlapset.insert(elmtConn[k]);
1260  }
1261  }
1262  }
1263 
1264  Array<panzer::GlobalOrdinal> overlapVector;
1265  for (typename std::set<panzer::GlobalOrdinal>::const_iterator itr = overlapset.begin(); itr!=overlapset.end(); ++itr) {
1266  overlapVector.push_back(*itr);
1267  }
1268 
1269  /* 3. Construct an overlap map from this structure.
1270  */
1271  return Tpetra::createNonContigMap<panzer::LocalOrdinal,panzer::GlobalOrdinal>(overlapVector,getComm());
1272 }
1273 
1275 void DOFManager::
1276 fillGIDsFromOverlappedMV(const ElementBlockAccess & access,
1277  std::vector<std::vector< panzer::GlobalOrdinal > > & elementGIDs,
1278  const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & overlapmap,
1279  const Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> & const_overlap_mv) const
1280 {
1281  using Teuchos::ArrayRCP;
1282 
1283  //To generate elementGIDs we need to go through all of the local elements.
1284  auto overlap_mv = const_cast<Tpetra::MultiVector<panzer::GlobalOrdinal,panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType>&>(const_overlap_mv);
1285  overlap_mv.sync_host();
1286  PHX::Device().fence();
1287  const auto twoview_host = overlap_mv.getLocalViewHost();
1288 
1289  //And for each of the things in fa_fp.fieldIds we go to that column. To get the row,
1290  //we move from globalID to localID in the map and use our local value for something.
1291  for (size_t b = 0; b < blockOrder_.size(); ++b) {
1292  const std::vector<panzer::LocalOrdinal> & myElements = access.getElementBlock(blockOrder_[b]);
1293 
1294  if(fa_fps_[b]==Teuchos::null) {
1295  // fill elements that are not used with empty vectors
1296  for (size_t l = 0; l < myElements.size(); ++l) {
1297  panzer::LocalOrdinal thisID=myElements[l];
1298  if(elementGIDs.size()<=(size_t)thisID)
1299  elementGIDs.resize(thisID+1);
1300  }
1301  continue;
1302  }
1303 
1304  const std::vector<int> & numFields= fa_fps_[b]->numFieldsPerId();
1305  const std::vector<int> & fieldIds= fa_fps_[b]->fieldIds();
1306  //
1307  //
1308  for (size_t l = 0; l < myElements.size(); ++l) {
1309  auto connSize = connMngr_->getConnectivitySize(myElements[l]);
1310  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[l]);
1311  std::vector<panzer::GlobalOrdinal> localOrdering;
1312  int offset=0;
1313  for (int c = 0; c < connSize; ++c) {
1314  size_t lid = overlapmap.getLocalElement(elmtConn[c]);
1315  std::vector<int> dofsPerField(numFields_,0);
1316  for (int n = 0; n < numFields[c]; ++n) {
1317  int whichField = fieldIds[offset];
1318  offset++;
1319  //Row will be lid. column will be whichField.
1320  //Shove onto local ordering
1321  localOrdering.push_back(twoview_host(lid,whichField)+dofsPerField[whichField]);
1322 
1323  dofsPerField[whichField]++;
1324  }
1325  }
1326  panzer::LocalOrdinal thisID=myElements[l];
1327  if(elementGIDs.size()<=(size_t)thisID){
1328  elementGIDs.resize(thisID+1);
1329  }
1330  elementGIDs[thisID]=localOrdering;
1331  }
1332  }
1333 }
1334 
1335 void DOFManager::buildLocalIdsFromOwnedAndGhostedElements()
1336 {
1337  std::vector<std::vector<panzer::LocalOrdinal> > elementLIDs(elementGIDs_.size());
1338 
1339  std::vector<panzer::GlobalOrdinal> ownedAndGhosted;
1340  this->getOwnedAndGhostedIndices(ownedAndGhosted);
1341 
1342  // build global to local hash map (temporary and used only once)
1343  std::unordered_map<panzer::GlobalOrdinal,panzer::LocalOrdinal> hashMap;
1344  for(std::size_t i = 0; i < ownedAndGhosted.size(); ++i)
1345  hashMap[ownedAndGhosted[i]] = i;
1346 
1347  for (std::size_t i = 0; i < elementGIDs_.size(); ++i) {
1348  const std::vector<panzer::GlobalOrdinal>& gids = elementGIDs_[i];
1349  std::vector<panzer::LocalOrdinal>& lids = elementLIDs[i];
1350  lids.resize(gids.size());
1351  for (std::size_t g = 0; g < gids.size(); ++g)
1352  lids[g] = hashMap[gids[g]];
1353  }
1354 
1355  this->setLocalIds(elementLIDs);
1356 }
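// Note: because the hash map above is filled from getOwnedAndGhostedIndices(),
// a local ID is simply the position of the GID in the concatenated
// [owned_, ghosted_] ordering: LIDs 0..getNumOwned()-1 are owned and the
// remaining LIDs are ghosted.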
1357 
1358 /*
1359 template <typename panzer::LocalOrdinal,typename panzer::GlobalOrdinal>
1360 Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> >
1361 DOFManager<panzer::LocalOrdinal,panzer::GlobalOrdinal>::runLocalRCMReordering(const Teuchos::RCP<const Tpetra::Map<panzer::LocalOrdinal,panzer::GlobalOrdinal,panzer::TpetraNodeType> > & map)
1362 {
1363  typedef panzer::TpetraNodeType Node;
1364  typedef Tpetra::Map<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Map;
1365  typedef Tpetra::CrsGraph<panzer::LocalOrdinal, panzer::GlobalOrdinal, Node> Graph;
1366 
1367  Teuchos::RCP<Graph> graph = Teuchos::rcp(new Graph(map,0));
1368 
1369  // build Crs Graph from the mesh
1370  for (size_t b = 0; b < blockOrder_.size(); ++b) {
1371  if(fa_fps_[b]==Teuchos::null)
1372  continue;
1373 
1374  const std::vector<panzer::LocalOrdinal> & myElements = connMngr_->getElementBlock(blockOrder_[b]);
1375  for (size_t l = 0; l < myElements.size(); ++l) {
1376  panzer::LocalOrdinal connSize = connMngr_->getConnectivitySize(myElements[l]);
1377  const panzer::GlobalOrdinal * elmtConn = connMngr_->getConnectivity(myElements[l]);
1378  for (int c = 0; c < connSize; ++c) {
1379  panzer::LocalOrdinal lid = map->getLocalElement(elmtConn[c]);
1380  if(Teuchos::OrdinalTraits<panzer::LocalOrdinal>::invalid()!=lid)
1381  continue;
1382 
1383  graph->insertGlobalIndices(elmtConn[c],Teuchos::arrayView(elmtConn,connSize));
1384  }
1385  }
1386  }
1387 
1388  graph->fillComplete();
1389 
1390  std::vector<panzer::GlobalOrdinal> newOrder(map->getNodeNumElements());
1391  {
1392  // graph is constructed, now run RCM using zoltan2
1393  typedef Zoltan2::XpetraCrsGraphInput<Graph> SparseGraphAdapter;
1394 
1395  Teuchos::ParameterList params;
1396  params.set("order_method", "rcm");
1397  SparseGraphAdapter adapter(graph);
1398 
1399  Zoltan2::OrderingProblem<SparseGraphAdapter> problem(&adapter, &params,MPI_COMM_SELF);
1400  problem.solve();
1401 
1402  // build a new global ording array using permutation
1403  Zoltan2::OrderingSolution<panzer::GlobalOrdinal,panzer::LocalOrdinal> * soln = problem.getSolution();
1404 
1405  size_t dummy;
1406  size_t checkLength = soln->getPermutationSize();
1407  panzer::LocalOrdinal * checkPerm = soln->getPermutation(&dummy);
1408 
1409  Teuchos::ArrayView<const panzer::GlobalOrdinal > oldOrder = map->getNodeElementList();
1410  TEUCHOS_ASSERT(checkLength==oldOrder.size());
1411  TEUCHOS_ASSERT(checkLength==newOrder.size());
1412 
1413  for(size_t i=0;i<checkLength;i++)
1414  newOrder[checkPerm[i]] = oldOrder[i];
1415  }
1416 
1417  return Tpetra::createNonContigMap<panzer::LocalOrdinal,panzer::GlobalOrdinal>(newOrder,communicator_);
1418 }
1419 */
1420 
1421 } /*panzer*/
1422 
1423 #endif