Panzer  Version of the Day
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Panzer_STK_PeriodicBC_Matcher_impl.hpp
Go to the documentation of this file.
1 // @HEADER
2 // *****************************************************************************
3 // Panzer: A partial differential equation assembly
4 // engine for strongly coupled complex multiphysics systems
5 //
6 // Copyright 2011 NTESS and the Panzer contributors.
7 // SPDX-License-Identifier: BSD-3-Clause
8 // *****************************************************************************
9 // @HEADER
10 
11 #include "Teuchos_Tuple.hpp"
12 #include "Teuchos_RCP.hpp"
13 
14 #include "Panzer_STK_Version.hpp"
15 #include "PanzerAdaptersSTK_config.hpp"
16 #include "Panzer_STK_Interface.hpp"
17 
18 #include "Teuchos_FancyOStream.hpp"
19 
20 #include <set>
21 
22 namespace panzer_stk {
23 namespace periodic_helpers {
24 
25 template <typename Matcher>
27 matchPeriodicSides(const std::string & left,const std::string & right,
28  const STK_Interface & mesh,
29  const Matcher & matcher,
30  const std::vector<std::pair<std::size_t,std::size_t> > & ownedToMapped, const std::string type_)
31 {
32  //
33  // Overview:
34  // A three step algorithm
35  // 1. Figure out all nodes and their coordinates that live on the "left"
36  // - Distribute information globally
37  // 2. Match the global nodes on the "left" to locally owned nodes on the "right"
38  // - only local work required
39  // 3. If a processor requires a node on the left (if it is owned or ghosted)
40  // communicate matching conditions from the right boundary
41  //
42  // Note: The matching check could definitely be sped up with a sorting operation
43  // Note: The communication could be done in a way that requires less global communication
44  // Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
45  //
46 
47  using Teuchos::Tuple;
48  using Teuchos::RCP;
49  using Teuchos::rcp;
50 
51  // First step is to globally distribute all the node ids and coordinates
52  // on the left hand side: requires All-2-All!
54  std::pair<RCP<std::vector<std::size_t> >,
55  RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
56  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
57  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;
58 
59  // Now using only local operations, find the right hand side nodes owned
60  // by this processor and the matching ones on the left that were previously calculated
63 
64  bool failure = false;
65  try {
66  locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
67  } catch(std::logic_error & e) {
68  locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
69  failure = true;
70 
71  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
72  out.setShowProcRank(true);
73  out.setOutputToRootOnly(-1);
74 
75  out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
76  }
77 
78  // Get the ids on the left required by this processor (they maybe ghosted),
79  // and using the matched ids computed above over all processors, find the
80  // corrsponding node on the right boundary.
82 
83  // THIS COMPLEX ALGORITHM COMPENSATES FOR THE PREVIOUS PAIRING BY UPDATING
84  // THE REQUEST LIST. THERE ALSO IS A REQUIREMENT OF PER PROC UNIQUENESS IN THE EPETRA
85  // IMPORT. SO THERE IS SOME ADDITIONAL WORK DONE TO REMOVE REPEATED ENTRIES
86 
87  // build reverse map
88  std::map<std::size_t,std::vector<std::size_t> > reverseMap;
89  for(std::size_t i=0;i<ownedToMapped.size();i++)
90  reverseMap[ownedToMapped[i].second].push_back(ownedToMapped[i].first);
91 
92  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
93  std::vector<std::size_t> saved_locallyRequiredIds = *locallyRequiredIds; // will be required
94  // to check owner/ghostship
95  // of IDs
96  std::vector<std::pair<std::size_t,std::size_t> > unusedOwnedToMapped;
97 
98  // apply previous mappings to this set of local IDs
99  for(std::size_t i=0;i<ownedToMapped.size();i++) {
100  std::size_t owned = ownedToMapped[i].first;
101  std::size_t mapped = ownedToMapped[i].second;
102 
103  std::vector<std::size_t>::iterator itr
104  = std::find(locallyRequiredIds->begin(),locallyRequiredIds->end(),owned);
105 
106  // if found, replace the local ID with the previously matched ID
107  // this means the locallyRequiredIds may now include IDs owned by a different processor
108  if(itr!=locallyRequiredIds->end())
109  *itr = mapped;
110  else
111  unusedOwnedToMapped.push_back(ownedToMapped[i]);
112  }
113 
114  // build a unique vector of locally matched IDs
115  std::vector<std::size_t> unique_locallyRequiredIds;
116  {
117  std::set<std::size_t> s;
118  s.insert(locallyRequiredIds->begin(),locallyRequiredIds->end());
119  unique_locallyRequiredIds.insert(unique_locallyRequiredIds.begin(),s.begin(),s.end());
120  }
121 
122  // next line requires communication
124  = getGlobalPairing(unique_locallyRequiredIds,*locallyMatchedIds,mesh,failure);
125 
126  // this nasty bit of code gurantees that only owned/ghosted IDs will
127  // end up in the globallyMatchedIds vector. It uses a search on
128  // only locally owned IDs to make sure that they are locally owned.
129  // this is a result (and fix) for some of the complexity incurred by
130  // the reverseMap above.
131  {
132  // fill up set with current globally matched ids (not neccessarily owned/ghosted)
133  std::set<std::pair<std::size_t,std::size_t> > gmi_set;
134  gmi_set.insert(globallyMatchedIds->begin(),globallyMatchedIds->end());
135 
136  // for each globally matched ID, update IDs from the previous
137  // run (i.e. from ownedToMapped) using the reverseMap
138  for(std::size_t i=0;i<globallyMatchedIds->size();i++) {
139  std::pair<std::size_t,std::size_t> pair = (*globallyMatchedIds)[i];
140  const std::vector<std::size_t> & others = reverseMap[pair.first];
141 
142  // add in reverse map (note other[j] is guranteed to be local to this processor
143  // if it was when ownedToMapped was passed in)
144  for(std::size_t j=0;j<others.size();j++)
145  gmi_set.insert(std::make_pair(others[j],pair.second));
146 
147  // remove ids that are not ghosted/owned by this processor
148  if(std::find(saved_locallyRequiredIds.begin(),
149  saved_locallyRequiredIds.end(),
150  pair.first)==saved_locallyRequiredIds.end()) {
151  gmi_set.erase(pair);
152  }
153  }
154 
155  // clear old data, and populate with new matched ids
156  globallyMatchedIds->clear();
157  globallyMatchedIds->insert(globallyMatchedIds->begin(),gmi_set.begin(),gmi_set.end());
158  }
159 
160  // now you have a pair of ids that maps ids on the left required by this processor
161  // to ids on the right
162  globallyMatchedIds->insert(globallyMatchedIds->end(),unusedOwnedToMapped.begin(),unusedOwnedToMapped.end());
163 
164  return globallyMatchedIds;
165 }
166 
167 template <typename Matcher>
169 matchPeriodicSides(const std::string & left,const std::string & right,
170  const STK_Interface & mesh,
171  const Matcher & matcher, const std::string type_)
172 {
173  //
174  // Overview:
175  // A three step algorithm
176  // 1. Figure out all nodes and their coordinates that live on the "left"
177  // - Distribute information globally
178  // 2. Match the global nodes on the "left" to locally owned nodes on the "right"
179  // - only local work required
180  // 3. If a processor requires a node on the left (if it is owned or ghosted)
181  // communicate matching conditions from the right boundary
182  //
183  // Note: The matching check could definitely be spead up with a sorting operation
184  // Note: The communication could be done in a way that requires less global communication
185  // Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
186  //
187 
188  using Teuchos::Tuple;
189  using Teuchos::RCP;
190  using Teuchos::rcp;
191 
192  // First step is to globally distribute all the node ids and coordinates
193  // on the left hand side: requires All-2-All!
195  std::pair<RCP<std::vector<std::size_t> >,
196  RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
197  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
198  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;
199 
200  // Now using only local operations, find the right hand side nodes owned
201  // by this processor and the matching ones on the left that were previously calculated
204 
205  bool failure = false;
206  try {
207  locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
208  } catch(std::logic_error & e) {
209  locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
210  failure = true;
211 
212  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
213  out.setShowProcRank(true);
214  out.setOutputToRootOnly(-1);
215 
216  out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
217  }
218 
219  // Get the ids on the left required by this processor (they maybe ghosted),
220  // and using the matched ids computed above over all processors, find the
221  // corrsponding node on the right boundary.
223 
224  // next line requires communication
225  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
227  = getGlobalPairing(*locallyRequiredIds,*locallyMatchedIds,mesh,failure);
228 
229  // now you have a pair of ids that maps ids on the left required by this processor
230  // to ids on the right
231 
232  return globallyMatchedIds;
233 }
234 
235 template <typename Matcher>
237 getLocallyMatchedSideIds(const std::vector<std::size_t> & side_ids,
238  const std::vector<Teuchos::Tuple<double,3> > & side_coords,
239  const panzer_stk::STK_Interface & mesh,
240  const std::string & sideName,const Matcher & matcher, std::string type_)
241 {
242  using Teuchos::RCP;
243  using Teuchos::Tuple;
244 
246  = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
247 
248  // grab local IDs and coordinates on this side
250 
251  std::pair<Teuchos::RCP<std::vector<std::size_t> >,
253  getLocalSideIdsAndCoords(mesh,sideName,type_);
254 
255  std::vector<std::size_t> & local_side_ids = *sidePair.first;
256  std::vector<Teuchos::Tuple<double,3> > & local_side_coords = *sidePair.second;
257 
258  bool checkProb = false;
259  std::vector<bool> side_flags(side_ids.size(),false);
260 
261  // do a slow search for the coordinates: this _can_ be sped
262  // up! (considered searches in sorted ranges using the sorted_permutation function)
264  for(std::size_t localNode=0;localNode<local_side_ids.size();localNode++) {
265  std::size_t local_gid = local_side_ids[localNode];
266  const Tuple<double,3> & local_coord = local_side_coords[localNode];
267 
268  // loop over globally distributed coordinates and find a match
269  for(std::size_t globalNode=0;globalNode<side_ids.size();globalNode++) {
270  std::size_t global_gid = side_ids[globalNode];
271  const Tuple<double,3> & global_coord = side_coords[globalNode];
272 
273  if(matcher(global_coord,local_coord)) {
274  if(side_flags[globalNode]) // has this node been matched by this
275  checkProb = true; // processor?
276 
277  result->push_back(std::make_pair(global_gid,local_gid));
278  side_flags[globalNode] = true;
279  continue;
280  }
281  }
282  }
283 
284  // make sure you matched everything you can: If this throws...it can
285  // cause the process to hang!
286  TEUCHOS_TEST_FOR_EXCEPTION(checkProb,std::logic_error,
287  "getLocallyMatchedSideIds: checkProb failed");
288 
289  TEUCHOS_TEST_FOR_EXCEPTION(local_side_ids.size()!=result->size(),std::logic_error,
290  "getLocallyMatchedSideIds: not all local side ids are satisfied!");
291 
292  return result;
293 }
294 
295 } // end periodic_helpers
296 } // end panzer_stk
Teuchos::RCP< std::vector< std::size_t > > getLocalSideIds(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > matchPeriodicSides(const std::string &left, const std::string &right, const STK_Interface &mesh, const Matcher &matcher, const std::string type_="coord")
basic_FancyOStream & setShowProcRank(const bool showProcRank)
#define TEUCHOS_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg)
std::pair< Teuchos::RCP< std::vector< std::size_t > >, Teuchos::RCP< std::vector< Teuchos::Tuple< double, 3 > > > > getLocalSideIdsAndCoords(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
PHX::MDField< ScalarT, panzer::Cell, panzer::IP > result
A field that will be used to build up the result of the integral we're performing.
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
basic_FancyOStream & setOutputToRootOnly(const int rootRank)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > getGlobalPairing(const std::vector< std::size_t > &locallyRequiredIds, const std::vector< std::pair< std::size_t, std::size_t > > &locallyMatchedIds, const STK_Interface &mesh, bool failure)
std::pair< Teuchos::RCP< std::vector< std::size_t > >, Teuchos::RCP< std::vector< Teuchos::Tuple< double, 3 > > > > getSideIdsAndCoords(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > getLocallyMatchedSideIds(const std::vector< std::size_t > &side_ids, const std::vector< Teuchos::Tuple< double, 3 > > &side_coords, const STK_Interface &mesh, const std::string &sideName, const Matcher &matcher, const std::string type_="coord")