Panzer  Version of the Day
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Panzer_STK_PeriodicBC_Matcher_impl.hpp
Go to the documentation of this file.
1 // @HEADER
2 // ***********************************************************************
3 //
4 // Panzer: A partial differential equation assembly
5 // engine for strongly coupled complex multiphysics systems
6 // Copyright (2011) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact Roger P. Pawlowski (rppawlo@sandia.gov) and
39 // Eric C. Cyr (eccyr@sandia.gov)
40 // ***********************************************************************
41 // @HEADER
42 
43 #include "Teuchos_Tuple.hpp"
44 #include "Teuchos_RCP.hpp"
45 
46 #include "Panzer_STK_Version.hpp"
47 #include "PanzerAdaptersSTK_config.hpp"
48 #include "Panzer_STK_Interface.hpp"
49 
50 #include "Teuchos_FancyOStream.hpp"
51 
52 #include <set>
53 
54 namespace panzer_stk {
55 namespace periodic_helpers {
56 
57 template <typename Matcher>
59 matchPeriodicSides(const std::string & left,const std::string & right,
60  const STK_Interface & mesh,
61  const Matcher & matcher,
62  const std::vector<std::pair<std::size_t,std::size_t> > & ownedToMapped, const std::string type_)
63 {
64  //
65  // Overview:
66  // A three step algorithm
67  // 1. Figure out all nodes and their coordinates that live on the "left"
68  // - Distribute information globally
69  // 2. Match the global nodes on the "left" to locally owned nodes on the "right"
70  // - only local work required
71  // 3. If a processor requires a node on the left (if it is owned or ghosted)
72  // communicate matching conditions from the right boundary
73  //
74  // Note: The matching check could definitely be sped up with a sorting operation
75  // Note: The communication could be done in a way that requires less global communication
76  // Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
77  //
78 
79  using Teuchos::Tuple;
80  using Teuchos::RCP;
81  using Teuchos::rcp;
82 
83  // First step is to globally distribute all the node ids and coordinates
84  // on the left hand side: requires All-2-All!
86  std::pair<RCP<std::vector<std::size_t> >,
87  RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
88  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
89  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;
90 
91  // Now using only local operations, find the right hand side nodes owned
92  // by this processor and the matching ones on the left that were previously calculated
95 
96  bool failure = false;
97  try {
98  locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
99  } catch(std::logic_error & e) {
100  locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
101  failure = true;
102 
103  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
104  out.setShowProcRank(true);
105  out.setOutputToRootOnly(-1);
106 
107  out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
108  }
109 
110  // Get the ids on the left required by this processor (they maybe ghosted),
111  // and using the matched ids computed above over all processors, find the
112  // corrsponding node on the right boundary.
114 
115  // THIS COMPLEX ALGORITHM COMPENSATES FOR THE PREVIOUS PAIRING BY UPDATING
116  // THE REQUEST LIST. THERE ALSO IS A REQUIREMENT OF PER PROC UNIQUENESS IN THE EPETRA
117  // IMPORT. SO THERE IS SOME ADDITIONAL WORK DONE TO REMOVE REPEATED ENTRIES
118 
119  // build reverse map
120  std::map<std::size_t,std::vector<std::size_t> > reverseMap;
121  for(std::size_t i=0;i<ownedToMapped.size();i++)
122  reverseMap[ownedToMapped[i].second].push_back(ownedToMapped[i].first);
123 
124  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
125  std::vector<std::size_t> saved_locallyRequiredIds = *locallyRequiredIds; // will be required
126  // to check owner/ghostship
127  // of IDs
128  std::vector<std::pair<std::size_t,std::size_t> > unusedOwnedToMapped;
129 
130  // apply previous mappings to this set of local IDs
131  for(std::size_t i=0;i<ownedToMapped.size();i++) {
132  std::size_t owned = ownedToMapped[i].first;
133  std::size_t mapped = ownedToMapped[i].second;
134 
135  std::vector<std::size_t>::iterator itr
136  = std::find(locallyRequiredIds->begin(),locallyRequiredIds->end(),owned);
137 
138  // if found, replace the local ID with the previously matched ID
139  // this means the locallyRequiredIds may now include IDs owned by a different processor
140  if(itr!=locallyRequiredIds->end())
141  *itr = mapped;
142  else
143  unusedOwnedToMapped.push_back(ownedToMapped[i]);
144  }
145 
146  // build a unique vector of locally matched IDs
147  std::vector<std::size_t> unique_locallyRequiredIds;
148  {
149  std::set<std::size_t> s;
150  s.insert(locallyRequiredIds->begin(),locallyRequiredIds->end());
151  unique_locallyRequiredIds.insert(unique_locallyRequiredIds.begin(),s.begin(),s.end());
152  }
153 
154  // next line requires communication
156  = getGlobalPairing(unique_locallyRequiredIds,*locallyMatchedIds,mesh,failure);
157 
158  // this nasty bit of code gurantees that only owned/ghosted IDs will
159  // end up in the globallyMatchedIds vector. It uses a search on
160  // only locally owned IDs to make sure that they are locally owned.
161  // this is a result (and fix) for some of the complexity incurred by
162  // the reverseMap above.
163  {
164  // fill up set with current globally matched ids (not neccessarily owned/ghosted)
165  std::set<std::pair<std::size_t,std::size_t> > gmi_set;
166  gmi_set.insert(globallyMatchedIds->begin(),globallyMatchedIds->end());
167 
168  // for each globally matched ID, update IDs from the previous
169  // run (i.e. from ownedToMapped) using the reverseMap
170  for(std::size_t i=0;i<globallyMatchedIds->size();i++) {
171  std::pair<std::size_t,std::size_t> pair = (*globallyMatchedIds)[i];
172  const std::vector<std::size_t> & others = reverseMap[pair.first];
173 
174  // add in reverse map (note other[j] is guranteed to be local to this processor
175  // if it was when ownedToMapped was passed in)
176  for(std::size_t j=0;j<others.size();j++)
177  gmi_set.insert(std::make_pair(others[j],pair.second));
178 
179  // remove ids that are not ghosted/owned by this processor
180  if(std::find(saved_locallyRequiredIds.begin(),
181  saved_locallyRequiredIds.end(),
182  pair.first)==saved_locallyRequiredIds.end()) {
183  gmi_set.erase(pair);
184  }
185  }
186 
187  // clear old data, and populate with new matched ids
188  globallyMatchedIds->clear();
189  globallyMatchedIds->insert(globallyMatchedIds->begin(),gmi_set.begin(),gmi_set.end());
190  }
191 
192  // now you have a pair of ids that maps ids on the left required by this processor
193  // to ids on the right
194  globallyMatchedIds->insert(globallyMatchedIds->end(),unusedOwnedToMapped.begin(),unusedOwnedToMapped.end());
195 
196  return globallyMatchedIds;
197 }
198 
199 template <typename Matcher>
201 matchPeriodicSides(const std::string & left,const std::string & right,
202  const STK_Interface & mesh,
203  const Matcher & matcher, const std::string type_)
204 {
205  //
206  // Overview:
207  // A three step algorithm
208  // 1. Figure out all nodes and their coordinates that live on the "left"
209  // - Distribute information globally
210  // 2. Match the global nodes on the "left" to locally owned nodes on the "right"
211  // - only local work required
212  // 3. If a processor requires a node on the left (if it is owned or ghosted)
213  // communicate matching conditions from the right boundary
214  //
215  // Note: The matching check could definitely be spead up with a sorting operation
216  // Note: The communication could be done in a way that requires less global communication
217  // Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
218  //
219 
220  using Teuchos::Tuple;
221  using Teuchos::RCP;
222  using Teuchos::rcp;
223 
224  // First step is to globally distribute all the node ids and coordinates
225  // on the left hand side: requires All-2-All!
227  std::pair<RCP<std::vector<std::size_t> >,
228  RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
229  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
230  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;
231 
232  // Now using only local operations, find the right hand side nodes owned
233  // by this processor and the matching ones on the left that were previously calculated
236 
237  bool failure = false;
238  try {
239  locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
240  } catch(std::logic_error & e) {
241  locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
242  failure = true;
243 
244  Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
245  out.setShowProcRank(true);
246  out.setOutputToRootOnly(-1);
247 
248  out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
249  }
250 
251  // Get the ids on the left required by this processor (they maybe ghosted),
252  // and using the matched ids computed above over all processors, find the
253  // corrsponding node on the right boundary.
255 
256  // next line requires communication
257  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
259  = getGlobalPairing(*locallyRequiredIds,*locallyMatchedIds,mesh,failure);
260 
261  // now you have a pair of ids that maps ids on the left required by this processor
262  // to ids on the right
263 
264  return globallyMatchedIds;
265 }
266 
267 template <typename Matcher>
269 getLocallyMatchedSideIds(const std::vector<std::size_t> & side_ids,
270  const std::vector<Teuchos::Tuple<double,3> > & side_coords,
271  const panzer_stk::STK_Interface & mesh,
272  const std::string & sideName,const Matcher & matcher, std::string type_)
273 {
274  using Teuchos::RCP;
275  using Teuchos::Tuple;
276 
278  = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
279 
280  // grab local IDs and coordinates on this side
282 
283  std::pair<Teuchos::RCP<std::vector<std::size_t> >,
285  getLocalSideIdsAndCoords(mesh,sideName,type_);
286 
287  std::vector<std::size_t> & local_side_ids = *sidePair.first;
288  std::vector<Teuchos::Tuple<double,3> > & local_side_coords = *sidePair.second;
289 
290  bool checkProb = false;
291  std::vector<bool> side_flags(side_ids.size(),false);
292 
293  // do a slow search for the coordinates: this _can_ be sped
294  // up! (considered searches in sorted ranges using the sorted_permutation function)
296  for(std::size_t localNode=0;localNode<local_side_ids.size();localNode++) {
297  std::size_t local_gid = local_side_ids[localNode];
298  const Tuple<double,3> & local_coord = local_side_coords[localNode];
299 
300  // loop over globally distributed coordinates and find a match
301  for(std::size_t globalNode=0;globalNode<side_ids.size();globalNode++) {
302  std::size_t global_gid = side_ids[globalNode];
303  const Tuple<double,3> & global_coord = side_coords[globalNode];
304 
305  if(matcher(global_coord,local_coord)) {
306  if(side_flags[globalNode]) // has this node been matched by this
307  checkProb = true; // processor?
308 
309  result->push_back(std::make_pair(global_gid,local_gid));
310  side_flags[globalNode] = true;
311  continue;
312  }
313  }
314  }
315 
316  // make sure you matched everything you can: If this throws...it can
317  // cause the process to hang!
318  TEUCHOS_TEST_FOR_EXCEPTION(checkProb,std::logic_error,
319  "getLocallyMatchedSideIds: checkProb failed");
320 
321  TEUCHOS_TEST_FOR_EXCEPTION(local_side_ids.size()!=result->size(),std::logic_error,
322  "getLocallyMatchedSideIds: not all local side ids are satisfied!");
323 
324  return result;
325 }
326 
327 } // end periodic_helpers
328 } // end panzer_stk
Teuchos::RCP< std::vector< std::size_t > > getLocalSideIds(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > matchPeriodicSides(const std::string &left, const std::string &right, const STK_Interface &mesh, const Matcher &matcher, const std::string type_="coord")
basic_FancyOStream & setShowProcRank(const bool showProcRank)
#define TEUCHOS_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg)
std::pair< Teuchos::RCP< std::vector< std::size_t > >, Teuchos::RCP< std::vector< Teuchos::Tuple< double, 3 > > > > getLocalSideIdsAndCoords(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
PHX::MDField< ScalarT, panzer::Cell, panzer::IP > result
A field that will be used to build up the result of the integral we're performing.
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
basic_FancyOStream & setOutputToRootOnly(const int rootRank)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > getGlobalPairing(const std::vector< std::size_t > &locallyRequiredIds, const std::vector< std::pair< std::size_t, std::size_t > > &locallyMatchedIds, const STK_Interface &mesh, bool failure)
std::pair< Teuchos::RCP< std::vector< std::size_t > >, Teuchos::RCP< std::vector< Teuchos::Tuple< double, 3 > > > > getSideIdsAndCoords(const STK_Interface &mesh, const std::string &sideName, const std::string type_)
Teuchos::RCP< std::vector< std::pair< std::size_t, std::size_t > > > getLocallyMatchedSideIds(const std::vector< std::size_t > &side_ids, const std::vector< Teuchos::Tuple< double, 3 > > &side_coords, const STK_Interface &mesh, const std::string &sideName, const Matcher &matcher, const std::string type_="coord")