Panzer_STK_PeriodicBC_Matcher_impl.hpp
// @HEADER
// ***********************************************************************
//
// Panzer: A partial differential equation assembly
// engine for strongly coupled complex multiphysics systems
// Copyright (2011) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roger P. Pawlowski (rppawlo@sandia.gov) and
// Eric C. Cyr (eccyr@sandia.gov)
// ***********************************************************************
// @HEADER

#include "Teuchos_Tuple.hpp"
#include "Teuchos_RCP.hpp"

#include "Panzer_STK_Version.hpp"
#include "PanzerAdaptersSTK_config.hpp"
#include "Panzer_STK_Interface.hpp"
#include "Panzer_STK_Utilities.hpp"

#include "Teuchos_FancyOStream.hpp"

#include <algorithm> // std::find (used below)
#include <map>       // std::map (used below)
#include <set>

namespace panzer_stk {
namespace periodic_helpers {

template <typename Matcher>
Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > >
matchPeriodicSides(const std::string & left,const std::string & right,
                   const STK_Interface & mesh,
                   const Matcher & matcher,
                   const std::vector<std::pair<std::size_t,std::size_t> > & ownedToMapped, const std::string type_)
{
  //
  // Overview:
  // A three step algorithm
  //    1. Figure out all nodes and their coordinates that live on the "left"
  //       - Distribute information globally
  //    2. Match the global nodes on the "left" to locally owned nodes on the "right"
  //       - only local work required
  //    3. If a processor requires a node on the left (if it is owned or ghosted)
  //       communicate matching conditions from the right boundary
  //
  // Note: The matching check could definitely be sped up with a sorting operation
  // Note: The communication could be done in a way that requires less global communication
  //       Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
  //
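  // (In the code below, step 1 corresponds to getSideIdsAndCoords, step 2 to
  //  getLocallyMatchedSideIds, and step 3 to getLocalSideIds followed by getGlobalPairing.)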

  using Teuchos::Tuple;
  using Teuchos::RCP;
  using Teuchos::rcp;

  // First step is to globally distribute all the node ids and coordinates
  // on the left hand side: requires All-2-All!
  std::pair<RCP<std::vector<std::size_t> >,
            RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;

  // Now using only local operations, find the right hand side nodes owned
  // by this processor and the matching ones on the left that were previously calculated
  Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > > locallyMatchedIds;

  bool failure = false;
  try {
    locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
  } catch(std::logic_error & e) {
    locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
    failure = true;

    Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
    out.setShowProcRank(true);
    out.setOutputToRootOnly(-1);

    out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
  }
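  // Note: if the local matching failed, locallyMatchedIds is left empty and the
  // failure flag is forwarded to getGlobalPairing below.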

  // Get the ids on the left required by this processor (they may be ghosted),
  // and using the matched ids computed above over all processors, find the
  // corresponding node on the right boundary.

  // THIS COMPLEX ALGORITHM COMPENSATES FOR THE PREVIOUS PAIRING BY UPDATING
  // THE REQUEST LIST. THERE ALSO IS A REQUIREMENT OF PER PROC UNIQUENESS IN THE EPETRA
  // IMPORT. SO THERE IS SOME ADDITIONAL WORK DONE TO REMOVE REPEATED ENTRIES
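  //
  // In short: ownedToMapped carries pairings produced by an earlier matching pass
  // (e.g. another periodic direction). Those pairings are substituted into this
  // processor's request list before the global pairing is formed, and the
  // reverseMap below records which owned IDs were redirected to each mapped ID
  // so that the final pairing can be expanded back to the original owned IDs.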

  // build reverse map
  std::map<std::size_t,std::vector<std::size_t> > reverseMap;
  for(std::size_t i=0;i<ownedToMapped.size();i++)
    reverseMap[ownedToMapped[i].second].push_back(ownedToMapped[i].first);

  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
  std::vector<std::size_t> saved_locallyRequiredIds = *locallyRequiredIds; // will be required
                                                                           // to check owner/ghostship
                                                                           // of IDs
  std::vector<std::pair<std::size_t,std::size_t> > unusedOwnedToMapped;

  // apply previous mappings to this set of local IDs
  for(std::size_t i=0;i<ownedToMapped.size();i++) {
    std::size_t owned = ownedToMapped[i].first;
    std::size_t mapped = ownedToMapped[i].second;

    std::vector<std::size_t>::iterator itr
      = std::find(locallyRequiredIds->begin(),locallyRequiredIds->end(),owned);

    if(itr!=locallyRequiredIds->end())
      *itr = mapped;
    else
      unusedOwnedToMapped.push_back(ownedToMapped[i]);
  }

  // build a unique vector of locally required IDs
  std::vector<std::size_t> unique_locallyRequiredIds;
  {
    std::set<std::size_t> s;
    s.insert(locallyRequiredIds->begin(),locallyRequiredIds->end());
    unique_locallyRequiredIds.insert(unique_locallyRequiredIds.begin(),s.begin(),s.end());
  }

  // next line requires communication
  Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > > globallyMatchedIds
    = getGlobalPairing(unique_locallyRequiredIds,*locallyMatchedIds,mesh,failure);

  // this nasty bit of code guarantees that only owned/ghosted IDs will
  // end up in the globallyMatchedIds vector. It uses a search over the
  // locally required (owned or ghosted) IDs saved above to discard pairs
  // this processor does not actually need. This is a result of (and fix for)
  // some of the complexity incurred by the reverseMap above.
  {
    // fill up set with current globally matched ids (not necessarily owned/ghosted)
    std::set<std::pair<std::size_t,std::size_t> > gmi_set;
    gmi_set.insert(globallyMatchedIds->begin(),globallyMatchedIds->end());

    // for each globally matched ID, update IDs from the previous
    // run (i.e. from ownedToMapped) using the reverseMap
    for(std::size_t i=0;i<globallyMatchedIds->size();i++) {
      std::pair<std::size_t,std::size_t> pair = (*globallyMatchedIds)[i];
      const std::vector<std::size_t> & others = reverseMap[pair.first];

      // add in reverse map (note others[j] is guaranteed to be local to this processor
      // if it was when ownedToMapped was passed in)
      for(std::size_t j=0;j<others.size();j++)
        gmi_set.insert(std::make_pair(others[j],pair.second));

      // remove ids that are not ghosted/owned by this processor
      if(std::find(saved_locallyRequiredIds.begin(),
                   saved_locallyRequiredIds.end(),
                   pair.first)==saved_locallyRequiredIds.end()) {
        gmi_set.erase(pair);
      }
    }

    // clear old data, and populate with new matched ids
    globallyMatchedIds->clear();
    globallyMatchedIds->insert(globallyMatchedIds->begin(),gmi_set.begin(),gmi_set.end());
  }

  // now you have a list of pairs that maps ids on the left required by this processor
  // to ids on the right
  globallyMatchedIds->insert(globallyMatchedIds->end(),unusedOwnedToMapped.begin(),unusedOwnedToMapped.end());

  return globallyMatchedIds;
}

template <typename Matcher>
Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > >
matchPeriodicSides(const std::string & left,const std::string & right,
                   const STK_Interface & mesh,
                   const Matcher & matcher, const std::string type_)
{
  //
  // Overview:
  // A three step algorithm
  //    1. Figure out all nodes and their coordinates that live on the "left"
  //       - Distribute information globally
  //    2. Match the global nodes on the "left" to locally owned nodes on the "right"
  //       - only local work required
  //    3. If a processor requires a node on the left (if it is owned or ghosted)
  //       communicate matching conditions from the right boundary
  //
  // Note: The matching check could definitely be sped up with a sorting operation
  // Note: The communication could be done in a way that requires less global communication
  //       Essentially doing step one in a Many-2-Many way as opposed to an All-2-All
  //

  using Teuchos::Tuple;
  using Teuchos::RCP;
  using Teuchos::rcp;

  // First step is to globally distribute all the node ids and coordinates
  // on the left hand side: requires All-2-All!
  std::pair<RCP<std::vector<std::size_t> >,
            RCP<std::vector<Tuple<double,3> > > > idsAndCoords = getSideIdsAndCoords(mesh,left,type_);
  std::vector<std::size_t> & sideIds = *idsAndCoords.first;
  std::vector<Tuple<double,3> > & sideCoords = *idsAndCoords.second;

  // Now using only local operations, find the right hand side nodes owned
  // by this processor and the matching ones on the left that were previously calculated
  Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > > locallyMatchedIds;

  bool failure = false;
  try {
    locallyMatchedIds = getLocallyMatchedSideIds(sideIds,sideCoords,mesh,right,matcher,type_);
  } catch(std::logic_error & e) {
    locallyMatchedIds = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);
    failure = true;

    Teuchos::FancyOStream out(Teuchos::rcpFromRef(std::cout));
    out.setShowProcRank(true);
    out.setOutputToRootOnly(-1);

    out << "Not all sides matched expect failure: \n" << e.what() << std::endl;
  }

  // Get the ids on the left required by this processor (they may be ghosted),
  // and using the matched ids computed above over all processors, find the
  // corresponding node on the right boundary.

  // next line requires communication
  Teuchos::RCP<std::vector<std::size_t> > locallyRequiredIds = getLocalSideIds(mesh,left,type_);
  Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > > globallyMatchedIds
    = getGlobalPairing(*locallyRequiredIds,*locallyMatchedIds,mesh,failure);

  // now you have a list of pairs that maps ids on the left required by this processor
  // to ids on the right

  return globallyMatchedIds;
}

template <typename Matcher>
Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > >
getLocallyMatchedSideIds(const std::vector<std::size_t> & side_ids,
                         const std::vector<Teuchos::Tuple<double,3> > & side_coords,
                         const panzer_stk::STK_Interface & mesh,
                         const std::string & sideName,const Matcher & matcher, std::string type_)
{
  using Teuchos::RCP;
  using Teuchos::Tuple;

  RCP<std::vector<std::pair<std::size_t,std::size_t> > > result
    = Teuchos::rcp(new std::vector<std::pair<std::size_t,std::size_t> >);

  // grab local IDs and coordinates on this side

  std::pair<Teuchos::RCP<std::vector<std::size_t> >,
            Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > > sidePair =
    getLocalSideIdsAndCoords(mesh,sideName,type_);

  std::vector<std::size_t> & local_side_ids = *sidePair.first;
  std::vector<Teuchos::Tuple<double,3> > & local_side_coords = *sidePair.second;

  bool checkProb = false;
  std::vector<bool> side_flags(side_ids.size(),false);

  // do a slow search for the coordinates: this _can_ be sped
  // up! (consider searches in sorted ranges using the sorted_permutation function)
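  //
  // For every node on this processor's copy of the side, scan the globally
  // distributed list for a coordinate the matcher accepts. side_flags marks
  // global nodes that have already been claimed so that a double match on the
  // same processor is detected and reported via checkProb.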
  for(std::size_t localNode=0;localNode<local_side_ids.size();localNode++) {
    std::size_t local_gid = local_side_ids[localNode];
    const Tuple<double,3> & local_coord = local_side_coords[localNode];

    // loop over globally distributed coordinates and find a match
    for(std::size_t globalNode=0;globalNode<side_ids.size();globalNode++) {
      std::size_t global_gid = side_ids[globalNode];
      const Tuple<double,3> & global_coord = side_coords[globalNode];

      if(matcher(global_coord,local_coord)) {
        if(side_flags[globalNode]) // has this node been matched by this
          checkProb = true;        //    processor?

        result->push_back(std::make_pair(global_gid,local_gid));
        side_flags[globalNode] = true;
        continue;
      }
    }
  }

  // make sure you matched everything you can: If this throws...it can
  // cause the process to hang!
  TEUCHOS_TEST_FOR_EXCEPTION(checkProb,std::logic_error,
                             "getLocallyMatchedSideIds: checkProb failed");

  TEUCHOS_TEST_FOR_EXCEPTION(local_side_ids.size()!=result->size(),std::logic_error,
                             "getLocallyMatchedSideIds: not all local side ids are satisfied!");

  return result;
}

} // end periodic_helpers
} // end panzer_stk
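
The Matcher template parameter above only needs to be callable as matcher(left_coord, right_coord) with two Teuchos::Tuple<double,3> coordinates, returning true when the two points should be identified across the periodic boundary. The sketch below is illustrative only and is not part of this file: the struct ExampleXPeriodicMatcher, the driver exampleMatchLeftRight, the sideset names "left" and "right", the unit domain width, the tolerance, and the header name Panzer_STK_PeriodicBC_Matcher.hpp are assumptions made for the example; in real code Panzer's own matcher classes should be preferred.

// Illustrative sketch (not part of Panzer): a minimal coordinate matcher and a
// call to matchPeriodicSides under the assumptions stated above.
#include <cmath>   // std::fabs
#include <string>
#include <utility>
#include <vector>

#include "Teuchos_RCP.hpp"
#include "Teuchos_Tuple.hpp"
#include "Panzer_STK_Interface.hpp"
#include "Panzer_STK_PeriodicBC_Matcher.hpp" // assumed header declaring matchPeriodicSides

// Hypothetical matcher: two points are periodic images if their y and z
// coordinates agree and their x coordinates differ by the domain width.
struct ExampleXPeriodicMatcher {
  double width_; // assumed distance between the "left" and "right" sides
  double tol_;   // matching tolerance

  explicit ExampleXPeriodicMatcher(double width, double tol = 1e-8)
    : width_(width), tol_(tol) {}

  bool operator()(const Teuchos::Tuple<double,3> & a,
                  const Teuchos::Tuple<double,3> & b) const
  {
    return std::fabs(std::fabs(a[0]-b[0]) - width_) < tol_ &&
           std::fabs(a[1]-b[1]) < tol_ &&
           std::fabs(a[2]-b[2]) < tol_;
  }
};

// Hypothetical driver: given an already constructed STK_Interface with sidesets
// named "left" and "right", return the (left id, right id) node pairing.
Teuchos::RCP<std::vector<std::pair<std::size_t,std::size_t> > >
exampleMatchLeftRight(const panzer_stk::STK_Interface & mesh)
{
  ExampleXPeriodicMatcher matcher(1.0); // assumes a unit-width domain
  return panzer_stk::periodic_helpers::matchPeriodicSides("left","right",
                                                          mesh,matcher,"coord");
}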