Panzer  Version of the Day
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Panzer_IntegrationValues2.cpp
Go to the documentation of this file.
1 // @HEADER
2 // ***********************************************************************
3 //
4 // Panzer: A partial differential equation assembly
5 // engine for strongly coupled complex multiphysics systems
6 // Copyright (2011) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact Roger P. Pawlowski (rppawlo@sandia.gov) and
39 // Eric C. Cyr (eccyr@sandia.gov)
40 // ***********************************************************************
41 // @HEADER
42 
44 #include "Panzer_UtilityAlgs.hpp"
45 
46 #include "Shards_CellTopology.hpp"
47 
48 #include "Kokkos_DynRankView.hpp"
49 #include "Intrepid2_FunctionSpaceTools.hpp"
50 #include "Intrepid2_RealSpaceTools.hpp"
51 #include "Intrepid2_CellTools.hpp"
52 #include "Intrepid2_ArrayTools.hpp"
53 #include "Intrepid2_CubatureControlVolume.hpp"
54 #include "Intrepid2_CubatureControlVolumeSide.hpp"
55 #include "Intrepid2_CubatureControlVolumeBoundary.hpp"
56 
58 #include "Panzer_Traits.hpp"
61 
62 // FIXME: There are some calls in Intrepid2 that require non-const arrays when they should be const - search for PHX::getNonConstDynRankViewFromConstMDField
63 #include "Phalanx_GetNonConstDynRankViewFromConstMDField.hpp"
64 
65 namespace panzer {
66 
67 namespace {
68 
70 getIntrepidCubature(const panzer::IntegrationRule & ir)
71 {
74 
75  Intrepid2::DefaultCubatureFactory cubature_factory;
76 
77  if(ir.getType() == ID::CV_SIDE){
78  ic = Teuchos::rcp(new Intrepid2::CubatureControlVolumeSide<PHX::Device::execution_space,double,double>(*ir.topology));
79  } else if(ir.getType() == ID::CV_VOLUME){
80  ic = Teuchos::rcp(new Intrepid2::CubatureControlVolume<PHX::Device::execution_space,double,double>(*ir.topology));
81  } else if(ir.getType() == ID::CV_BOUNDARY){
82  TEUCHOS_ASSERT(ir.isSide());
83  ic = Teuchos::rcp(new Intrepid2::CubatureControlVolumeBoundary<PHX::Device::execution_space,double,double>(*ir.topology,ir.getSide()));
84  } else if(ir.getType() == ID::VOLUME){
85  ic = cubature_factory.create<PHX::Device::execution_space,double,double>(*(ir.topology),ir.getOrder());
86  } else if(ir.getType() == ID::SIDE){
87  ic = cubature_factory.create<PHX::Device::execution_space,double,double>(*(ir.side_topology),ir.getOrder());
88  } else if(ir.getType() == ID::SURFACE){
89  // closed surface integrals don't exist in intrepid.
90  } else {
91  TEUCHOS_ASSERT(false);
92  }
93 
94  return ic;
95 }
96 
97 template<typename Scalar>
98 void
99 correctVirtualNormals(PHX::MDField<Scalar,Cell,IP,Dim> normals,
100  const int num_real_cells,
101  const int num_virtual_cells,
102  const shards::CellTopology & cell_topology,
103  const SubcellConnectivity & face_connectivity)
104 {
105  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::correctVirtualNormals()",corr_virt_norms);
106 
107  // What we want is for the adjoining face of the virtual cell to have normals that are the negated real cell's normals.
108  // we correct the normals here:
109  const int space_dim = cell_topology.getDimension();
110  const int faces_per_cell = cell_topology.getSubcellCount(space_dim-1);
111  const int points = normals.extent_int(1);
112  const int points_per_face = points / faces_per_cell;
113 
114  Kokkos::parallel_for(num_virtual_cells, KOKKOS_LAMBDA(const int & virtual_cell_ordinal){
115 
116  const int virtual_cell = virtual_cell_ordinal+num_real_cells;
117 
118  // Find the face and local face ids for the given virtual cell
119  // Note that virtual cells only connect to the owned cells through a single face
120  int face, virtual_lidx;
121  for (int local_face_id=0; local_face_id<faces_per_cell; local_face_id++){
122  // Faces that exist have positive indexes
123  face = face_connectivity.subcellForCell(virtual_cell, local_face_id);
124  if (face >= 0){
125  virtual_lidx = local_face_id;
126  break;
127  }
128  }
129 
130  // Indexes for real cell
131  const int real_side = (face_connectivity.cellForSubcell(face, 0) == virtual_cell) ? 1 : 0;
132  const int real_cell = face_connectivity.cellForSubcell(face,real_side);
133  const int real_lidx = face_connectivity.localSubcellForSubcell(face,real_side);
134 
135  // Make sure it is a real cell (it should actually be an owned cell)
136  KOKKOS_ASSERT(real_cell < num_real_cells);
137 
138  for(int point_ordinal=0; point_ordinal<points_per_face; point_ordinal++){
139 
140  // Point indexes for virtual and real point on face
141  const int virtual_cell_point = points_per_face * virtual_lidx + point_ordinal;
142  const int real_cell_point = points_per_face * real_lidx + point_ordinal;
143 
144  for (int d=0; d<space_dim; d++)
145  normals(virtual_cell,virtual_cell_point,d) = -normals(real_cell,real_cell_point,d);
146 
147  }
148 
149  // Clear other normals
150  for (int local_face_id=0; local_face_id<faces_per_cell; local_face_id++){
151 
152  if (local_face_id == virtual_lidx) continue;
153 
154  for (int point_ordinal=0; point_ordinal<points_per_face; point_ordinal++){
155  const int point = local_face_id * points_per_face + point_ordinal;
156  for (int dim=0; dim<space_dim; dim++)
157  normals(virtual_cell,point,dim) = 0.0;
158  }
159  }
160  });
161  PHX::Device::execution_space().fence();
162 }
163 
164 
165 template<typename Scalar>
166 void
167 correctVirtualRotationMatrices(PHX::MDField<Scalar,Cell,IP,Dim,Dim> rotation_matrices,
168  const int num_real_cells,
169  const int num_virtual_cells,
170  const shards::CellTopology & cell_topology,
171  const SubcellConnectivity & face_connectivity)
172 {
173  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::correctVirtualRotationMatrices()",corr_virt_rotmat);
174 
175  // What we want is for the adjoining face of the virtual cell to have normals that are the negated real cell's normals.
176  // we correct the normals here:
177  const int space_dim = cell_topology.getDimension();
178  const int faces_per_cell = cell_topology.getSubcellCount(space_dim-1);
179  const int points = rotation_matrices.extent_int(1);
180  const int points_per_face = points / faces_per_cell;
181 
182  Kokkos::parallel_for(num_virtual_cells, KOKKOS_LAMBDA(const int & virtual_cell_ordinal){
183 
184  const int virtual_cell = virtual_cell_ordinal+num_real_cells;
185 
186  // Find the face and local face ids for the given virtual cell
187  // Note that virtual cells only connect to the owned cells through a single face
188  int face, virtual_lidx;
189  for (int local_face_id=0; local_face_id<faces_per_cell; local_face_id++){
190  // Faces that exist have positive indexes
191  face = face_connectivity.subcellForCell(virtual_cell, local_face_id);
192  if (face >= 0){
193  virtual_lidx = local_face_id;
194  break;
195  }
196  }
197 
198  // The normals already have the correction applied, so we just need to zero out the rotation matrices on the other faces
199 
200  // Clear other rotation matrices
201  for (int local_face_id=0; local_face_id<faces_per_cell; local_face_id++){
202 
203  if (local_face_id == virtual_lidx) continue;
204 
205  for (int point_ordinal=0; point_ordinal<points_per_face; point_ordinal++){
206  const int point = local_face_id * points_per_face + point_ordinal;
207  for (int dim=0; dim<3; dim++)
208  for (int dim2=0; dim2<3; dim2++)
209  rotation_matrices(virtual_cell,point,dim,dim2) = 0.0;
210  }
211  }
212  });
213  PHX::Device::execution_space().fence();
214 }
215 
216 template<typename Scalar>
217 void
218 applyBasePermutation(PHX::MDField<Scalar,IP> field,
219  PHX::MDField<const int,Cell,IP> permutations)
220 {
221  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::applyBasePermutation(rank 1)",app_base_perm_r1);
222  MDFieldArrayFactory af("",true);
223 
224  const int num_ip = field.extent(0);
225 
226  auto scratch = af.template buildStaticArray<Scalar,IP>("scratch", num_ip);
227  scratch.deep_copy(field);
228  Kokkos::parallel_for(1, KOKKOS_LAMBDA(const int & ){
229  for(int ip=0; ip<num_ip; ++ip)
230  if (ip != permutations(0,ip))
231  field(ip) = scratch(permutations(0,ip));
232  });
233  PHX::Device::execution_space().fence();
234 }
235 
236 template<typename Scalar>
237 void
238 applyBasePermutation(PHX::MDField<Scalar,IP,Dim> field,
239  PHX::MDField<const int,Cell,IP> permutations)
240 {
241  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::applyBasePermutation(rank 2)",app_base_perm_r2);
242  MDFieldArrayFactory af("",true);
243 
244  const int num_ip = field.extent(0);
245  const int num_dim = field.extent(1);
246 
247  auto scratch = af.template buildStaticArray<Scalar,IP,Dim>("scratch", num_ip,num_dim);
248  scratch.deep_copy(field);
249  Kokkos::parallel_for(1, KOKKOS_LAMBDA(const int & ){
250  for(int ip=0; ip<num_ip; ++ip)
251  if (ip != permutations(0,ip))
252  for(int dim=0; dim<num_dim; ++dim)
253  field(ip,dim) = scratch(permutations(0,ip),dim);
254  });
255  PHX::Device::execution_space().fence();
256 }
257 
258 template<typename Scalar>
259 void
260 applyPermutation(PHX::MDField<Scalar,Cell,IP> field,
261  PHX::MDField<const int,Cell,IP> permutations)
262 {
263  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::applyPermutation(rank 2)",app_perm_r2);
264  MDFieldArrayFactory af("",true);
265 
266  const int num_cells = field.extent(0);
267  const int num_ip = field.extent(1);
268 
269  auto scratch = af.template buildStaticArray<Scalar,Cell,IP>("scratch", num_cells, num_ip);
270  scratch.deep_copy(field);
271  Kokkos::parallel_for(num_cells, KOKKOS_LAMBDA(const int & cell){
272  for(int ip=0; ip<num_ip; ++ip)
273  if (ip != permutations(cell,ip))
274  field(cell,ip) = scratch(cell,permutations(cell,ip));
275  });
276  PHX::Device::execution_space().fence();
277 }
278 
279 template<typename Scalar>
280 void
281 applyPermutation(PHX::MDField<Scalar,Cell,IP,Dim> field,
282  PHX::MDField<const int,Cell,IP> permutations)
283 {
284  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::applyPermutation(rank 3)",app_perm_r3);
285  MDFieldArrayFactory af("",true);
286 
287  const int num_cells = field.extent(0);
288  const int num_ip = field.extent(1);
289  const int num_dim = field.extent(2);
290 
291  auto scratch = af.template buildStaticArray<Scalar,Cell,IP,Dim>("scratch", num_cells, num_ip, num_dim);
292  scratch.deep_copy(field);
293  Kokkos::parallel_for(num_cells, KOKKOS_LAMBDA(const int & cell){
294  for(int ip=0; ip<num_ip; ++ip)
295  if (ip != permutations(cell,ip))
296  for(int dim=0; dim<num_dim; ++dim)
297  field(cell,ip,dim) = scratch(cell,permutations(cell,ip),dim);
298  });
299  PHX::Device::execution_space().fence();
300 }
301 
302 template<typename Scalar>
303 void
304 applyPermutation(PHX::MDField<Scalar,Cell,IP,Dim,Dim> field,
305  PHX::MDField<const int,Cell,IP> permutations)
306 {
307  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::applyPermutation(rank 4)",app_perm_r4);
308  MDFieldArrayFactory af("",true);
309 
310  const int num_cells = field.extent(0);
311  const int num_ip = field.extent(1);
312  const int num_dim = field.extent(2);
313  const int num_dim2 = field.extent(3);
314 
315  auto scratch = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("scratch", num_cells, num_ip, num_dim, num_dim2);
316  scratch.deep_copy(field);
317  Kokkos::parallel_for(num_cells, KOKKOS_LAMBDA(const int & cell){
318  for(int ip=0; ip<num_ip; ++ip)
319  if (ip != permutations(cell,ip))
320  for(int dim=0; dim<num_dim; ++dim)
321  for(int dim2=0; dim2<num_dim2; ++dim2)
322  field(cell,ip,dim,dim2) = scratch(cell,permutations(cell,ip),dim,dim2);
323  });
324  PHX::Device::execution_space().fence();
325 }
326 
327 
328 // Find the permutation that maps the set of points coords to other_coords. To
329 // avoid possible finite precision issues, == is not used, but rather
330 // min(norm(.)).
331 template <typename Scalar>
333 generatePermutations(const int num_cells,
336 {
337  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::generatePermutations()",gen_perms);
338 
339  const int num_ip = coords.extent(1);
340  const int num_dim = coords.extent(2);
341 
342  MDFieldArrayFactory af("",true);
343 
344  // This will store the permutations to go from coords to other_coords
345  auto permutation = af.template buildStaticArray<int,Cell,IP>("permutation", num_cells, num_ip);
346  permutation.deep_copy(0);
347 
348  // This is scratch space - there is likely a better way to do this
349  auto taken = af.template buildStaticArray<int,Cell,IP>("taken", num_cells, num_ip);
350  taken.deep_copy(0);
351 
352  Kokkos::parallel_for(num_cells, KOKKOS_LAMBDA(const int & cell){
353 
354  for (int ip = 0; ip < num_ip; ++ip) {
355  // Find an other point to associate with ip.
356  int i_min = 0;
357  Scalar d_min = -1;
358  for (int other_ip = 0; other_ip < num_ip; ++other_ip) {
359  // For speed, skip other points that are already associated.
360  if(taken(cell,other_ip)) continue;
361  // Compute the distance between the two points.
362  Scalar d(0);
363  for (int dim = 0; dim < num_dim; ++dim) {
364  const Scalar diff = coords(cell, ip, dim) - other_coords(cell, other_ip, dim);
365  d += diff*diff;
366  }
367  if (d_min < 0 || d < d_min) {
368  d_min = d;
369  i_min = other_ip;
370  }
371  }
372  // Record the match.
373  permutation(cell,ip) = i_min;
374  // This point is now taken.
375  taken(cell,i_min) = 1;
376  }
377  });
378  PHX::Device::execution_space().fence();
379 
380  return permutation;
381 
382 }
383 
384 template <typename Scalar>
386 generateSurfacePermutations(const int num_cells,
387  const SubcellConnectivity face_connectivity,
389  PHX::MDField<const Scalar,Cell,IP,Dim,Dim> surface_rotation_matrices)
390 
391 {
392  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::generateSurfacePermutations()",gen_surf_perms);
393 
394  // The challenge for this call is handling wedge-based periodic boundaries
395  // We need to make sure that we can align points along faces that are rotated with respect to one another.
396  // Below we will see an algorithm that rotates two boundaries into a shared frame, then figures out
397  // how to permute the points on one face to line up with the other.
398 
399  const int num_points = surface_points.extent_int(1);
400  const int num_faces_per_cell = face_connectivity.numSubcellsOnCellHost(0);
401  const int num_points_per_face = num_points / num_faces_per_cell;
402  const int cell_dim = surface_points.extent(2);
403 
404  MDFieldArrayFactory af("",true);
405 
406  // This will store the permutations
407  auto permutation = af.template buildStaticArray<int,Cell,IP>("permutation", num_cells, num_points);
408  permutation.deep_copy(0);
409 
410  // Fill permutations with trivial values (i.e. no permutation - this will get overwritten for some cells)
411  Kokkos::parallel_for(num_cells,KOKKOS_LAMBDA (const int & cell) {
412  for(int point=0; point<num_points; ++point)
413  permutation(cell,point) = point;
414  });
415 
416  // Now we need to align the cubature points for each face
417  // If there is only one point there is no need to align things
418  if(num_points_per_face != 1) {
419  // To enforce that quadrature points on faces are aligned properly we will iterate through faces,
420  // map points to a plane shared by the faces, then re-order quadrature points on the "1" face to
421  // line up with the "0" face
422 
423  // Utility calls
424 #define PANZER_DOT(a,b) (a[0]*b[0] + a[1]*b[1] + a[2]*b[2])
425 #define PANZER_CROSS(a,b,c) {c[0] = a[1]*b[2] - a[2]*b[1]; c[1] = a[2]*b[0] - a[0]*b[2]; c[2] = a[0]*b[1] - a[1]*b[0];}
426 
427  // Iterate through faces
428  Kokkos::parallel_for("face iteration",face_connectivity.numSubcells(),KOKKOS_LAMBDA (const int face) {
429  // Cells for sides 0 and 1
430  const int cell_0 = face_connectivity.cellForSubcell(face,0);
431  const int cell_1 = face_connectivity.cellForSubcell(face,1);
432 
433  // If this face doesn't connect to anything we don't need to worry about alignment
434  if(cell_1 < 0)
435  return;
436 
437  // Local face index for sides 0 and 1
438  const int lidx_0 = face_connectivity.localSubcellForSubcell(face,0);
439  const int lidx_1 = face_connectivity.localSubcellForSubcell(face,1);
440 
441  // If the cell exists, then the local face index needs to exist
442  KOKKOS_ASSERT(lidx_1 >= 0);
443 
444  // To compare points on planes, it is good to center the planes around the origin
445  // Find the face center for the left and right cell (may not be the same point - e.g. periodic conditions)
446  // We also define a length scale r2 which gives an order of magnitude approximation to the cell size squared
447  Scalar xc0[3] = {0}, xc1[3] = {0}, r2 = 0;
448  for(int face_point=0; face_point<num_points_per_face; ++face_point){
449  Scalar dx2 = 0.;
450  for(int dim=0; dim<cell_dim; ++dim){
451  xc0[dim] += surface_points(cell_0,lidx_0*num_points_per_face + face_point,dim);
452  xc1[dim] += surface_points(cell_1,lidx_1*num_points_per_face + face_point,dim);
453  const Scalar dx = surface_points(cell_0,lidx_0*num_points_per_face + face_point,dim) - surface_points(cell_0,lidx_0*num_points_per_face,dim);
454  dx2 += dx*dx;
455  }
456 
457  // Check if the distance squared between these two points is larger than the others (doesn't need to be perfect)
458  r2 = (r2 < dx2) ? dx2 : r2;
459  }
460  for(int dim=0; dim<cell_dim; ++dim){
461  xc0[dim] /= double(num_points_per_face);
462  xc1[dim] /= double(num_points_per_face);
463  }
464 
465  // TODO: This needs to be adaptable to having curved faces - for now this will work
466 
467  // We need to define a plane with two vectors (transverse and binormal)
468  // These will be used with the face centers to construct a local reference frame for aligning points
469 
470  // We use the first point on the face to define the normal (may break for curved faces)
471  const int example_point_0 = lidx_0*num_points_per_face;
472  const int example_point_1 = lidx_1*num_points_per_face;
473 
474  // Load the transverse and binormal for the 0 cell (default)
475  Scalar t[3] = {surface_rotation_matrices(cell_0,example_point_0,1,0), surface_rotation_matrices(cell_0,example_point_0,1,1), surface_rotation_matrices(cell_0,example_point_0,1,2)};
476  Scalar b[3] = {surface_rotation_matrices(cell_0,example_point_0,2,0), surface_rotation_matrices(cell_0,example_point_0,2,1), surface_rotation_matrices(cell_0,example_point_0,2,2)};
477 
478  // In case the faces are not antiparallel (e.g. periodic wedge), we may need to change the transverse and binormal
479  {
480 
481  // Get the normals for each face for constructing one of the plane vectors (transverse)
482  const Scalar n0[3] = {surface_rotation_matrices(cell_0,example_point_0,0,0), surface_rotation_matrices(cell_0,example_point_0,0,1), surface_rotation_matrices(cell_0,example_point_0,0,2)};
483  const Scalar n1[3] = {surface_rotation_matrices(cell_1,example_point_1,0,0), surface_rotation_matrices(cell_1,example_point_1,0,1), surface_rotation_matrices(cell_1,example_point_1,0,2)};
484 
485  // n_0*n_1 == -1 (antiparallel), n_0*n_1 == 1 (parallel - bad), |n_0*n_1| < 1 (other)
486  const Scalar n0_dot_n1 = PANZER_DOT(n0,n1);
487 
488  // FIXME: Currently virtual cells will set their surface normal along the same direction as the cell they "reflect"
489  // This causes a host of issues (e.g. identifying 180 degree periodic wedges), but we have to support virtual cells as a priority
490  // Therefore, we will just assume that the ordering is fine (not valid for 180 degree periodic wedges)
491  if(Kokkos::fabs(n0_dot_n1 - 1.) < 1.e-8)
492  return;
493 
494  // Rotated faces case (e.g. periodic wedge) we need to check for non-antiparallel face normals
495  if(Kokkos::fabs(n0_dot_n1 + 1.) > 1.e-2){
496 
497  // Now we need to define an arbitrary transverse and binormal in the plane across which the faces are anti-symmetric
498  // We can do this by setting t = n_0 \times n_1
499  PANZER_CROSS(n0,n1,t);
500 
501  // Normalize the transverse vector
502  const Scalar mag_t = Kokkos::sqrt(PANZER_DOT(t,t));
503  t[0] /= mag_t;
504  t[1] /= mag_t;
505  t[2] /= mag_t;
506 
507  // The binormal will be the sum of the normals (does not need to be a right handed system)
508  b[0] = n0[0] + n1[0];
509  b[1] = n0[1] + n1[1];
510  b[2] = n0[2] + n1[2];
511 
512  // Normalize the binormal vector
513  const Scalar mag_b = Kokkos::sqrt(PANZER_DOT(b,b));
514  b[0] /= mag_b;
515  b[1] /= mag_b;
516  b[2] /= mag_b;
517 
518  }
519  }
520 
521  // Now that we have a reference coordinate plane in which to align our points
522  // Point on the transverse/binormal plane
523  Scalar p0[2] = {0};
524  Scalar p1[2] = {0};
525 
526  // Differential position in mesh
527  Scalar x0[3] = {0};
528  Scalar x1[3] = {0};
529 
530  // Iterate through points in cell 1 and find which point they align with in cell 0
531  for(int face_point_1=0; face_point_1<num_points_per_face; ++face_point_1){
532 
533  // Get the point index in the 1 cell
534  const int point_1 = lidx_1*num_points_per_face + face_point_1;
535 
536  // Load position shifted by face center
537  for(int dim=0; dim<cell_dim; ++dim)
538  x1[dim] = surface_points(cell_1,point_1,dim) - xc1[dim];
539 
540  // Convert position to transverse/binormal plane
541  p1[0] = PANZER_DOT(x1,t);
542  p1[1] = PANZER_DOT(x1,b);
543 
544  // Compare to points on the other surface
545  for(int face_point_0=0; face_point_0<num_points_per_face; ++face_point_0){
546 
547  // Get the point index in the 0 cell
548  const int point_0 = lidx_0*num_points_per_face + face_point_0;
549 
550  // Load position shifted by face center
551  for(int dim=0; dim<cell_dim; ++dim)
552  x0[dim] = surface_points(cell_0,point_0,dim) - xc0[dim];
553 
554  // Convert position to transverse/binormal plane
555  p0[0] = PANZER_DOT(x0,t);
556  p0[1] = PANZER_DOT(x0,b);
557 
558  // Find the distance squared between p0 and p1
559  const Scalar p012 = (p0[0]-p1[0])*(p0[0]-p1[0]) + (p0[1]-p1[1])*(p0[1]-p1[1]);
560 
561  // TODO: Should this be a constant value, or should we just find the minimum point?
562  // If the distance, compared to the size of the cell, is small, we assume these are the same points
563  if(p012 / r2 < 1.e-12){
564  permutation(cell_1,lidx_1*num_points_per_face+face_point_0) = point_1;
565  break;
566  }
567 
568  }
569  }
570  });
571  PHX::Device::execution_space().fence();
572  }
573 
574 #undef PANZER_DOT
575 #undef PANZER_CROSS
576 
577  return permutation;
578 }
579 
580 } // end anonymous namespace
581 
582 //template<typename DataType>
583 //using UnmanagedDynRankView = Kokkos::DynRankView<DataType,typename PHX::DevLayout<DataType>::type,PHX::Device,Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
584 
585 template <typename Scalar>
587 IntegrationValues2(const std::string & pre,
588  const bool allocArrays):
589  num_cells_(0),
590  num_evaluate_cells_(0),
591  num_virtual_cells_(-1),
592  requires_permutation_(false),
593  alloc_arrays_(allocArrays),
594  prefix_(pre),
595  ddims_(1,0)
596 {
597  resetArrays();
598 }
599 
600 template <typename Scalar>
601 void
604 {
605  cub_points_evaluated_ = false;
606  side_cub_points_evaluated_ = false;
607  cub_weights_evaluated_ = false;
608  node_coordinates_evaluated_ = false;
609  jac_evaluated_ = false;
610  jac_inv_evaluated_ = false;
611  jac_det_evaluated_ = false;
612  weighted_measure_evaluated_ = false;
613  weighted_normals_evaluated_ = false;
614  surface_normals_evaluated_ = false;
615  surface_rotation_matrices_evaluated_ = false;
616  covarient_evaluated_ = false;
617  contravarient_evaluated_ = false;
618  norm_contravarient_evaluated_ = false;
619  ip_coordinates_evaluated_ = false;
620  ref_ip_coordinates_evaluated_ = false;
621 
622  // TODO: We need to clear the views
623 }
624 
625 template <typename Scalar>
628 {
629  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::setupArrays()",setup_arrays);
630 
631  MDFieldArrayFactory af(prefix_,alloc_arrays_);
632 
634 
635  int_rule = ir;
636 
637  const int num_nodes = ir->topology->getNodeCount();
638  const int num_cells = ir->workset_size;
639  const int num_space_dim = ir->topology->getDimension();
640 
641  // Specialize content if this is quadrature at a node
642  const bool is_node_rule = (num_space_dim==1) and ir->isSide();
643  if(not is_node_rule) {
644  TEUCHOS_ASSERT(ir->getType() != ID::NONE);
645  intrepid_cubature = getIntrepidCubature(*ir);
646  }
647 
648  const int num_ip = is_node_rule ? 1 : ir->num_points;
649 
650  cub_points = af.template buildStaticArray<Scalar,IP,Dim>("cub_points",num_ip, num_space_dim);
651 
652  if (ir->isSide() && ir->cv_type == "none")
653  side_cub_points = af.template buildStaticArray<Scalar,IP,Dim>("side_cub_points",num_ip,ir->side_topology->getDimension());
654 
655  cub_weights = af.template buildStaticArray<Scalar,IP>("cub_weights",num_ip);
656 
657  node_coordinates = af.template buildStaticArray<Scalar,Cell,BASIS,Dim>("node_coordinates",num_cells, num_nodes, num_space_dim);
658 
659  jac = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("jac",num_cells, num_ip, num_space_dim,num_space_dim);
660 
661  jac_inv = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("jac_inv",num_cells, num_ip, num_space_dim,num_space_dim);
662 
663  jac_det = af.template buildStaticArray<Scalar,Cell,IP>("jac_det",num_cells, num_ip);
664 
665  weighted_measure = af.template buildStaticArray<Scalar,Cell,IP>("weighted_measure",num_cells, num_ip);
666 
667  covarient = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("covarient",num_cells, num_ip, num_space_dim,num_space_dim);
668 
669  contravarient = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("contravarient",num_cells, num_ip, num_space_dim,num_space_dim);
670 
671  norm_contravarient = af.template buildStaticArray<Scalar,Cell,IP>("norm_contravarient",num_cells, num_ip);
672 
673  ip_coordinates = af.template buildStaticArray<Scalar,Cell,IP,Dim>("ip_coordiantes",num_cells, num_ip,num_space_dim);
674 
675  ref_ip_coordinates = af.template buildStaticArray<Scalar,Cell,IP,Dim>("ref_ip_coordinates",num_cells, num_ip,num_space_dim);
676 
677  weighted_normals = af.template buildStaticArray<Scalar,Cell,IP,Dim>("weighted_normal",num_cells,num_ip,num_space_dim);
678 
679  surface_normals = af.template buildStaticArray<Scalar,Cell,IP,Dim>("surface_normals",num_cells, num_ip,num_space_dim);
680 
681  surface_rotation_matrices = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("surface_rotation_matrices",num_cells, num_ip,3,3);
682 }
683 
684 
685 // ***********************************************************
686 // * Evaluation of values - NOT specialized
687 // ***********************************************************
688 template <typename Scalar>
691  const int in_num_cells,
692  const Teuchos::RCP<const SubcellConnectivity> & face_connectivity,
693  const int num_virtual_cells)
694 {
695  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::evaluateValues(with virtual cells)",eval_vals_with_virts);
696 
697  setup(int_rule, in_node_coordinates, in_num_cells);
698 
699  // Setup permutations (only required for surface integrators)
701  if((int_rule->getType() == ID::SURFACE) and (face_connectivity != Teuchos::null))
702  setupPermutations(face_connectivity, num_virtual_cells);
703 
704  // Evaluate everything once permutations are generated
705  evaluateEverything();
706 }
707 
708 template <typename Scalar>
709 void
712  const PHX::MDField<Scalar,Cell,IP,Dim>& other_ip_coordinates,
713  const int in_num_cells)
714 {
715  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::evaluateValues(no virtual cells)",eval_vals_no_virts);
716 
717  setup(int_rule, in_node_coordinates, in_num_cells);
718 
719  // Setup permutations
720  setupPermutations(other_ip_coordinates);
721 
722  // Evaluate everything once permutations are generated
723  evaluateEverything();
724 }
725 
726 template <typename Scalar>
727 void
730  const int num_virtual_cells)
731 {
732  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::setupPermutations(connectivity)",setup_perms_conn);
733 
734  TEUCHOS_ASSERT(not int_rule->isSide());
735  TEUCHOS_ASSERT(face_connectivity != Teuchos::null);
737  "IntegrationValues2::setupPermutations : Face connectivity based permutations are only required for surface integration schemes");
738  TEUCHOS_TEST_FOR_EXCEPT_MSG(num_virtual_cells_ >= 0,
739  "IntegrationValues2::setupPermutations : Number of virtual cells for surface permuations must be >= 0");
740  resetArrays();
741  side_connectivity_ = face_connectivity;
742  num_virtual_cells_ = num_virtual_cells;
743  requires_permutation_ = false;
744  permutations_ = generateSurfacePermutations<Scalar>(num_evaluate_cells_,*face_connectivity, getCubaturePoints(false,true), getSurfaceRotationMatrices(false,true));
745  requires_permutation_ = true;
746 }
747 
748 template <typename Scalar>
749 void
752 {
753  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::setupPermutations(other_coords)",setup_perms_coords);
754  resetArrays();
755  requires_permutation_ = false;
756  permutations_ = generatePermutations<Scalar>(num_evaluate_cells_, getCubaturePoints(false,true), other_ip_coordinates);
757  requires_permutation_ = true;
758 }
759 
760 
761 template <typename Scalar>
762 void
765  const PHX::MDField<Scalar,Cell,NODE,Dim> & cell_node_coordinates,
766  const int num_cells)
767 {
768  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::setup()",setup);
769 
770  // Clear arrays just in case we are rebuilding this object
771  resetArrays();
772 
773  num_cells_ = cell_node_coordinates.extent(0);
774  num_evaluate_cells_ = num_cells < 0 ? cell_node_coordinates.extent(0) : num_cells;
775  int_rule = ir;
776 
778  intrepid_cubature = getIntrepidCubature(*ir);
779 
780  // Copy node coordinates into owned allocation
781  {
782  MDFieldArrayFactory af(prefix_,true);
783 
784  const int num_space_dim = int_rule->topology->getDimension();
785  const int num_nodes = int_rule->topology->getNodeCount();
786  TEUCHOS_ASSERT(static_cast<int>(cell_node_coordinates.extent(1)) == num_nodes);
787 
788  auto aux = af.template buildStaticArray<Scalar,Cell,NODE,Dim>("node_coordinates",num_cells_,num_nodes, num_space_dim);
789  Kokkos::MDRangePolicy<PHX::Device,Kokkos::Rank<3>> policy({0,0,0},{num_evaluate_cells_,num_nodes,num_space_dim});
790  Kokkos::parallel_for(policy,KOKKOS_LAMBDA(const int & cell, const int & point, const int & dim){
791  aux(cell,point,dim) = cell_node_coordinates(cell,point,dim);
792  });
793  PHX::Device::execution_space().fence();
794  node_coordinates = aux;
795  }
796 
797 }
798 
799 template <typename Scalar>
803  const bool force,
804  const bool apply_permutation) const
805 {
806  if(cub_points_evaluated_ and not force)
807  return cub_points;
808 
809  // Only log time if values computed (i.e. don't log if values are already cached)
810  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getUniformCubaturePointsRef()",get_uniform_cub_pts_ref);
811 
812  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
813  MDFieldArrayFactory af(prefix_,true);
814 
815  int num_space_dim = int_rule->topology->getDimension();
816  int num_ip = int_rule->num_points;
817 
818  auto aux = af.template buildStaticArray<Scalar,IP,Dim>("cub_points",num_ip, num_space_dim);
819 
820  if (int_rule->isSide() && num_space_dim==1) {
821  std::cout << "WARNING: 0-D quadrature rule infrastructure does not exist!!! Will not be able to do "
822  << "non-natural integration rules.";
823  return aux;
824  }
825 
826  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->cv_type != "none",
827  "IntegrationValues2::getUniformCubaturePointsRef : Cannot build reference cubature points for control volume integration scheme.");
828 
830  "IntegrationValues2::getUniformCubaturePointsRef : Cannot build reference cubature points for surface integration scheme.");
831 
832  auto weights = af.template buildStaticArray<Scalar,IP>("cub_weights",num_ip);
833 
834  if (!int_rule->isSide())
835  intrepid_cubature->getCubature(aux.get_view(), weights.get_view());
836  else {
837  auto s_cub_points = af.template buildStaticArray<Scalar,IP,Dim>("side_cub_points",num_ip, num_space_dim-1);
838 
839  intrepid_cubature->getCubature(s_cub_points.get_view(), weights.get_view());
840  cell_tools.mapToReferenceSubcell(aux.get_view(), s_cub_points.get_view(), num_space_dim-1, int_rule->getSide(), *(int_rule->topology));
841  }
842 
843  PHX::Device::execution_space().fence();
844 
845  if(apply_permutation and requires_permutation_)
846  applyBasePermutation(aux, permutations_);
847 
848  if(cache){
849  cub_points = aux;
850  cub_points_evaluated_ = true;
851  }
852 
853  return aux;
854 
855 }
856 
857 template <typename Scalar>
861  const bool force,
862  const bool apply_permutation) const
863 {
864  if(side_cub_points_evaluated_ and not force)
865  return side_cub_points;
866 
867  // Only log time if values computed (i.e. don't log if values are already cached)
868  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getUniformSideCubaturePointsRef()",get_uniform_side_cub_pts_ref);
869 
870  MDFieldArrayFactory af(prefix_,true);
871 
872  int num_space_dim = int_rule->topology->getDimension();
873  int num_ip = int_rule->num_points;
874 
875  auto aux = af.template buildStaticArray<Scalar,IP,Dim>("side_cub_points",num_ip, num_space_dim-1);
876 
877  if (int_rule->isSide() && num_space_dim==1) {
878  std::cout << "WARNING: 0-D quadrature rule infrastructure does not exist!!! Will not be able to do "
879  << "non-natural integration rules.";
880  return aux;
881  }
882 
883  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->cv_type != "none",
884  "IntegrationValues2::getUniformSideCubaturePointsRef : Cannot build reference cubature points for control volume integration scheme.");
885 
887  "IntegrationValues2::getUniformSideCubaturePointsRef : Cannot build reference cubature points for surface integration scheme.");
888 
889  TEUCHOS_TEST_FOR_EXCEPT_MSG(!int_rule->isSide(),
890  "IntegrationValues2::getUniformSideCubaturePointsRef : Requested side points, which is not supported by integration rule.");
891 
892  auto weights = af.template buildStaticArray<Scalar,IP>("cub_weights",num_ip);
893 
894  intrepid_cubature->getCubature(aux.get_view(), weights.get_view());
895 
896  PHX::Device::execution_space().fence();
897 
898  if(apply_permutation and requires_permutation_)
899  applyBasePermutation(aux, permutations_);
900 
901  if(cache){
902  side_cub_points = aux;
903  side_cub_points_evaluated_ = true;
904  }
905 
906  return aux;
907 }
908 
909 template <typename Scalar>
913  const bool force,
914  const bool apply_permutation) const
915 {
916  if(cub_weights_evaluated_ and not force)
917  return cub_weights;
918 
919  // Only log time if values computed (i.e. don't log if values are already cached)
920  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getUniformCubatureWeightRef()",get_uniform_cub_weights_ref);
921 
922  MDFieldArrayFactory af(prefix_,true);
923 
924  int num_space_dim = int_rule->topology->getDimension();
925  int num_ip = int_rule->num_points;
926 
927  auto aux = af.template buildStaticArray<Scalar,IP>("cub_weights",num_ip);
928 
929  if (int_rule->isSide() && num_space_dim==1) {
930  std::cout << "WARNING: 0-D quadrature rule infrastructure does not exist!!! Will not be able to do "
931  << "non-natural integration rules.";
932  return aux;
933  }
934 
935  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->cv_type != "none",
936  "IntegrationValues2::getUniformCubatureWeightsRef : Cannot build reference cubature weights for control volume integration scheme.");
937 
939  "IntegrationValues2::getUniformCubatureWeightsRef : Cannot build reference cubature weights for surface integration scheme.");
940 
941  auto points = af.template buildStaticArray<Scalar,IP,Dim>("cub_points",num_ip,num_space_dim);
942 
943  intrepid_cubature->getCubature(points.get_view(), aux.get_view());
944 
945  PHX::Device::execution_space().fence();
946 
947  if(apply_permutation and requires_permutation_)
948  applyBasePermutation(aux, permutations_);
949 
950  if(cache){
951  cub_weights = aux;
952  cub_weights_evaluated_ = true;
953  }
954 
955  return aux;
956 
957 }
958 
959 template <typename Scalar>
963 {
964  return node_coordinates;
965 }
966 
967 template <typename Scalar>
970 getJacobian(const bool cache,
971  const bool force) const
972 {
973  if(jac_evaluated_ and not force)
974  return jac;
975 
976  // Only log time if values computed (i.e. don't log if values are already cached)
977  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getJacobian()",get_jacobian);
978 
979  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
980  MDFieldArrayFactory af(prefix_,true);
981 
982  int num_space_dim = int_rule->topology->getDimension();
983  int num_ip = int_rule->num_points;
984 
985  // Don't forget that since we are not caching this, we have to make sure the managed view remains alive while we use the non-const wrapper
986  auto const_ref_coord = getCubaturePointsRef(false,force);
987  auto ref_coord = PHX::getNonConstDynRankViewFromConstMDField(const_ref_coord);
988  auto node_coord = PHX::getNonConstDynRankViewFromConstMDField(getNodeCoordinates());
989  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("jac",num_cells_, num_ip, num_space_dim,num_space_dim);
990 
991  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
992  auto s_ref_coord = Kokkos::subview(ref_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
993  auto s_node_coord = Kokkos::subview(node_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
994  auto s_jac = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
995 
996  cell_tools.setJacobian(s_jac, s_ref_coord, s_node_coord,*(int_rule->topology));
997 
998  PHX::Device::execution_space().fence();
999 
1000  if(cache){
1001  jac = aux;
1002  jac_evaluated_ = true;
1003  }
1004 
1005  return aux;
1006 }
1007 
1008 template <typename Scalar>
1011 getJacobianInverse(const bool cache,
1012  const bool force) const
1013 {
1014  if(jac_inv_evaluated_ and not force)
1015  return jac_inv;
1016 
1017  // Only log time if values computed (i.e. don't log if values are already cached)
1018  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getJacobianInverse()",get_jacobian_inv);
1019 
1020  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
1021  MDFieldArrayFactory af(prefix_,true);
1022 
1023  const int num_space_dim = int_rule->topology->getDimension();
1024  const int num_ip = int_rule->num_points;
1025 
1026  auto jacobian = getJacobian(false,force);
1027  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("jac_inv",num_cells_, num_ip, num_space_dim,num_space_dim);
1028 
1029  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1030  auto s_jac = Kokkos::subview(jacobian.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
1031  auto s_jac_inv = Kokkos::subview(aux.get_view(), cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
1032 
1033  cell_tools.setJacobianInv(s_jac_inv, s_jac);
1034 
1035  PHX::Device::execution_space().fence();
1036 
1037  if(cache){
1038  jac_inv = aux;
1039  jac_inv_evaluated_ = true;
1040  }
1041 
1042  return aux;
1043 }
1044 
1045 template <typename Scalar>
1048 getJacobianDeterminant(const bool cache,
1049  const bool force) const
1050 {
1051  if(jac_det_evaluated_ and not force)
1052  return jac_det;
1053 
1054  // Only log time if values computed (i.e. don't log if values are already cached)
1055  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getJacobianDeterminant()",get_jacobian_det);
1056 
1057  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
1058  MDFieldArrayFactory af(prefix_,true);
1059 
1060  const int num_ip = int_rule->num_points;
1061 
1062  auto jacobian = getJacobian(false,force);
1063  auto aux = af.template buildStaticArray<Scalar,Cell,IP>("jac_det",num_cells_, num_ip);
1064 
1065  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1066  auto s_jac = Kokkos::subview(jacobian.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
1067  auto s_jac_det = Kokkos::subview(aux.get_view(), cell_range,Kokkos::ALL());
1068 
1069  cell_tools.setJacobianDet(s_jac_det, s_jac);
1070 
1071  PHX::Device::execution_space().fence();
1072 
1073  if(cache){
1074  jac_det = aux;
1075  jac_det_evaluated_ = true;
1076  }
1077 
1078  return aux;
1079 }
1080 
1081 template <typename Scalar>
1084 getWeightedMeasure(const bool cache,
1085  const bool force) const
1086 {
1087  if(weighted_measure_evaluated_ and not force)
1088  return weighted_measure;
1089 
1090  // Only log time if values computed (i.e. don't log if values are already cached)
1091  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getWeightedMeasure()",get_wt_meas);
1092 
1093  MDFieldArrayFactory af(prefix_,true);
1094 
1095  const int num_space_dim = int_rule->topology->getDimension();
1096  const int num_ip = int_rule->num_points;
1097 
1098  auto aux = af.template buildStaticArray<Scalar,Cell,IP>("weighted_measure",num_cells_, num_ip);
1099 
1100  if(int_rule->cv_type != "none"){
1101 
1102  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->cv_type == "side",
1103  "IntegrationValues2::getWeightedMeasure : Weighted measure not available for side control volume integrators. Use getWeightedNormals instead.");
1104 
1105  // CV integration uses a single call to map from physical space to the weighted measure - I assume this is slower than what we do with non-cv integration methods
1106 
1107  auto s_cub_points = af.template buildStaticArray<Scalar, Cell, IP, Dim>("cub_points",num_evaluate_cells_,num_ip,num_space_dim);
1108 
1109  auto node_coord = PHX::getNonConstDynRankViewFromConstMDField(getNodeCoordinates());
1110 
1111  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1112  auto s_node_coord = Kokkos::subview(node_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1113  auto s_weighted_measure = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL());
1114 
1115  intrepid_cubature->getCubature(s_cub_points.get_view(),s_weighted_measure,s_node_coord);
1116 
1117  } else if(int_rule->getType() == IntegrationDescriptor::SURFACE){
1118 
1119  const auto & cell_topology = *int_rule->topology;
1120  const int cell_dim = cell_topology.getDimension();
1121  const int num_sides = (cell_dim==1) ? 2 : cell_topology.getSideCount();
1122  const int cubature_order = int_rule->order();
1123  const int num_points_on_side = num_ip / num_sides;
1124 
1125  Intrepid2::DefaultCubatureFactory cubature_factory;
1126  auto jacobian = getJacobian(false,force);
1127 
1128  for(int side=0; side<num_sides; ++side) {
1129 
1130  const int point_offset=side*num_points_on_side;
1131 
1132  // Get the cubature for the side
1133  Kokkos::DynRankView<double,PHX::Device> side_cub_weights;
1134  if(cell_dim==1){
1135  side_cub_weights = Kokkos::DynRankView<double,PHX::Device>("side_cub_weights",num_points_on_side);
1136  auto tmp_side_cub_weights_host = Kokkos::create_mirror_view(side_cub_weights);
1137  tmp_side_cub_weights_host(0)=1.;
1138  Kokkos::deep_copy(side_cub_weights,tmp_side_cub_weights_host);
1139  } else {
1140 
1141  // Get the face topology from the cell topology
1142  const shards::CellTopology face_topology(cell_topology.getCellTopologyData(cell_dim-1,side));
1143 
1144  auto ic = cubature_factory.create<PHX::Device::execution_space,double,double>(face_topology,cubature_order);
1145 
1146  side_cub_weights = Kokkos::DynRankView<double,PHX::Device>("side_cub_weights",num_points_on_side);
1147  auto subcell_cub_points = Kokkos::DynRankView<double,PHX::Device>("subcell_cub_points",num_points_on_side,cell_dim-1);
1148 
1149  // Get the reference face points
1150  ic->getCubature(subcell_cub_points, side_cub_weights);
1151  }
1152 
1153  PHX::Device::execution_space().fence();
1154 
1155  // Iterating over face points
1156  Kokkos::MDRangePolicy<PHX::Device::execution_space,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_points_on_side});
1157 
1158  // Calculate measures (quadrature weights in physical space) for this side
1159  auto side_weighted_measure = Kokkos::DynRankView<Scalar,PHX::Device>("side_weighted_measure",num_evaluate_cells_,num_points_on_side);
1160  if(cell_dim == 1){
1161  auto side_cub_weights_host = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(),side_cub_weights);
1162  Kokkos::deep_copy(side_weighted_measure, side_cub_weights_host(0));
1163  } else {
1164 
1165  // Copy from complete jacobian to side jacobian
1166  auto side_jacobian = Kokkos::DynRankView<Scalar,PHX::Device>("side_jac",num_evaluate_cells_,num_points_on_side,cell_dim,cell_dim);
1167 
1168  Kokkos::parallel_for("Copy jacobian to side jacobian",policy,KOKKOS_LAMBDA (const int cell,const int point) {
1169  for(int dim=0;dim<cell_dim;++dim)
1170  for(int dim1=0;dim1<cell_dim;++dim1)
1171  side_jacobian(cell,point,dim,dim1) = jacobian(cell,point_offset+point,dim,dim1);
1172  });
1173 
1174  auto scratch = af.template buildStaticArray<Scalar,Point>("scratch_for_compute_measure", num_evaluate_cells_*num_points_on_side*num_space_dim*num_space_dim);
1175 
1176  if(cell_dim == 2){
1177  Intrepid2::FunctionSpaceTools<PHX::Device::execution_space>::
1178  computeEdgeMeasure(side_weighted_measure, side_jacobian, side_cub_weights,
1179  side,cell_topology,
1180  scratch.get_view());
1181  PHX::Device::execution_space().fence();
1182  } else if(cell_dim == 3){
1183  Intrepid2::FunctionSpaceTools<PHX::Device::execution_space>::
1184  computeFaceMeasure(side_weighted_measure, side_jacobian, side_cub_weights,
1185  side,cell_topology,
1186  scratch.get_view());
1187  PHX::Device::execution_space().fence();
1188  }
1189  }
1190 
1191 
1192  // Copy to the main array
1193  Kokkos::parallel_for("copy surface weighted measure values",policy,KOKKOS_LAMBDA (const int cell,const int point) {
1194  aux(cell,point_offset + point) = side_weighted_measure(cell,point);
1195  });
1196  PHX::Device::execution_space().fence();
1197  }
1198 
1199  } else {
1200 
1201  auto cell_range = std::make_pair(0,num_evaluate_cells_);
1202  auto s_weighted_measure = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL());
1203  auto cubature_weights = getUniformCubatureWeightsRef(false,force,false);
1204 
1205  if (!int_rule->isSide()) {
1206 
1207  auto s_jac_det = Kokkos::subview(getJacobianDeterminant(false,force).get_view(),cell_range,Kokkos::ALL());
1208  Intrepid2::FunctionSpaceTools<PHX::Device::execution_space>::
1209  computeCellMeasure(s_weighted_measure, s_jac_det, cubature_weights.get_view());
1210 
1211  } else if(int_rule->spatial_dimension==3) {
1212 
1213  auto s_jac = Kokkos::subview(getJacobian(false,force).get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
1214  auto scratch = af.template buildStaticArray<Scalar,Point>("scratch_for_compute_measure", num_evaluate_cells_*num_ip*num_space_dim*num_space_dim);
1215  Intrepid2::FunctionSpaceTools<PHX::Device::execution_space>::
1216  computeFaceMeasure(s_weighted_measure, s_jac, cubature_weights.get_view(),
1217  int_rule->side, *int_rule->topology,
1218  scratch.get_view());
1219 
1220  } else if(int_rule->spatial_dimension==2) {
1221 
1222  auto s_jac = Kokkos::subview(getJacobian(false,force).get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL(),Kokkos::ALL());
1223  auto scratch = af.template buildStaticArray<Scalar,Point>("scratch_for_compute_measure", num_evaluate_cells_*num_ip*num_space_dim*num_space_dim);
1224  Intrepid2::FunctionSpaceTools<PHX::Device::execution_space>::
1225  computeEdgeMeasure(s_weighted_measure, s_jac, cubature_weights.get_view(),
1226  int_rule->side,*int_rule->topology,
1227  scratch.get_view());
1228 
1229  } else {
1230  TEUCHOS_ASSERT(false);
1231  }
1232 
1233  }
1234 
1235  PHX::Device::execution_space().fence();
1236 
1237  // Apply permutations if necessary
1238  if(requires_permutation_)
1239  applyPermutation(aux, permutations_);
1240 
1241  if(cache){
1242  weighted_measure = aux;
1243  weighted_measure_evaluated_ = true;
1244  }
1245 
1246  return aux;
1247 }
1248 
1249 template <typename Scalar>
1252 getWeightedNormals(const bool cache,
1253  const bool force) const
1254 {
1255  if(weighted_normals_evaluated_ and not force)
1256  return weighted_normals;
1257 
1258  // Only log time if values computed (i.e. don't log if values are already cached)
1259  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getWeightedNormals()",get_wt_normals);
1260 
1261  MDFieldArrayFactory af(prefix_,true);
1262 
1263  const int num_space_dim = int_rule->topology->getDimension();
1264  const int num_ip = int_rule->num_points;
1265 
1266  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim>("weighted_normals",num_cells_,num_ip,num_space_dim);
1267 
1269  "IntegrationValues2::getWeightedNormals : Cannot build reference weighted normals for surface integration scheme.");
1270 
1271  auto points = af.template buildStaticArray<Scalar,Cell,IP,Dim>("cub_points",num_cells_,num_ip,num_space_dim);
1272 
1273  auto node_coord = PHX::getNonConstDynRankViewFromConstMDField(getNodeCoordinates());
1274 
1275  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1276  auto s_cub_points = Kokkos::subview(points.get_view(),cell_range, Kokkos::ALL(), Kokkos::ALL());
1277  auto s_weighted_normals = Kokkos::subview(aux.get_view(), cell_range, Kokkos::ALL(), Kokkos::ALL());
1278  auto s_node_coord = Kokkos::subview(node_coord, cell_range, Kokkos::ALL(), Kokkos::ALL());
1279 
1280  intrepid_cubature->getCubature(s_cub_points,s_weighted_normals,s_node_coord);
1281 
1282  PHX::Device::execution_space().fence();
1283 
1284  // Apply permutations if necessary
1285  if(requires_permutation_)
1286  applyPermutation(aux, permutations_);
1287 
1288  if(cache){
1289  weighted_normals = aux;
1290  weighted_normals_evaluated_ = true;
1291  }
1292 
1293  return aux;
1294 }
1295 
1296 template <typename Scalar>
1299 getSurfaceNormals(const bool cache,
1300  const bool force) const
1301 {
1302  if(surface_normals_evaluated_ and not force)
1303  return surface_normals;
1304 
1305  // Only log time if values computed (i.e. don't log if values are already cached)
1306  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getSurfaceNormals()",get_surf_normals);
1307 
1308  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->isSide(),
1309  "IntegrationValues2::getSurfaceNormals : This call doesn't work with sides (only surfaces).");
1310 
1311  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->cv_type != "none",
1312  "IntegrationValues2::getSurfaceNormals : This call does not support control volume integration schemes.");
1313 
1314  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->getType() != IntegrationDescriptor::SURFACE,
1315  "IntegrationValues2::getSurfaceNormals : Can only build for surface integrators.");
1316 
1317  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
1318  MDFieldArrayFactory af(prefix_,true);
1319 
1320  const shards::CellTopology & cell_topology = *(int_rule->topology);
1321  const int cell_dim = cell_topology.getDimension();
1322  const int subcell_dim = cell_topology.getDimension()-1;
1323  const int num_subcells = cell_topology.getSubcellCount(subcell_dim);
1324  const int num_space_dim = int_rule->topology->getDimension();
1325  const int num_ip = int_rule->num_points;
1326  const int num_points_on_face = num_ip / num_subcells;
1327 
1328  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim>("surface_normals",num_cells_,num_ip,num_space_dim);
1329 
1330  // We only need the jacobian if we're not in 1D
1331  ConstArray_CellIPDimDim jacobian;
1332  if(cell_dim != 1)
1333  jacobian = getJacobian(false,force);
1334 
1335  // We get to build up our surface normals one 'side' at a time
1336  for(int subcell_index=0; subcell_index<num_subcells; ++subcell_index) {
1337 
1338  const int point_offset = subcell_index * num_points_on_face;;
1339 
1340  // Calculate normals
1341  auto side_normals = Kokkos::DynRankView<Scalar,PHX::Device>("side_normals",num_evaluate_cells_,num_points_on_face,cell_dim);
1342  if(cell_dim == 1){
1343 
1344  const int other_subcell_index = (subcell_index==0) ? 1 : 0;
1345  auto in_node_coordinates_k = getNodeCoordinates().get_view();
1346  const auto min = std::numeric_limits<typename Sacado::ScalarType<Scalar>::type>::min();
1347 
1348  Kokkos::parallel_for("compute normals 1D",num_evaluate_cells_,KOKKOS_LAMBDA (const int cell) {
1349  Scalar norm = (in_node_coordinates_k(cell,subcell_index,0) - in_node_coordinates_k(cell,other_subcell_index,0));
1350  side_normals(cell,0,0) = norm / fabs(norm + min);
1351  });
1352 
1353  } else {
1354 
1355  // Iterating over side points
1356  Kokkos::MDRangePolicy<PHX::Device::execution_space,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_points_on_face});
1357 
1358  auto side_jacobian = Kokkos::DynRankView<Scalar,PHX::Device>("side_jac",num_evaluate_cells_,num_points_on_face,cell_dim,cell_dim);
1359  Kokkos::parallel_for("Copy jacobian to side jacobian",policy,KOKKOS_LAMBDA (const int cell,const int point) {
1360  for(int dim=0;dim<cell_dim;++dim)
1361  for(int dim1=0;dim1<cell_dim;++dim1)
1362  side_jacobian(cell,point,dim,dim1) = jacobian(cell,point_offset+point,dim,dim1);
1363  });
1364 
1365  // Get the "physical side normals"
1366  cell_tools.getPhysicalSideNormals(side_normals,side_jacobian,subcell_index,cell_topology);
1367 
1368  // Normalize each normal
1369  Kokkos::parallel_for("Normalize the normals",policy,KOKKOS_LAMBDA (const int cell,const int point) {
1370  Scalar n = 0.;
1371  for(int dim=0;dim<cell_dim;++dim){
1372  n += side_normals(cell,point,dim)*side_normals(cell,point,dim);
1373  }
1374  // If n is zero then this is - hopefully - a virtual cell
1375  if(n > 0.){
1376  n = Kokkos::sqrt(n);
1377  for(int dim=0;dim<cell_dim;++dim)
1378  side_normals(cell,point,dim) /= n;
1379  }
1380  });
1381  }
1382 
1383  PHX::Device::execution_space().fence();
1384 
1385  Kokkos::MDRangePolicy<PHX::Device::execution_space,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_points_on_face});
1386  Kokkos::parallel_for("copy surface normals", policy,KOKKOS_LAMBDA (const int cell,const int point) {
1387  for(int dim=0;dim<cell_dim;++dim)
1388  aux(cell,point_offset + point,dim) = side_normals(cell,point,dim);
1389  });
1390  PHX::Device::execution_space().fence();
1391  }
1392 
1393  // Need to correct the virtual cells
1394  {
1395  TEUCHOS_TEST_FOR_EXCEPT_MSG(side_connectivity_ == Teuchos::null,
1396  "IntegrationValues2::getSurfaceNormals : Surface normals require a SubcellConnectivity object pass in through the setupPermutations call");
1397  TEUCHOS_TEST_FOR_EXCEPT_MSG(num_virtual_cells_ < 0,
1398  "IntegrationValues2::getSurfaceNormals : Number of virtual cells (see setupPermutations or evaluateValues) must be greater or equal to zero");
1399 
1400  // Virtual cell normals need to be reversed
1401  if(num_virtual_cells_ > 0)
1402  correctVirtualNormals(aux, num_evaluate_cells_ - num_virtual_cells_, num_virtual_cells_, *int_rule->topology, *side_connectivity_);
1403  }
1404 
1405  if(cache){
1406  surface_normals = aux;
1407  surface_normals_evaluated_ = true;
1408  }
1409 
1410  return aux;
1411 
1412 }
1413 
1414 template <typename Scalar>
1418  const bool force) const
1419 {
1420  if(surface_rotation_matrices_evaluated_ and not force)
1421  return surface_rotation_matrices;
1422 
1423  // Only log time if values computed (i.e. don't log if values are already cached)
1424  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getSurfaceRotationMatrices()",get_surf_rot_mat);
1425 
1426  MDFieldArrayFactory af(prefix_,true);
1427 
1428  const int num_ip = int_rule->num_points;
1429  const int cell_dim = int_rule->topology->getDimension();
1430 
1431  // This call will handle all the error checking
1432  auto normals = getSurfaceNormals(false,force).get_static_view();
1433  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("surface_rotation_matrices",num_cells_, num_ip, 3, 3);
1434 
1435  Kokkos::MDRangePolicy<PHX::Device::execution_space,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_ip});
1436  Kokkos::parallel_for("create surface rotation matrices",policy,KOKKOS_LAMBDA (const int cell,const int point) {
1437  Scalar normal[3];
1438  for(int i=0;i<3;i++)
1439  normal[i]=0.;
1440  for(int dim=0; dim<cell_dim; ++dim)
1441  normal[dim] = normals(cell,point,dim);
1442 
1443  Scalar transverse[3];
1444  Scalar binormal[3];
1445  panzer::convertNormalToRotationMatrix(normal,transverse,binormal);
1446 
1447  for(int dim=0; dim<3; ++dim){
1448  aux(cell,point,0,dim) = normal[dim];
1449  aux(cell,point,1,dim) = transverse[dim];
1450  aux(cell,point,2,dim) = binormal[dim];
1451  }
1452  });
1453  PHX::Device::execution_space().fence();
1454 
1455  // Need to correct the virtual cells
1456  {
1457  TEUCHOS_TEST_FOR_EXCEPT_MSG(side_connectivity_ == Teuchos::null,
1458  "IntegrationValues2::getSurfaceRotationMatrices : Surface normals require a SubcellConnectivity object pass in through the setupPermutations call");
1459  TEUCHOS_TEST_FOR_EXCEPT_MSG(num_virtual_cells_ < 0,
1460  "IntegrationValues2::getSurfaceRotationMatrices : Number of virtual cells (see setupPermutations or evaluateValues) must be greater or equal to zero");
1461 
1462  // Virtual cell normals need to be reversed
1463  if(num_virtual_cells_ > 0)
1464  correctVirtualRotationMatrices(aux, num_evaluate_cells_ - num_virtual_cells_, num_virtual_cells_, *int_rule->topology, *side_connectivity_);
1465  }
1466 
1467  if(cache){
1468  surface_rotation_matrices = aux;
1469  surface_rotation_matrices_evaluated_ = true;
1470  }
1471 
1472  return aux;
1473 }
1474 
1475 template <typename Scalar>
1478 getCovarientMatrix(const bool cache,
1479  const bool force) const
1480 {
1481  if(covarient_evaluated_ and not force)
1482  return covarient;
1483 
1484  // Only log time if values computed (i.e. don't log if values are already cached)
1485  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getCovariantMatrix()",get_cov_mat);
1486 
1487  MDFieldArrayFactory af(prefix_,true);
1488 
1489  const int num_space_dim = int_rule->topology->getDimension();
1490  const int num_ip = int_rule->num_points;
1491 
1492  auto jacobian = getJacobian(false,force).get_static_view();
1493  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("covarient",num_cells_, num_ip, num_space_dim,num_space_dim);
1494 
1495  Kokkos::MDRangePolicy<PHX::Device,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_ip});
1496  Kokkos::parallel_for("evalaute covarient metric tensor",policy,KOKKOS_LAMBDA (const int cell,const int ip) {
1497  // g^{ij} = \frac{\parital x_i}{\partial \chi_\alpha}\frac{\parital x_j}{\partial \chi_\alpha}
1498  for (int i = 0; i < num_space_dim; ++i) {
1499  for (int j = 0; j < num_space_dim; ++j) {
1500  Scalar value(0.0);
1501  for (int alpha = 0; alpha < num_space_dim; ++alpha)
1502  value += jacobian(cell,ip,i,alpha) * jacobian(cell,ip,j,alpha);
1503  aux(cell,ip,i,j) = value;
1504  }
1505  }
1506  });
1507  PHX::Device::execution_space().fence();
1508 
1509  if(cache){
1510  covarient = aux;
1511  covarient_evaluated_ = true;
1512  }
1513 
1514  return aux;
1515 }
1516 
1517 template <typename Scalar>
1520 getContravarientMatrix(const bool cache,
1521  const bool force) const
1522 {
1523  if(contravarient_evaluated_ and not force)
1524  return contravarient;
1525 
1526  // Only log time if values computed (i.e. don't log if values are already cached)
1527  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getContravarientMatrix()",get_contra_mat);
1528 
1529  MDFieldArrayFactory af(prefix_,true);
1530 
1531  const int num_space_dim = int_rule->topology->getDimension();
1532  const int num_ip = int_rule->num_points;
1533 
1534  auto cov = getCovarientMatrix(false,force);
1535  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim,Dim>("contravarient",num_cells_, num_ip, num_space_dim,num_space_dim);
1536 
1537  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1538  auto s_contravarient = Kokkos::subview(aux.get_view(), cell_range,Kokkos::ALL,Kokkos::ALL,Kokkos::ALL);
1539  auto s_covarient = Kokkos::subview(cov.get_view(), cell_range,Kokkos::ALL,Kokkos::ALL,Kokkos::ALL);
1540 
1541  Intrepid2::RealSpaceTools<PHX::Device::execution_space>::inverse(s_contravarient, s_covarient);
1542  PHX::Device::execution_space().fence();
1543 
1544  if(cache){
1545  contravarient = aux;
1546  contravarient_evaluated_ = true;
1547  }
1548 
1549  return aux;
1550 }
1551 
1552 template <typename Scalar>
1556  const bool force) const
1557 {
1558  if(norm_contravarient_evaluated_ and not force)
1559  return norm_contravarient;
1560 
1561  // Only log time if values computed (i.e. don't log if values are already cached)
1562  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getNormContravarientMatrix()",get_norm_contr_mat);
1563 
1564  MDFieldArrayFactory af(prefix_,true);
1565 
1566  const int num_space_dim = int_rule->topology->getDimension();
1567  const int num_ip = int_rule->num_points;
1568 
1569  auto con = getContravarientMatrix(false,force).get_static_view();
1570  auto aux = af.template buildStaticArray<Scalar,Cell,IP>("norm_contravarient",num_cells_, num_ip);
1571 
1572  // norm of g_ij
1573  Kokkos::MDRangePolicy<PHX::Device,Kokkos::Rank<2>> policy({0,0},{num_evaluate_cells_,num_ip});
1574  Kokkos::parallel_for("evaluate norm_contravarient",policy,KOKKOS_LAMBDA (const int cell,const int ip) {
1575  aux(cell,ip) = 0.0;
1576  for (int i = 0; i < num_space_dim; ++i) {
1577  for (int j = 0; j < num_space_dim; ++j) {
1578  aux(cell,ip) += con(cell,ip,i,j) * con(cell,ip,i,j);
1579  }
1580  }
1581  aux(cell,ip) = Kokkos::sqrt(aux(cell,ip));
1582  });
1583  PHX::Device::execution_space().fence();
1584 
1585  if(cache){
1586  norm_contravarient = aux;
1587  norm_contravarient_evaluated_ = true;
1588  }
1589 
1590  return aux;
1591 }
1592 
1593 template <typename Scalar>
1596 getCubaturePoints(const bool cache,
1597  const bool force) const
1598 {
1599  if(ip_coordinates_evaluated_ and not force)
1600  return ip_coordinates;
1601 
1602  // Only log time if values computed (i.e. don't log if values are already cached)
1603  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getCubaturePoints()",get_cub_pts);
1604 
1605  MDFieldArrayFactory af(prefix_,true);
1606 
1607  const int num_space_dim = int_rule->topology->getDimension();
1608  const int num_ip = int_rule->num_points;
1609 
1610  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim>("ip_coordinates",num_cells_, num_ip, num_space_dim);
1611 
1613  const bool is_cv = (int_rule->getType() == ID::CV_VOLUME) or (int_rule->getType() == ID::CV_SIDE) or (int_rule->getType() == ID::CV_BOUNDARY);
1614 
1615  auto node_coord = PHX::getNonConstDynRankViewFromConstMDField(getNodeCoordinates());
1616 
1617  if(is_cv){
1618 
1619  // CV integration uses a single call to map from physical space to the weighted measure - I assume this is slower than what we do with non-cv integration methods
1620  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1621  auto s_node_coord = Kokkos::subview(node_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1622  auto s_cub_points = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL());
1623 
1624  // TODO: We need to pull this apart for control volumes. Right now we calculate both weighted measures/norms and cubature points at the same time
1625  if(int_rule->cv_type == "side"){
1626  auto scratch = af.template buildStaticArray<Scalar,Cell,IP,Dim>("scratch",num_evaluate_cells_,num_ip,num_space_dim);
1627  intrepid_cubature->getCubature(s_cub_points,scratch.get_view(),s_node_coord);
1628  } else {
1629  // I think boundary is included as a weighted measure because it has a side embedded in intrepid_cubature
1630  TEUCHOS_ASSERT((int_rule->getType() == ID::CV_VOLUME) or (int_rule->getType() == ID::CV_BOUNDARY));
1631  auto scratch = af.template buildStaticArray<Scalar,Cell,IP>("scratch",num_evaluate_cells_,num_ip);
1632  intrepid_cubature->getCubature(s_cub_points,scratch.get_view(),s_node_coord);
1633  }
1634 
1635  } else {
1636 
1637  // Don't forget that since we are not caching this, we have to make sure the managed view remains alive while we use the non-const wrapper
1638  auto const_ref_coord = getCubaturePointsRef(false,force);
1639  auto ref_coord = PHX::getNonConstDynRankViewFromConstMDField(const_ref_coord);
1640 
1641  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1642  auto s_ref_coord = Kokkos::subview(ref_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1643  auto s_coord = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL());
1644  auto s_node_coord = Kokkos::subview(node_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1645 
1646  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
1647  cell_tools.mapToPhysicalFrame(s_coord, s_ref_coord, s_node_coord, *(int_rule->topology));
1648 
1649  }
1650 
1651  PHX::Device::execution_space().fence();
1652 
1653  if(cache){
1654  ip_coordinates = aux;
1655  ip_coordinates_evaluated_ = true;
1656  }
1657 
1658  return aux;
1659 }
1660 
1661 
1662 template <typename Scalar>
1665 getCubaturePointsRef(const bool cache,
1666  const bool force) const
1667 {
1668  if(ref_ip_coordinates_evaluated_)
1669  return ref_ip_coordinates;
1670 
1671  // Only log time if values computed (i.e. don't log if values are already cached)
1672  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::getCubaturePointsRef()",get_cub_pts_ref);
1673 
1675  const bool is_surface = int_rule->getType() == ID::SURFACE;
1676  const bool is_cv = (int_rule->getType() == ID::CV_VOLUME) or (int_rule->getType() == ID::CV_SIDE) or (int_rule->getType() == ID::CV_BOUNDARY);
1677 
1678  const int num_space_dim = int_rule->topology->getDimension();
1679  const int num_ip = int_rule->num_points;
1680 
1681  MDFieldArrayFactory af(prefix_,true);
1682 
1683  Intrepid2::CellTools<PHX::Device::execution_space> cell_tools;
1684 
1685  auto aux = af.template buildStaticArray<Scalar,Cell,IP,Dim>("ref_ip_coordinates",num_cells_, num_ip, num_space_dim);
1686 
1687  if(is_cv){
1688 
1689  // Control volume reference points are actually generated from the physical points (i.e. reverse to everything else)
1690 
1691  auto node_coord = PHX::getNonConstDynRankViewFromConstMDField(getNodeCoordinates());
1692 
1693  // Don't forget that since we are not caching this, we have to make sure the managed view remains alive while we use the non-const wrapper
1694  auto const_coord = getCubaturePoints(false,force);
1695  auto coord = PHX::getNonConstDynRankViewFromConstMDField(const_coord);
1696 
1697  const auto cell_range = std::make_pair(0,num_evaluate_cells_);
1698  auto s_ref_coord = Kokkos::subview(aux.get_view(),cell_range,Kokkos::ALL(),Kokkos::ALL());
1699  auto s_coord = Kokkos::subview(coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1700  auto s_node_coord = Kokkos::subview(node_coord, cell_range,Kokkos::ALL(),Kokkos::ALL());
1701 
1702  cell_tools.mapToReferenceFrame(s_ref_coord, s_coord, s_node_coord, *(int_rule->topology));
1703 
1704  } else if(is_surface){
1705 
1706  const auto & cell_topology = *int_rule->topology;
1707  const int cell_dim = cell_topology.getDimension();
1708  const int num_sides = (cell_dim==1) ? 2 : cell_topology.getSideCount();
1709  const int subcell_dim = cell_dim-1;
1710  const int num_points_on_face = num_ip / num_sides;
1711  const int order = int_rule->getOrder();
1712 
1713  // Scratch space for storing the points for each side of the cell
1714  auto side_cub_points = af.template buildStaticArray<Scalar,IP,Dim>("side_cub_points",num_points_on_face,cell_dim);
1715 
1716  Intrepid2::DefaultCubatureFactory cubature_factory;
1717 
1718  // We get to build up our cubature one side at a time
1719  for(int side=0; side<num_sides; ++side) {
1720 
1721  const int point_offset = side*num_points_on_face;
1722 
1723  // Get the cubature for the side
1724  if(cell_dim==1){
1725  // In 1D the surface point is either on the left side of the cell, or the right side
1726  auto side_cub_points_host = Kokkos::create_mirror_view(side_cub_points.get_view());
1727  side_cub_points_host(0,0) = (side==0)? -1. : 1.;
1728  Kokkos::deep_copy(side_cub_points.get_view(),side_cub_points_host);
1729  } else {
1730 
1731  // Get the face topology from the cell topology
1732  const shards::CellTopology face_topology(cell_topology.getCellTopologyData(subcell_dim,side));
1733 
1734  // Create a cubature for the face of the cell
1735  auto ic = cubature_factory.create<PHX::Device::execution_space,double,double>(face_topology,order);
1736  auto tmp_side_cub_weights = Kokkos::DynRankView<double,PHX::Device>("tmp_side_cub_weights",num_points_on_face);
1737  auto tmp_side_cub_points = Kokkos::DynRankView<double,PHX::Device>("tmp_side_cub_points",num_points_on_face,subcell_dim);
1738 
1739  // Get the reference face points
1740  ic->getCubature(tmp_side_cub_points, tmp_side_cub_weights);
1741 
1742  // Convert from reference face points to reference cell points
1743  cell_tools.mapToReferenceSubcell(side_cub_points.get_view(), tmp_side_cub_points, subcell_dim, side, cell_topology);
1744  }
1745 
1746  PHX::Device::execution_space().fence();
1747 
1748  // Copy from the side allocation to the surface allocation
1749  Kokkos::MDRangePolicy<PHX::Device::execution_space,Kokkos::Rank<3>> policy({0,0,0},{num_evaluate_cells_,num_points_on_face, num_space_dim});
1750  Kokkos::parallel_for("copy values",policy,KOKKOS_LAMBDA (const int cell,const int point, const int dim) {
1751  aux(cell,point_offset + point,dim) = side_cub_points(point,dim);
1752  });
1753  PHX::Device::execution_space().fence();
1754  }
1755 
1756  } else {
1757 
1758  // Haven't set this up yet
1759  TEUCHOS_TEST_FOR_EXCEPT_MSG(int_rule->isSide() && num_space_dim==1,
1760  "ERROR: 0-D quadrature rule infrastructure does not exist!!! Will not be able to do "
1761  << "non-natural integration rules.");
1762 
1763  auto cub_points = getUniformCubaturePointsRef(false,force,false);
1764 
1765  Kokkos::MDRangePolicy<PHX::Device,Kokkos::Rank<3>> policy({0,0,0},{num_evaluate_cells_,num_ip,num_space_dim});
1766  Kokkos::parallel_for(policy, KOKKOS_LAMBDA(const int & cell, const int & ip, const int & dim){
1767  aux(cell,ip,dim) = cub_points(ip,dim);
1768  });
1769  }
1770 
1771  PHX::Device::execution_space().fence();
1772 
1773  if(requires_permutation_)
1774  applyPermutation(aux, permutations_);
1775 
1776  if(cache){
1777  ref_ip_coordinates = aux;
1778  ref_ip_coordinates_evaluated_ = true;
1779  }
1780 
1781  return aux;
1782 }
1783 
1784 template <typename Scalar>
1785 void
1788 {
1789  PANZER_FUNC_TIME_MONITOR_DIFF("panzer::integrationValues2::evaluateEverything()",eval_everything);
1790 
1792  const bool is_surface = int_rule->getType() == ID::SURFACE;
1793  const bool is_cv = (int_rule->getType() == ID::CV_VOLUME) or (int_rule->getType() == ID::CV_SIDE) or (int_rule->getType() == ID::CV_BOUNDARY);
1794  const bool is_side = int_rule->isSide();
1795 
1796  resetArrays();
1797 
1798  // Base cubature stuff
1799  if(is_cv){
1800  getCubaturePoints(true,true);
1801  getCubaturePointsRef(true,true);
1802  } else {
1803  if(not is_surface){
1804  getUniformCubaturePointsRef(true,true);
1805  getUniformCubatureWeightsRef(true,true);
1806  if(is_side)
1807  getUniformSideCubaturePointsRef(true,true);
1808  }
1809  getCubaturePointsRef(true,true);
1810  getCubaturePoints(true,true);
1811  }
1812 
1813  // Measure stuff
1814  getJacobian(true,true);
1815  getJacobianDeterminant(true,true);
1816  getJacobianInverse(true,true);
1817  if(int_rule->cv_type == "side")
1818  getWeightedNormals(true,true);
1819  else
1820  getWeightedMeasure(true,true);
1821 
1822  // Surface stuff
1823  if(is_surface){
1824  getSurfaceNormals(true,true);
1825  getSurfaceRotationMatrices(true,true);
1826  }
1827 
1828  // Stabilization stuff
1829  if(not (is_surface or is_cv)){
1830  getContravarientMatrix(true,true);
1831  getCovarientMatrix(true,true);
1832  getNormContravarientMatrix(true,true);
1833  }
1834 }
1835 
1836 #define INTEGRATION_VALUES2_INSTANTIATION(SCALAR) \
1837  template class IntegrationValues2<SCALAR>;
1838 
1840 
1841 // Disabled FAD support due to long build times on cuda (in debug mode
1842 // it takes multiple hours on some platforms). If we need
1843 // sensitivities wrt coordinates, we can reenable.
1844 
1845 // INTEGRATION_VALUES2_INSTANTIATION(panzer::Traits::FadType)
1846 
1847 }
ConstArray_IPDim getUniformSideCubaturePointsRef(const bool cache=true, const bool force=false, const bool apply_permutation=true) const
Get the uniform cubature points for a side.
void setupPermutations(const Teuchos::RCP< const SubcellConnectivity > &face_connectivity, const int num_virtual_cells)
Initialize the permutation arrays given a face connectivity.
ConstArray_CellIPDimDim getContravarientMatrix(const bool cache=true, const bool force=false) const
Get the contravarient matrix.
void setup(const Teuchos::RCP< const panzer::IntegrationRule > &ir, const PHX::MDField< Scalar, Cell, NODE, Dim > &cell_node_coordinates, const int num_cells=-1)
Main setup call for the lazy evaluation interface.
ConstArray_CellIP getWeightedMeasure(const bool cache=true, const bool force=false) const
Get the weighted measure (integration weights)
const int & getType() const
Get type of integrator.
ConstArray_CellIPDimDim getJacobianInverse(const bool cache=true, const bool force=false) const
Get the inverse of the Jacobian matrix evaluated at the cubature points.
#define INTEGRATION_VALUES2_INSTANTIATION(SCALAR)
ConstArray_CellIPDimDim getCovarientMatrix(const bool cache=true, const bool force=false) const
Get the covarient matrix.
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
#define PANZER_DOT(a, b)
#define TEUCHOS_TEST_FOR_EXCEPT_MSG(throw_exception_test, msg)
ConstArray_CellIPDim getWeightedNormals(const bool cache=true, const bool force=false) const
Get the weighted normals.
ConstArray_CellIP getNormContravarientMatrix(const bool cache=true, const bool force=false) const
Get the contravarient matrix.
ConstArray_CellIPDimDim getSurfaceRotationMatrices(const bool cache=true, const bool force=false) const
Get the surface rotation matrices.
#define PANZER_CROSS(a, b, c)
Teuchos::RCP< const shards::CellTopology > topology
KOKKOS_INLINE_FUNCTION void convertNormalToRotationMatrix(const Scalar normal[3], Scalar transverse[3], Scalar binormal[3])
ConstArray_CellIPDim getCubaturePoints(const bool cache=true, const bool force=false) const
Get the cubature points in physical space.
void setupArrays(const Teuchos::RCP< const panzer::IntegrationRule > &ir)
Sizes/allocates memory for arrays.
ConstArray_CellIPDim getCubaturePointsRef(const bool cache=true, const bool force=false) const
Get the cubature points in the reference space.
const int & getSide() const
Get side associated with integration - this is for backward compatibility.
ConstArray_IPDim getUniformCubaturePointsRef(const bool cache=true, const bool force=false, const bool apply_permutation=true) const
Get the uniform cubature points.
ConstArray_CellIPDimDim getJacobian(const bool cache=true, const bool force=false) const
Get the Jacobian matrix evaluated at the cubature points.
ConstArray_CellIP getJacobianDeterminant(const bool cache=true, const bool force=false) const
Get the determinant of the Jacobian matrix evaluated at the cubature points.
PHX::MDField< ScalarT, panzer::Cell, panzer::BASIS > field
A field to which we&#39;ll contribute, or in which we&#39;ll store, the result of computing this integral...
KOKKOS_FORCEINLINE_FUNCTION array_type get_static_view()
IntegrationValues2(const std::string &pre="", const bool allocArrays=false)
Base constructor.
Teuchos::RCP< shards::CellTopology > side_topology
#define TEUCHOS_ASSERT(assertion_test)
ConstArray_CellBASISDim getNodeCoordinates() const
Get the node coordinates describing the geometry of the mesh.
void evaluateValues(const PHX::MDField< Scalar, Cell, NODE, Dim > &cell_node_coordinates, const int num_cells=-1, const Teuchos::RCP< const SubcellConnectivity > &face_connectivity=Teuchos::null, const int num_virtual_cells=-1)
Evaluate basis values.
ConstArray_IP getUniformCubatureWeightsRef(const bool cache=true, const bool force=false, const bool apply_permutation=true) const
Get the uniform cubature weights.
ConstArray_CellIPDim getSurfaceNormals(const bool cache=true, const bool force=false) const
Get the surface normals.
const int & getOrder() const
Get order of integrator.