experimental/linear2d_diffusion_pce.cpp
// @HEADER
// ***********************************************************************
//
// Stokhos Package
// Copyright (2009) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Eric T. Phipps (etphipp@sandia.gov).
//
// ***********************************************************************
// @HEADER

// Stokhos Stochastic Galerkin
#include "Stokhos_Epetra.hpp"
#include "Stokhos_Sacado.hpp"
#include "Stokhos_Ifpack2.hpp"

// Class implementing our problem
#include "twoD_diffusion_problem_tpetra.hpp"  // (assumed header name; the include was not recoverable from the listing)

// Epetra communicator
#ifdef HAVE_MPI
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif

// solver
#include "Ifpack2_Factory.hpp"
#include "BelosLinearProblem.hpp"
#include "kokkos_pce_specializations.hpp"
#include "BelosPseudoBlockCGSolMgr.hpp"
#include "BelosPseudoBlockGmresSolMgr.hpp"
#include "MatrixMarket_Tpetra.hpp"
#include "BelosBlockGmresSolMgr.hpp"

// Utilities
#include "Teuchos_TimeMonitor.hpp"
#include "Teuchos_CommandLineProcessor.hpp"

// Scalar types
#include "linear2d_diffusion_scalar_types.hpp"  // defines Scalar, LocalOrdinal, GlobalOrdinal (assumed header name)

// MueLu includes
#include "Stokhos_MueLu.hpp"
#include "Stokhos_MueLu_QR_Interface_decl.hpp"
#include "Stokhos_MueLu_QR_Interface_def.hpp"
#include "MueLu_SmootherFactory.hpp"
#include "MueLu_TrilinosSmoother.hpp"
typedef KokkosClassic::DefaultNode::DefaultNodeType Node;
//#include <MueLu_UseShortNames.hpp>

#include <BelosXpetraAdapter.hpp> // => This header defines Belos::XpetraOp
#include <BelosMueLuAdapter.hpp>  // => This header defines Belos::MueLuOp

#include "Xpetra_MultiVectorFactory.hpp"
#include "Xpetra_Matrix.hpp"
#include "Xpetra_Map.hpp"
#include "MueLu_Level.hpp"
#include "MueLu_CoupledAggregationFactory.hpp"
#include "MueLu_SaPFactory.hpp"

// Random field types
enum SG_RF { UNIFORM, LOGNORMAL };
const int num_sg_rf = 2;
const SG_RF sg_rf_values[] = { UNIFORM, LOGNORMAL };
const char *sg_rf_names[] = { "Uniform", "Log-Normal" };

// Krylov methods
enum Krylov_Method { GMRES, CG };
const int num_krylov_method = 2;
const Krylov_Method krylov_method_values[] = { GMRES, CG };
const char *krylov_method_names[] = { "GMRES", "CG" };

// Multigrid preconditioning
enum Multigrid_Smoother { CHEBYSHEV, SGS };
const int num_multigrid_smoother = 2;
const Multigrid_Smoother multigrid_smoother_values[] = { CHEBYSHEV, SGS };
const char *multigrid_smoother_names[] = { "Chebyshev", "SGS" };

// Preconditioning approaches
enum SG_Prec { NONE, MEAN, STOCHASTIC };
const int num_sg_prec = 3;
const SG_Prec sg_prec_values[] = { NONE, MEAN, STOCHASTIC };
const char *sg_prec_names[] = { "None",
                                "Mean-Based",
                                "Stochastic" };

// Stochastic division approaches
enum SG_Div { DIRECT, SPD_DIRECT, MEAN_DIV, QUAD, CGD };
const int num_sg_div = 5;
const SG_Div sg_div_values[] = { DIRECT, SPD_DIRECT, MEAN_DIV, QUAD, CGD };
const char *sg_div_names[] = { "Direct",
                               "SPD-Direct",
                               "Mean-Based",
                               "Quadrature",
                               "CG" };

// Stochastic division preconditioner approaches
enum SG_DivPrec { NO, DIAG, JACOBI, GS, SCHUR };
const int num_sg_divprec = 5;
const SG_DivPrec sg_divprec_values[] = { NO, DIAG, JACOBI, GS, SCHUR };
const char *sg_divprec_names[] = { "None",
                                   "Diag",
                                   "Jacobi",
                                   "GS",
                                   "Schur" };

// Option for Schur complement precond: full or diag D
enum Schur_option { full, diag };
const int num_schur_option = 2;
const Schur_option Schur_option_values[] = { full, diag };
const char *schur_option_names[] = { "full", "diag" };

// Full matrix or linear matrix (pb = dim + 1) used for preconditioner
enum Prec_option { whole, linear };
const int num_prec_option = 2;
const Prec_option Prec_option_values[] = { whole, linear };
const char *prec_option_names[] = { "full", "linear" };

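// Each option group above pairs an enum, a *_values[] array, and a *_names[]
// array; they are passed together to Teuchos::CommandLineProcessor::setOption
// in main() so the string given on the command line is mapped back to the
// corresponding enum value.
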
// #define _GNU_SOURCE 1
// #include <fenv.h>

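// Under the stochastic Galerkin projection, a single PCE-valued matrix entry
// a = sum_k c_k * psi_k corresponds to a dense sz x sz block B with
// B(i,j) = sum_k c_k * C(i,j,k), where C(i,j,k) = <psi_i psi_j psi_k> are the
// entries of the sparse triple-product tensor Cijk.  The helper below
// accumulates exactly that sum for one entry, writing the block into denseEntry.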
template<typename ordinal_type, typename value_type, typename Storage>
//void returnScalarAsDenseMatrix(Scalar const &inval,
void returnScalarAsDenseMatrix(
  Sacado::PCE::OrthogPoly<value_type, Storage> const & inval,
  Teuchos::RCP< Teuchos::SerialDenseMatrix<ordinal_type, value_type> > & denseEntry,
  Teuchos::RCP< Stokhos::Sparse3Tensor<ordinal_type, value_type> > const & Cijk)
{
  Stokhos::OrthogPolyApprox<ordinal_type, value_type> val = inval.getOrthogPolyApprox();
  typedef Stokhos::Sparse3Tensor<ordinal_type, value_type> Cijk_type;  // local alias for the sparse 3-tensor type
  ordinal_type pb = val.size();
  const value_type* cv = val.coeff();

  denseEntry->putScalar(0.0);
  typename Cijk_type::k_iterator k_begin = Cijk->k_begin();
  typename Cijk_type::k_iterator k_end = Cijk->k_end();
  if (pb < Cijk->num_k())
    k_end = Cijk->find_k(pb);
  value_type cijk;
  ordinal_type i,j,k;
  for (typename Cijk_type::k_iterator k_it=k_begin; k_it!=k_end; ++k_it) {
    k = index(k_it);
    for (typename Cijk_type::kj_iterator j_it = Cijk->j_begin(k_it); j_it != Cijk->j_end(k_it); ++j_it) {
      j = index(j_it);
      for (typename Cijk_type::kji_iterator i_it = Cijk->i_begin(j_it); i_it != Cijk->i_end(j_it); ++i_it) {
        i = index(i_it);
        cijk = value(i_it);
        (*denseEntry)(i,j) += cijk*cv[k];
      }
    }
  }
}

typedef Xpetra::Matrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_Matrix;
typedef Xpetra::Map<LocalOrdinal, GlobalOrdinal, Node> Xpetra_Map;

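// Debugging utility: walks every local row of a PCE-valued Xpetra matrix and,
// for each entry, prints the expanded sz x sz Galerkin block computed by
// returnScalarAsDenseMatrix.  It is used below (--print_hierarchy) to inspect
// the operators in the MueLu multigrid hierarchy.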
template<typename ordinal_type, typename value_type>
void PrintMatrix(Teuchos::FancyOStream & fos,
                 Teuchos::RCP<Xpetra_Matrix> const & A,
                 Teuchos::RCP< Stokhos::Sparse3Tensor<ordinal_type, value_type> > const & Cijk,
                 Teuchos::RCP< const Stokhos::OrthogPolyBasis<ordinal_type, value_type> > const & basis)
{
  ordinal_type sz = basis->size();
  Teuchos::RCP< Teuchos::SerialDenseMatrix<ordinal_type, value_type> > denseEntry =
    Teuchos::rcp(new Teuchos::SerialDenseMatrix<ordinal_type, value_type>(sz, sz));
  size_t maxLength = A->getNodeMaxNumRowEntries();
  size_t NumEntries;
  Scalar val;
  Teuchos::Array<ordinal_type> Indices(maxLength);
  Teuchos::Array<Scalar> Values(maxLength);
  Teuchos::RCP<const Xpetra_Map> colMap = A->getColMap();
  for (ordinal_type i = 0; i < Teuchos::as<ordinal_type>(A->getNodeNumRows()); ++i) {
    A->getLocalRowCopy(i, Indices(), Values(), NumEntries);
    fos << "++++++++++++++" << std::endl << "row " << A->getRowMap()->getGlobalElement(i) << ": ";
    fos << " col ids: ";
    for (size_t ii=0; ii<NumEntries; ++ii) fos << colMap->getGlobalElement(Indices[ii]) << " ";
    fos << std::endl << "++++++++++++++" << std::endl;
    for (size_t k=0; k< NumEntries; ++k) {
      val = Values[k];
      Teuchos::OSTab tab1(fos);
      fos << std::endl << "col " << colMap->getGlobalElement(Indices[k]) << std::endl;
      returnScalarAsDenseMatrix(val, denseEntry, Cijk);
      //TODO tab thing
      Teuchos::OSTab tab2(fos);
      denseEntry->print(fos);
    }
  }
}

int main(int argc, char *argv[]) {
  typedef double MeshScalar;
  typedef double BasisScalar;
  typedef Teuchos::ScalarTraits<Scalar>::magnitudeType magnitudeType;

  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::Array;
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::ParameterList;

// Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  LocalOrdinal MyPID;

  try {

    // Create a communicator for Epetra objects
    RCP<const Epetra_Comm> globalComm;
#ifdef HAVE_MPI
    globalComm = rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
    globalComm = rcp(new Epetra_SerialComm);
#endif
    MyPID = globalComm->MyPID();

    // Setup command line options
    Teuchos::CommandLineProcessor CLP;
    CLP.setDocString(
      "This example runs an interlaced stochastic Galerkin solver.\n");

    int n = 32;
    CLP.setOption("num_mesh", &n, "Number of mesh points in each direction");

    // multigrid specific options
    int minAggSize = 1;
    CLP.setOption("min_agg_size", &minAggSize, "multigrid aggregate size");
    Multigrid_Smoother smoother_type = CHEBYSHEV;
    CLP.setOption("smoother_type", &smoother_type,
                  num_multigrid_smoother, multigrid_smoother_values, multigrid_smoother_names,
                  "Multigrid smoother types");
    int smootherSweeps = 3;
    CLP.setOption("smoother_sweeps", &smootherSweeps, "# multigrid smoother sweeps");
    bool plainAgg=false;
    CLP.setOption("plain_aggregation", "smoothed_aggregation", &plainAgg, "multigrid prolongator smoothing");
    LocalOrdinal nsSize=1;
    CLP.setOption("nullspace_size", &nsSize, "multigrid nullspace dimension");
    int maxAMGLevels=10;
    CLP.setOption("max_multigrid_levels", &maxAMGLevels, "maximum # levels in multigrid preconditioner");

    bool printTimings=true;
    CLP.setOption("timings", "notimings", &printTimings, "print timing summary");

    bool symmetric = false;
    CLP.setOption("symmetric", "unsymmetric", &symmetric, "Symmetric discretization");

    int num_spatial_procs = -1;
    CLP.setOption("num_spatial_procs", &num_spatial_procs, "Number of spatial processors (set -1 for all available procs)");

    SG_RF randField = UNIFORM;
    CLP.setOption("rand_field", &randField,
                  num_sg_rf, sg_rf_values, sg_rf_names,
                  "Random field type");

    double mu = 0.2;
    CLP.setOption("mean", &mu, "Mean");

    double s = 0.1;
    CLP.setOption("std_dev", &s, "Standard deviation");

    int num_KL = 2;
    CLP.setOption("num_kl", &num_KL, "Number of KL terms");

    int order = 3;
    CLP.setOption("order", &order, "Polynomial order");

    bool normalize_basis = true;
    CLP.setOption("normalize", "unnormalize", &normalize_basis,
                  "Normalize PC basis");

    Krylov_Method solver_method = GMRES;
    CLP.setOption("solver_method", &solver_method,
                  num_krylov_method, krylov_method_values, krylov_method_names,
                  "Krylov solver method");

    SG_Prec prec_method = STOCHASTIC;
    CLP.setOption("prec_method", &prec_method,
                  num_sg_prec, sg_prec_values, sg_prec_names,
                  "Preconditioner method");

    SG_Div division_method = DIRECT;
    CLP.setOption("division_method", &division_method,
                  num_sg_div, sg_div_values, sg_div_names,
                  "Stochastic division method");

    SG_DivPrec divprec_method = NO;
    CLP.setOption("divprec_method", &divprec_method,
                  num_sg_divprec, sg_divprec_values, sg_divprec_names,
                  "Preconditioner for division method");
    Schur_option schur_option = diag;
    CLP.setOption("schur_option", &schur_option,
                  num_schur_option, Schur_option_values, schur_option_names,
                  "Schur option");
    Prec_option prec_option = whole;
    CLP.setOption("prec_option", &prec_option,
                  num_prec_option, Prec_option_values, prec_option_names,
                  "Prec option");

    double solver_tol = 1e-12;
    CLP.setOption("solver_tol", &solver_tol, "Outer solver tolerance");

    double div_tol = 1e-6;
    CLP.setOption("div_tol", &div_tol, "Tolerance in Iterative Solver");

    int prec_level = 1;
    CLP.setOption("prec_level", &prec_level, "Level in Schur Complement Prec 0->Solve A0u0=g0 with division; 1->Form 1x1 Schur Complement");

    int max_it_div = 50;
    CLP.setOption("max_it_div", &max_it_div, "Maximum # of Iterations in Iterative Solver for Division");

    bool equilibrate = true; //JJH 8/26/12 changing to true to match ETP example
    CLP.setOption("equilibrate", "noequilibrate", &equilibrate,
                  "Equilibrate the linear system");

    bool printHierarchy = false;
    CLP.setOption("print_hierarchy", "noprint_Hierarchy", &printHierarchy,
                  "Print matrices in multigrid hierarchy");

    CLP.parse( argc, argv );

    if (MyPID == 0) {
      std::cout << "Summary of command line options:" << std::endl
                << "\tnum_mesh             = " << n << std::endl
                << "\tsymmetric            = " << symmetric << std::endl
                << "\tnum_spatial_procs    = " << num_spatial_procs << std::endl
                << "\trand_field           = " << sg_rf_names[randField]
                << std::endl
                << "\tmean                 = " << mu << std::endl
                << "\tstd_dev              = " << s << std::endl
                << "\tnum_kl               = " << num_KL << std::endl
                << "\torder                = " << order << std::endl
                << "\tnormalize_basis      = " << normalize_basis << std::endl
                << "\tsolver_method        = " << krylov_method_names[solver_method] << std::endl
                << "\tprec_method          = " << sg_prec_names[prec_method] << std::endl
                << "\tdivision_method      = " << sg_div_names[division_method] << std::endl
                << "\tdiv_tol              = " << div_tol << std::endl
                << "\tdiv_prec             = " << sg_divprec_names[divprec_method] << std::endl
                << "\tprec_level           = " << prec_level << std::endl
                << "\tmax_it_div           = " << max_it_div << std::endl
                << "\t~~~ multigrid options ~~~" << std::endl
                << "\tsmoother_type        = " << multigrid_smoother_names[smoother_type] << std::endl
                << "\tsmoother_sweeps      = " << smootherSweeps << std::endl
                << "\tplain_aggregation    = " << plainAgg << std::endl
                << "\tmax_multigrid_levels = " << maxAMGLevels << std::endl
                << "\tnullspace_size       = " << nsSize << std::endl
                << "\tmin_agg_size         = " << minAggSize << std::endl;
    }
    bool nonlinear_expansion = false;
    if (randField == UNIFORM)
      nonlinear_expansion = false;
    else if (randField == LOGNORMAL)
      nonlinear_expansion = true;

    {
    TEUCHOS_FUNC_TIME_MONITOR("Total PCE Calculation Time");

      // Create Stochastic Galerkin basis and expansion
      Array< RCP<const Stokhos::OneDOrthogPolyBasis<LocalOrdinal,BasisScalar> > > bases(num_KL);
      for (LocalOrdinal i=0; i<num_KL; i++)
        if (randField == UNIFORM)
          bases[i] = rcp(new Stokhos::LegendreBasis<LocalOrdinal,BasisScalar>(order, normalize_basis));
        else if (randField == LOGNORMAL)
          bases[i] = rcp(new Stokhos::HermiteBasis<int,double>(order, normalize_basis));
      RCP<const Stokhos::CompletePolynomialBasis<LocalOrdinal,BasisScalar> > basis =
        rcp(new Stokhos::CompletePolynomialBasis<LocalOrdinal,BasisScalar>(bases));
      LocalOrdinal sz = basis->size();
      RCP<Stokhos::Sparse3Tensor<LocalOrdinal,BasisScalar> > Cijk =
        basis->computeTripleProductTensor();
      RCP<const Stokhos::Quadrature<int,double> > quad =
        rcp(new Stokhos::TensorProductQuadrature<int,double>(basis));
      RCP<ParameterList> expn_params = Teuchos::rcp(new ParameterList);
      if (division_method == MEAN_DIV) {
        expn_params->set("Division Strategy", "Mean-Based");
        expn_params->set("Use Quadrature for Division", false);
      }
      else if (division_method == DIRECT) {
        expn_params->set("Division Strategy", "Dense Direct");
        expn_params->set("Use Quadrature for Division", false);
      }
      else if (division_method == SPD_DIRECT) {
        expn_params->set("Division Strategy", "SPD Dense Direct");
        expn_params->set("Use Quadrature for Division", false);
      }
      else if (division_method == CGD) {
        expn_params->set("Division Strategy", "CG");
        expn_params->set("Use Quadrature for Division", false);
      }
      else if (division_method == QUAD) {
        expn_params->set("Use Quadrature for Division", true);
      }

      if (divprec_method == NO)
        expn_params->set("Prec Strategy", "None");
      else if (divprec_method == DIAG)
        expn_params->set("Prec Strategy", "Diag");
      else if (divprec_method == JACOBI)
        expn_params->set("Prec Strategy", "Jacobi");
      else if (divprec_method == GS)
        expn_params->set("Prec Strategy", "GS");
      else if (divprec_method == SCHUR)
        expn_params->set("Prec Strategy", "Schur");

      if (schur_option == diag)
        expn_params->set("Schur option", "diag");
      else
        expn_params->set("Schur option", "full");
      if (prec_option == linear)
        expn_params->set("Prec option", "linear");

      if (equilibrate)
        expn_params->set("Equilibrate", 1);
      else
        expn_params->set("Equilibrate", 0);
      expn_params->set("Division Tolerance", div_tol);
      expn_params->set("prec_iter", prec_level);
      expn_params->set("max_it_div", max_it_div);

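      // The expansion object implements the PCE algebra (products, and division
      // with the strategy/preconditioner selected above) that the PCE-valued
      // Scalar type delegates to at run time.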
      RCP<Stokhos::OrthogPolyExpansion<LocalOrdinal,BasisScalar> > expansion =
        rcp(new Stokhos::QuadOrthogPolyExpansion<LocalOrdinal,BasisScalar>(
              basis, Cijk, quad, expn_params));

      if (MyPID == 0)
        std::cout << "Stochastic Galerkin expansion size = " << sz << std::endl;

      // Create stochastic parallel distribution
      ParameterList parallelParams;
      parallelParams.set("Number of Spatial Processors", num_spatial_procs);
      // parallelParams.set("Rebalance Stochastic Graph", true);
      // Teuchos::ParameterList& isorropia_params =
      //   parallelParams.sublist("Isorropia");
      // isorropia_params.set("Balance objective", "nonzeros");
      RCP<Stokhos::ParallelData> sg_parallel_data =
        rcp(new Stokhos::ParallelData(basis, Cijk, globalComm, parallelParams));
      RCP<const EpetraExt::MultiComm> sg_comm =
        sg_parallel_data->getMultiComm();
      RCP<const Epetra_Comm> app_comm =
        sg_parallel_data->getSpatialComm();

      // Create Teuchos::Comm from Epetra_Comm
      RCP< Teuchos::Comm<int> > teuchos_app_comm;
#ifdef HAVE_MPI
      RCP<const Epetra_MpiComm> app_mpi_comm =
        Teuchos::rcp_dynamic_cast<const Epetra_MpiComm>(app_comm);
      RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > raw_mpi_comm =
        Teuchos::opaqueWrapper(app_mpi_comm->Comm());
      teuchos_app_comm = rcp(new Teuchos::MpiComm<int>(raw_mpi_comm));
#else
      teuchos_app_comm = rcp(new Teuchos::SerialComm<int>());
#endif

      // Create application
      typedef twoD_diffusion_problem<Scalar,MeshScalar,BasisScalar,LocalOrdinal,GlobalOrdinal,Node> problem_type;  // (typedef reconstructed; template-argument order assumed)
      RCP<problem_type> model =
        rcp(new problem_type(teuchos_app_comm, n, num_KL, s, mu,
                             nonlinear_expansion, symmetric));

      // Create vectors and operators
      typedef problem_type::Tpetra_Vector Tpetra_Vector;
      typedef problem_type::Tpetra_CrsMatrix Tpetra_CrsMatrix;
      typedef Tpetra::MatrixMarket::Writer<Tpetra_CrsMatrix> Writer;
      //Xpetra matrices
      typedef Xpetra::CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_CrsMatrix;
      typedef Xpetra::MultiVector<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_MultiVector;
      typedef Xpetra::MultiVectorFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_MultiVectorFactory;
      typedef Xpetra::TpetraCrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_TpetraCrsMatrix;
      typedef Xpetra::CrsMatrixWrap<Scalar, LocalOrdinal, GlobalOrdinal, Node> Xpetra_CrsMatrixWrap;
      typedef Belos::MueLuOp<Scalar, LocalOrdinal, GlobalOrdinal, Node> Belos_MueLuOperator;
      //MueLu typedefs
      typedef MueLu::Hierarchy<Scalar, LocalOrdinal, GlobalOrdinal, Node> MueLu_Hierarchy;
      typedef MueLu::SmootherPrototype<Scalar,LocalOrdinal,GlobalOrdinal,Node> SmootherPrototype;
      typedef MueLu::TrilinosSmoother<Scalar,LocalOrdinal,GlobalOrdinal,Node> TrilinosSmoother;
      typedef MueLu::SmootherFactory<Scalar,LocalOrdinal,GlobalOrdinal,Node> SmootherFactory;
      typedef MueLu::FactoryManager<Scalar,LocalOrdinal,GlobalOrdinal,Node> FactoryManager;

      //feenableexcept(FE_ALL_EXCEPT);
      //feenableexcept(FE_INVALID | FE_DIVBYZERO);

      RCP<Tpetra_Vector> p = Tpetra::createVector<Scalar>(model->get_p_map(0));
      RCP<Tpetra_Vector> x = Tpetra::createVector<Scalar>(model->get_x_map());
      x->putScalar(0.0);
      RCP<Tpetra_Vector> f = Tpetra::createVector<Scalar>(model->get_f_map());
      RCP<Tpetra_Vector> dx = Tpetra::createVector<Scalar>(model->get_x_map());
      RCP<Tpetra_CrsMatrix> J = model->create_W();
      RCP<Tpetra_CrsMatrix> J0;
      if (prec_method == MEAN)
        J0 = model->create_W();

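      // p holds the KL random variables as PCE scalars: after resetting each
      // entry to the chosen expansion, p_i is given a single linear term in the
      // i-th stochastic dimension, scaled so that p_i evaluates to 1 at the
      // all-ones reference point used for basis_vals.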
      // Set PCE expansion of p
      p->putScalar(0.0);
      ArrayRCP<Scalar> p_view = p->get1dViewNonConst();
      for (ArrayRCP<Scalar>::size_type i=0; i<p_view.size(); i++) {
        p_view[i].reset(expansion);
        p_view[i].copyForWrite();
      }
      Array<double> point(num_KL, 1.0);
      Array<double> basis_vals(sz);
      basis->evaluateBases(point, basis_vals);
      if (order > 0) {
        for (int i=0; i<num_KL; i++) {
          p_view[i].term(i,1) = 1.0 / basis_vals[i+1];
        }
      }

      // Create preconditioner
      typedef Ifpack2::Preconditioner<Scalar,LocalOrdinal,GlobalOrdinal,Node> Tprec;
      RCP<Belos_MueLuOperator> M;
      RCP<MueLu_Hierarchy> H;
      RCP<Xpetra_CrsMatrix> xcrsJ = rcp(new Xpetra_TpetraCrsMatrix(J));
      RCP<Xpetra_Matrix> xopJ = rcp(new Xpetra_CrsMatrixWrap(xcrsJ));
      RCP<Xpetra_Matrix> xopJ0;
      if (prec_method != NONE) {
        ParameterList precParams;
        std::string prec_name = "RILUK";
        precParams.set("fact: iluk level-of-fill", 1);
        precParams.set("fact: iluk level-of-overlap", 0);
        //Ifpack2::Factory factory;
        if (prec_method == MEAN) {
          RCP<Xpetra_CrsMatrix> xcrsJ0 = rcp(new Xpetra_TpetraCrsMatrix(J0));
          xopJ0 = rcp(new Xpetra_CrsMatrixWrap(xcrsJ0));
          //M = factory.create<Tpetra_CrsMatrix>(prec_name, J0);
        } else if (prec_method == STOCHASTIC) {
          xopJ0 = xopJ;
          //M = factory.create<Tpetra_CrsMatrix>(prec_name, J);
        }
        H = rcp(new MueLu_Hierarchy(xopJ0));
        M = rcp(new Belos_MueLuOperator(H));
        //M->setParameters(precParams);
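        // Build the multigrid nullspace: one PCE-valued vector per retained
        // coefficient, with vector j identically one in coefficient j and zero
        // elsewhere (nullspace_size == -1 requests one vector per basis term).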
        if (nsSize==-1) nsSize=sz;
        RCP<Xpetra_MultiVector> Z = Xpetra_MultiVectorFactory::Build(xcrsJ->getDomainMap(), nsSize);
        size_t n = Z->getLocalLength();
        for (LocalOrdinal j=0; j<nsSize; ++j) {
          ArrayRCP<Scalar> col = Z->getDataNonConst(j);
          for (size_t i=0; i<n; ++i) {
            col[i].reset(expansion);
            col[i].copyForWrite();
            col[i].fastAccessCoeff(j) = 1.0;
          }
        }
        H->GetLevel(0)->Set("Nullspace", Z);
        //RCP<Teuchos::FancyOStream> fos = Teuchos::fancyOStream(Teuchos::rcpFromRef(std::cout));
        //fos->setOutputToRootOnly(-1);
        //Z->describe(*fos);
      }

      // Evaluate model
      model->computeResidual(*x, *p, *f);
      model->computeJacobian(*x, *p, *J);

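      // The mean-based preconditioner uses only the mean (coefficient 0) of
      // each PCE entry of J; the loop below copies those coefficients into the
      // deterministic matrix J0 created earlier.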
      // Compute mean for mean-based preconditioner
      if (prec_method == MEAN) {
        size_t nrows = J->getNodeNumRows();
        ArrayView<const LocalOrdinal> indices;
        ArrayView<const Scalar> values;
        J0->resumeFill();
        for (size_t i=0; i<nrows; i++) {
          J->getLocalRowView(i, indices, values);
          Array<Scalar> values0(values.size());
          for (LocalOrdinal j=0; j<values.size(); j++)
            values0[j] = values[j].coeff(0);
          J0->replaceLocalValues(i, indices, values0);
        }
        J0->fillComplete();
      }

      // compute preconditioner
      if (prec_method != NONE) {
        //M->initialize();
        //M->compute();

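        // Assemble the MueLu hierarchy by hand: a FactoryManager supplies the
        // smoother, coarse-level solver, aggregation, and (optionally undamped)
        // prolongator factories configured from the command-line options, and
        // H->Setup then builds at most maxAMGLevels levels.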
        //override MueLu defaults via factory manager
        RCP<FactoryManager> fm = rcp( new FactoryManager() );

        //smoother
        ParameterList smootherParamList;
        RCP<SmootherPrototype> smooPrototype;
        switch(smoother_type) {
          case CHEBYSHEV: {
            smootherParamList.set("chebyshev: degree", smootherSweeps);
            smootherParamList.set("chebyshev: ratio eigenvalue", (double) 20);
            Scalar minusOne=-1.0;
            smootherParamList.set("chebyshev: max eigenvalue", minusOne);
            smootherParamList.set("chebyshev: min eigenvalue", (double) 1.0);
            smootherParamList.set("chebyshev: zero starting solution", true);
            smooPrototype = rcp( new TrilinosSmoother("CHEBYSHEV", smootherParamList) );
            break;
          }

          case SGS:
          default:
            smootherParamList.set("relaxation: sweeps", smootherSweeps);
            smootherParamList.set("relaxation: type", "Symmetric Gauss-Seidel");
            smooPrototype = rcp( new TrilinosSmoother("RELAXATION", smootherParamList) );
            break;
        }

        RCP<SmootherFactory> smooFact = rcp( new SmootherFactory(smooPrototype) );
        fm->SetFactory("Smoother", smooFact);

        // coarse level solve
        // TODO until KLU in Amesos2 is fully templated, use incomplete factorization as coarsest level solve
        ParameterList coarseParamList;
        coarseParamList.set("fact: level-of-fill", 0);
        RCP<SmootherPrototype> coarsePrototype = rcp( new TrilinosSmoother("ILUT", coarseParamList) );
        //RCP<SmootherPrototype> coarsePrototype = rcp( new TrilinosSmoother("RILUK", coarseParamList) );
        RCP<SmootherFactory> coarseSolverFact = rcp( new SmootherFactory(coarsePrototype, Teuchos::null) );
        fm->SetFactory("CoarseSolver", coarseSolverFact);
        //fm->SetFactory("CoarseSolver", smooFact);

        //allow for larger aggregates
        typedef MueLu::CoupledAggregationFactory<LocalOrdinal,GlobalOrdinal,Node>
                MueLu_CoupledAggregationFactory;
        RCP<MueLu_CoupledAggregationFactory> aggFact = rcp(new MueLu_CoupledAggregationFactory());
        aggFact->SetMinNodesPerAggregate(minAggSize);
        fm->SetFactory("Aggregates", aggFact);

        //turn off damping
        typedef MueLu::SaPFactory<Scalar,LocalOrdinal,GlobalOrdinal,Node> MueLu_SaPFactory;
        if (plainAgg) {
          RCP<MueLu_SaPFactory> sapFactory = rcp(new MueLu_SaPFactory);
          sapFactory->SetDampingFactor( (Scalar) 0.0 );
          fm->SetFactory("P", sapFactory);
        }

        H->Setup(*fm, 0, maxAMGLevels); //start at level 0, at most maxAMGLevels levels

        if (printHierarchy)
        {
          //FIXME #levels
          RCP<Teuchos::FancyOStream> fos = Teuchos::fancyOStream(Teuchos::rcpFromRef(std::cout));
          int numLevels = H->GetNumLevels();
          for (int i=0; i<numLevels; ++i) {
            RCP<Xpetra_Matrix> A = H->GetLevel(i)->Get<RCP<Xpetra_Matrix> >("A");
            *fos << "================\n" << "matrix A, multigrid level " << i << "\n================" << std::endl;
            PrintMatrix<LocalOrdinal,BasisScalar>(*fos, A, Cijk, basis);
          }
        }
      }

      // Setup Belos solver
      RCP<ParameterList> belosParams = rcp(new ParameterList);
      belosParams->set("Flexible Gmres", false);
      belosParams->set("Num Blocks", 500);//20
      belosParams->set("Convergence Tolerance", solver_tol);
      belosParams->set("Maximum Iterations", 1000);
      belosParams->set("Verbosity", 33);
      belosParams->set("Output Style", 1);
      belosParams->set("Output Frequency", 1);
      typedef Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> MV;
      typedef Belos::OperatorT<Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> > OP;
      typedef Belos::OperatorTraits<Scalar,MV,OP> BOPT;
      typedef Belos::MultiVecTraits<Scalar,MV> BMVT;
      typedef Belos::MultiVecTraits<double,MV> BTMVT;
      typedef Belos::LinearProblem<double,MV,OP> BLinProb;
      typedef Belos::XpetraOp<Scalar, LocalOrdinal, GlobalOrdinal, Node> BXpetraOp;
      RCP<OP> belosJ = rcp(new BXpetraOp(xopJ)); // Turns an Xpetra::Matrix object into a Belos operator
      RCP< BLinProb > problem = rcp(new BLinProb(belosJ, dx, f));
      if (prec_method != NONE)
        problem->setRightPrec(M);
      problem->setProblem();
      RCP<Belos::SolverManager<double,MV,OP> > solver;
      if (solver_method == CG)
        solver = rcp(new Belos::PseudoBlockCGSolMgr<double,MV,OP>(problem, belosParams));
      else if (solver_method == GMRES)
        solver = rcp(new Belos::BlockGmresSolMgr<double,MV,OP>(problem, belosParams));

      // Print initial residual norm
      std::vector<double> norm_f(1);
      //BMVT::MvNorm(*f, norm_f);
      BTMVT::MvNorm(*f, norm_f);
      if (MyPID == 0)
        std::cout << "\nInitial residual norm = " << norm_f[0] << std::endl;

      // Solve linear system
      Belos::ReturnType ret = solver->solve();

      if (MyPID == 0) {
        if (ret == Belos::Converged)
          std::cout << "Solver converged!" << std::endl;
        else
          std::cout << "Solver failed to converge!" << std::endl;
      }

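      // dx now holds the Galerkin correction J^{-1} f, so a single update
      // x <- x - dx completes the Newton step for this linear problem.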
      // Update x
      x->update(-1.0, *dx, 1.0);
      Writer::writeDenseFile("stochastic_solution.mm", x);

      // Compute new residual & response function
      RCP<Tpetra_Vector> g = Tpetra::createVector<Scalar>(model->get_g_map(0));
      f->putScalar(0.0);
      model->computeResidual(*x, *p, *f);
      model->computeResponse(*x, *p, *g);

      // Print final residual norm
      //BMVT::MvNorm(*f, norm_f);
      BTMVT::MvNorm(*f, norm_f);
      if (MyPID == 0)
        std::cout << "\nFinal residual norm = " << norm_f[0] << std::endl;

      // Expected results for num_mesh=32
      double g_mean_exp = 0.172988;      // expected response mean
      double g_std_dev_exp = 0.0380007;  // expected response std. dev.
      double g_tol = 1e-6;               // tolerance on determining success
      if (n == 8) {
        g_mean_exp = 1.327563e-01;
        g_std_dev_exp = 2.949064e-02;
      }

      double g_mean = g->get1dView()[0].mean();
      double g_std_dev = g->get1dView()[0].standard_deviation();
      std::cout << std::endl;
      std::cout << "Response Mean      = " << g_mean << std::endl;
      std::cout << "Response Std. Dev. = " << g_std_dev << std::endl;
      bool passed = false;
      if (norm_f[0] < 1.0e-10 &&
          std::abs(g_mean - g_mean_exp) < g_tol &&
          std::abs(g_std_dev - g_std_dev_exp) < g_tol)
        passed = true;
      if (MyPID == 0) {
        if (passed)
          std::cout << "Example Passed!" << std::endl;
        else {
          std::cout << "Example Failed!" << std::endl;
          std::cout << "Expected Response Mean      = " << g_mean_exp << std::endl;
          std::cout << "Expected Response Std. Dev. = " << g_std_dev_exp << std::endl;
        }
      }

    }

    if (printTimings) {
      Teuchos::TimeMonitor::summarize(std::cout);
      Teuchos::TimeMonitor::zeroOutTimers();
    }

  }

  catch (std::exception& e) {
    std::cout << e.what() << std::endl;
  }
  catch (std::string& s) {
    std::cout << s << std::endl;
  }
  catch (char *s) {
    std::cout << s << std::endl;
  }
  catch (...) {
    std::cout << "Caught unknown exception!" << std::endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

}