AMGOperator.cpp
// @HEADER
// *****************************************************************************
// Anasazi: Block Eigensolvers Package
//
// Copyright 2004 NTESS and the Anasazi contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

// This software is a result of the research described in the report
//
// "A comparison of algorithms for modal analysis in the absence
// of a sparse direct method", P. Arbenz, R. Lehoucq, and U. Hetmaniuk,
// Sandia National Laboratories, Technical report SAND2003-1028J.
//
// It is based on the Epetra, AztecOO, and ML packages defined in the Trilinos
// framework ( http://trilinos.org/ ).

#include "AMGOperator.h"

AMGOperator::AMGOperator(const Epetra_Comm &_Comm, const Epetra_Operator *KK, int verb,
                         int nLevel, int smoother, int param,
                         int coarseSolver, int cycle,
                         int _numDofs, const Epetra_MultiVector *Z)
  : MyComm(_Comm),
    callBLAS(),
    callLAPACK(),
    K(KK),
    Prec(0),
    Q(Z),
    QtQ(0),
    numDofs(_numDofs),
    leftProjection(false),
    rightProjection(false),
    ml_handle(0),
    ml_agg(0),
    AMG_NLevels(nLevel),
    coarseLocalSize(0),
    coarseGlobalSize(0),
    ZcoarseTZcoarse(0),
    verbose(verb)
{

  preProcess((numDofs == 1) ? 1 : 50);

  // Set a smoother for the MG method
  if (smoother == 1) {
    if (numDofs == 1) {
      for (int j = 0; j < AMG_NLevels; ++j)
        ML_Gen_Smoother_MLS(ml_handle, j, ML_BOTH, 30., param);
    }
    else {
      int nBlocks, *blockIndices;
      for (int j = 0; j < AMG_NLevels; ++j) {
        ML_Gen_Blocks_Aggregates(ml_agg, j, &nBlocks, &blockIndices);
        ML_Gen_Smoother_BlockDiagScaledCheby(ml_handle, j, ML_BOTH, 30., param,
                                             nBlocks, blockIndices);
      }
    } // if (numDofs == 1)
  }

  // Note that Symmetric Gauss Seidel does not parallelize well
  if (smoother == 2) {
    // Note: ML_DEFAULT specifies the damping parameter
    //       param specifies the number of iterations
    if (numDofs == 1)
      ML_Gen_Smoother_SymGaussSeidel(ml_handle, ML_ALL_LEVELS, ML_BOTH, param, ML_DEFAULT);
    else
      ML_Gen_Smoother_BlockGaussSeidel(ml_handle, ML_ALL_LEVELS, ML_BOTH, param, ML_DEFAULT,
                                       numDofs);
  }

  if (smoother == 3) {
    // Note: ML_DEFAULT specifies the damping parameter
    //       param specifies the number of iterations
    if (numDofs == 1)
      ML_Gen_Smoother_Jacobi(ml_handle, ML_ALL_LEVELS, ML_BOTH, param, ML_DEFAULT);
    else {
      int size = ml_handle->Amat[0].getrow->Nrows;
      int *blockIndices = new int[size];
      int j;
      for (j = 0; j < size; ++j)
        blockIndices[j] = j/numDofs;
      for (j = 0; j < AMG_NLevels; ++j) {
        size = ml_handle->Amat[j].getrow->Nrows;
        ML_Gen_Smoother_VBlockJacobi(ml_handle, j, ML_BOTH, param, ML_DEFAULT,
                                     size/numDofs, blockIndices);
      }
      delete[] blockIndices;
    } // if (numDofs == 1)
  }

  setCoarseSolver_Cycle(coarseSolver, cycle);

}

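// Second constructor: identical to the one above, except that 'param' is an
// optional per-level array, so level j uses param[j] smoothing steps (the
// Chebyshev polynomial degree when smoother == 1). When param is 0, the
// defaults below fall back to a degree-2 polynomial or a single smoothing sweep.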
AMGOperator::AMGOperator(const Epetra_Comm &_Comm, const Epetra_Operator *KK, int verb,
                         int nLevel, int smoother, int *param,
                         int coarseSolver, int cycle,
                         int _numDofs, const Epetra_MultiVector *Z)
  : MyComm(_Comm),
    callBLAS(),
    callLAPACK(),
    K(KK),
    Prec(0),
    Q(Z),
    QtQ(0),
    numDofs(_numDofs),
    leftProjection(false),
    rightProjection(false),
    ml_handle(0),
    ml_agg(0),
    AMG_NLevels(nLevel),
    coarseLocalSize(0),
    coarseGlobalSize(0),
    ZcoarseTZcoarse(0),
    verbose(verb)
{

  preProcess((numDofs == 1) ? 1 : 50);

  // Set a smoother for the MG method
  if (smoother == 1) {
    if (numDofs == 1) {
      for (int j = 0; j < AMG_NLevels; ++j)
        ML_Gen_Smoother_MLS(ml_handle, j, ML_BOTH, 30., (param) ? param[j] : 2);
    }
    else {
      int nBlocks, *blockIndices;
      for (int j = 0; j < AMG_NLevels; ++j) {
        ML_Gen_Blocks_Aggregates(ml_agg, j, &nBlocks, &blockIndices);
        ML_Gen_Smoother_BlockDiagScaledCheby(ml_handle, j, ML_BOTH, 30., (param) ? param[j] : 2,
                                             nBlocks, blockIndices);
      }
    } // if (numDofs == 1)
  }

  // Note that Symmetric Gauss Seidel does not parallelize well
  if (smoother == 2) {
    // Note: ML_DEFAULT specifies the damping parameter
    //       param specifies the number of iterations
    if (numDofs == 1) {
      for (int j = 0; j < AMG_NLevels; ++j) {
        ML_Gen_Smoother_SymGaussSeidel(ml_handle, j, ML_BOTH, (param) ? param[j] : 1, ML_DEFAULT);
      }
    }
    else {
      for (int j = 0; j < AMG_NLevels; ++j) {
        ML_Gen_Smoother_BlockGaussSeidel(ml_handle, j, ML_BOTH, (param) ? param[j] : 1,
                                         ML_DEFAULT, numDofs);
      }
    }
  }

  if (smoother == 3) {
    // Note: ML_DEFAULT specifies the damping parameter
    //       param specifies the number of iterations
    if (numDofs == 1) {
      for (int j = 0; j < AMG_NLevels; ++j) {
        ML_Gen_Smoother_Jacobi(ml_handle, j, ML_BOTH, (param) ? param[j] : 1, ML_DEFAULT);
      }
    }
    else {
      int size = ml_handle->Amat[0].getrow->Nrows;
      int *blockIndices = new int[size];
      int j;
      for (j = 0; j < size; ++j)
        blockIndices[j] = j/numDofs;
      for (j = 0; j < AMG_NLevels; ++j) {
        size = ml_handle->Amat[j].getrow->Nrows;
        ML_Gen_Smoother_VBlockJacobi(ml_handle, j, ML_BOTH, (param) ? param[j] : 1,
                                     ML_DEFAULT, size/numDofs, blockIndices);
      }
      delete[] blockIndices;
    } // if (numDofs == 1)
  }

  setCoarseSolver_Cycle(coarseSolver, cycle);

}

void AMGOperator::preProcess(int maxCoarseSize) {

  // Note the constness is cast away for ML
  Epetra_RowMatrix *Krow = dynamic_cast<Epetra_RowMatrix*>(const_cast<Epetra_Operator*>(K));
  if (Krow == 0) {
    if (MyComm.MyPID() == 0) {
      cerr << endl;
      cerr << " !!! For the AMG preconditioner, the matrix must be an 'Epetra_RowMatrix' object !!!\n";
      cerr << endl;
    }
    return;
  }

  // Generate an ML multilevel preconditioner
  ML_Set_PrintLevel(verbose);
  ML_Create(&ml_handle, AMG_NLevels);

  EpetraMatrix2MLMatrix(ml_handle, 0, Krow);

  ML_Aggregate_Create(&ml_agg);
  ML_Aggregate_Set_MaxCoarseSize(ml_agg, maxCoarseSize);
  ML_Aggregate_Set_Threshold(ml_agg, 0.0);

  // Set the aggregation scheme
  if ((Q == 0) || (numDofs == 1)) {
    ML_Aggregate_Set_CoarsenScheme_Uncoupled(ml_agg);
  }
  else {
    //--------------------------------------
    //ML_Aggregate_Set_DampingFactor(ml_agg, 1.3333);
    //ML_Aggregate_Set_CoarsenScheme_METIS(ml_agg);
    //ML_Aggregate_Set_NodesPerAggr(ml_handle, ml_agg, -1, );
    //--------------------------------------
    ML_Aggregate_Set_DampingFactor(ml_agg, 1.3333);
    ML_Aggregate_Set_CoarsenScheme_MIS(ml_agg);
    //ML_Aggregate_Set_Phase3AggregateCreationAggressiveness(ml_agg, 0.1);
    //--------------------------------------
    //ML_Aggregate_Set_CoarsenScheme_Uncoupled(ml_agg);
    //--------------------------------------
    //ML_Aggregate_Set_BlockDiagScaling(ml_agg);
    //--------------------------------------
    ML_Aggregate_Set_NullSpace(ml_agg, numDofs, Q->NumVectors(), Q->Values(), Q->MyLength());
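    // The block below forms QtQ = Q^T Q (a local GEMM followed by a global
    // reduction) and overwrites it with its Cholesky factor (POTRF).
    // ApplyInverse reuses this factor, through POTRS, to apply the projector
    //   P = I - Q (Q^T Q)^{-1} Q^T
    // whenever the left or right projection is enabled.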
    QtQ = new double[Q->NumVectors()*Q->NumVectors()];
    double *work = new double[Q->NumVectors()*Q->NumVectors()];
    callBLAS.GEMM('T', 'N', Q->NumVectors(), Q->NumVectors(), Q->MyLength(),
                  1.0, Q->Values(), Q->MyLength(), Q->Values(), Q->MyLength(),
                  0.0, work, Q->NumVectors());
    MyComm.SumAll(work, QtQ, Q->NumVectors()*Q->NumVectors());
    delete[] work;
    int info = 0;
    callLAPACK.POTRF('U', Q->NumVectors(), QtQ, Q->NumVectors(), &info);
    if (info) {
      cerr << endl;
      cerr << " !!! In the AMG preconditioner, the null space vectors are linearly dependent !!!\n";
      cerr << endl;
      delete[] QtQ;
      QtQ = 0;
    }
  }

  AMG_NLevels = ML_Gen_MGHierarchy_UsingAggregation(ml_handle, 0, ML_INCREASING, ml_agg);

}

void AMGOperator::setCoarseSolver_Cycle(int coarseSolver, int cycle) {

  // Specifies the solver on the coarsest level
  if (coarseSolver > -1) {
    switch (coarseSolver) {
      case 0:
        // Use SuperLU on the coarsest level
        ML_Gen_CoarseSolverSuperLU(ml_handle, AMG_NLevels-1);
        break;
      case 1:
        // Use Aztec PCG on the coarsest level
        int options[AZ_OPTIONS_SIZE];
        double params[AZ_PARAMS_SIZE];
        int *proc_config = new int[AZ_PROC_SIZE];
        double *status = new double[AZ_STATUS_SIZE];
        AZ_defaults(options, params);
        options[AZ_solver] = AZ_cg;
        options[AZ_precond] = AZ_user_precond;
        options[AZ_scaling] = AZ_none;
        options[AZ_output] = 1;
        // if (verbose < 3)
        //   options[AZ_output] = AZ_last;
        if (verbose < 2)
          options[AZ_output] = AZ_none;
#ifdef EPETRA_MPI
        AZ_set_proc_config(proc_config, get_global_comm());
#endif
        params[AZ_tol] = 1.0e-14;
        coarseLocalSize = ml_handle->Amat[AMG_NLevels-1].invec_leng;
        MyComm.SumAll(&coarseLocalSize, &coarseGlobalSize, 1);
        if ((verbose > 0) && (MyComm.MyPID() == 0))
          cout << "\n --> Total number of coarse grid unknowns = " << coarseGlobalSize << endl;
        // Define the data for the projection
        if (Q) {
          int zCol = ml_agg->nullspace_dim;
          double *ZZ = ml_agg->nullspace_vect;
          ZcoarseTZcoarse = new double[zCol*zCol];
          double *tmp = new double[zCol*zCol];
          int info = 0;
          callBLAS.GEMM('T', 'N', zCol, zCol, coarseLocalSize, 1.0, ZZ, coarseLocalSize,
                        ZZ, coarseLocalSize, 0.0, tmp, zCol);
          MyComm.SumAll(tmp, ZcoarseTZcoarse, zCol*zCol);
          callLAPACK.POTRF('U', zCol, ZcoarseTZcoarse, zCol, &info);
          if (info) {
            cerr << endl;
            cerr << " !!! In the AMG preconditioner, for the coarse level, ";
            cerr << "the null space vectors are linearly dependent !!!\n";
            cerr << endl;
          }
#ifndef MACOSX
          singularCoarse::setNullSpace(ml_agg->nullspace_vect, coarseLocalSize,
                                       ml_agg->nullspace_dim, ZcoarseTZcoarse, &MyComm);
#endif
          delete[] tmp;
        }
        options[AZ_max_iter] = coarseGlobalSize;
        // // Subtract off the rigid body modes
        // options[AZ_orth_kvecs] = AZ_TRUE;
        // options[AZ_keep_kvecs] = (Q) ? coarseGlobalSize - Q->NumVectors() : coarseGlobalSize;
        // options[AZ_apply_kvecs] = AZ_APPLY;
#ifdef MACOSX
        ML_Gen_SmootherAztec(ml_handle, AMG_NLevels-1, options, params, proc_config, status,
                             options[AZ_max_iter], ML_PRESMOOTHER, NULL);
#else
        ML_Gen_SmootherAztec(ml_handle, AMG_NLevels-1, options, params, proc_config, status,
                             options[AZ_max_iter], ML_PRESMOOTHER,
                             (Q) ? singularCoarse::projection : NULL);
#endif
        break;
    }
  }

  // Type of cycle
  switch (cycle) {
    default:
    case 0:
      ML_Gen_Solver(ml_handle, ML_MGV, 0, AMG_NLevels-1);
      break;
    case 1:
      ML_Gen_Solver(ml_handle, ML_MGW, 0, AMG_NLevels-1);
      break;
  }

  Prec = new ML_Epetra::MultiLevelOperator(ml_handle, MyComm, K->OperatorDomainMap(),
                                           K->OperatorDomainMap());

  //------------------------------------
  // This definition of Prec calls the old class 'Epetra_ML_Operator'
  //Prec = new Epetra_ML_Operator(ml_handle, MyComm, K->OperatorDomainMap(),
  //                              K->OperatorDomainMap());
  //------------------------------------

}

AMGOperator::~AMGOperator() {

  if (Prec) {
    delete Prec;
    Prec = 0;
    ML_Destroy(&ml_handle);
    ML_Aggregate_Destroy(&ml_agg);
  }

  if (QtQ) {
    delete[] QtQ;
    QtQ = 0;
  }

  if (ZcoarseTZcoarse) {
    delete[] ZcoarseTZcoarse;
    ZcoarseTZcoarse = 0;
#ifndef MACOSX
    singularCoarse::setNullSpace(0, 0, 0, 0, 0);
#endif
  }

}


int AMGOperator::Apply(const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {

  return K->Apply(X, Y);

}

int AMGOperator::ApplyInverse(const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {

  int xcol = X.NumVectors();

  if (Y.NumVectors() < xcol)
    return -1;

  double *valX = (rightProjection == true) ? new double[xcol*X.MyLength()] : X.Values();

  Epetra_MultiVector X2(View, X.Map(), valX, X.MyLength(), xcol);

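  // Right projection: overwrite the work copy with
  //   X2 = (I - Q (Q^T Q)^{-1} Q^T) X,
  // using the Cholesky factor of Q^T Q stored in QtQ (POTRS solves the small system
  // after the distributed inner products Q^T X are summed across processors).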
  if ((rightProjection == true) && (Q)) {
    int qcol = Q->NumVectors();
    double *work = new double[2*qcol*xcol];
    memcpy(X2.Values(), X.Values(), xcol*X.MyLength()*sizeof(double));
    callBLAS.GEMM('T', 'N', qcol, xcol, Q->MyLength(), 1.0, Q->Values(), Q->MyLength(),
                  X2.Values(), X2.MyLength(), 0.0, work + qcol*xcol, qcol);
    MyComm.SumAll(work + qcol*xcol, work, qcol*xcol);
    int info = 0;
    callLAPACK.POTRS('U', qcol, xcol, QtQ, qcol, work, qcol, &info);
    callBLAS.GEMM('N', 'N', X2.MyLength(), xcol, qcol, -1.0, Q->Values(), Q->MyLength(),
                  work, qcol, 1.0, X2.Values(), X2.MyLength());
    delete[] work;
  }

  Prec->ApplyInverse(X2, Y);

  if (rightProjection == true)
    delete[] valX;

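  // Left projection: apply the same projector to the preconditioned vectors,
  //   Y <- (I - Q (Q^T Q)^{-1} Q^T) Y.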
  if ((leftProjection == true) && (Q)) {
    int qcol = Q->NumVectors();
    double *work = new double[2*qcol*xcol];
    callBLAS.GEMM('T', 'N', qcol, xcol, Q->MyLength(), 1.0, Q->Values(), Q->MyLength(),
                  Y.Values(), Y.MyLength(), 0.0, work + qcol*xcol, qcol);
    MyComm.SumAll(work + qcol*xcol, work, qcol*xcol);
    int info = 0;
    callLAPACK.POTRS('U', qcol, xcol, QtQ, qcol, work, qcol, &info);
    callBLAS.GEMM('N', 'N', Y.MyLength(), xcol, qcol, -1.0, Q->Values(), Q->MyLength(),
                  work, qcol, 1.0, Y.Values(), Y.MyLength());
    delete[] work;
  }

  return 0;

}
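
The file above defines the operator but does not show a driver. The sketch below is not part of AMGOperator.cpp; it is a minimal usage example, assuming the accompanying AMGOperator.h declares the class exactly as defined here and that assembleStiffness is a hypothetical stand-in for the user's own Epetra_CrsMatrix assembly.

#include "AMGOperator.h"
#include "Epetra_SerialComm.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_MultiVector.h"

// Placeholder for the user's own matrix assembly (hypothetical).
Epetra_CrsMatrix* assembleStiffness(const Epetra_Comm &Comm);

int main(int argc, char *argv[]) {
  Epetra_SerialComm Comm;
  Epetra_CrsMatrix *K = assembleStiffness(Comm);

  // Scalar problem (numDofs = 1): degree-2 Chebyshev (MLS) smoother, at most
  // 10 levels, SuperLU on the coarsest level (coarseSolver = 0), V-cycle
  // (cycle = 0), and no null-space vectors (Z = 0).
  AMGOperator M(Comm, K, 0 /*verb*/, 10 /*nLevel*/, 1 /*smoother*/, 2 /*param*/,
                0 /*coarseSolver*/, 0 /*cycle*/, 1 /*numDofs*/, 0 /*Z*/);

  Epetra_MultiVector X(K->OperatorDomainMap(), 5);
  Epetra_MultiVector Y(K->OperatorDomainMap(), 5);
  X.Random();
  M.ApplyInverse(X, Y);   // Y holds one AMG V-cycle applied to each column of X

  delete K;
  return 0;
}

In practice, ApplyInverse is the method an eigensolver or Krylov iteration would call to precondition a block of vectors, while Apply simply forwards to the underlying operator K.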