ROL_CompositeStepSQP.hpp
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_COMPOSITESTEPSQP_H
45 #define ROL_COMPOSITESTEPSQP_H
46 
47 #include "ROL_Types.hpp"
48 #include "ROL_Step.hpp"
49 #include <sstream>
50 #include <iomanip>
51 #include "Teuchos_SerialDenseMatrix.hpp"
52 #include "Teuchos_LAPACK.hpp"
53 
/** \class ROL::CompositeStepSQP
    \brief Implements the computation of optimization steps with
           composite-step trust-region SQP methods.
*/

namespace ROL {
61 
62 template <class Real>
63 class CompositeStepSQP : public Step<Real> {
64 private:
65 
66  // Vectors used for cloning.
67  Teuchos::RCP<Vector<Real> > xvec_;
68  Teuchos::RCP<Vector<Real> > gvec_;
69  Teuchos::RCP<Vector<Real> > cvec_;
70  Teuchos::RCP<Vector<Real> > lvec_;
71 
72  // Diagnostic return flags for subalgorithms.
73  int flagCG_;
74  int flagAC_;
75  int iterCG_;
76 
  // Stopping conditions.
  int  maxiterCG_;
  Real tolCG_;
80 
81  // Tolerances and stopping conditions for subalgorithms.
82  Real lmhtol_;
83  Real qntol_;
84  Real pgtol_;
85  Real projtol_;
86  Real tangtol_;
87  Real tntmax_;
88 
89  // Trust-region parameters.
90  Real zeta_;
91  Real Delta_;
92  Real penalty_;
93  Real eta_;
94 
95  Real ared_;
96  Real pred_;
97  Real snorm_;
98  Real nnorm_;
99  Real tnorm_;
100 
101  // Output flags.
102  bool infoQN_;
103  bool infoLM_;
104  bool infoTS_;
105  bool infoAC_;
106  bool infoLS_;
107  bool infoALL_;
108 
  // Performance summary.
  int totalIterCG_;
  int totalProj_;
  int totalNegCurv_;
  int totalRef_;
  int totalCallLS_;
  int totalIterLS_;

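  // Sign function: returns +1 if val > 0, -1 if val < 0, and 0 if val == 0.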
117  template <typename T> int sgn(T val) {
118  return (T(0) < val) - (val < T(0));
119  }
120 
121  void printInfoLS(std::vector<Real> res) {
122  if (infoLS_) {
123  std::stringstream hist;
124  hist << std::scientific << std::setprecision(8);
125  hist << "\n Augmented System Solver:\n";
126  hist << " True Residual\n";
127  for (unsigned j=0; j<res.size(); j++) {
128  hist << " " << std::left << std::setw(14) << res[j] << "\n";
129  }
130  hist << "\n";
131  std::cout << hist.str();
132  }
133  }
134 
135 public:
136 
137  virtual ~CompositeStepSQP() {}
138 
139  CompositeStepSQP( Teuchos::ParameterList & parlist ) : Step<Real>() {
140  //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
141  flagCG_ = 0;
142  flagAC_ = 0;
143  iterCG_ = 0;
144 
145  Real nominal_tol = parlist.get("Nominal SQP Optimality Solver Tolerance", 1e-3);
146 
147  maxiterCG_ = parlist.get("Maximum Number of Krylov Iterations",20);
148  tolCG_ = parlist.get("Absolute Krylov Tolerance",1e-2);
149 
150  lmhtol_ = nominal_tol;
151  qntol_ = nominal_tol;
152  pgtol_ = nominal_tol;
153  projtol_ = nominal_tol;
154  tangtol_ = nominal_tol;
155  tntmax_ = 2.0;
156 
157  zeta_ = 0.8;
158  Delta_ = 1e2;
159  penalty_ = 1.0;
160  eta_ = 1e-8;
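  // Note: zeta_ is the fraction of the trust-region radius allotted to the
  // quasi-normal (feasibility) step, Delta_ is the initial trust-region radius,
  // penalty_ is the initial merit-function penalty parameter, and eta_ is the
  // acceptance threshold for the ratio ared/pred.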
161 
162  snorm_ = 0.0;
163  nnorm_ = 0.0;
164  tnorm_ = 0.0;
165 
166  infoQN_ = false;
167  infoLM_ = false;
168  infoTS_ = false;
169  infoAC_ = false;
170  infoLS_ = false;
171  infoALL_ = false;
172  infoQN_ = infoQN_ || infoALL_;
173  infoLM_ = infoLM_ || infoALL_;
174  infoTS_ = infoTS_ || infoALL_;
175  infoAC_ = infoAC_ || infoALL_;
176  infoLS_ = infoLS_ || infoALL_;
177 
178  totalIterCG_ = 0;
179  totalProj_ = 0;
180  totalNegCurv_ = 0;
181  totalRef_ = 0;
182  totalCallLS_ = 0;
183  totalIterLS_ = 0;
184  }
185 
  /** \brief Initialize step.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, EqualityConstraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
191  //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
192 
193  xvec_ = x.clone();
194  gvec_ = g.clone();
195  lvec_ = l.clone();
196  cvec_ = c.clone();
197 
198  Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
199  Teuchos::RCP<Vector<Real> > gl = gvec_->clone();
200 
201  algo_state.nfval = 0;
202  algo_state.ncval = 0;
203  algo_state.ngrad = 0;
204 
205  Real zerotol = 0.0;
206 
207  // Update objective and constraint.
208  obj.update(x,true,algo_state.iter);
209  algo_state.value = obj.value(x, zerotol);
210  algo_state.nfval++;
211  con.update(x,true,algo_state.iter);
212  con.value(*cvec_, x, zerotol);
213  algo_state.cnorm = cvec_->norm();
214  algo_state.ncval++;
215  obj.gradient(*gvec_, x, zerotol);
216 
217  // Compute gradient of Lagrangian at new multiplier guess.
218  computeLagrangeMultiplier(l, x, *gvec_, con);
219  con.applyAdjointJacobian(*ajl, l, x, zerotol);
220  gl->set(*gvec_); gl->plus(*ajl);
221  algo_state.ngrad++;
222  algo_state.gnorm = gl->norm();
223  }
224 
  /** \brief Compute step.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, EqualityConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
230  //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
231  Real zerotol = 0.0;
232  Real f = 0.0;
233  Teuchos::RCP<Vector<Real> > n = xvec_->clone();
234  Teuchos::RCP<Vector<Real> > c = cvec_->clone();
235  Teuchos::RCP<Vector<Real> > t = xvec_->clone();
236  Teuchos::RCP<Vector<Real> > tCP = xvec_->clone();
237  Teuchos::RCP<Vector<Real> > g = gvec_->clone();
238  Teuchos::RCP<Vector<Real> > gf = gvec_->clone();
239  Teuchos::RCP<Vector<Real> > Wg = xvec_->clone();
240  Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
241 
242  Real f_new = 0.0;
243  Teuchos::RCP<Vector<Real> > l_new = lvec_->clone();
244  Teuchos::RCP<Vector<Real> > c_new = cvec_->clone();
245  Teuchos::RCP<Vector<Real> > g_new = gvec_->clone();
246  Teuchos::RCP<Vector<Real> > gf_new = gvec_->clone();
247 
248  // Evaluate objective ... should have been stored.
249  f = obj.value(x, zerotol);
250  algo_state.nfval++;
251  // Compute gradient of objective ... should have been stored.
252  obj.gradient(*gf, x, zerotol);
253  // Evaluate constraint ... should have been stored.
254  con.value(*c, x, zerotol);
255 
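  // The trial step is the composite step s = n + t: the quasi-normal step n
  // reduces the linearized infeasibility within the radius zeta_*Delta_, and the
  // tangential step t reduces a quadratic model of the Lagrangian while
  // (approximately) preserving linearized feasibility, within the radius Delta_.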
256  // Compute quasi-normal step.
257  computeQuasinormalStep(*n, *c, x, zeta_*Delta_, con);
258 
259  // Compute gradient of Lagrangian ... should have been stored.
260  con.applyAdjointJacobian(*ajl, l, x, zerotol);
261  g->set(*gf);
262  g->plus(*ajl);
263  algo_state.ngrad++;
264 
265  // Solve tangential subproblem.
266  solveTangentialSubproblem(*t, *tCP, *Wg, x, *g, *n, l, Delta_, obj, con);
268 
269  // Check acceptance of subproblem solutions, adjust merit function penalty parameter, ensure global convergence.
270  accept(s, *n, *t, f_new, *c_new, *gf_new, *l_new, *g_new, x, l, f, *gf, *c, *g, *tCP, *Wg, obj, con, algo_state);
271  }
272 
  /** \brief Update step, if successful.
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, EqualityConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
278  //Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
279 
280  Real zero = 0.0;
281  Real half = 0.5;
282  Real zerotol = zero;
283  Real ratio = zero;
284 
285  Teuchos::RCP<Vector<Real> > g = gvec_->clone();
286  Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
287  Teuchos::RCP<Vector<Real> > gl = gvec_->clone();
288  Teuchos::RCP<Vector<Real> > c = cvec_->clone();
289 
290  // Determine if the step gives sufficient reduction in the merit function,
291  // update the trust-region radius.
292  ratio = ared_/pred_;
293  if ((std::abs(ared_) < 1e-12) && std::abs(pred_) < 1e-12) {
294  ratio = 1.0;
295  }
296  if (ratio >= eta_) {
297  x.plus(s);
298  if (ratio >= 0.9) {
299  Delta_ = std::max(7.0*snorm_, Delta_);
300  }
301  else if (ratio >= 0.8) {
302  Delta_ = std::max(2.0*snorm_, Delta_);
303  }
304  obj.update(x,true,algo_state.iter);
305  con.update(x,true,algo_state.iter);
306  flagAC_ = 1;
307  }
308  else {
309  Delta_ = half*std::max(nnorm_, tnorm_);
310  obj.update(x,false,algo_state.iter);
311  con.update(x,false,algo_state.iter);
312  flagAC_ = 0;
313  } // if (ratio >= eta)
314 
315  Real val = obj.value(x, zerotol);
316  algo_state.nfval++;
317  obj.gradient(*g, x, zerotol);
318  computeLagrangeMultiplier(l, x, *g, con);
319  con.applyAdjointJacobian(*ajl, l, x, zerotol);
320  gl->set(*g); gl->plus(*ajl);
321  algo_state.ngrad++;
322  con.value(*c, x, zerotol);
323 
324  algo_state.value = val;
325  algo_state.gnorm = gl->norm();
326  algo_state.cnorm = c->norm();
327  algo_state.iter++;
328  algo_state.snorm = snorm_;
329 
330  // Update algorithm state
331  //(algo_state.iterateVec)->set(x);
332  }
333 
  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {}
340 
  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {}
347 
  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
351  std::stringstream hist;
352  hist << " ";
353  hist << std::setw(6) << std::left << "iter";
354  hist << std::setw(15) << std::left << "fval";
355  hist << std::setw(15) << std::left << "cnorm";
356  hist << std::setw(15) << std::left << "gLnorm";
357  hist << std::setw(15) << std::left << "snorm";
358  hist << std::setw(10) << std::left << "delta";
359  hist << std::setw(10) << std::left << "nnorm";
360  hist << std::setw(10) << std::left << "tnorm";
361  hist << std::setw(8) << std::left << "#fval";
362  hist << std::setw(8) << std::left << "#grad";
363  hist << std::setw(8) << std::left << "iterCG";
364  hist << std::setw(8) << std::left << "flagCG";
365  hist << std::setw(8) << std::left << "accept";
366  hist << std::setw(8) << std::left << "linsys";
367  hist << "\n";
368  return hist.str();
369  }
370 
371  std::string printName( void ) const {
372  std::stringstream hist;
373  hist << "\n" << " Composite-step trust-region SQP solver";
374  hist << "\n";
375  return hist.str();
376  }
377 
  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> & algo_state, bool pHeader = false ) const {
381  //const Teuchos::RCP<const StepState<Real> >& step_state = Step<Real>::getStepState();
382 
383  std::stringstream hist;
384  hist << std::scientific << std::setprecision(6);
385  if ( algo_state.iter == 0 ) {
386  hist << printName();
387  }
388  if ( pHeader ) {
389  hist << printHeader();
390  }
391  if ( algo_state.iter == 0 ) {
392  hist << " ";
393  hist << std::setw(6) << std::left << algo_state.iter;
394  hist << std::setw(15) << std::left << algo_state.value;
395  hist << std::setw(15) << std::left << algo_state.cnorm;
396  hist << std::setw(15) << std::left << algo_state.gnorm;
397  hist << "\n";
398  }
399  else {
400  hist << " ";
401  hist << std::setw(6) << std::left << algo_state.iter;
402  hist << std::setw(15) << std::left << algo_state.value;
403  hist << std::setw(15) << std::left << algo_state.cnorm;
404  hist << std::setw(15) << std::left << algo_state.gnorm;
405  hist << std::setw(15) << std::left << algo_state.snorm;
406  hist << std::scientific << std::setprecision(2);
407  hist << std::setw(10) << std::left << Delta_;
408  hist << std::setw(10) << std::left << nnorm_;
409  hist << std::setw(10) << std::left << tnorm_;
410  hist << std::scientific << std::setprecision(6);
411  hist << std::setw(8) << std::left << algo_state.nfval;
412  hist << std::setw(8) << std::left << algo_state.ngrad;
413  hist << std::setw(8) << std::left << iterCG_;
414  hist << std::setw(8) << std::left << flagCG_;
415  hist << std::setw(8) << std::left << flagAC_;
416  hist << std::left << totalCallLS_ << "/" << totalIterLS_;
417  hist << "\n";
418  }
419  return hist.str();
420  }
421 
  /** \brief Compute Lagrange multipliers by solving the least-squares problem
             minimizing the gradient of the Lagrangian.
  */
  void computeLagrangeMultiplier(Vector<Real> &l, const Vector<Real> &x, const Vector<Real> &gf,
                                 EqualityConstraint<Real> &con) {
435  Real zerotol = 0.0;
436  std::vector<Real> augiters;
437 
438  if (infoLM_) {
439  std::stringstream hist;
440  hist << "\n SQP_lagrange_multiplier\n";
441  std::cout << hist.str();
442  }
443 
444  /* Apply adjoint of constraint Jacobian to current multiplier. */
445  Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
446  con.applyAdjointJacobian(*ajl, l, x, zerotol);
447 
448  /* Form right-hand side of the augmented system. */
449  Teuchos::RCP<Vector<Real> > b1 = gvec_->clone();
450  Teuchos::RCP<Vector<Real> > b2 = cvec_->clone();
451  // b1 is the negative gradient of the Lagrangian
452  b1->set(gf); b1->plus(*ajl); b1->scale(-1.0);
453  // b2 is zero
454  b2->zero();
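  /* The augmented system below has the block form
       [ I      A(x)' ] [ v1 ]   [ b1 ]
       [ A(x)   0     ] [ v2 ] = [ b2 ],
     with b1 = -(gf + A(x)'*l) and b2 = 0. Hence v2 satisfies the normal equations
     A(x)*A(x)'*v2 = A(x)*b1, i.e., l + v2 is the least-squares multiplier estimate. */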
455 
456  /* Declare left-hand side of augmented system. */
457  Teuchos::RCP<Vector<Real> > v1 = xvec_->clone();
458  Teuchos::RCP<Vector<Real> > v2 = lvec_->clone();
459 
460  /* Compute linear solver tolerance. */
461  Real b1norm = b1->norm();
462  Real tol = lmhtol_*b1norm;
463 
464  /* Solve augmented system. */
465  augiters = con.solveAugmentedSystem(*v1, *v2, *b1, *b2, x, tol);
466  totalCallLS_++;
467  totalIterLS_ = totalIterLS_ + augiters.size();
468  printInfoLS(augiters);
469 
470  /* Return updated Lagrange multiplier. */
471  // v2 is the multiplier update
472  l.plus(*v2);
473 
474  } // computeLagrangeMultiplier
475 
476 
  /** \brief Compute quasi-normal step by minimizing the norm of the linearized constraint.
  */
  void computeQuasinormalStep(Vector<Real> &n, const Vector<Real> &c, const Vector<Real> &x,
                              Real delta, EqualityConstraint<Real> &con) {
501  if (infoQN_) {
502  std::stringstream hist;
503  hist << "\n SQP_quasi-normal_step\n";
504  std::cout << hist.str();
505  }
506 
507  Real zero = 0.0;
508  Real one = 1.0;
509  Real zerotol = zero;
510  std::vector<Real> augiters;
511 
512  /* Compute Cauchy step nCP. */
513  Teuchos::RCP<Vector<Real> > nCP = xvec_->clone();
514  Teuchos::RCP<Vector<Real> > nCPdual = gvec_->clone();
515  Teuchos::RCP<Vector<Real> > nN = xvec_->clone();
516  Teuchos::RCP<Vector<Real> > ctemp = cvec_->clone();
517  Teuchos::RCP<Vector<Real> > dualc0 = lvec_->clone();
518  dualc0->set(c.dual());
519  con.applyAdjointJacobian(*nCPdual, *dualc0, x, zerotol);
520  nCP->set(nCPdual->dual());
521  con.applyJacobian(*ctemp, *nCP, x, zerotol);
522 
523  Real normsquare_ctemp = ctemp->dot(*ctemp);
524  if (normsquare_ctemp != zero) {
525  nCP->scale( -(nCP->dot(*nCP))/normsquare_ctemp );
526  }
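  /* At this point nCP is the Cauchy point of the Gauss-Newton model
     0.5*||J(x)*n + c||^2: the minimizer along the steepest-descent direction
     -J(x)'*c, with step length ||J(x)'*c||^2 / ||J(x)*J(x)'*c||^2. */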
527 
528  /* If the Cauchy step nCP is outside the trust region,
529  return the scaled Cauchy step. */
530  Real norm_nCP = nCP->norm();
531  if (norm_nCP >= delta) {
532  n.set(*nCP);
533  n.scale( delta/norm_nCP );
534  if (infoQN_) {
535  std::stringstream hist;
536  hist << " taking partial Cauchy step\n";
537  std::cout << hist.str();
538  }
539  return;
540  }
541 
542  /* Compute 'Newton' step, for example, by solving a problem
 543  related to finding the minimum-norm solution of min || c'(x_k)*s + c(x_k) ||^2. */
544  // Compute tolerance for linear solver.
545  con.applyJacobian(*ctemp, *nCP, x, zerotol);
546  ctemp->plus(c);
547  Real tol = qntol_*ctemp->norm();
548  // Form right-hand side.
549  ctemp->scale(-one);
550  nCPdual->set(nCP->dual());
551  nCPdual->scale(-one);
552  // Declare left-hand side of augmented system.
553  Teuchos::RCP<Vector<Real> > dn = xvec_->clone();
554  Teuchos::RCP<Vector<Real> > y = lvec_->clone();
555  // Solve augmented system.
556  augiters = con.solveAugmentedSystem(*dn, *y, *nCPdual, *ctemp, x, tol);
557  totalCallLS_++;
558  totalIterLS_ = totalIterLS_ + augiters.size();
559  printInfoLS(augiters);
560 
561  nN->set(*dn);
562  nN->plus(*nCP);
563 
564  /* Either take full or partial Newton step, depending on
565  the trust-region constraint. */
566  Real norm_nN = nN->norm();
567  if (norm_nN <= delta) {
568  // Take full feasibility step.
569  n.set(*nN);
570  if (infoQN_) {
571  std::stringstream hist;
572  hist << " taking full Newton step\n";
573  std::cout << hist.str();
574  }
575  }
576  else {
577  // Take convex combination n = nCP+tau*(nN-nCP),
578  // so that ||n|| = delta. In other words, solve
579  // scalar quadratic equation: ||nCP+tau*(nN-nCP)||^2 = delta^2.
580  Real aa = dn->dot(*dn);
581  Real bb = dn->dot(*nCP);
582  Real cc = norm_nCP*norm_nCP - delta*delta;
 583  Real tau = (-bb + std::sqrt(bb*bb - aa*cc))/aa;
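  // tau is the positive root of aa*tau^2 + 2*bb*tau + cc = 0 and lies in (0,1):
  // cc < 0 here because ||nCP|| < delta, while ||nN|| = ||nCP + dn|| > delta.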
584  n.set(*nCP);
585  n.axpy(tau, *dn);
586  if (infoQN_) {
587  std::stringstream hist;
588  hist << " taking dogleg step\n";
589  std::cout << hist.str();
590  }
591  }
592 
593  } // computeQuasinormalStep
594 
595 
  /** \brief Solve tangential subproblem.
  */
  void solveTangentialSubproblem(Vector<Real> &t, Vector<Real> &tCP, Vector<Real> &Wg,
                                 const Vector<Real> &x, const Vector<Real> &g, const Vector<Real> &n,
                                 const Vector<Real> &l, Real delta, Objective<Real> &obj,
                                 EqualityConstraint<Real> &con) {
612 
613  /* Initialization of the CG step. */
614  bool orthocheck = true; // set to true if want to check orthogonality
615  // of Wr and r, otherwise set to false
 616  Real tol_ortho = 0.5; // orthogonality measure; represents a bound on norm( \hat{S}, 2), where
 617  // \hat{S} is defined in Heinkenschloss/Ridzal, "A Matrix-Free Trust-Region SQP Method"
618  Real S_max = 1.0; // another orthogonality measure; norm(S) needs to be bounded by
619  // a modest constant; norm(S) is small if the approximation of
620  // the null space projector is good
621  Real zero = 0.0;
622  Real one = 1.0;
623  Real zerotol = std::sqrt(ROL_EPSILON);
624  std::vector<Real> augiters;
625  iterCG_ = 1;
626  flagCG_ = 0;
627  t.zero();
628  tCP.zero();
629  Teuchos::RCP<Vector<Real> > r = gvec_->clone();
630  Teuchos::RCP<Vector<Real> > pdesc = xvec_->clone();
631  Teuchos::RCP<Vector<Real> > tprev = xvec_->clone();
632  Teuchos::RCP<Vector<Real> > Wr = xvec_->clone();
633  Teuchos::RCP<Vector<Real> > Hp = gvec_->clone();
634  Teuchos::RCP<Vector<Real> > xtemp = xvec_->clone();
635  Teuchos::RCP<Vector<Real> > gtemp = gvec_->clone();
636  Teuchos::RCP<Vector<Real> > ltemp = lvec_->clone();
637  Teuchos::RCP<Vector<Real> > czero = cvec_->clone();
638  czero->zero();
639  r->set(g);
640  obj.hessVec(*gtemp, n, x, zerotol);
641  r->plus(*gtemp);
642  con.applyAdjointHessian(*gtemp, l, n, x, zerotol);
643  r->plus(*gtemp);
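  // Initial residual: r = g + W*n, where W is the Hessian of the Lagrangian at
  // (x,l). This is the gradient of the quadratic Lagrangian model evaluated at
  // the quasi-normal step n.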
644  Real normg = r->norm();
645  Real normWg = zero;
646  Real pHp = zero;
647  Real rp = zero;
648  Real alpha = zero;
649  Real normp = zero;
650  Real normr = zero;
651  Real normt = zero;
652  std::vector<Real> normWr(maxiterCG_+1, zero);
653 
654  std::vector<Teuchos::RCP<Vector<Real > > > p; // stores search directions
655  std::vector<Teuchos::RCP<Vector<Real > > > Hps; // stores duals of hessvec's applied to p's
656  std::vector<Teuchos::RCP<Vector<Real > > > rs; // stores duals of residuals
657  std::vector<Teuchos::RCP<Vector<Real > > > Wrs; // stores duals of projected residuals
658 
659  Real rptol = 1e-12;
660 
661  if (infoTS_) {
662  std::stringstream hist;
663  hist << "\n SQP_tangential_subproblem\n";
664  hist << std::setw(6) << std::right << "iter" << std::setw(18) << "||Wr||/||Wr0||" << std::setw(15) << "||s||";
665  hist << std::setw(15) << "delta" << std::setw(15) << "||c'(x)s||" << "\n";
666  std::cout << hist.str();
667  }
668 
669  if (normg == 0) {
670  if (infoTS_) {
671  std::stringstream hist;
672  hist << " >>> Tangential subproblem: Initial gradient is zero! \n";
673  std::cout << hist.str();
674  }
675  iterCG_ = 0; Wg.zero(); flagCG_ = 0;
676  return;
677  }
678 
679  /* Start CG loop. */
680  while (iterCG_ < maxiterCG_) {
681 
682  // Store tangential Cauchy point (which is the current iterate in the second iteration).
683  if (iterCG_ == 2) {
684  tCP.set(t);
685  }
686 
687  // Compute (inexact) projection W*r.
688  if (iterCG_ == 1) {
689  // Solve augmented system.
690  Real tol = pgtol_;
691  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
692  totalCallLS_++;
693  totalIterLS_ = totalIterLS_ + augiters.size();
694  printInfoLS(augiters);
695 
696  Wg.set(*Wr);
697  normWg = Wg.norm();
698  if (orthocheck) {
699  Wrs.push_back(xvec_->clone());
700  (Wrs[iterCG_-1])->set(*Wr);
701  }
702  // Check if done (small initial projected residual).
703  if (normWg == zero) {
704  flagCG_ = 0;
705  iterCG_--;
706  if (infoTS_) {
707  std::stringstream hist;
708  hist << " Initial projected residual is close to zero! \n";
709  std::cout << hist.str();
710  }
711  return;
712  }
713  // Set first residual to projected gradient.
714  // change r->set(Wg);
715  r->set(Wg.dual());
716  if (orthocheck) {
717  rs.push_back(xvec_->clone());
718  // change (rs[0])->set(*r);
719  (rs[0])->set(r->dual());
720  }
721  }
722  else {
723  // Solve augmented system.
724  Real tol = projtol_;
725  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
726  totalCallLS_++;
727  totalIterLS_ = totalIterLS_ + augiters.size();
728  printInfoLS(augiters);
729 
730  if (orthocheck) {
731  Wrs.push_back(xvec_->clone());
732  (Wrs[iterCG_-1])->set(*Wr);
733  }
734  }
735 
736  normWr[iterCG_-1] = Wr->norm();
737 
738  if (infoTS_) {
739  Teuchos::RCP<Vector<Real> > ct = cvec_->clone();
740  con.applyJacobian(*ct, t, x, zerotol);
741  Real linc = ct->norm();
742  std::stringstream hist;
743  hist << std::scientific << std::setprecision(6);
744  hist << std::setw(6) << std::right << iterCG_-1 << std::setw(18) << normWr[iterCG_-1]/normWg << std::setw(15) << t.norm();
745  hist << std::setw(15) << delta << std::setw(15) << linc << "\n";
746  std::cout << hist.str();
747  }
748 
749  // Check if done (small relative residual).
750  if (normWr[iterCG_-1]/normWg < tolCG_) {
751  flagCG_ = 0;
752  iterCG_ = iterCG_-1;
753  if (infoTS_) {
754  std::stringstream hist;
755  hist << " || W(g + H*(n+s)) || <= cgtol*|| W(g + H*n)|| \n";
756  std::cout << hist.str();
757  }
758  return;
759  }
760 
761  // Check nonorthogonality, one-norm of (WR*R/diag^2 - I)
762  if (orthocheck) {
763  Teuchos::SerialDenseMatrix<int,Real> Wrr(iterCG_,iterCG_); // holds matrix Wrs'*rs
764  Teuchos::SerialDenseMatrix<int,Real> T(iterCG_,iterCG_); // holds matrix T=(1/diag)*Wrs'*rs*(1/diag)
765  Teuchos::SerialDenseMatrix<int,Real> Tm1(iterCG_,iterCG_); // holds matrix Tm1=T-I
766  for (int i=0; i<iterCG_; i++) {
767  for (int j=0; j<iterCG_; j++) {
768  Wrr(i,j) = (Wrs[i])->dot(*rs[j]);
769  T(i,j) = Wrr(i,j)/(normWr[i]*normWr[j]);
770  Tm1(i,j) = T(i,j);
771  if (i==j) {
772  Tm1(i,j) = Tm1(i,j) - 1.0;
773  }
774  }
775  }
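  // With exact projections the scaled matrix T would equal the identity
  // (projected residuals are mutually orthogonal), so ||T - I||_1 measures the
  // orthogonality lost to inexact augmented-system solves.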
776  if (Tm1.normOne() >= tol_ortho) {
777  Teuchos::LAPACK<int,Real> lapack;
778  std::vector<int> ipiv(iterCG_);
779  int info;
780  std::vector<Real> work(3*iterCG_);
781  // compute inverse of T
782  lapack.GETRF(iterCG_, iterCG_, T.values(), T.stride(), &ipiv[0], &info);
783  lapack.GETRI(iterCG_, T.values(), T.stride(), &ipiv[0], &work[0], 3*iterCG_, &info);
784  Tm1 = T;
785  for (int i=0; i<iterCG_; i++) {
786  Tm1(i,i) = Tm1(i,i) - 1.0;
787  }
788  if (Tm1.normOne() > S_max) {
789  flagCG_ = 4;
790  if (infoTS_) {
791  std::stringstream hist;
792  hist << " large nonorthogonality in W(R)'*R detected \n";
793  std::cout << hist.str();
794  }
795  return;
796  }
797  }
798  }
799 
800  // Full orthogonalization.
801  p.push_back(xvec_->clone());
802  (p[iterCG_-1])->set(*Wr);
803  (p[iterCG_-1])->scale(-one);
804  for (int j=1; j<iterCG_; j++) {
805  Real scal = (p[iterCG_-1])->dot(*(Hps[j-1])) / (p[j-1])->dot(*(Hps[j-1]));
806  Teuchos::RCP<Vector<Real> > pj = xvec_->clone();
807  pj->set(*p[j-1]);
808  pj->scale(-scal);
809  (p[iterCG_-1])->plus(*pj);
810  }
811 
812  // change Hps.push_back(gvec_->clone());
813  Hps.push_back(xvec_->clone());
814  // change obj.hessVec(*(Hps[iterCG_-1]), *(p[iterCG_-1]), x, zerotol);
815  obj.hessVec(*Hp, *(p[iterCG_-1]), x, zerotol);
816  con.applyAdjointHessian(*gtemp, l, *(p[iterCG_-1]), x, zerotol);
817  // change (Hps[iterCG_-1])->plus(*gtemp);
818  Hp->plus(*gtemp);
819  // "Preconditioning" step.
820  (Hps[iterCG_-1])->set(Hp->dual());
821 
822  pHp = (p[iterCG_-1])->dot(*(Hps[iterCG_-1]));
823  // change rp = (p[iterCG_-1])->dot(*r);
824  rp = (p[iterCG_-1])->dot(*(rs[iterCG_-1]));
825 
826  normp = (p[iterCG_-1])->norm();
827  normr = r->norm();
828 
829  // Negative curvature stopping condition.
830  if (pHp <= 0) {
831  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
832  if ((std::abs(rp) >= rptol*normp*normr) && (sgn(rp) == 1)) {
833  pdesc->scale(-one); // -p is the descent direction
834  }
835  flagCG_ = 2;
836  Real a = pdesc->dot(*pdesc);
837  Real b = pdesc->dot(t);
838  Real c = t.dot(t) - delta*delta;
839  // Positive root of a*theta^2 + 2*b*theta + c = 0.
840  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
841  xtemp->set(*(p[iterCG_-1]));
842  xtemp->scale(theta);
843  t.plus(*xtemp);
844  // Store as tangential Cauchy point if terminating in first iteration.
845  if (iterCG_ == 1) {
846  tCP.set(t);
847  }
848  if (infoTS_) {
849  std::stringstream hist;
850  hist << " negative curvature detected \n";
851  std::cout << hist.str();
852  }
853  return;
854  }
855 
856  // Want to enforce nonzero alpha's.
857  if (std::abs(rp) < rptol*normp*normr) {
858  flagCG_ = 5;
859  if (infoTS_) {
860  std::stringstream hist;
861  hist << " Zero alpha due to inexactness. \n";
862  std::cout << hist.str();
863  }
864  return;
865  }
866 
867  alpha = - rp/pHp;
868 
869  // Iterate update.
870  tprev->set(t);
871  xtemp->set(*(p[iterCG_-1]));
872  xtemp->scale(alpha);
873  t.plus(*xtemp);
874 
875  // Trust-region stopping condition.
876  normt = t.norm();
877  if (normt >= delta) {
878  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
879  if (sgn(rp) == 1) {
880  pdesc->scale(-one); // -p is the descent direction
881  }
882  Real a = pdesc->dot(*pdesc);
883  Real b = pdesc->dot(*tprev);
884  Real c = tprev->dot(*tprev) - delta*delta;
885  // Positive root of a*theta^2 + 2*b*theta + c = 0.
886  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
887  xtemp->set(*(p[iterCG_-1]));
888  xtemp->scale(theta);
889  t.set(*tprev);
890  t.plus(*xtemp);
891  // Store as tangential Cauchy point if terminating in first iteration.
892  if (iterCG_ == 1) {
893  tCP.set(t);
894  }
895  flagCG_ = 3;
896  if (infoTS_) {
897  std::stringstream hist;
898  hist << " trust-region condition active \n";
899  std::cout << hist.str();
900  }
901  return;
902  }
903 
904  // Residual update.
905  xtemp->set(*(Hps[iterCG_-1]));
906  xtemp->scale(alpha);
907  // change r->plus(*gtemp);
908  r->plus(xtemp->dual());
909  if (orthocheck) {
910  // change rs.push_back(gvec_->clone());
911  rs.push_back(xvec_->clone());
912  // change (rs[iterCG_])->set(*r);
913  (rs[iterCG_])->set(r->dual());
914  }
915 
916  iterCG_++;
917 
918  } // while (iterCG_ < maxiterCG_)
919 
920  flagCG_ = 1;
921  if (infoTS_) {
922  std::stringstream hist;
923  hist << " maximum number of iterations reached \n";
924  std::cout << hist.str();
925  }
926 
927  } // solveTangentialSubproblem
928 
  /** \brief Check acceptance of subproblem solutions, adjust merit function penalty
             parameter, ensure global convergence.
  */
932  void accept(Vector<Real> &s, Vector<Real> &n, Vector<Real> &t, Real f_new, Vector<Real> &c_new,
933  Vector<Real> &gf_new, Vector<Real> &l_new, Vector<Real> &g_new,
934  const Vector<Real> &x, const Vector<Real> &l, Real f, const Vector<Real> &gf, const Vector<Real> &c,
935  const Vector<Real> &g, Vector<Real> &tCP, Vector<Real> &Wg,
              Objective<Real> &obj, EqualityConstraint<Real> &con,
              AlgorithmState<Real> &algo_state ) {
938  Real beta = 1e-8; // predicted reduction parameter
939  Real tol_red_tang = 1e-3; // internal reduction factor for tangtol
940  Real tol_red_all = 1e-1; // internal reduction factor for qntol, lmhtol, pgtol, projtol, tangtol
941  //bool glob_refine = true; // true - if subsolver tolerances are adjusted in this routine, keep adjusted values globally
942  // false - if subsolver tolerances are adjusted in this routine, discard adjusted values
943  Real tol_fdiff = 1e-12; // relative objective function difference for ared computation
944  int ct_max = 10; // maximum number of globalization tries
945  Real mintol = 1e-16; // smallest projection tolerance value
946 
947  // Determines max value of |rpred|/pred.
948  Real rpred_over_pred = 0.5*(1-eta_);
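  // The tangential step is reprojected below until |rpred| <= 0.5*(1-eta_)*pred,
  // so the error term rpred introduced by the inexact projection cannot corrupt
  // the ared/pred acceptance test.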
949 
950  if (infoAC_) {
951  std::stringstream hist;
952  hist << "\n SQP_accept\n";
953  std::cout << hist.str();
954  }
955 
956  Real zero = 0.0;
957  Real one = 1.0;
958  Real two = 2.0;
959  Real half = one/two;
960  Real zerotol = std::sqrt(ROL_EPSILON);
961  std::vector<Real> augiters;
962 
963  Real pred = zero;
964  Real ared = zero;
965  Real rpred = zero;
966  Real part_pred = zero;
967  Real linc_preproj = zero;
968  Real linc_postproj = zero;
969  Real tangtol_start = zero;
970  Real tangtol = tangtol_;
971  //Real projtol = projtol_;
972  bool flag = false;
973  int num_proj = 0;
974  bool try_tCP = false;
975  Real fdiff = zero;
976 
977  Teuchos::RCP<Vector<Real> > xtrial = xvec_->clone();
978  Teuchos::RCP<Vector<Real> > Jl = gvec_->clone();
979  Teuchos::RCP<Vector<Real> > gfJl = gvec_->clone();
980  Teuchos::RCP<Vector<Real> > Jnc = cvec_->clone();
981  Teuchos::RCP<Vector<Real> > t_orig = xvec_->clone();
982  Teuchos::RCP<Vector<Real> > t_dual = gvec_->clone();
983  Teuchos::RCP<Vector<Real> > Jt_orig = cvec_->clone();
984  Teuchos::RCP<Vector<Real> > t_m_tCP = xvec_->clone();
985  Teuchos::RCP<Vector<Real> > ltemp = lvec_->clone();
986  Teuchos::RCP<Vector<Real> > xtemp = xvec_->clone();
987  Teuchos::RCP<Vector<Real> > rt = cvec_->clone();
988  Teuchos::RCP<Vector<Real> > Hn = gvec_->clone();
989  Teuchos::RCP<Vector<Real> > Hto = gvec_->clone();
990  Teuchos::RCP<Vector<Real> > cxxvec = gvec_->clone();
991  Teuchos::RCP<Vector<Real> > czero = cvec_->clone();
992  czero->zero();
993  Real Jnc_normsquared = zero;
994  Real c_normsquared = zero;
995 
996  // Compute and store some quantities for later use. Necessary
997  // because of the function and constraint updates below.
998  con.applyAdjointJacobian(*Jl, l, x, zerotol);
999  con.applyJacobian(*Jnc, n, x, zerotol);
1000  Jnc->plus(c);
1001  Jnc_normsquared = Jnc->dot(*Jnc);
1002  c_normsquared = c.dot(c);
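  // Merit function: the augmented Lagrangian phi(x,l) = f(x) + <l, c(x)> + penalty_*||c(x)||^2.
  // Below, pred is the reduction in phi predicted by the local model, rpred collects the
  // terms due to the inexactly projected tangential step, and ared (computed after the
  // globalization loop) is the actual reduction.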
1003 
1004  for (int ct=0; ct<ct_max; ct++) {
1005 
1006  try_tCP = true;
1007  t_m_tCP->set(t);
1008  t_m_tCP->scale(-one);
1009  t_m_tCP->plus(tCP);
1010  if (t_m_tCP->norm() == zero) {
1011  try_tCP = false;
1012  }
1013 
1014  t_orig->set(t);
1015  con.applyJacobian(*Jt_orig, *t_orig, x, zerotol);
1016  linc_preproj = Jt_orig->norm();
1017  pred = one;
1018  rpred = two*rpred_over_pred*pred;
1019  flag = false;
1020  num_proj = 1;
1021  tangtol_start = tangtol;
1022 
1023  while (std::abs(rpred)/pred > rpred_over_pred) {
1024  // Compute projected tangential step.
1025  if (flag) {
1026  tangtol = tol_red_tang*tangtol;
1027  num_proj++;
1028  if (tangtol < mintol) {
1029  if (infoAC_) {
1030  std::stringstream hist;
1031  hist << "\n The projection of the tangential step cannot be done with sufficient precision.\n";
1032  hist << " Is the quasi-normal step very small? Continuing with no global convergence guarantees.\n";
1033  std::cout << hist.str();
1034  }
1035  break;
1036  }
1037  }
1038  // Solve augmented system.
1039  Real tol = tangtol;
1040  // change augiters = con.solveAugmentedSystem(t, *ltemp, *t_orig, *czero, x, tol);
1041  t_dual->set(t_orig->dual());
1042  augiters = con.solveAugmentedSystem(t, *ltemp, *t_dual, *czero, x, tol);
1043  totalCallLS_++;
1044  totalIterLS_ = totalIterLS_ + augiters.size();
1045  printInfoLS(augiters);
1046  totalProj_++;
1047  con.applyJacobian(*rt, t, x, zerotol);
1048  linc_postproj = rt->norm();
1049 
1050  // Compute composite step.
1051  s.set(t);
1052  s.plus(n);
1053 
1054  // Compute some quantities before updating the objective and the constraint.
1055  obj.hessVec(*Hn, n, x, zerotol);
1056  con.applyAdjointHessian(*cxxvec, l, n, x, zerotol);
1057  Hn->plus(*cxxvec);
1058  obj.hessVec(*Hto, *t_orig, x, zerotol);
1059  con.applyAdjointHessian(*cxxvec, l, *t_orig, x, zerotol);
1060  Hto->plus(*cxxvec);
1061 
1062  // Compute objective, constraint, etc. values at the trial point.
1063  xtrial->set(x);
1064  xtrial->plus(s);
1065  obj.update(*xtrial,false,algo_state.iter);
1066  con.update(*xtrial,false,algo_state.iter);
1067  f_new = obj.value(*xtrial, zerotol);
1068  obj.gradient(gf_new, *xtrial, zerotol);
1069  con.value(c_new, *xtrial, zerotol);
1070  l_new.set(l);
1071  computeLagrangeMultiplier(l_new, *xtrial, gf_new, con);
1072 
1073  // Penalty parameter update.
1074  part_pred = - Wg.dot(*t_orig);
1075  gfJl->set(gf);
1076  gfJl->plus(*Jl);
1077  // change part_pred -= gfJl->dot(n);
1078  part_pred -= n.dot(gfJl->dual());
1079  // change part_pred -= half*Hn->dot(n);
1080  part_pred -= half*n.dot(Hn->dual());
1081  // change part_pred -= half*Hto->dot(*t_orig);
1082  part_pred -= half*t_orig->dot(Hto->dual());
1083  ltemp->set(l_new);
1084  ltemp->axpy(-one, l);
1085  // change part_pred -= Jnc->dot(*ltemp);
1086  part_pred -= Jnc->dot(ltemp->dual());
1087 
1088  if ( part_pred < -half*penalty_*(c_normsquared-Jnc_normsquared) ) {
1089  penalty_ = ( -two * part_pred / (c_normsquared-Jnc_normsquared) ) + beta;
1090  }
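  // Penalty update: if the non-penalty part of the predicted reduction is too small,
  // penalty_ is increased so that pred >= (penalty_/2)*(||c||^2 - ||J(x)*n + c||^2),
  // i.e., the model predicts removal of at least half of the weighted decrease in
  // the squared linearized infeasibility.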
1091 
1092  pred = part_pred + penalty_*(c_normsquared-Jnc_normsquared);
1093 
1094  // Computation of rpred.
1095  // change rpred = - ltemp->dot(*rt) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
1096  rpred = - rt->dot(ltemp->dual()) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
1097  // change Teuchos::RCP<Vector<Real> > lrt = lvec_->clone();
1098  //lrt->set(*rt);
1099  //rpred = - ltemp->dot(*rt) - penalty_ * std::pow(rt->norm(), 2) - two * penalty_ * lrt->dot(*Jnc);
 1100  flag = true;
1101 
1102  } // while (std::abs(rpred)/pred > rpred_over_pred)
1103 
1104  tangtol = tangtol_start;
1105 
1106  // Check if the solution of the tangential subproblem is
1107  // disproportionally large compared to total trial step.
1108  xtemp->set(n);
1109  xtemp->plus(t);
1110  if ( t_orig->norm()/xtemp->norm() < tntmax_ ) {
1111  break;
1112  }
1113  else {
1114  t_m_tCP->set(*t_orig);
1115  t_m_tCP->scale(-one);
1116  t_m_tCP->plus(tCP);
1117  if ((t_m_tCP->norm() > 0) && try_tCP) {
1118  if (infoAC_) {
1119  std::stringstream hist;
1120  hist << " ---> now trying tangential Cauchy point\n";
1121  std::cout << hist.str();
1122  }
1123  t.set(tCP);
1124  }
1125  else {
1126  if (infoAC_) {
1127  std::stringstream hist;
1128  hist << " ---> recomputing quasi-normal step and re-solving tangential subproblem\n";
1129  std::cout << hist.str();
1130  }
1131  totalRef_++;
1132  // Reset global quantities.
1133  obj.update(x);
1134  con.update(x);
1135  /*lmhtol = tol_red_all*lmhtol;
1136  qntol = tol_red_all*qntol;
1137  pgtol = tol_red_all*pgtol;
1138  projtol = tol_red_all*projtol;
1139  tangtol = tol_red_all*tangtol;
1140  if (glob_refine) {
1141  lmhtol_ = lmhtol;
1142  qntol_ = qntol;
1143  pgtol_ = pgtol;
1144  projtol_ = projtol;
1145  tangtol_ = tangtol;
1146  }*/
1147  lmhtol_ *= tol_red_all;
1148  qntol_ *= tol_red_all;
1149  pgtol_ *= tol_red_all;
1150  projtol_ *= tol_red_all;
1151  tangtol_ *= tol_red_all;
1152  // Recompute the quasi-normal step.
1153  computeQuasinormalStep(n, c, x, zeta_*Delta_, con);
1154  // Solve tangential subproblem.
1155  solveTangentialSubproblem(t, tCP, Wg, x, g, n, l, Delta_, obj, con);
1156  totalIterCG_ += iterCG_;
1157  if (flagCG_ == 1) {
1158  totalNegCurv_++;
1159  }
1160  }
1161  } // else w.r.t. ( t_orig->norm()/xtemp->norm() < tntmax )
1162 
1163  } // for (int ct=0; ct<ct_max; ct++)
1164 
1165  // Compute actual reduction;
1166  fdiff = f - f_new;
 1167  // Heuristic 1: If fdiff is very small relative to f, replace it with a tiny
 1168  // positive value (1e-14) in order to prevent machine-precision issues.
1169  if (std::abs(fdiff / (f+1e-24)) < tol_fdiff) {
1170  fdiff = 1e-14;
1171  }
1172  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
1173  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(std::pow(c.norm(),2) - std::pow(c_new.norm(),2));
1174  ared = fdiff + (c.dot(l.dual()) - c_new.dot(l_new.dual())) + penalty_*(c.dot(c) - c_new.dot(c_new));
1175 
1176  // Store actual and predicted reduction.
1177  ared_ = ared;
1178  pred_ = pred;
1179 
1180  // Store step and vector norms.
1181  snorm_ = s.norm();
1182  nnorm_ = n.norm();
1183  tnorm_ = t.norm();
1184 
1185  // Print diagnostics.
1186  if (infoAC_) {
1187  std::stringstream hist;
1188  hist << "\n Trial step info ...\n";
1189  hist << " n_norm = " << nnorm_ << "\n";
1190  hist << " t_norm = " << tnorm_ << "\n";
1191  hist << " s_norm = " << snorm_ << "\n";
1192  hist << " xtrial_norm = " << xtrial->norm() << "\n";
1193  hist << " f_old = " << f << "\n";
1194  hist << " f_trial = " << f_new << "\n";
1195  hist << " f_old-f_trial = " << f-f_new << "\n";
1196  hist << " ||c_old|| = " << c.norm() << "\n";
1197  hist << " ||c_trial|| = " << c_new.norm() << "\n";
1198  hist << " ||Jac*t_preproj|| = " << linc_preproj << "\n";
1199  hist << " ||Jac*t_postproj|| = " << linc_postproj << "\n";
1200  hist << " ||t_tilde||/||t|| = " << t_orig->norm() / t.norm() << "\n";
1201  hist << " ||t_tilde||/||n+t|| = " << t_orig->norm() / snorm_ << "\n";
1202  hist << " # projections = " << num_proj << "\n";
1203  hist << " penalty param = " << penalty_ << "\n";
1204  hist << " ared = " << ared_ << "\n";
1205  hist << " pred = " << pred_ << "\n";
1206  hist << " ared/pred = " << ared_/pred_ << "\n";
1207  std::cout << hist.str();
1208  }
1209 
1210  } // accept
1211 
1212 }; // class CompositeStepSQP
1213 
1214 } // namespace ROL
1215 
1216 #endif