ROL_TypeE_CompositeStepAlgorithm_Def.hpp
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_TYPEE_COMPOSITESTEPALGORITHM_DEF_H
45 #define ROL_TYPEE_COMPOSITESTEPALGORITHM_DEF_H
46 
47 namespace ROL {
48 namespace TypeE {
49 
50 template<typename Real>
51 CompositeStepAlgorithm<Real>::CompositeStepAlgorithm(ParameterList &list)
52  : TypeE::Algorithm<Real>::Algorithm(), list_(list) {
53  // Set status test
54  status_->reset();
55  status_->add(makePtr<ConstraintStatusTest<Real>>(list));
56 
57  flagCG_ = 0;
58  flagAC_ = 0;
59  iterCG_ = 0;
60 
61  Real one(1), two(2), p8(0.8), zero(0), oem8(1.e-8);
62  ParameterList& cslist = list_.sublist("Step").sublist("Composite Step");
63 
64  //maxiterOSS_ = cslist.sublist("Optimality System Solver").get("Iteration Limit", 50);
65  tolOSS_ = cslist.sublist("Optimality System Solver").get("Nominal Relative Tolerance", 1e-8);
66  tolOSSfixed_ = cslist.sublist("Optimality System Solver").get("Fix Tolerance", true);
67  maxiterCG_ = cslist.sublist("Tangential Subproblem Solver").get("Iteration Limit", 20);
68  tolCG_ = cslist.sublist("Tangential Subproblem Solver").get("Relative Tolerance", 1e-2);
69  Delta_ = cslist.get("Initial Radius", 1e2);
70  useConHess_ = cslist.get("Use Constraint Hessian", true);
71  verbosity_ = list_.sublist("General").get("Output Level", 0);
72  printHeader_ = (verbosity_ > 2);
73 
74  lmhtol_ = tolOSS_;
75  qntol_ = tolOSS_;
76  pgtol_ = tolOSS_;
77  projtol_ = tolOSS_;
78  tangtol_ = tolOSS_;
79  tntmax_ = two;
80 
81  zeta_ = p8;
82  penalty_ = one;
83  eta_ = oem8;
84 
85  snorm_ = zero;
86  nnorm_ = zero;
87  tnorm_ = zero;
88 
89  infoALL_ = false;
90  if (verbosity_ > 1) {
91  infoALL_ = true;
92  }
93  infoQN_ = false;
94  infoLM_ = false;
95  infoTS_ = false;
96  infoAC_ = false;
97  infoLS_ = false;
98  infoQN_ = infoQN_ || infoALL_;
99  infoLM_ = infoLM_ || infoALL_;
100  infoTS_ = infoTS_ || infoALL_;
101  infoAC_ = infoAC_ || infoALL_;
102  infoLS_ = infoLS_ || infoALL_;
103 
104  totalIterCG_ = 0;
105  totalProj_ = 0;
106  totalNegCurv_ = 0;
107  totalRef_ = 0;
108  totalCallLS_ = 0;
109  totalIterLS_ = 0;
110 }
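
The constructor reads its settings from nested sublists of the supplied ParameterList. A minimal configuration sketch, using only the parameter names read above (the values shown simply restate the defaults and are not recommendations), might look like:

  ROL::ParameterList list;
  ROL::ParameterList &cs = list.sublist("Step").sublist("Composite Step");
  cs.sublist("Optimality System Solver").set("Nominal Relative Tolerance", 1.e-8);
  cs.sublist("Optimality System Solver").set("Fix Tolerance", true);
  cs.sublist("Tangential Subproblem Solver").set("Iteration Limit", 20);
  cs.sublist("Tangential Subproblem Solver").set("Relative Tolerance", 1.e-2);
  cs.set("Initial Radius", 1.e2);
  cs.set("Use Constraint Hessian", true);
  list.sublist("General").set("Output Level", 1);
  ROL::TypeE::CompositeStepAlgorithm<double> algo(list);

An "Output Level" greater than 1 switches on the verbose subproblem diagnostics (infoALL_), and a level greater than 2 also reprints the iteration header at every step.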
111 
112 
115 template<typename Real>
116 void CompositeStepAlgorithm<Real>::computeTrial(Vector<Real> &s,
117  const Vector<Real> &x,
118  const Vector<Real> &l,
119  Objective<Real> &obj,
120  Constraint<Real> &con,
121  std::ostream &os) {
122 
123  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
124  Real f = 0.0;
125  ROL::Ptr<Vector<Real> > n = xvec_->clone();
126  ROL::Ptr<Vector<Real> > c = cvec_->clone();
127  ROL::Ptr<Vector<Real> > t = xvec_->clone();
128  ROL::Ptr<Vector<Real> > tCP = xvec_->clone();
129  ROL::Ptr<Vector<Real> > g = gvec_->clone();
130  ROL::Ptr<Vector<Real> > gf = gvec_->clone();
131  ROL::Ptr<Vector<Real> > Wg = xvec_->clone();
132  ROL::Ptr<Vector<Real> > ajl = gvec_->clone();
133 
134  Real f_new = 0.0;
135  ROL::Ptr<Vector<Real> > l_new = lvec_->clone();
136  ROL::Ptr<Vector<Real> > c_new = cvec_->clone();
137  ROL::Ptr<Vector<Real> > g_new = gvec_->clone();
138  ROL::Ptr<Vector<Real> > gf_new = gvec_->clone();
139 
140  // Evaluate objective ... should have been stored.
141  f = obj.value(x, zerotol);
142  state_->nfval++;
143  // Compute gradient of objective ... should have been stored.
144  obj.gradient(*gf, x, zerotol);
145  // Evaluate constraint ... should have been stored.
146  con.value(*c, x, zerotol);
147 
148  // Compute quasi-normal step.
149  computeQuasinormalStep(*n, *c, x, zeta_*Delta_, con, os);
150 
151  // Compute gradient of Lagrangian ... should have been stored.
152  con.applyAdjointJacobian(*ajl, l, x, zerotol);
153  g->set(*gf);
154  g->plus(*ajl);
155  state_->ngrad++;
156 
157  // Solve tangential subproblem.
158  solveTangentialSubproblem(*t, *tCP, *Wg, x, *g, *n, l, Delta_, obj, con, os);
159  totalIterCG_ += iterCG_;
160 
161  // Check acceptance of subproblem solutions, adjust merit function penalty parameter, ensure global convergence.
162  accept(s, *n, *t, f_new, *c_new, *gf_new, *l_new, *g_new, x, l, f, *gf, *c, *g, *tCP, *Wg, obj, con, os);
163 }
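
In short, the trial step assembled here is the composite step s = n + t: the quasi-normal step n reduces the linearized infeasibility within the shrunken radius \(\zeta\Delta\), and the tangential step t reduces a quadratic model of the Lagrangian within the radius \(\Delta\) while remaining (approximately) in the null space of the constraint Jacobian,

  \[ s = n + t, \qquad \|n\| \le \zeta\,\Delta, \qquad \|t\| \le \Delta . \]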
164 
165 
166 template<typename Real>
167 void CompositeStepAlgorithm<Real>::computeLagrangeMultiplier(Vector<Real> &l,
168  const Vector<Real> &x,
169  const Vector<Real> &gf,
170  Constraint<Real> &con,
171  std::ostream &os) {
172 
173  Real one(1);
174  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
175  std::vector<Real> augiters;
176 
177  if (infoLM_) {
178  // std::ios_base::fmtflags osFlags(os.flags());
179  os << "\n Lagrange multiplier step\n";
180  // os.flags(osFlags);
181  }
182 
183  /* Apply adjoint of constraint Jacobian to current multiplier. */
184  Ptr<Vector<Real> > ajl = gvec_->clone();
185  con.applyAdjointJacobian(*ajl, l, x, zerotol);
186 
187  /* Form right-hand side of the augmented system. */
188  Ptr<Vector<Real> > b1 = gvec_->clone();
189  Ptr<Vector<Real> > b2 = cvec_->clone();
190  // b1 is the negative gradient of the Lagrangian
191  b1->set(gf); b1->plus(*ajl); b1->scale(-one);
192  // b2 is zero
193  b2->zero();
194 
195  /* Declare left-hand side of augmented system. */
196  Ptr<Vector<Real> > v1 = xvec_->clone();
197  Ptr<Vector<Real> > v2 = lvec_->clone();
198 
199  /* Compute linear solver tolerance. */
200  Real b1norm = b1->norm();
201  Real tol = setTolOSS(lmhtol_*b1norm);
202 
203  /* Solve augmented system. */
204  augiters = con.solveAugmentedSystem(*v1, *v2, *b1, *b2, x, tol);
205  totalCallLS_++;
206  totalIterLS_ = totalIterLS_ + augiters.size();
207  printInfoLS(augiters, os);
208 
209  /* Return updated Lagrange multiplier. */
210  // v2 is the multiplier update
211  l.plus(*v2);
212 
213 } // computeLagrangeMultiplier
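
Spelled out, this routine updates the multiplier via an inexact least-squares solve for the gradient of the Lagrangian. With J(x) the constraint Jacobian and the (1,1) block the identity or Riesz operator used by solveAugmentedSystem (the block form below is a reading of the right-hand sides assembled above, not a literal excerpt from the solver),

  \[
  \begin{pmatrix} I & J(x)^{\top} \\ J(x) & 0 \end{pmatrix}
  \begin{pmatrix} v_1 \\ v_2 \end{pmatrix}
  =
  \begin{pmatrix} -\bigl(\nabla f(x) + J(x)^{\top}\lambda\bigr) \\ 0 \end{pmatrix},
  \qquad
  \lambda \leftarrow \lambda + v_2 .
  \]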
214 
215 
216 template<typename Real>
217 void CompositeStepAlgorithm<Real>::computeQuasinormalStep(Vector<Real> &n,
218  const Vector<Real> &c,
219  const Vector<Real> &x,
220  Real delta,
221  Constraint<Real> &con,
222  std::ostream &os) {
223 
224  if (infoQN_) {
225  // std::ios_base::fmtflags osFlags(os.flags());
226  os << "\n Quasi-normal step\n";
227  // os.flags(osFlags);
228  }
229 
230  Real zero(0);
231  Real one(1);
232  Real zerotol = std::sqrt(ROL_EPSILON<Real>()); //zero;
233  std::vector<Real> augiters;
234 
235  /* Compute Cauchy step nCP. */
236  Ptr<Vector<Real> > nCP = xvec_->clone();
237  Ptr<Vector<Real> > nCPdual = gvec_->clone();
238  Ptr<Vector<Real> > nN = xvec_->clone();
239  Ptr<Vector<Real> > ctemp = cvec_->clone();
240  Ptr<Vector<Real> > dualc0 = lvec_->clone();
241  dualc0->set(c.dual());
242  con.applyAdjointJacobian(*nCPdual, *dualc0, x, zerotol);
243  nCP->set(nCPdual->dual());
244  con.applyJacobian(*ctemp, *nCP, x, zerotol);
245 
246  Real normsquare_ctemp = ctemp->dot(*ctemp);
247  if (normsquare_ctemp != zero) {
248  nCP->scale( -(nCP->dot(*nCP))/normsquare_ctemp );
249  }
250 
251  /* If the Cauchy step nCP is outside the trust region,
252  return the scaled Cauchy step. */
253  Real norm_nCP = nCP->norm();
254  if (norm_nCP >= delta) {
255  n.set(*nCP);
256  n.scale( delta/norm_nCP );
257  if (infoQN_) {
258  // std::ios_base::fmtflags osFlags(os.flags());
259  os << " taking partial Cauchy step\n";
260  // os.flags(osFlags);
261  }
262  return;
263  }
264 
265  /* Compute 'Newton' step, for example, by solving a problem
266  related to finding the minimum norm solution of min || c'(x_k)*s + c ||^2. */
267  // Compute tolerance for linear solver.
268  con.applyJacobian(*ctemp, *nCP, x, zerotol);
269  ctemp->plus(c);
270  Real tol = setTolOSS(qntol_*ctemp->norm());
271  // Form right-hand side.
272  ctemp->scale(-one);
273  nCPdual->set(nCP->dual());
274  nCPdual->scale(-one);
275  // Declare left-hand side of augmented system.
276  Ptr<Vector<Real> > dn = xvec_->clone();
277  Ptr<Vector<Real> > y = lvec_->clone();
278  // Solve augmented system.
279  augiters = con.solveAugmentedSystem(*dn, *y, *nCPdual, *ctemp, x, tol);
280  totalCallLS_++;
281  totalIterLS_ = totalIterLS_ + augiters.size();
282  printInfoLS(augiters, os);
283 
284  nN->set(*dn);
285  nN->plus(*nCP);
286 
287  /* Either take full or partial Newton step, depending on
288  the trust-region constraint. */
289  Real norm_nN = nN->norm();
290  if (norm_nN <= delta) {
291  // Take full feasibility step.
292  n.set(*nN);
293  if (infoQN_) {
294  // std::ios_base::fmtflags osFlags(os.flags());
295  os << " taking full Newton step\n";
296  // os.flags(osFlags);
297  }
298  }
299  else {
300  // Take convex combination n = nCP+tau*(nN-nCP),
301  // so that ||n|| = delta. In other words, solve
302  // scalar quadratic equation: ||nCP+tau*(nN-nCP)||^2 = delta^2.
303  Real aa = dn->dot(*dn);
304  Real bb = dn->dot(*nCP);
305  Real cc = norm_nCP*norm_nCP - delta*delta;
306  Real tau = (-bb+sqrt(bb*bb-aa*cc))/aa;
307  n.set(*nCP);
308  n.axpy(tau, *dn);
309  if (infoQN_) {
310  // std::ios_base::fmtflags osFlags(os.flags());
311  os << " taking dogleg step\n";
312  // os.flags(osFlags);
313  }
314  }
315 
316 } // computeQuasinormalStep
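
The dogleg coefficient tau used above is the positive root of the scalar quadratic produced by the trust-region constraint. Writing dn = nN - nCP for the Newton correction, the quantities aa, bb, cc in the code are

  \[
  \|n_{CP} + \tau\,dn\|^2 = \delta^2
  \;\Longleftrightarrow\;
  aa\,\tau^2 + 2\,bb\,\tau + cc = 0,
  \qquad
  aa = \langle dn, dn\rangle,\quad
  bb = \langle dn, n_{CP}\rangle,\quad
  cc = \|n_{CP}\|^2 - \delta^2,
  \]
  \[
  \tau = \frac{-bb + \sqrt{bb^2 - aa\,cc}}{aa}.
  \]

This branch is reached only when \(\|n_{CP}\| < \delta\), so cc < 0, the discriminant is positive, and tau is well defined and positive.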
317 
318 
319 template<typename Real>
320 void CompositeStepAlgorithm<Real>::solveTangentialSubproblem(Vector<Real> &t,
321  Vector<Real> &tCP,
322  Vector<Real> &Wg,
323  const Vector<Real> &x,
324  const Vector<Real> &g,
325  const Vector<Real> &n,
326  const Vector<Real> &l,
327  Real delta,
328  Objective<Real> &obj,
329  Constraint<Real> &con,
330  std::ostream &os) {
331 
332  /* Initialization of the CG step. */
333  bool orthocheck = true; // set to true if want to check orthogonality
334  // of Wr and r, otherwise set to false
335  Real tol_ortho = 0.5; // orthogonality measure; represents a bound on norm( \hat{S}, 2), where
336  // \hat{S} is defined in Heinkenschloss/Ridzal, "A Matrix-Free Trust-Region SQP Method"
337  Real S_max = 1.0; // another orthogonality measure; norm(S) needs to be bounded by
338  // a modest constant; norm(S) is small if the approximation of
339  // the null space projector is good
340  Real zero(0);
341  Real one(1);
342  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
343  std::vector<Real> augiters;
344  iterCG_ = 1;
345  flagCG_ = 0;
346  t.zero();
347  tCP.zero();
348  Ptr<Vector<Real> > r = gvec_->clone();
349  Ptr<Vector<Real> > pdesc = xvec_->clone();
350  Ptr<Vector<Real> > tprev = xvec_->clone();
351  Ptr<Vector<Real> > Wr = xvec_->clone();
352  Ptr<Vector<Real> > Hp = gvec_->clone();
353  Ptr<Vector<Real> > xtemp = xvec_->clone();
354  Ptr<Vector<Real> > gtemp = gvec_->clone();
355  Ptr<Vector<Real> > ltemp = lvec_->clone();
356  Ptr<Vector<Real> > czero = cvec_->clone();
357  czero->zero();
358  r->set(g);
359  obj.hessVec(*gtemp, n, x, zerotol);
360  r->plus(*gtemp);
361  if (useConHess_) {
362  con.applyAdjointHessian(*gtemp, l, n, x, zerotol);
363  r->plus(*gtemp);
364  }
365  Real normg = r->norm();
366  Real normWg = zero;
367  Real pHp = zero;
368  Real rp = zero;
369  Real alpha = zero;
370  Real normp = zero;
371  Real normr = zero;
372  Real normt = zero;
373  std::vector<Real> normWr(maxiterCG_+1, zero);
374 
375  std::vector<Ptr<Vector<Real > > > p; // stores search directions
376  std::vector<Ptr<Vector<Real > > > Hps; // stores duals of hessvec's applied to p's
377  std::vector<Ptr<Vector<Real > > > rs; // stores duals of residuals
378  std::vector<Ptr<Vector<Real > > > Wrs; // stores duals of projected residuals
379 
380  Real rptol(1e-12);
381 
382  if (infoTS_) {
383  std::ios_base::fmtflags osFlags(os.flags());
384  os << "\n Tangential subproblem\n";
385  os << std::setw(6) << std::right << "iter" << std::setw(18) << "||Wr||/||Wr0||" << std::setw(15) << "||s||";
386  os << std::setw(15) << "delta" << std::setw(15) << "||c'(x)s||" << "\n";
387  os.flags(osFlags);
388  }
389 
390  if (normg == 0) {
391  if (infoTS_) {
392  // std::ios_base::fmtflags osFlags(os.flags());
393  os << " >>> Tangential subproblem: Initial gradient is zero! \n";
394  // os.flags(osFlags);
395  }
396  iterCG_ = 0; Wg.zero(); flagCG_ = 0;
397  return;
398  }
399 
400  /* Start CG loop. */
401  while (iterCG_ < maxiterCG_) {
402 
403  // Store tangential Cauchy point (which is the current iterate in the second iteration).
404  if (iterCG_ == 2) {
405  tCP.set(t);
406  }
407 
408  // Compute (inexact) projection W*r.
409  if (iterCG_ == 1) {
410  // Solve augmented system.
411  Real tol = setTolOSS(pgtol_);
412  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
413  totalCallLS_++;
414  totalIterLS_ = totalIterLS_ + augiters.size();
415  printInfoLS(augiters, os);
416 
417  Wg.set(*Wr);
418  normWg = Wg.norm();
419  if (orthocheck) {
420  Wrs.push_back(xvec_->clone());
421  (Wrs[iterCG_-1])->set(*Wr);
422  }
423  // Check if done (small initial projected residual).
424  if (normWg == zero) {
425  flagCG_ = 0;
426  iterCG_--;
427  if (infoTS_) {
428  // std::ios_base::fmtflags osFlags(os.flags());
429  os << " Initial projected residual is close to zero! \n";
430  // os.flags(osFlags);
431  }
432  return;
433  }
434  // Set first residual to projected gradient.
435  // change r->set(Wg);
436  r->set(Wg.dual());
437  if (orthocheck) {
438  rs.push_back(xvec_->clone());
439  // change (rs[0])->set(*r);
440  (rs[0])->set(r->dual());
441  }
442  }
443  else {
444  // Solve augmented system.
445  Real tol = setTolOSS(projtol_);
446  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
447  totalCallLS_++;
448  totalIterLS_ = totalIterLS_ + augiters.size();
449  printInfoLS(augiters, os);
450 
451  if (orthocheck) {
452  Wrs.push_back(xvec_->clone());
453  (Wrs[iterCG_-1])->set(*Wr);
454  }
455  }
456 
457  normWr[iterCG_-1] = Wr->norm();
458 
459  if (infoTS_) {
460  Ptr<Vector<Real> > ct = cvec_->clone();
461  con.applyJacobian(*ct, t, x, zerotol);
462  Real linc = ct->norm();
463  std::ios_base::fmtflags osFlags(os.flags());
464  os << std::scientific << std::setprecision(6);
465  os << std::setw(6) << std::right << iterCG_-1 << std::setw(18) << normWr[iterCG_-1]/normWg << std::setw(15) << t.norm();
466  os << std::setw(15) << delta << std::setw(15) << linc << "\n";
467  os.flags(osFlags);
468  }
469 
470  // Check if done (small relative residual).
471  if (normWr[iterCG_-1]/normWg < tolCG_) {
472  flagCG_ = 0;
473  iterCG_ = iterCG_-1;
474  if (infoTS_) {
475  // std::ios_base::fmtflags osFlags(os.flags());
476  os << " || W(g + H*(n+s)) || <= cgtol*|| W(g + H*n)|| \n";
477  // os.flags(osFlags);
478  }
479  return;
480  }
481 
482  // Check nonorthogonality, one-norm of (WR*R/diag^2 - I)
483  if (orthocheck) {
484  LA::Matrix<Real> Wrr(iterCG_,iterCG_); // holds matrix Wrs'*rs
485  LA::Matrix<Real> T(iterCG_,iterCG_); // holds matrix T=(1/diag)*Wrs'*rs*(1/diag)
486  LA::Matrix<Real> Tm1(iterCG_,iterCG_); // holds matrix Tm1=T-I
487  for (int i=0; i<iterCG_; i++) {
488  for (int j=0; j<iterCG_; j++) {
489  Wrr(i,j) = (Wrs[i])->dot(*rs[j]);
490  T(i,j) = Wrr(i,j)/(normWr[i]*normWr[j]);
491  Tm1(i,j) = T(i,j);
492  if (i==j) {
493  Tm1(i,j) = Tm1(i,j) - one;
494  }
495  }
496  }
497  if (Tm1.normOne() >= tol_ortho) {
498  LAPACK<int,Real> lapack;
499  std::vector<int> ipiv(iterCG_);
500  int info;
501  std::vector<Real> work(3*iterCG_);
502  // compute inverse of T
503  lapack.GETRF(iterCG_, iterCG_, T.values(), T.stride(), &ipiv[0], &info);
504  lapack.GETRI(iterCG_, T.values(), T.stride(), &ipiv[0], &work[0], 3*iterCG_, &info);
505  Tm1 = T;
506  for (int i=0; i<iterCG_; i++) {
507  Tm1(i,i) = Tm1(i,i) - one;
508  }
509  if (Tm1.normOne() > S_max) {
510  flagCG_ = 4;
511  if (infoTS_) {
512  // std::ios_base::fmtflags osFlags(os.flags());
513  os << " large nonorthogonality in W(R)'*R detected \n";
514  // os.flags(osFlags);
515  }
516  return;
517  }
518  }
519  }
520 
521  // Full orthogonalization.
522  p.push_back(xvec_->clone());
523  (p[iterCG_-1])->set(*Wr);
524  (p[iterCG_-1])->scale(-one);
525  for (int j=1; j<iterCG_; j++) {
526  Real scal = (p[iterCG_-1])->dot(*(Hps[j-1])) / (p[j-1])->dot(*(Hps[j-1]));
527  Ptr<Vector<Real> > pj = xvec_->clone();
528  pj->set(*p[j-1]);
529  pj->scale(-scal);
530  (p[iterCG_-1])->plus(*pj);
531  }
532 
533  // change Hps.push_back(gvec_->clone());
534  Hps.push_back(xvec_->clone());
535  // change obj.hessVec(*(Hps[iterCG_-1]), *(p[iterCG_-1]), x, zerotol);
536  obj.hessVec(*Hp, *(p[iterCG_-1]), x, zerotol);
537  if (useConHess_) {
538  con.applyAdjointHessian(*gtemp, l, *(p[iterCG_-1]), x, zerotol);
539  // change (Hps[iterCG_-1])->plus(*gtemp);
540  Hp->plus(*gtemp);
541  }
542  // "Preconditioning" step.
543  (Hps[iterCG_-1])->set(Hp->dual());
544 
545  pHp = (p[iterCG_-1])->dot(*(Hps[iterCG_-1]));
546  // change rp = (p[iterCG_-1])->dot(*r);
547  rp = (p[iterCG_-1])->dot(*(rs[iterCG_-1]));
548 
549  normp = (p[iterCG_-1])->norm();
550  normr = r->norm();
551 
552  // Negative curvature stopping condition.
553  if (pHp <= 0) {
554  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
555  if ((std::abs(rp) >= rptol*normp*normr) && (sgn(rp) == 1)) {
556  pdesc->scale(-one); // -p is the descent direction
557  }
558  flagCG_ = 2;
559  Real a = pdesc->dot(*pdesc);
560  Real b = pdesc->dot(t);
561  Real c = t.dot(t) - delta*delta;
562  // Positive root of a*theta^2 + 2*b*theta + c = 0.
563  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
564  xtemp->set(*(p[iterCG_-1]));
565  xtemp->scale(theta);
566  t.plus(*xtemp);
567  // Store as tangential Cauchy point if terminating in first iteration.
568  if (iterCG_ == 1) {
569  tCP.set(t);
570  }
571  if (infoTS_) {
572  // std::ios_base::fmtflags osFlags(os.flags());
573  os << " negative curvature detected \n";
574  // os.flags(osFlags);
575  }
576  return;
577  }
578 
579  // Want to enforce nonzero alpha's.
580  if (std::abs(rp) < rptol*normp*normr) {
581  flagCG_ = 5;
582  if (infoTS_) {
583  // std::ios_base::fmtflags osFlags(os.flags());
584  os << " Zero alpha due to inexactness. \n";
585  // os.flags(osFlags);
586  }
587  return;
588  }
589 
590  alpha = - rp/pHp;
591 
592  // Iterate update.
593  tprev->set(t);
594  xtemp->set(*(p[iterCG_-1]));
595  xtemp->scale(alpha);
596  t.plus(*xtemp);
597 
598  // Trust-region stopping condition.
599  normt = t.norm();
600  if (normt >= delta) {
601  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
602  if (sgn(rp) == 1) {
603  pdesc->scale(-one); // -p is the descent direction
604  }
605  Real a = pdesc->dot(*pdesc);
606  Real b = pdesc->dot(*tprev);
607  Real c = tprev->dot(*tprev) - delta*delta;
608  // Positive root of a*theta^2 + 2*b*theta + c = 0.
609  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
610  xtemp->set(*(p[iterCG_-1]));
611  xtemp->scale(theta);
612  t.set(*tprev);
613  t.plus(*xtemp);
614  // Store as tangential Cauchy point if terminating in first iteration.
615  if (iterCG_ == 1) {
616  tCP.set(t);
617  }
618  flagCG_ = 3;
619  if (infoTS_) {
620  // std::ios_base::fmtflags osFlags(os.flags());
621  os << " trust-region condition active \n";
622  // os.flags(osFlags);
623  }
624  return;
625  }
626 
627  // Residual update.
628  xtemp->set(*(Hps[iterCG_-1]));
629  xtemp->scale(alpha);
630  // change r->plus(*gtemp);
631  r->plus(xtemp->dual());
632  if (orthocheck) {
633  // change rs.push_back(gvec_->clone());
634  rs.push_back(xvec_->clone());
635  // change (rs[iterCG_])->set(*r);
636  (rs[iterCG_])->set(r->dual());
637  }
638 
639  iterCG_++;
640 
641  } // while (iterCG_ < maxiterCG_)
642 
643  flagCG_ = 1;
644  if (infoTS_) {
645  // std::ios_base::fmtflags osFlags(os.flags());
646  os << " maximum number of iterations reached \n";
647  // os.flags(osFlags);
648  }
649 
650 } // solveTangentialSubproblem
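
For orientation, the projected CG loop above can be read as an inexact solver for the tangential trust-region subproblem (this is the standard interpretation suggested by the reported residual \(W(g + H(n+t))\); W stands for the approximate null-space projector realized through the augmented-system solves, and H for the Hessian of the Lagrangian, with the constraint term included only when "Use Constraint Hessian" is enabled):

  \[
  \min_{t}\;\; \langle g + Hn,\; t\rangle + \tfrac{1}{2}\,\langle Ht,\; t\rangle
  \quad\text{subject to}\quad
  c'(x)\,t \approx 0, \qquad \|t\| \le \delta .
  \]

The exit flag records the reason for termination: 0 converged, 1 iteration limit, 2 negative curvature, 3 trust-region boundary, 4 loss of orthogonality in W(R)'*R, 5 zero step length due to inexactness.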
651 
652 
653 template<typename Real>
654 void CompositeStepAlgorithm<Real>::accept(Vector<Real> &s, Vector<Real> &n, Vector<Real> &t, Real f_new, Vector<Real> &c_new,
655  Vector<Real> &gf_new, Vector<Real> &l_new, Vector<Real> &g_new,
656  const Vector<Real> &x, const Vector<Real> &l, Real f, const Vector<Real> &gf, const Vector<Real> &c,
657  const Vector<Real> &g, Vector<Real> &tCP, Vector<Real> &Wg,
658  Objective<Real> &obj, Constraint<Real> &con, std::ostream &os) {
659 
660  Real beta = 1e-8; // predicted reduction parameter
661  Real tol_red_tang = 1e-3; // internal reduction factor for tangtol
662  Real tol_red_all = 1e-1; // internal reduction factor for qntol, lmhtol, pgtol, projtol, tangtol
663  //bool glob_refine = true; // true - if subsolver tolerances are adjusted in this routine, keep adjusted values globally
664  // false - if subsolver tolerances are adjusted in this routine, discard adjusted values
665  Real tol_fdiff = 1e-12; // relative objective function difference for ared computation
666  int ct_max = 10; // maximum number of globalization tries
667  Real mintol = 1e-16; // smallest projection tolerance value
668 
669  // Determines max value of |rpred|/pred.
670  Real rpred_over_pred = 0.5*(1-eta_);
671 
672  if (infoAC_) {
673  // std::ios_base::fmtflags osFlags(os.flags());
674  os << "\n Composite step acceptance\n";
675  // os.flags(osFlags);
676  }
677 
678  Real zero = 0.0;
679  Real one = 1.0;
680  Real two = 2.0;
681  Real half = one/two;
682  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
683  std::vector<Real> augiters;
684 
685  Real pred = zero;
686  Real ared = zero;
687  Real rpred = zero;
688  Real part_pred = zero;
689  Real linc_preproj = zero;
690  Real linc_postproj = zero;
691  Real tangtol_start = zero;
692  Real tangtol = tangtol_;
693  //Real projtol = projtol_;
694  bool flag = false;
695  int num_proj = 0;
696  bool try_tCP = false;
697  Real fdiff = zero;
698 
699  Ptr<Vector<Real> > xtrial = xvec_->clone();
700  Ptr<Vector<Real> > Jl = gvec_->clone();
701  Ptr<Vector<Real> > gfJl = gvec_->clone();
702  Ptr<Vector<Real> > Jnc = cvec_->clone();
703  Ptr<Vector<Real> > t_orig = xvec_->clone();
704  Ptr<Vector<Real> > t_dual = gvec_->clone();
705  Ptr<Vector<Real> > Jt_orig = cvec_->clone();
706  Ptr<Vector<Real> > t_m_tCP = xvec_->clone();
707  Ptr<Vector<Real> > ltemp = lvec_->clone();
708  Ptr<Vector<Real> > xtemp = xvec_->clone();
709  Ptr<Vector<Real> > rt = cvec_->clone();
710  Ptr<Vector<Real> > Hn = gvec_->clone();
711  Ptr<Vector<Real> > Hto = gvec_->clone();
712  Ptr<Vector<Real> > cxxvec = gvec_->clone();
713  Ptr<Vector<Real> > czero = cvec_->clone();
714  czero->zero();
715  Real Jnc_normsquared = zero;
716  Real c_normsquared = zero;
717 
718  // Compute and store some quantities for later use. Necessary
719  // because of the function and constraint updates below.
720  con.applyAdjointJacobian(*Jl, l, x, zerotol);
721  con.applyJacobian(*Jnc, n, x, zerotol);
722  Jnc->plus(c);
723  Jnc_normsquared = Jnc->dot(*Jnc);
724  c_normsquared = c.dot(c);
725 
726  for (int ct=0; ct<ct_max; ct++) {
727 
728  try_tCP = true;
729  t_m_tCP->set(t);
730  t_m_tCP->scale(-one);
731  t_m_tCP->plus(tCP);
732  if (t_m_tCP->norm() == zero) {
733  try_tCP = false;
734  }
735 
736  t_orig->set(t);
737  con.applyJacobian(*Jt_orig, *t_orig, x, zerotol);
738  linc_preproj = Jt_orig->norm();
739  pred = one;
740  rpred = two*rpred_over_pred*pred;
741  flag = false;
742  num_proj = 1;
743  tangtol_start = tangtol;
744 
745  while (std::abs(rpred)/pred > rpred_over_pred) {
746  // Compute projected tangential step.
747  if (flag) {
748  tangtol = tol_red_tang*tangtol;
749  num_proj++;
750  if (tangtol < mintol) {
751  if (infoAC_) {
752  // std::ios_base::fmtflags osFlags(os.flags());
753  os << "\n The projection of the tangential step cannot be done with sufficient precision.\n";
754  os << " Is the quasi-normal step very small? Continuing with no global convergence guarantees.\n";
755  // os.flags(osFlags);
756  }
757  break;
758  }
759  }
760  // Solve augmented system.
761  Real tol = setTolOSS(tangtol);
762  // change augiters = con.solveAugmentedSystem(t, *ltemp, *t_orig, *czero, x, tol);
763  t_dual->set(t_orig->dual());
764  augiters = con.solveAugmentedSystem(t, *ltemp, *t_dual, *czero, x, tol);
765  totalCallLS_++;
766  totalIterLS_ = totalIterLS_ + augiters.size();
767  printInfoLS(augiters, os);
768  totalProj_++;
769  con.applyJacobian(*rt, t, x, zerotol);
770  linc_postproj = rt->norm();
771 
772  // Compute composite step.
773  s.set(t);
774  s.plus(n);
775 
776  // Compute some quantities before updating the objective and the constraint.
777  obj.hessVec(*Hn, n, x, zerotol);
778  if (useConHess_) {
779  con.applyAdjointHessian(*cxxvec, l, n, x, zerotol);
780  Hn->plus(*cxxvec);
781  }
782  obj.hessVec(*Hto, *t_orig, x, zerotol);
783  if (useConHess_) {
784  con.applyAdjointHessian(*cxxvec, l, *t_orig, x, zerotol);
785  Hto->plus(*cxxvec);
786  }
787 
788  // Compute objective, constraint, etc. values at the trial point.
789  xtrial->set(x);
790  xtrial->plus(s);
791  obj.update(*xtrial,UpdateType::Trial,state_->iter);
792  con.update(*xtrial,UpdateType::Trial,state_->iter);
793  f_new = obj.value(*xtrial, zerotol);
794  obj.gradient(gf_new, *xtrial, zerotol);
795  con.value(c_new, *xtrial, zerotol);
796  l_new.set(l);
797  computeLagrangeMultiplier(l_new, *xtrial, gf_new, con, os);
798 
799  // Penalty parameter update.
800  part_pred = - Wg.dot(*t_orig);
801  gfJl->set(gf);
802  gfJl->plus(*Jl);
803  // change part_pred -= gfJl->dot(n);
804  //part_pred -= n.dot(gfJl->dual());
805  part_pred -= n.apply(*gfJl);
806  // change part_pred -= half*Hn->dot(n);
807  //part_pred -= half*n.dot(Hn->dual());
808  part_pred -= half*n.apply(*Hn);
809  // change part_pred -= half*Hto->dot(*t_orig);
810  //part_pred -= half*t_orig->dot(Hto->dual());
811  part_pred -= half*t_orig->apply(*Hto);
812  ltemp->set(l_new);
813  ltemp->axpy(-one, l);
814  // change part_pred -= Jnc->dot(*ltemp);
815  //part_pred -= Jnc->dot(ltemp->dual());
816  part_pred -= Jnc->apply(*ltemp);
817 
818  if ( part_pred < -half*penalty_*(c_normsquared-Jnc_normsquared) ) {
819  penalty_ = ( -two * part_pred / (c_normsquared-Jnc_normsquared) ) + beta;
820  }
821 
822  pred = part_pred + penalty_*(c_normsquared-Jnc_normsquared);
823 
824  // Computation of rpred.
825  // change rpred = - ltemp->dot(*rt) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
826  //rpred = - rt->dot(ltemp->dual()) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
827  rpred = - rt->apply(*ltemp) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
828  // change Ptr<Vector<Real> > lrt = lvec_->clone();
829  //lrt->set(*rt);
830  //rpred = - ltemp->dot(*rt) - penalty_ * std::pow(rt->norm(), 2) - two * penalty_ * lrt->dot(*Jnc);
831  flag = 1;
832 
833  } // while (std::abs(rpred)/pred > rpred_over_pred)
834 
835  tangtol = tangtol_start;
836 
837  // Check if the solution of the tangential subproblem is
838  // disproportionally large compared to total trial step.
839  xtemp->set(n);
840  xtemp->plus(t);
841  if ( t_orig->norm()/xtemp->norm() < tntmax_ ) {
842  break;
843  }
844  else {
845  t_m_tCP->set(*t_orig);
846  t_m_tCP->scale(-one);
847  t_m_tCP->plus(tCP);
848  if ((t_m_tCP->norm() > 0) && try_tCP) {
849  if (infoAC_) {
850  // std::ios_base::fmtflags osFlags(os.flags());
851  os << " ---> now trying tangential Cauchy point\n";
852  // os.flags(osFlags);
853  }
854  t.set(tCP);
855  }
856  else {
857  if (infoAC_) {
858  // std::ios_base::fmtflags osFlags(os.flags());
859  os << " ---> recomputing quasi-normal step and re-solving tangential subproblem\n";
860  // os.flags(osFlags);
861  }
862  totalRef_++;
863  // Reset global quantities.
864  obj.update(x, UpdateType::Trial, state_->iter);
865  con.update(x, UpdateType::Trial, state_->iter);
866  /*lmhtol = tol_red_all*lmhtol;
867  qntol = tol_red_all*qntol;
868  pgtol = tol_red_all*pgtol;
869  projtol = tol_red_all*projtol;
870  tangtol = tol_red_all*tangtol;
871  if (glob_refine) {
872  lmhtol_ = lmhtol;
873  qntol_ = qntol;
874  pgtol_ = pgtol;
875  projtol_ = projtol;
876  tangtol_ = tangtol;
877  }*/
878  if (!tolOSSfixed_) {
879  lmhtol_ *= tol_red_all;
880  qntol_ *= tol_red_all;
881  pgtol_ *= tol_red_all;
882  projtol_ *= tol_red_all;
883  tangtol_ *= tol_red_all;
884  }
885  // Recompute the quasi-normal step.
886  computeQuasinormalStep(n, c, x, zeta_*Delta_, con, os);
887  // Solve tangential subproblem.
888  solveTangentialSubproblem(t, tCP, Wg, x, g, n, l, Delta_, obj, con, os);
889  totalIterCG_ += iterCG_;
890  if (flagCG_ == 1) {
891  totalNegCurv_++;
892  }
893  }
894  } // else w.r.t. ( t_orig->norm()/xtemp->norm() < tntmax )
895 
896  } // for (int ct=0; ct<ct_max; ct++)
897 
898  // Compute actual reduction;
899  fdiff = f - f_new;
900  // Heuristic 1: If fdiff is very small compared to f, set it to 0,
901  // in order to prevent machine precision issues.
902  Real em24(1e-24);
903  Real em14(1e-14);
904  if (std::abs(fdiff / (f+em24)) < tol_fdiff) {
905  fdiff = em14;
906  }
907  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
908  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(std::pow(c.norm(),2) - std::pow(c_new.norm(),2));
909  //ared = fdiff + (c.dot(l.dual()) - c_new.dot(l_new.dual())) + penalty_*(c.dot(c) - c_new.dot(c_new));
910  ared = fdiff + (c.apply(l) - c_new.apply(l_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
911 
912  // Store actual and predicted reduction.
913  ared_ = ared;
914  pred_ = pred;
915 
916  // Store step and vector norms.
917  snorm_ = s.norm();
918  nnorm_ = n.norm();
919  tnorm_ = t.norm();
920 
921  // Print diagnostics.
922  if (infoAC_) {
923  std::ios_base::fmtflags osFlags(os.flags());
924  os << std::scientific << std::setprecision(6);
925  os << "\n Trial step info ...\n";
926  os << " n_norm = " << nnorm_ << "\n";
927  os << " t_norm = " << tnorm_ << "\n";
928  os << " s_norm = " << snorm_ << "\n";
929  os << " xtrial_norm = " << xtrial->norm() << "\n";
930  os << " f_old = " << f << "\n";
931  os << " f_trial = " << f_new << "\n";
932  os << " f_old-f_trial = " << f-f_new << "\n";
933  os << " ||c_old|| = " << c.norm() << "\n";
934  os << " ||c_trial|| = " << c_new.norm() << "\n";
935  os << " ||Jac*t_preproj|| = " << linc_preproj << "\n";
936  os << " ||Jac*t_postproj|| = " << linc_postproj << "\n";
937  os << " ||t_tilde||/||t|| = " << t_orig->norm() / t.norm() << "\n";
938  os << " ||t_tilde||/||n+t|| = " << t_orig->norm() / snorm_ << "\n";
939  os << " # projections = " << num_proj << "\n";
940  os << " penalty param = " << penalty_ << "\n";
941  os << " ared = " << ared_ << "\n";
942  os << " pred = " << pred_ << "\n";
943  os << " ared/pred = " << ared_/pred_ << "\n";
944  os.flags(osFlags);
945  }
946 
947 } // accept
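
The acceptance logic above is driven by an augmented-Lagrangian-type merit function. In the notation of the code, with penalty parameter \(\rho\) (penalty_) and trial quantities subscripted by a plus sign, the actual reduction assembled at the end is

  \[
  \phi(x,\lambda) = f(x) + \langle\lambda,\,c(x)\rangle + \rho\,\|c(x)\|^2,
  \qquad
  \mathrm{ared} = \phi(x,\lambda) - \phi(x+s,\lambda_{+})
  = (f - f_{+}) + \bigl(\langle\lambda,\,c\rangle - \langle\lambda_{+},\,c_{+}\rangle\bigr)
  + \rho\bigl(\|c\|^2 - \|c_{+}\|^2\bigr),
  \]

while the predicted reduction adds \(\rho(\|c\|^2 - \|c'(x)n + c\|^2)\) to the model decrease part_pred. The quantity rpred measures the error introduced by the inexact projection of the tangential step; the inner loop keeps re-projecting with tighter tolerances until \(|\mathrm{rpred}|/\mathrm{pred} \le \tfrac{1}{2}(1-\eta)\) or the tolerance floor is reached.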
948 
949 template<typename Real>
950 void CompositeStepAlgorithm<Real>::updateRadius(Vector<Real> &x,
951  Vector<Real> &l,
952  const Vector<Real> &s,
953  Objective<Real> &obj,
954  Constraint<Real> &con,
955  std::ostream &os) {
956  Real zero(0);
957  Real one(1);
958  Real two(2);
959  Real seven(7);
960  Real half(0.5);
961  Real zp9(0.9);
962  Real zp8(0.8);
963  Real em12(1e-12);
964  Real zerotol = std::sqrt(ROL_EPSILON<Real>()); //zero;
965  Real ratio(zero);
966 
967  Ptr<Vector<Real> > g = gvec_->clone();
968  Ptr<Vector<Real> > ajl = gvec_->clone();
969  Ptr<Vector<Real> > gl = gvec_->clone();
970  Ptr<Vector<Real> > c = cvec_->clone();
971 
972  // Determine if the step gives sufficient reduction in the merit function,
973  // update the trust-region radius.
974  ratio = ared_/pred_;
975  if ((std::abs(ared_) < em12) && std::abs(pred_) < em12) {
976  ratio = one;
977  }
978  if (ratio >= eta_) {
979  x.plus(s);
980  if (ratio >= zp9) {
981  Delta_ = std::max(seven*snorm_, Delta_);
982  }
983  else if (ratio >= zp8) {
984  Delta_ = std::max(two*snorm_, Delta_);
985  }
986  obj.update(x,UpdateType::Accept,state_->iter);
987  con.update(x,UpdateType::Accept,state_->iter);
988  flagAC_ = 1;
989  }
990  else {
991  Delta_ = half*std::max(nnorm_, tnorm_);
992  obj.update(x,UpdateType::Revert,state_->iter);
993  con.update(x,UpdateType::Revert,state_->iter);
994  flagAC_ = 0;
995  } // if (ratio >= eta)
996 
997  Real val = obj.value(x, zerotol);
998  state_->nfval++;
999  obj.gradient(*g, x, zerotol);
1000  computeLagrangeMultiplier(l, x, *g, con, os);
1001  con.applyAdjointJacobian(*ajl, l, x, zerotol);
1002  gl->set(*g); gl->plus(*ajl);
1003  state_->ngrad++;
1004  con.value(*c, x, zerotol);
1005 
1006  state_->gradientVec->set(*gl);
1007  state_->constraintVec->set(*c);
1008 
1009  state_->value = val;
1010  state_->gnorm = gl->norm();
1011  state_->cnorm = c->norm();
1012  state_->iter++;
1013  state_->snorm = snorm_;
1014 
1015  // Update algorithm state
1016  //(state_->iterateVec)->set(x);
1017 }
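
In compact form, this routine evaluates the ratio r = ared/pred (set to one when both reductions are below 1e-12 in magnitude), accepts the step when \(r \ge \eta\), and updates the radius as

  \[
  \Delta \leftarrow
  \begin{cases}
  \max(7\,\|s\|,\;\Delta), & r \ge 0.9,\\
  \max(2\,\|s\|,\;\Delta), & 0.8 \le r < 0.9,\\
  \Delta, & \eta \le r < 0.8,\\
  \tfrac{1}{2}\,\max(\|n\|,\;\|t\|), & r < \eta \ \text{(step rejected)} .
  \end{cases}
  \]

The objective, constraint, multiplier, and algorithm state are then refreshed at the (possibly unchanged) iterate.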
1018 
1019 
1020 template<typename Real>
1021 void CompositeStepAlgorithm<Real>::initialize(Vector<Real> &x,
1022  const Vector<Real> &g,
1023  Vector<Real> &l,
1024  const Vector<Real> &c,
1025  Objective<Real> &obj,
1026  Constraint<Real> &con,
1027  std::ostream &os) {
1028  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
1030 
1031  // Initialize the algorithm state.
1032  state_->nfval = 0;
1033  state_->ncval = 0;
1034  state_->ngrad = 0;
1035 
1036  xvec_ = x.clone();
1037  gvec_ = g.clone();
1038  lvec_ = l.clone();
1039  cvec_ = c.clone();
1040 
1041  Ptr<Vector<Real> > ajl = gvec_->clone();
1042  Ptr<Vector<Real> > gl = gvec_->clone();
1043 
1044  // Update objective and constraint.
1045  obj.update(x,UpdateType::Initial,state_->iter);
1046  state_->value = obj.value(x, zerotol);
1047  state_->nfval++;
1048  con.update(x,UpdateType::Initial,state_->iter);
1049  con.value(*cvec_, x, zerotol);
1050  state_->cnorm = cvec_->norm();
1051  state_->ncval++;
1052  obj.gradient(*gvec_, x, zerotol);
1053 
1054  // Compute gradient of Lagrangian at new multiplier guess.
1055  computeLagrangeMultiplier(l, x, *gvec_, con, os);
1056  con.applyAdjointJacobian(*ajl, l, x, zerotol);
1057  gl->set(*gvec_); gl->plus(*ajl);
1058  state_->ngrad++;
1059  state_->gnorm = gl->norm();
1060 }
1061 
1062 
1063 template<typename Real>
1064 void CompositeStepAlgorithm<Real>::run(Vector<Real> &x,
1065  const Vector<Real> &g,
1066  Objective<Real> &obj,
1067  Constraint<Real> &econ,
1068  Vector<Real> &emul,
1069  const Vector<Real> &eres,
1070  std::ostream &outStream) {
1071 
1072  initialize(x, g, emul, eres, obj, econ, outStream);
1073 
1074  // Output.
1075  if (verbosity_ > 0) writeOutput(outStream, true);
1076 
1077  // Step vector.
1078  Ptr<Vector<Real> > s = x.clone();
1079 
1080  while (status_->check(*state_)) {
1081  computeTrial(*s, x, emul, obj, econ, outStream);
1082  updateRadius(x, emul, *s, obj, econ, outStream);
1083 
1084  // Update output.
1085  if (verbosity_ > 0) writeOutput(outStream, printHeader_);
1086  }
1087 
1088  if (verbosity_ > 0) TypeE::Algorithm<Real>::writeExitStatus(outStream);
1089 }
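
Below is a minimal, hypothetical driver for this run() method, intended only as a smoke test. QuadObjective and SumConstraint are illustrative stand-ins for user code, and the sketch deliberately relies on the default finite-difference derivatives and the default iterative augmented-system solver provided by the ROL base classes; a real application would implement gradient, hessVec, applyJacobian, and the adjoint operations analytically, or drive the algorithm through ROL::Solver.

  #include <iostream>
  #include <vector>
  #include "ROL_ParameterList.hpp"
  #include "ROL_StdVector.hpp"
  #include "ROL_Objective.hpp"
  #include "ROL_Constraint.hpp"
  #include "ROL_TypeE_CompositeStepAlgorithm.hpp"

  using RealT = double;

  // f(x) = x0^2 + 2*x1^2 (gradient/Hessian come from ROL's finite-difference defaults)
  struct QuadObjective : public ROL::Objective<RealT> {
    RealT value(const ROL::Vector<RealT> &x, RealT &tol) override {
      const auto &xs = *dynamic_cast<const ROL::StdVector<RealT>&>(x).getVector();
      return xs[0]*xs[0] + 2.0*xs[1]*xs[1];
    }
  };

  // c(x) = x0 + x1 - 1 = 0 (Jacobian and adjoints come from ROL's defaults)
  struct SumConstraint : public ROL::Constraint<RealT> {
    void value(ROL::Vector<RealT> &c, const ROL::Vector<RealT> &x, RealT &tol) override {
      auto &cs = *dynamic_cast<ROL::StdVector<RealT>&>(c).getVector();
      const auto &xs = *dynamic_cast<const ROL::StdVector<RealT>&>(x).getVector();
      cs[0] = xs[0] + xs[1] - 1.0;
    }
  };

  int main() {
    ROL::ParameterList list;
    list.sublist("General").set("Output Level", 1);

    // Optimization vector, gradient workspace, multiplier, constraint workspace.
    ROL::StdVector<RealT> x(ROL::makePtr<std::vector<RealT>>(2, 0.0));
    ROL::StdVector<RealT> g(ROL::makePtr<std::vector<RealT>>(2, 0.0));
    ROL::StdVector<RealT> l(ROL::makePtr<std::vector<RealT>>(1, 0.0));
    ROL::StdVector<RealT> c(ROL::makePtr<std::vector<RealT>>(1, 0.0));

    QuadObjective obj;
    SumConstraint con;

    ROL::TypeE::CompositeStepAlgorithm<RealT> algo(list);
    algo.run(x, g, obj, con, l, c, std::cout);  // x and l are overwritten with the solution
    return 0;
  }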
1090 
1091 
1092 template<typename Real>
1093 void CompositeStepAlgorithm<Real>::writeHeader(std::ostream& os) const {
1094  std::ios_base::fmtflags osFlags(os.flags());
1095  if (verbosity_>1) {
1096  os << std::string(144,'-') << std::endl;
1097  os << "Composite Step status output definitions" << std::endl << std::endl;
1098  os << " iter - Number of iterates (steps taken)" << std::endl;
1099  os << " fval - Objective function value" << std::endl;
1100  os << " cnorm - Norm of the constraint violation" << std::endl;
1101  os << " gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
1102  os << " snorm - Norm of the step" << std::endl;
1103  os << " delta - Trust-region radius" << std::endl;
1104  os << " nnorm - Norm of the quasinormal step" << std::endl;
1105  os << " tnorm - Norm of the tangential step" << std::endl;
1106  os << " #fval - Number of times the objective was computed" << std::endl;
1107  os << " #grad - Number of times the gradient was computed" << std::endl;
1108  os << " iterCG - Number of projected CG iterations" << std::endl;
1109  os << " flagCG - Flag returned by projected CG" << std::endl;
1110  os << " accept - Acceptance flag for the trial step" << std::endl;
1111  os << " linsys - Number of augmented solver calls/iterations" << std::endl;
1112  os << std::string(144,'-') << std::endl;
1113  }
1114  os << " ";
1115  os << std::setw(6) << std::left << "iter";
1116  os << std::setw(15) << std::left << "fval";
1117  os << std::setw(15) << std::left << "cnorm";
1118  os << std::setw(15) << std::left << "gLnorm";
1119  os << std::setw(15) << std::left << "snorm";
1120  os << std::setw(10) << std::left << "delta";
1121  os << std::setw(10) << std::left << "nnorm";
1122  os << std::setw(10) << std::left << "tnorm";
1123  os << std::setw(8) << std::left << "#fval";
1124  os << std::setw(8) << std::left << "#grad";
1125  os << std::setw(8) << std::left << "iterCG";
1126  os << std::setw(8) << std::left << "flagCG";
1127  os << std::setw(8) << std::left << "accept";
1128  os << std::setw(8) << std::left << "linsys";
1129  os << std::endl;
1130  os.flags(osFlags);
1131 }
1132 
1133 
1134 template<typename Real>
1135 void CompositeStepAlgorithm<Real>::writeName(std::ostream& os) const {
1136  std::ios_base::fmtflags osFlags(os.flags());
1137  os << std::endl << "Composite-Step Trust-Region Solver (Type E, Equality Constraints)";
1138  os << std::endl;
1139  os.flags(osFlags);
1140 }
1141 
1142 
1143 template<typename Real>
1144 void CompositeStepAlgorithm<Real>::writeOutput(std::ostream& os, const bool print_header) const {
1145  std::ios_base::fmtflags osFlags(os.flags());
1146  os << std::scientific << std::setprecision(6);
1147  if (state_->iter == 0) writeName(os);
1148  if (print_header) writeHeader(os);
1149  if (state_->iter == 0 ) {
1150  os << " ";
1151  os << std::setw(6) << std::left << state_->iter;
1152  os << std::setw(15) << std::left << state_->value;
1153  os << std::setw(15) << std::left << state_->cnorm;
1154  os << std::setw(15) << std::left << state_->gnorm;
1155  os << std::setw(15) << std::left << "---";
1156  os << std::setw(10) << std::left << "---";
1157  os << std::setw(10) << std::left << "---";
1158  os << std::setw(10) << std::left << "---";
1159  os << std::setw(8) << std::left << "---";
1160  os << std::setw(8) << std::left << "---";
1161  os << std::setw(8) << std::left << "---";
1162  os << std::setw(8) << std::left << "---";
1163  os << std::setw(8) << std::left << "---";
1164  os << std::setw(8) << std::left << "---";
1165  os << std::endl;
1166  }
1167  else {
1168  os << " ";
1169  os << std::setw(6) << std::left << state_->iter;
1170  os << std::setw(15) << std::left << state_->value;
1171  os << std::setw(15) << std::left << state_->cnorm;
1172  os << std::setw(15) << std::left << state_->gnorm;
1173  os << std::setw(15) << std::left << state_->snorm;
1174  os << std::scientific << std::setprecision(2);
1175  os << std::setw(10) << std::left << Delta_;
1176  os << std::setw(10) << std::left << nnorm_;
1177  os << std::setw(10) << std::left << tnorm_;
1178  os << std::scientific << std::setprecision(6);
1179  os << std::setw(8) << std::left << state_->nfval;
1180  os << std::setw(8) << std::left << state_->ngrad;
1181  os << std::setw(8) << std::left << iterCG_;
1182  os << std::setw(8) << std::left << flagCG_;
1183  os << std::setw(8) << std::left << flagAC_;
1184  os << std::left << totalCallLS_ << "/" << totalIterLS_;
1185  os << std::endl;
1186  }
1187  os.flags(osFlags);
1188 }
1189 
1190 
1191 template<typename Real>
1192 template<typename T>
1193 int CompositeStepAlgorithm<Real>::sgn(T val) const {
1194  return (T(0) < val) - (val < T(0));
1195 }
1196 
1197 
1198 template<typename Real>
1199 void CompositeStepAlgorithm<Real>::printInfoLS(const std::vector<Real> &res, std::ostream &os) const {
1200  if (infoLS_) {
1201  std::ios_base::fmtflags osFlags(os.flags());
1202  os << std::scientific << std::setprecision(8);
1203  os << std::endl << " Augmented System Solver:" << std::endl;
1204  os << " True Residual" << std::endl;
1205  for (unsigned j=0; j<res.size(); j++) {
1206  os << " " << std::left << std::setw(14) << res[j] << std::endl;
1207  }
1208  os << std::endl;
1209  os.flags(osFlags);
1210  }
1211 }
1212 
1213 
1214 template<typename Real>
1215 Real CompositeStepAlgorithm<Real>::setTolOSS(const Real intol) const {
1216  return tolOSSfixed_ ? tolOSS_ : intol;
1217 }
1218 
1219 } // namespace TypeE
1220 } // namespace ROL
1221 
1222 #endif