ROL_TypeB_LSecantBAlgorithm_Def.hpp

// @HEADER
// *****************************************************************************
// Rapid Optimization Library (ROL) Package
//
// Copyright 2014 NTESS and the ROL contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef ROL_TYPEB_LSecantBALGORITHM_DEF_HPP
#define ROL_TYPEB_LSecantBALGORITHM_DEF_HPP

namespace ROL {
namespace TypeB {

template<typename Real>
LSecantBAlgorithm<Real>::LSecantBAlgorithm(ParameterList &list,
                                           const Ptr<Secant<Real>> &secant) {
  // Set status test
  status_->reset();
  status_->add(makePtr<StatusTest<Real>>(list));

  ParameterList &trlist = list.sublist("Step").sublist("Line Search");
  state_->searchSize = static_cast<Real>(1);
  // Krylov Parameters
  maxit_ = list.sublist("General").sublist("Krylov").get("Iteration Limit", 20);
  tol1_  = list.sublist("General").sublist("Krylov").get("Absolute Tolerance", 1e-4);
  tol2_  = list.sublist("General").sublist("Krylov").get("Relative Tolerance", 1e-2);
  // Algorithm-Specific Parameters
  ROL::ParameterList &lmlist = trlist.sublist("Quasi-Newton").sublist("L-Secant-B");
  mu0_       = lmlist.get("Sufficient Decrease Parameter", 1e-2);
  spexp_     = lmlist.get("Relative Tolerance Exponent", 1.0);
  spexp_     = std::max(static_cast<Real>(1),std::min(spexp_,static_cast<Real>(2)));
  redlim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Reduction Steps", 10);
  explim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Expansion Steps", 10);
  alpha_     = lmlist.sublist("Cauchy Point").get("Initial Step Size", 1.0);
  normAlpha_ = lmlist.sublist("Cauchy Point").get("Normalize Initial Step Size", false);
  interpf_   = lmlist.sublist("Cauchy Point").get("Reduction Rate", 0.1);
  extrapf_   = lmlist.sublist("Cauchy Point").get("Expansion Rate", 10.0);
  qtol_      = lmlist.sublist("Cauchy Point").get("Decrease Tolerance", 1e-8);
  interpfPS_ = trlist.sublist("Line-Search Method").get("Backtracking Rate", 0.5);
  // Output Parameters
  verbosity_   = list.sublist("General").get("Output Level",0);
  writeHeader_ = verbosity_ > 2;
  // Secant Information
  useSecantPrecond_ = true;
  useSecantHessVec_ = true;
  ESecantMode mode = SECANTMODE_BOTH;
  if (secant == nullPtr) {
    std::string secantType = list.sublist("General").sublist("Secant").get("Type","Limited-Memory Secant");
    esec_   = StringToESecant(secantType);
    secant_ = SecantFactory<Real>(list,mode);
  }
}

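// For reference, the constructor above reads its settings from the following
// ParameterList sublists (defaults shown are the fallbacks in the get() calls):
//   General -> Krylov      : "Iteration Limit" (20), "Absolute Tolerance" (1e-4),
//                            "Relative Tolerance" (1e-2)
//   General                : "Output Level" (0)
//   General -> Secant      : "Type" ("Limited-Memory Secant"), used only when
//                            no secant object is supplied
//   Step -> Line Search -> Quasi-Newton -> L-Secant-B :
//                            "Sufficient Decrease Parameter" (1e-2),
//                            "Relative Tolerance Exponent" (1.0), and the
//                            "Cauchy Point" sublist
//   Step -> Line Search -> Line-Search Method : "Backtracking Rate" (0.5)
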
template<typename Real>
void LSecantBAlgorithm<Real>::initialize(Vector<Real>          &x,
                                         const Vector<Real>    &g,
                                         Objective<Real>       &obj,
                                         BoundConstraint<Real> &bnd,
                                         std::ostream          &outStream) {
  const Real one(1);
  hasEcon_ = true;
  if (proj_ == nullPtr) {
    proj_ = makePtr<PolyhedralProjection<Real>>(makePtrFromRef(bnd));
    hasEcon_ = false;
  }
  // Initialize data
  TypeB::Algorithm<Real>::initialize(x,g);
  // Update approximate gradient and approximate objective function.
  Real ftol = static_cast<Real>(0.1)*ROL_OVERFLOW<Real>();
  proj_->project(x,outStream); state_->nproj++;
  state_->iterateVec->set(x);
  obj.update(x,UpdateType::Initial,state_->iter);
  state_->value = obj.value(x,ftol); state_->nfval++;
  obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
  state_->stepVec->set(x);
  state_->stepVec->axpy(-one,state_->gradientVec->dual());
  proj_->project(*state_->stepVec,outStream); state_->nproj++;
  state_->stepVec->axpy(-one,x);
  state_->gnorm = state_->stepVec->norm();
  state_->snorm = ROL_INF<Real>();
  // Normalize initial CP step length
  if (normAlpha_) {
    alpha_ /= state_->gradientVec->norm();
  }
  // Initialize null space projection
  if (hasEcon_) {
    rcon_ = makePtr<ReducedLinearConstraint<Real>>(proj_->getLinearConstraint(),
                                                   makePtrFromRef(bnd),
                                                   makePtrFromRef(x));
    ns_   = makePtr<NullSpaceOperator<Real>>(rcon_,x,
                                             *proj_->getResidual());
  }
}

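// Each iteration of run() proceeds in three stages:
//   1. dcauchy: backtrack or extrapolate along the projected-gradient path to
//      find a Cauchy point that sufficiently decreases the quasi-Newton model.
//   2. dpcg: approximately minimize the model over the components that are
//      free (inactive) at the Cauchy point via preconditioned CG, then project
//      the trial point back onto the feasible set.
//   3. dsrch: run a projected backtracking line search on the combined step,
//      accept the iterate, and update the secant storage.
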
template<typename Real>
void LSecantBAlgorithm<Real>::run( Vector<Real>          &x,
                                   const Vector<Real>    &g,
                                   Objective<Real>       &obj,
                                   BoundConstraint<Real> &bnd,
                                   std::ostream          &outStream ) {
  const Real zero(0), one(1);
  Real tol0 = ROL_EPSILON<Real>(), ftol = ROL_EPSILON<Real>();
  Real gfnorm(0), gs(0), ftrial(0), q(0), tol(0), stol(0), snorm(0);
  // Initialize trust-region data
  initialize(x,g,obj,bnd,outStream);
  Ptr<Vector<Real>> s = x.clone();
  Ptr<Vector<Real>> gmod = g.clone(), gfree = g.clone();
  Ptr<Vector<Real>> pwa1 = x.clone(), pwa2 = x.clone(), pwa3 = x.clone();
  Ptr<Vector<Real>> dwa1 = g.clone(), dwa2 = g.clone(), dwa3 = g.clone();

  // Output
  if (verbosity_ > 0) writeOutput(outStream,true);

  while (status_->check(*state_)) {
    /**** SOLVE LINEARLY CONSTRAINED QUADRATIC SUBPROBLEM ****/
    // Compute Cauchy point
    snorm = dcauchy(*s,
                    alpha_,
                    q,
                    x,
                    state_->gradientVec->dual(),
                    *secant_,
                    *dwa1,
                    *dwa2,
                    outStream); // Approximately solve 1D optimization problem for alpha
    x.plus(*s);                 // Set x = proj(x[0] - alpha*g)

    // Model gradient at s = x[1] - x[0]
    gmod->set(*dwa1); // hessVec from Cauchy point computation
    gmod->plus(*state_->gradientVec);
    gfree->set(*gmod);
    //bnd.pruneActive(*gfree,x,zero);
    pwa1->set(gfree->dual());
    bnd.pruneActive(*pwa1,x,zero);
    gfree->set(pwa1->dual());
    if (hasEcon_) {
      gfnorm = gfree->norm();
      if (gfnorm > zero) {
        applyFreePrecond(*pwa1,*gfree,x,*secant_,bnd,tol0,*dwa1,*pwa2);
        gfnorm = pwa1->norm();
      }
    }
    else {
      gfnorm = gfree->norm();
    }
    SPiter_ = 0; SPflag_ = 0;
    if (verbosity_ > 1) {
      outStream << " Norm of free gradient components: " << gfnorm << std::endl;
      outStream << std::endl;
    }

    // Linearly constrained quadratic subproblem solve
    // Run CG for subspace minimization
    tol  = std::min(tol1_,tol2_*std::pow(gfnorm,spexp_));
    stol = tol; //zero;
    if (gfnorm > zero) {
      snorm = dpcg(*s, SPflag_, SPiter_, *gfree, x, *secant_, bnd,
                   tol, stol, maxit_,
                   *pwa1, *dwa1, *pwa2, *dwa2, *pwa3, *dwa3,
                   outStream);
      if (verbosity_ > 1) {
        outStream << " Computation of CG step" << std::endl;
        outStream << " CG step length: " << snorm << std::endl;
        outStream << " Number of CG iterations: " << SPiter_ << std::endl;
        outStream << " CG flag: " << SPflag_ << std::endl;
        outStream << std::endl;
      }
      x.plus(*s);
      // Project to ensure that step is feasible
      // May lose feasibility because of numerical errors
      proj_->project(x,outStream); state_->nproj++;
    }
    state_->stepVec->set(x);
    state_->stepVec->axpy(-one,*state_->iterateVec);
    gs = state_->gradientVec->apply(*state_->stepVec);

    // Projected search
    state_->snorm = dsrch(*state_->iterateVec,*state_->stepVec,ftrial,state_->searchSize,
                          state_->value,gs,obj,*pwa1,outStream);
    x.set(*state_->iterateVec);

    // Update algorithm state
    state_->iter++;
    state_->value = ftrial;
    obj.update(x,UpdateType::Accept,state_->iter);
    dwa1->set(*state_->gradientVec);
    obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
    state_->gnorm = TypeB::Algorithm<Real>::optimalityCriterion(x,*state_->gradientVec,*pwa1,outStream);

    // Update secant information
    secant_->updateStorage(x,*state_->gradientVec,*dwa1,*state_->stepVec,
                           state_->snorm,state_->iter);

    // Update Output
    if (verbosity_ > 0) writeOutput(outStream,writeHeader_);
  }
  if (verbosity_ > 0) TypeB::Algorithm<Real>::writeExitStatus(outStream);
}

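// dgpstep: compute the projected step s = P(x + alpha*w) - x, where P is the
// polyhedral projection onto the feasible set, and return its norm.
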
template<typename Real>
Real LSecantBAlgorithm<Real>::dgpstep(Vector<Real> &s, const Vector<Real> &w,
                                      const Vector<Real> &x, const Real alpha,
                                      std::ostream &outStream) const {
  const Real one(1);
  s.set(x); s.axpy(alpha,w);
  proj_->project(s,outStream); state_->nproj++;
  s.axpy(-one,x);
  return s.norm();
}

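// dcauchy: find a step length alpha so that the quasi-Newton model
//   q(s) = 0.5*<s,B*s> + <g,s>,  with s = P(x - alpha*g) - x,
// satisfies the sufficient decrease condition q(s) <= mu0*<g,s>.  Starting
// from the current alpha, the step is either reduced (rate interpf_, at most
// redlim_ times) or expanded (rate extrapf_, at most explim_ times).
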
template<typename Real>
Real LSecantBAlgorithm<Real>::dcauchy(Vector<Real> &s,
                                      Real &alpha,
                                      Real &q,
                                      const Vector<Real> &x,
                                      const Vector<Real> &g,
                                      Secant<Real> &secant,
                                      Vector<Real> &dwa, Vector<Real> &dwa1,
                                      std::ostream &outStream) {
  const Real half(0.5);
  bool interp = false;
  Real gs(0), snorm(0);
  // Compute s = P(x[0] - alpha g[0])
  snorm = dgpstep(s,g,x,-alpha,outStream);
  secant.applyB(dwa,s);
  gs = s.dot(g);
  q  = half * s.apply(dwa) + gs;
  interp = (q > mu0_*gs);
  // Either increase or decrease alpha to find approximate Cauchy point
  int cnt = 0;
  if (interp) {
    bool search = true;
    while (search) {
      alpha *= interpf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      secant.applyB(dwa,s);
      gs = s.dot(g);
      q  = half * s.apply(dwa) + gs;
      search = (q > mu0_*gs) && (cnt < redlim_);
      cnt++;
    }
  }
  else {
    bool search = true;
    Real alphas = alpha;
    Real qs = q;
    dwa1.set(dwa);
    while (search) {
      alpha *= extrapf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      if (cnt < explim_) {
        secant.applyB(dwa,s);
        gs = s.dot(g);
        q  = half * s.apply(dwa) + gs;
        if (q <= mu0_*gs && std::abs(q-qs) > qtol_*std::abs(qs)) {
          dwa1.set(dwa);
          search = true;
          alphas = alpha;
          qs     = q;
        }
        else {
          q = qs;
          dwa.set(dwa1);
          search = false;
        }
      }
      else {
        search = false;
      }
      cnt++;
    }
    alpha = alphas;
    snorm = dgpstep(s,g,x,-alpha,outStream);
  }
  if (verbosity_ > 1) {
    outStream << " Cauchy point" << std::endl;
    outStream << " Step length (alpha): " << alpha << std::endl;
    outStream << " Step length (alpha*g): " << snorm << std::endl;
    outStream << " Model decrease: " << -q << std::endl;
    if (!interp) {
      outStream << " Number of extrapolation steps: " << cnt << std::endl;
    }
  }
  return snorm;
}

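// dsrch: projected backtracking line search along s.  Starting from beta = 1,
// the step is scaled by interpfPS_ until the sufficient decrease condition
//   f(x + beta*s) <= f(x) + mu0*beta*<g,s>
// holds; x is then updated in place and the length of the accepted step
// beta*s is returned.
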
template<typename Real>
Real LSecantBAlgorithm<Real>::dsrch(Vector<Real> &x, Vector<Real> &s, Real &fnew, Real &beta,
                                    Real fold, Real gs, Objective<Real> &obj,
                                    Vector<Real> &pwa, std::ostream &outStream) {
  const Real one(1);
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Real snorm(0);
  int nsteps = 0;
  // Reduce beta until sufficient decrease is satisfied
  bool search = true;
  beta = one;
  while (search) {
    nsteps++;
    pwa.set(x); pwa.axpy(beta,s);
    obj.update(pwa,UpdateType::Trial);
    fnew = obj.value(pwa,tol); state_->nfval++;
    if (fnew <= fold + mu0_*beta*gs) search = false;
    else beta *= interpfPS_;
  }
  s.scale(beta);
  x.plus(s);
  snorm = s.norm();
  if (verbosity_ > 1) {
    outStream << std::endl;
    outStream << " Line search" << std::endl;
    outStream << " Step length (beta): " << beta << std::endl;
    outStream << " Step length (beta*s): " << snorm << std::endl;
    outStream << " New objective value: " << fnew << std::endl;
    outStream << " Old objective value: " << fold << std::endl;
    outStream << " Descent verification (gs): " << gs << std::endl;
    outStream << " Number of steps: " << nsteps << std::endl;
  }
  return snorm;
}

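// dpcg: preconditioned conjugate gradient solve of the quasi-Newton model over
// the free variables, with the Hessian and preconditioner applied through
// applyFreeHessian and applyFreePrecond.  If a CG iterate would violate the
// bounds, a bisection on the step length recovers the largest feasible step
// and the solve exits with iflag = 2; iflag = 0 means the residual tolerance
// was met and iflag = 1 means the iteration limit was reached.
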
template<typename Real>
Real LSecantBAlgorithm<Real>::dpcg(Vector<Real> &w, int &iflag, int &iter,
                                   const Vector<Real> &g, const Vector<Real> &x,
                                   Secant<Real> &secant,
                                   BoundConstraint<Real> &bnd,
                                   const Real tol, const Real stol, const int itermax,
                                   Vector<Real> &p, Vector<Real> &q, Vector<Real> &r,
                                   Vector<Real> &t, Vector<Real> &pwa, Vector<Real> &dwa,
                                   std::ostream &outStream) const {
  // p = step (primal)
  // q = hessian applied to step p (dual)
  // t = gradient (dual)
  // r = preconditioned gradient (primal)
  Real tol0 = ROL_EPSILON<Real>(), tolB(0);
  const Real minIntSize = ROL_EPSILON<Real>();
  const Real zero(0), half(0.5), one(1), two(2);
  Real rho(0), kappa(0), beta(0), sigma(0), alpha(0), alphatmp(0);
  Real rtr(0), tnorm(0), sMs(0), pMp(0), sMp(0);
  iter = 0; iflag = 0;
  // Initialize step
  w.zero();
  // Compute residual
  t.set(g); t.scale(-one);
  // Preconditioned residual
  applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
  //rho = r.dot(t.dual());
  rho = r.apply(t);
  // Initialize direction
  p.set(r);
  pMp = (!hasEcon_ ? rho : p.dot(p)); // If no equality constraint, use preconditioned norm
  // Iterate CG
  for (iter = 0; iter < itermax; ++iter) {
    // Apply Hessian to direction dir
    applyFreeHessian(q,p,x,secant,bnd,tol0,pwa);
    // Compute step length
    kappa = p.apply(q);
    alpha = rho/kappa;
    // Check if iterate is feasible
    pwa.set(x); pwa.plus(w); pwa.axpy(alpha,p);
    if (!bnd.isFeasible(pwa)) { // Bisection to find the largest feasible step size
      sigma = zero;
      tolB  = minIntSize * alpha;
      while (alpha-sigma > tolB) {
        alphatmp = sigma + half * (alpha - sigma);
        pwa.set(x); pwa.plus(w); pwa.axpy(alphatmp,p);
        if (!bnd.isFeasible(pwa)) alpha = alphatmp;
        else sigma = alphatmp;
      }
      w.axpy(sigma,p);
      t.axpy(-sigma,q);
      sMs   = sMs + two*sigma*sMp + sigma*sigma*pMp;
      iflag = 2;
      break;
    }
    // Update iterate and residual
    w.axpy(alpha,p);
    t.axpy(-alpha,q);
    applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
    // Exit if residual tolerance is met
    //rtr = r.dot(t.dual());
    rtr   = r.apply(t);
    tnorm = t.norm();
    if (rtr <= stol*stol || tnorm <= tol) {
      sMs   = sMs + two*alpha*sMp + alpha*alpha*pMp;
      iflag = 0;
      break;
    }
    // Compute p = r + beta * p
    beta = rtr/rho;
    p.scale(beta); p.plus(r);
    rho = rtr;
    // Update dot products
    //   sMs = <s, inv(M)s> or <s, s> if equality constraint present
    //   sMp = <s, inv(M)p> or <s, p> if equality constraint present
    //   pMp = <p, inv(M)p> or <p, p> if equality constraint present
    sMs = sMs + two*alpha*sMp + alpha*alpha*pMp;
    sMp = beta*(sMp + alpha*pMp);
    pMp = (!hasEcon_ ? rho : p.dot(p)) + beta*beta*pMp;
  }
  // Check iteration count
  if (iter == itermax) iflag = 1;
  if (iflag != 1) iter++;
  return std::sqrt(sMs); // w.norm();
}

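// applyFreeHessian: apply the secant Hessian approximation B to v restricted
// to the free variables; components in the eps-active set are pruned both
// before and after the product, so hv vanishes on the active set.
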
template<typename Real>
void LSecantBAlgorithm<Real>::applyFreeHessian(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &pwa) const {
  const Real zero(0);
  pwa.set(v);
  bnd.pruneActive(pwa,x,zero);
  secant.applyB(hv,pwa);
  pwa.set(hv.dual());
  bnd.pruneActive(pwa,x,zero);
  hv.set(pwa.dual());
}

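// applyFreePrecond: apply the secant approximation of the inverse Hessian to v
// on the free variables.  If the polyhedral projection carries a linear
// equality constraint (hasEcon_), the preconditioner is instead a projection
// onto the null space of the reduced constraint Jacobian.
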
template<typename Real>
void LSecantBAlgorithm<Real>::applyFreePrecond(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &dwa,
                                               Vector<Real> &pwa) const {
  if (!hasEcon_) {
    const Real zero(0);
    pwa.set(v.dual());
    bnd.pruneActive(pwa,x,zero);
    dwa.set(pwa.dual());
    secant.applyH(hv,dwa);
    bnd.pruneActive(hv,x,zero);
  }
  else {
    // Perform null space projection
    rcon_->setX(makePtrFromRef(x));
    ns_->update(x);
    pwa.set(v.dual());
    ns_->apply(hv,pwa,tol);
  }
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  if (verbosity_ > 1) {
    os << std::string(114,'-') << std::endl;
    os << " L-Secant-B line search method status output definitions" << std::endl << std::endl;
    os << "  iter   - Number of iterates (steps taken)" << std::endl;
    os << "  value  - Objective function value" << std::endl;
    os << "  gnorm  - Norm of the gradient" << std::endl;
    os << "  snorm  - Norm of the step (update to optimization vector)" << std::endl;
    os << "  LSpar  - Line-Search parameter" << std::endl;
    os << "  #fval  - Number of times the objective function was evaluated" << std::endl;
    os << "  #grad  - Number of times the gradient was computed" << std::endl;
    os << "  #proj  - Number of times the projection was applied" << std::endl;
    os << "  iterCG - Number of Truncated CG iterations" << std::endl << std::endl;
    os << "  flagCG - Truncated CG flag" << std::endl;
    os << "    0 - Converged" << std::endl;
    os << "    1 - Iteration Limit Exceeded" << std::endl;
    os << "    2 - Bounds Exceeded" << std::endl;
    os << std::string(114,'-') << std::endl;
  }
  os << " ";
  os << std::setw(6)  << std::left << "iter";
  os << std::setw(15) << std::left << "value";
  os << std::setw(15) << std::left << "gnorm";
  os << std::setw(15) << std::left << "snorm";
  os << std::setw(15) << std::left << "LSpar";
  os << std::setw(10) << std::left << "#fval";
  os << std::setw(10) << std::left << "#grad";
  os << std::setw(10) << std::left << "#proj";
  os << std::setw(10) << std::left << "iterCG";
  os << std::setw(10) << std::left << "flagCG";
  os << std::endl;
  os.flags(osFlags);
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeName( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::endl << "L-Secant-B Line-Search Method (Type B, Bound Constraints)" << std::endl;
  os.flags(osFlags);
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeOutput( std::ostream& os, bool write_header ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( write_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    os << " ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << "---";
    os << std::setw(15) << std::left << state_->searchSize;
    os << std::setw(10) << std::left << state_->nfval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nproj;
    os << std::setw(10) << std::left << "---";
    os << std::setw(10) << std::left << "---";
    os << std::endl;
  }
  else {
    os << " ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << state_->snorm;
    os << std::setw(15) << std::left << state_->searchSize;
    os << std::setw(10) << std::left << state_->nfval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nproj;
    os << std::setw(10) << std::left << SPiter_;
    os << std::setw(10) << std::left << SPflag_;
    os << std::endl;
  }
  os.flags(osFlags);
}

} // namespace TypeB
} // namespace ROL

#endif
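
A minimal usage sketch follows. It is not part of the header above: the include
paths, the MyObjective and MyBounds classes (stand-ins for user implementations
of ROL::Objective<Real> and ROL::BoundConstraint<Real>), and the chosen
parameter values are illustrative assumptions; only the constructor and run()
signatures defined in this file are relied upon.

  #include <iostream>
  #include <vector>
  #include "ROL_TypeB_LSecantBAlgorithm.hpp"  // assumed declaration header
  #include "ROL_StdVector.hpp"

  int main() {
    using Real = double;
    ROL::ParameterList list;
    // Entries read by the constructor; anything omitted falls back to the
    // defaults hard-coded in the get() calls above.
    list.sublist("General").set("Output Level", 1);
    list.sublist("General").sublist("Krylov").set("Iteration Limit", 50);
    list.sublist("Step").sublist("Line Search").sublist("Quasi-Newton")
        .sublist("L-Secant-B").set("Sufficient Decrease Parameter", 1e-2);

    // Optimization vector and user-supplied problem data (hypothetical types).
    auto xp  = ROL::makePtr<std::vector<Real>>(10, 0.0);
    auto x   = ROL::makePtr<ROL::StdVector<Real>>(xp);
    auto obj = ROL::makePtr<MyObjective>();  // models ROL::Objective<Real>
    auto bnd = ROL::makePtr<MyBounds>();     // models ROL::BoundConstraint<Real>

    // Construct the algorithm (a limited-memory secant is built internally
    // when no secant object is passed) and run it on the bound-constrained
    // problem; x->dual() supplies a vector from the gradient space.
    ROL::TypeB::LSecantBAlgorithm<Real> algo(list);
    algo.run(*x, x->dual(), *obj, *bnd, std::cout);
    return 0;
  }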