ROL_TypeB_LSecantBAlgorithm_Def.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_TYPEB_LSecantBALGORITHM_DEF_HPP
#define ROL_TYPEB_LSecantBALGORITHM_DEF_HPP

namespace ROL {
namespace TypeB {

template<typename Real>
LSecantBAlgorithm<Real>::LSecantBAlgorithm(ParameterList &list,
                                           const Ptr<Secant<Real>> &secant) {
  // Set status test
  status_->reset();
  status_->add(makePtr<StatusTest<Real>>(list));

  ParameterList &trlist = list.sublist("Step").sublist("Line Search");
  state_->searchSize = static_cast<Real>(1);
  // Krylov Parameters
  maxit_ = list.sublist("General").sublist("Krylov").get("Iteration Limit", 20);
  tol1_  = list.sublist("General").sublist("Krylov").get("Absolute Tolerance", 1e-4);
  tol2_  = list.sublist("General").sublist("Krylov").get("Relative Tolerance", 1e-2);
  // Algorithm-Specific Parameters
  ROL::ParameterList &lmlist = trlist.sublist("Quasi-Newton").sublist("L-Secant-B");
  mu0_       = lmlist.get("Sufficient Decrease Parameter", 1e-2);
  spexp_     = lmlist.get("Relative Tolerance Exponent", 1.0);
  spexp_     = std::max(static_cast<Real>(1),std::min(spexp_,static_cast<Real>(2)));
  redlim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Reduction Steps", 10);
  explim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Expansion Steps", 10);
  alpha_     = lmlist.sublist("Cauchy Point").get("Initial Step Size", 1.0);
  normAlpha_ = lmlist.sublist("Cauchy Point").get("Normalize Initial Step Size", false);
  interpf_   = lmlist.sublist("Cauchy Point").get("Reduction Rate", 0.1);
  extrapf_   = lmlist.sublist("Cauchy Point").get("Expansion Rate", 10.0);
  qtol_      = lmlist.sublist("Cauchy Point").get("Decrease Tolerance", 1e-8);
  interpfPS_ = trlist.sublist("Line-Search Method").get("Backtracking Rate", 0.5);
  // Output Parameters
  verbosity_   = list.sublist("General").get("Output Level",0);
  writeHeader_ = verbosity_ > 2;
  // Secant Information
  useSecantPrecond_ = true;
  useSecantHessVec_ = true;
  ESecantMode mode = SECANTMODE_BOTH;
  if (secant == nullPtr) {
    esec_   = StringToESecant(list.sublist("General").sublist("Secant").get("Type","Limited-Memory Secant"));
    secant_ = SecantFactory<Real>(list,mode);
  }
}

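// Editorial note (not part of the original source): the constructor above
// reads its inputs from the following ParameterList layout (defaults shown);
// the "Status Test" sublist is consumed by the StatusTest added to status_.
//
//   General
//     Output Level                          (int,    0)
//     Krylov
//       Iteration Limit                     (int,    20)
//       Absolute Tolerance                  (double, 1e-4)
//       Relative Tolerance                  (double, 1e-2)
//     Secant
//       Type                                (string, "Limited-Memory Secant")
//   Step
//     Line Search
//       Line-Search Method
//         Backtracking Rate                 (double, 0.5)
//       Quasi-Newton
//         L-Secant-B
//           Sufficient Decrease Parameter   (double, 1e-2)
//           Relative Tolerance Exponent     (double, 1.0, clipped to [1,2])
//           Cauchy Point
//             Maximum Number of Reduction Steps  (int,    10)
//             Maximum Number of Expansion Steps  (int,    10)
//             Initial Step Size                  (double, 1.0)
//             Normalize Initial Step Size        (bool,   false)
//             Reduction Rate                     (double, 0.1)
//             Expansion Rate                     (double, 10.0)
//             Decrease Tolerance                 (double, 1e-8)
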
template<typename Real>
void LSecantBAlgorithm<Real>::initialize(Vector<Real>          &x,
                                         const Vector<Real>    &g,
                                         Objective<Real>       &obj,
                                         BoundConstraint<Real> &bnd,
                                         std::ostream          &outStream) {
  const Real one(1);
  hasEcon_ = true;
  if (proj_ == nullPtr) {
    proj_ = makePtr<PolyhedralProjection<Real>>(makePtrFromRef(bnd));
    hasEcon_ = false;
  }
  // Initialize data
  TypeB::Algorithm<Real>::initialize(x,g);
  // Update approximate gradient and approximate objective function.
  Real ftol = static_cast<Real>(0.1)*ROL_OVERFLOW<Real>();
  proj_->project(x,outStream); state_->nproj++;
  state_->iterateVec->set(x);
  obj.update(x,UpdateType::Initial,state_->iter);
  state_->value = obj.value(x,ftol); state_->nfval++;
  obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
  state_->stepVec->set(x);
  state_->stepVec->axpy(-one,state_->gradientVec->dual());
  proj_->project(*state_->stepVec,outStream); state_->nproj++;
  state_->stepVec->axpy(-one,x);
  state_->gnorm = state_->stepVec->norm();
  state_->snorm = ROL_INF<Real>();
  // Normalize initial CP step length
  if (normAlpha_) {
    alpha_ /= state_->gradientVec->norm();
  }
  // Initialize null space projection
  if (hasEcon_) {
    rcon_ = makePtr<ReducedLinearConstraint<Real>>(proj_->getLinearConstraint(),
                                                   makePtrFromRef(bnd),
                                                   makePtrFromRef(x));
    ns_   = makePtr<NullSpaceOperator<Real>>(rcon_,x,
                                             *proj_->getResidual());
  }
}

template<typename Real>
void LSecantBAlgorithm<Real>::run( Vector<Real>          &x,
                                   const Vector<Real>    &g,
                                   Objective<Real>       &obj,
                                   BoundConstraint<Real> &bnd,
                                   std::ostream          &outStream ) {
  const Real zero(0), one(1);
  Real tol0 = ROL_EPSILON<Real>(), ftol = ROL_EPSILON<Real>();
  Real gfnorm(0), gs(0), ftrial(0), q(0), tol(0), stol(0), snorm(0);
  // Initialize trust-region data
  initialize(x,g,obj,bnd,outStream);
  Ptr<Vector<Real>> s = x.clone();
  Ptr<Vector<Real>> gmod = g.clone(), gfree = g.clone();
  Ptr<Vector<Real>> pwa1 = x.clone(), pwa2 = x.clone(), pwa3 = x.clone();
  Ptr<Vector<Real>> dwa1 = g.clone(), dwa2 = g.clone(), dwa3 = g.clone();

  // Output
  if (verbosity_ > 0) writeOutput(outStream,true);

  while (status_->check(*state_)) {
    /**** SOLVE LINEARLY CONSTRAINED QUADRATIC SUBPROBLEM ****/
    // Compute Cauchy point
    snorm = dcauchy(*s,
                    alpha_,
                    q,
                    x,
                    state_->gradientVec->dual(),
                    *secant_,
                    *dwa1,
                    *dwa2,
                    outStream); // Approximately solve 1D optimization problem for alpha
    x.plus(*s);                 // Set x = proj(x[0] - alpha*g)

    // Model gradient at s = x[1] - x[0]
    gmod->set(*dwa1); // hessVec from Cauchy point computation
    gmod->plus(*state_->gradientVec);
    gfree->set(*gmod);
    //bnd.pruneActive(*gfree,x,zero);
    pwa1->set(gfree->dual());
    bnd.pruneActive(*pwa1,x,zero);
    gfree->set(pwa1->dual());
    if (hasEcon_) {
      applyFreePrecond(*pwa1,*gfree,x,*secant_,bnd,tol0,*dwa1,*pwa2);
      gfnorm = pwa1->norm();
    }
    else {
      gfnorm = gfree->norm();
    }
    SPiter_ = 0; SPflag_ = 0;
    if (verbosity_ > 1) {
      outStream << " Norm of free gradient components: " << gfnorm << std::endl;
      outStream << std::endl;
    }

    // Linearly constrained quadratic subproblem solve
    // Run CG for subspace minimization
    tol  = std::min(tol1_,tol2_*std::pow(gfnorm,spexp_));
    stol = tol; //zero;
    if (gfnorm > zero) {
      snorm = dpcg(*s, SPflag_, SPiter_, *gfree, x, *secant_, bnd,
                   tol, stol, maxit_,
                   *pwa1, *dwa1, *pwa2, *dwa2, *pwa3, *dwa3,
                   outStream);
      if (verbosity_ > 1) {
        outStream << " Computation of CG step" << std::endl;
        outStream << " CG step length: " << snorm << std::endl;
        outStream << " Number of CG iterations: " << SPiter_ << std::endl;
        outStream << " CG flag: " << SPflag_ << std::endl;
        outStream << std::endl;
      }
      x.plus(*s);
      // Project to ensure that step is feasible
      // May lose feasibility because of numerical errors
      proj_->project(x,outStream); state_->nproj++;
    }
    state_->stepVec->set(x);
    state_->stepVec->axpy(-one,*state_->iterateVec);
    gs = state_->gradientVec->apply(*state_->stepVec);

    // Projected search
    state_->snorm = dsrch(*state_->iterateVec,*state_->stepVec,ftrial,state_->searchSize,
                          state_->value,gs,obj,*pwa1,outStream);
    x.set(*state_->iterateVec);

    // Update algorithm state
    state_->iter++;
    state_->value = ftrial;
    obj.update(x,UpdateType::Accept,state_->iter);
    dwa1->set(*state_->gradientVec);
    obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
    state_->gnorm = TypeB::Algorithm<Real>::optimalityCriterion(x,*state_->gradientVec,*pwa1,outStream);

    // Update secant information
    secant_->updateStorage(x,*state_->gradientVec,*dwa1,*state_->stepVec,
                           state_->snorm,state_->iter);

    // Update Output
    if (verbosity_ > 0) writeOutput(outStream,writeHeader_);
  }
  if (verbosity_ > 0) TypeB::Algorithm<Real>::writeExitStatus(outStream);
}

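// Editorial note (not part of the original source): each pass through the
// while-loop in run() above performs three stages on the bound-constrained
// (and optionally linearly constrained) problem handled by proj_:
//   1. dcauchy - backtrack/extrapolate along the projected gradient path
//                P(x - alpha*g) to obtain an approximate Cauchy point;
//   2. dpcg    - run preconditioned CG on the variables that are free
//                (inactive) at the Cauchy point to refine the step;
//   3. dsrch   - perform a projected backtracking line search on the
//                combined step, then update the limited-memory secant pairs.
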
template<typename Real>
Real LSecantBAlgorithm<Real>::dgpstep(Vector<Real> &s, const Vector<Real> &w,
                                      const Vector<Real> &x, const Real alpha,
                                      std::ostream &outStream) const {
  const Real one(1);
  s.set(x); s.axpy(alpha,w);
  proj_->project(s,outStream); state_->nproj++;
  s.axpy(-one,x);
  return s.norm();
}

template<typename Real>
Real LSecantBAlgorithm<Real>::dcauchy(Vector<Real> &s,
                                      Real &alpha,
                                      Real &q,
                                      const Vector<Real> &x,
                                      const Vector<Real> &g,
                                      Secant<Real> &secant,
                                      Vector<Real> &dwa, Vector<Real> &dwa1,
                                      std::ostream &outStream) {
  const Real half(0.5);
  bool interp = false;
  Real gs(0), snorm(0);
  // Compute s = P(x[0] - alpha g[0])
  snorm = dgpstep(s,g,x,-alpha,outStream);
  secant.applyB(dwa,s);
  gs = s.dot(g);
  q  = half * s.apply(dwa) + gs;
  interp = (q > mu0_*gs);
  // Either increase or decrease alpha to find approximate Cauchy point
  int cnt = 0;
  if (interp) {
    bool search = true;
    while (search) {
      alpha *= interpf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      secant.applyB(dwa,s);
      gs = s.dot(g);
      q  = half * s.apply(dwa) + gs;
      search = (q > mu0_*gs) && (cnt < redlim_);
      cnt++;
    }
  }
  else {
    bool search = true;
    Real alphas = alpha;
    Real qs = q;
    dwa1.set(dwa);
    while (search) {
      alpha *= extrapf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      if (cnt < explim_) {
        secant.applyB(dwa,s);
        gs = s.dot(g);
        q  = half * s.apply(dwa) + gs;
        if (q <= mu0_*gs && std::abs(q-qs) > qtol_*std::abs(qs)) {
          dwa1.set(dwa);
          search = true;
          alphas = alpha;
          qs     = q;
        }
        else {
          q = qs;
          dwa.set(dwa1);
          search = false;
        }
      }
      else {
        search = false;
      }
      cnt++;
    }
    alpha = alphas;
    snorm = dgpstep(s,g,x,-alpha,outStream);
  }
  if (verbosity_ > 1) {
    outStream << " Cauchy point" << std::endl;
    outStream << " Step length (alpha): " << alpha << std::endl;
    outStream << " Step length (alpha*g): " << snorm << std::endl;
    outStream << " Model decrease: " << -q << std::endl;
    if (!interp) {
      outStream << " Number of extrapolation steps: " << cnt << std::endl;
    }
  }
  return snorm;
}

template<typename Real>
Real LSecantBAlgorithm<Real>::dsrch(Vector<Real> &x, Vector<Real> &s, Real &fnew, Real &beta,
                                    Real fold, Real gs, Objective<Real> &obj,
                                    Vector<Real> &pwa, std::ostream &outStream) {
  const Real one(1);
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Real snorm(0);
  int nsteps = 0;
  // Reduce beta until sufficient decrease is satisfied
  bool search = true;
  beta = one;
  while (search) {
    nsteps++;
    pwa.set(x); pwa.axpy(beta,s);
    obj.update(pwa,UpdateType::Trial);
    fnew = obj.value(pwa,tol); state_->nfval++;
    if (fnew <= fold + mu0_*beta*gs) search = false;
    else beta *= interpfPS_;
  }
  s.scale(beta);
  x.plus(s);
  snorm = s.norm();
  if (verbosity_ > 1) {
    outStream << std::endl;
    outStream << " Line search" << std::endl;
    outStream << " Step length (beta): " << beta << std::endl;
    outStream << " Step length (beta*s): " << snorm << std::endl;
    outStream << " New objective value: " << fnew << std::endl;
    outStream << " Old objective value: " << fold << std::endl;
    outStream << " Descent verification (gs): " << gs << std::endl;
    outStream << " Number of steps: " << nsteps << std::endl;
  }
  return snorm;
}

template<typename Real>
Real LSecantBAlgorithm<Real>::dpcg(Vector<Real> &w, int &iflag, int &iter,
                                   const Vector<Real> &g, const Vector<Real> &x,
                                   Secant<Real> &secant,
                                   BoundConstraint<Real> &bnd,
                                   const Real tol, const Real stol, const int itermax,
                                   Vector<Real> &p, Vector<Real> &q, Vector<Real> &r,
                                   Vector<Real> &t, Vector<Real> &pwa, Vector<Real> &dwa,
                                   std::ostream &outStream) const {
  // p = step (primal)
  // q = hessian applied to step p (dual)
  // t = gradient (dual)
  // r = preconditioned gradient (primal)
  Real tol0 = ROL_EPSILON<Real>(), tolB(0);
  const Real minIntSize = ROL_EPSILON<Real>();
  const Real zero(0), half(0.5), one(1), two(2);
  Real rho(0), kappa(0), beta(0), sigma(0), alpha(0), alphatmp(0);
  Real rtr(0), tnorm(0), sMs(0), pMp(0), sMp(0);
  iter = 0; iflag = 0;
  // Initialize step
  w.zero();
  // Compute residual
  t.set(g); t.scale(-one);
  // Preconditioned residual
  applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
  //rho = r.dot(t.dual());
  rho = r.apply(t);
  // Initialize direction
  p.set(r);
  pMp = (!hasEcon_ ? rho : p.dot(p)); // If no equality constraint, use preconditioned norm
  // Iterate CG
  for (iter = 0; iter < itermax; ++iter) {
    // Apply Hessian to direction dir
    applyFreeHessian(q,p,x,secant,bnd,tol0,pwa);
    // Compute step length
    kappa = p.apply(q);
    alpha = rho/kappa;
    // Check if iterate is feasible
    pwa.set(x); pwa.plus(w); pwa.axpy(alpha,p);
    if (!bnd.isFeasible(pwa)) { // Bisection to find the largest feasible step size
      sigma = zero;
      tolB  = minIntSize * alpha;
      while (alpha-sigma > tolB) {
        alphatmp = sigma + half * (alpha - sigma);
        pwa.set(x); pwa.plus(w); pwa.axpy(alphatmp,p);
        if (!bnd.isFeasible(pwa)) alpha = alphatmp;
        else                      sigma = alphatmp;
      }
      w.axpy(sigma,p);
      t.axpy(-sigma,q);
      sMs = sMs + two*sigma*sMp + sigma*sigma*pMp;
      iflag = 2;
      break;
    }
    // Update iterate and residual
    w.axpy(alpha,p);
    t.axpy(-alpha,q);
    applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
    // Exit if residual tolerance is met
    //rtr = r.dot(t.dual());
    rtr   = r.apply(t);
    tnorm = t.norm();
    if (rtr <= stol*stol || tnorm <= tol) {
      sMs   = sMs + two*alpha*sMp + alpha*alpha*pMp;
      iflag = 0;
      break;
    }
    // Compute p = r + beta * p
    beta = rtr/rho;
    p.scale(beta); p.plus(r);
    rho  = rtr;
    // Update dot products
    //   sMs = <s, inv(M)s> or <s, s> if equality constraint present
    //   sMp = <s, inv(M)p> or <s, p> if equality constraint present
    //   pMp = <p, inv(M)p> or <p, p> if equality constraint present
    sMs = sMs + two*alpha*sMp + alpha*alpha*pMp;
    sMp = beta*(sMp + alpha*pMp);
    pMp = (!hasEcon_ ? rho : p.dot(p)) + beta*beta*pMp;
  }
  // Check iteration count
  if (iter == itermax) iflag = 1;
  if (iflag != 1) iter++;
  return std::sqrt(sMs); // w.norm();
}

template<typename Real>
void LSecantBAlgorithm<Real>::applyFreeHessian(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &pwa) const {
  const Real zero(0);
  pwa.set(v);
  bnd.pruneActive(pwa,x,zero);
  secant.applyB(hv,pwa);
  pwa.set(hv.dual());
  bnd.pruneActive(pwa,x,zero);
  hv.set(pwa.dual());
}

template<typename Real>
void LSecantBAlgorithm<Real>::applyFreePrecond(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &dwa,
                                               Vector<Real> &pwa) const {
  if (!hasEcon_) {
    const Real zero(0);
    pwa.set(v.dual());
    bnd.pruneActive(pwa,x,zero);
    dwa.set(pwa.dual());
    secant.applyH(hv,dwa);
    bnd.pruneActive(hv,x,zero);
  }
  else {
    // Perform null space projection
    rcon_->setX(makePtrFromRef(x));
    ns_->update(x);
    pwa.set(v.dual());
    ns_->apply(hv,pwa,tol);
  }
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  if (verbosity_ > 1) {
    os << std::string(114,'-') << std::endl;
    os << " L-Secant-B line search method status output definitions" << std::endl << std::endl;
    os << " iter - Number of iterates (steps taken)" << std::endl;
    os << " value - Objective function value" << std::endl;
    os << " gnorm - Norm of the gradient" << std::endl;
    os << " snorm - Norm of the step (update to optimization vector)" << std::endl;
    os << " LSpar - Line-Search parameter" << std::endl;
    os << " #fval - Number of times the objective function was evaluated" << std::endl;
    os << " #grad - Number of times the gradient was computed" << std::endl;
    os << " #proj - Number of times the projection was applied" << std::endl;
    os << " iterCG - Number of Truncated CG iterations" << std::endl << std::endl;
    os << " flagCG - Truncated CG flag" << std::endl;
    os << " 0 - Converged" << std::endl;
    os << " 1 - Iteration Limit Exceeded" << std::endl;
    os << " 2 - Bounds Exceeded" << std::endl;
    os << std::string(114,'-') << std::endl;
  }
  os << " ";
  os << std::setw(6) << std::left << "iter";
  os << std::setw(15) << std::left << "value";
  os << std::setw(15) << std::left << "gnorm";
  os << std::setw(15) << std::left << "snorm";
  os << std::setw(15) << std::left << "LSpar";
  os << std::setw(10) << std::left << "#fval";
  os << std::setw(10) << std::left << "#grad";
  os << std::setw(10) << std::left << "#proj";
  os << std::setw(10) << std::left << "iterCG";
  os << std::setw(10) << std::left << "flagCG";
  os << std::endl;
  os.flags(osFlags);
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeName( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::endl << "L-Secant-B Line-Search Method (Type B, Bound Constraints)" << std::endl;
  os.flags(osFlags);
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeOutput( std::ostream& os, bool write_header ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( write_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    os << " ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << "---";
    os << std::setw(15) << std::left << state_->searchSize;
    os << std::setw(10) << std::left << state_->nfval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nproj;
    os << std::setw(10) << std::left << "---";
    os << std::setw(10) << std::left << "---";
    os << std::endl;
  }
  else {
    os << " ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << state_->snorm;
    os << std::setw(15) << std::left << state_->searchSize;
    os << std::setw(10) << std::left << state_->nfval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nproj;
    os << std::setw(10) << std::left << SPiter_;
    os << std::setw(10) << std::left << SPflag_;
    os << std::endl;
  }
  os.flags(osFlags);
}

} // namespace TypeB
} // namespace ROL

#endif
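
For reference, below is a minimal, hypothetical driver showing how the algorithm defined above is typically configured and invoked. It is a sketch, not part of the header: it assumes the standard ROL::StdVector and ROL::Bounds helpers, the ROL_TypeB_LSecantBAlgorithm.hpp declaration header, and the usual "Status Test" parameter keys; it minimizes a simple bound-constrained quadratic using the run(x, g, obj, bnd, outStream) overload defined in this file.

#include <iostream>
#include <vector>
#include "ROL_StdVector.hpp"
#include "ROL_Bounds.hpp"
#include "ROL_TypeB_LSecantBAlgorithm.hpp"

using RealT = double;

// Simple separable quadratic f(x) = 0.5*sum_i (x_i - 2)^2.
class Quadratic : public ROL::Objective<RealT> {
public:
  RealT value(const ROL::Vector<RealT> &x, RealT &tol) override {
    const auto xp = dynamic_cast<const ROL::StdVector<RealT>&>(x).getVector();
    RealT val(0);
    for (auto xi : *xp) val += static_cast<RealT>(0.5)*(xi-2.0)*(xi-2.0);
    return val;
  }
  void gradient(ROL::Vector<RealT> &g, const ROL::Vector<RealT> &x, RealT &tol) override {
    const auto xp = dynamic_cast<const ROL::StdVector<RealT>&>(x).getVector();
    auto       gp = dynamic_cast<ROL::StdVector<RealT>&>(g).getVector();
    for (size_t i = 0; i < xp->size(); ++i) (*gp)[i] = (*xp)[i] - 2.0;
  }
};

int main() {
  const int dim = 5;
  auto x_ptr = ROL::makePtr<std::vector<RealT>>(dim, 0.0);
  auto l_ptr = ROL::makePtr<std::vector<RealT>>(dim, 0.0);
  auto u_ptr = ROL::makePtr<std::vector<RealT>>(dim, 1.0); // upper bound is active at the solution
  ROL::StdVector<RealT> x(x_ptr);
  auto lo = ROL::makePtr<ROL::StdVector<RealT>>(l_ptr);
  auto up = ROL::makePtr<ROL::StdVector<RealT>>(u_ptr);
  ROL::Bounds<RealT> bnd(lo, up);
  Quadratic obj;

  // Configure the algorithm; keys below the "General" and "Step" sublists
  // follow the layout read by the constructor in this file.
  ROL::ParameterList list;
  list.sublist("General").set("Output Level", 1);
  list.sublist("Status Test").set("Gradient Tolerance", 1e-8);
  list.sublist("Status Test").set("Iteration Limit", 100);

  ROL::TypeB::LSecantBAlgorithm<RealT> algo(list);
  algo.run(x, x.dual(), obj, bnd, std::cout); // run(x, g, obj, bnd, outStream) as defined above
  return 0;
}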