ROL_TrustRegionStep.hpp
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_TRUSTREGIONSTEP_H
45 #define ROL_TRUSTREGIONSTEP_H
46 
47 #include "ROL_Types.hpp"
48 #include "ROL_Step.hpp"
49 #include "ROL_Secant.hpp"
50 #include "ROL_TrustRegion.hpp"
51 #include <sstream>
52 #include <iomanip>
53 
126 namespace ROL {
127 
128 template <class Real>
129 class TrustRegionStep : public Step<Real> {
130 private:
131 
132  Teuchos::RCP<Secant<Real> > secant_;
133  Teuchos::RCP<TrustRegion<Real> > trustRegion_;
134 
135  Teuchos::RCP<Vector<Real> > xnew_;
136  Teuchos::RCP<Vector<Real> > xold_;
137  Teuchos::RCP<Vector<Real> > gp_;
138 
139  ETrustRegion etr_;           // Trust-region subproblem solver type.
140  ESecant      esec_;          // Secant type.
141 
142  bool useSecantPrecond_;      // Flag whether to use a secant preconditioner.
143  bool useSecantHessVec_;      // Flag whether to use a secant Hessian.
144 
145  bool useProjectedGrad_;      // Flag whether to use the projected gradient criticality measure.
146 
147  std::vector<bool> useInexact_;
148  int TRflag_ ;
149  int TR_nfval_;
150  int TR_ngrad_;
151  int CGflag_;
152  int CGiter_;
153 
154  Real delMax_;
155 
156  Real alpha_init_;
157  int max_fval_;
158 
159  Real scale0_;
160  Real scale1_;
161 
162  bool softUp_;
163  Real scaleEps_;
164 
179  void updateGradient( Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con,
180                       AlgorithmState<Real> &algo_state ) {
181  Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
182  if ( useInexact_[1] ) {
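183  // Inexact-gradient mode: recompute the gradient with tolerance
184  // gtol1 = c*min(gnorm, searchSize), where c is rescaled from the newly
185  // computed gradient norm, and repeat until the previous tolerance is
186  // within the factor scale1_ of the new one.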
183  Real c = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
184  Real gtol1 = c*(state->searchSize);
185  Real gtol0 = scale1_*gtol1 + 1.0;
186  while ( gtol0 > gtol1*scale1_ ) {
187  obj.gradient(*(state->gradientVec),x,gtol1);
188  algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
189  gtol0 = gtol1;
190  c = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
191  gtol1 = c*std::min(algo_state.gnorm,state->searchSize);
192  }
193  algo_state.ngrad++;
194  }
195  else {
196  Real gtol = std::sqrt(ROL_EPSILON);
197  obj.gradient(*(state->gradientVec),x,gtol);
198  algo_state.ngrad++;
199  algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
200  }
201  }
202 
211  Real computeCriticalityMeasure( const Vector<Real> &g, const Vector<Real> &x, BoundConstraint<Real> &con ) {
212    if ( con.isActivated() ) {
213  if ( useProjectedGrad_ ) {
214  gp_->set(g);
215  con.computeProjectedGradient( *gp_, x );
216  return gp_->norm();
217  }
218  else {
219  xnew_->set(x);
220  xnew_->axpy(-1.0,g.dual());
221  con.project(*xnew_);
222  xnew_->axpy(-1.0,x);
223  return xnew_->norm();
224  }
225  }
226  else {
227  return g.norm();
228  }
229  }
230 
231  void TrustRegionFactory(Teuchos::ParameterList &parlist) {
232  switch(etr_) {
233  case TRUSTREGION_DOGLEG:
234  trustRegion_ = Teuchos::rcp(new DogLeg<Real>(parlist)); break;
235      case TRUSTREGION_DOUBLEDOGLEG:
236        trustRegion_ = Teuchos::rcp(new DoubleDogLeg<Real>(parlist)); break;
237      case TRUSTREGION_TRUNCATEDCG:
238        trustRegion_ = Teuchos::rcp(new TruncatedCG<Real>(parlist)); break;
239      case TRUSTREGION_CAUCHYPOINT:
240      default:
241  trustRegion_ = Teuchos::rcp(new CauchyPoint<Real>(parlist)); break;
242  }
243  }
244 
245 public:
246 
247  virtual ~TrustRegionStep() {}
248 
256  TrustRegionStep( Teuchos::ParameterList & parlist ) : Step<Real>() {
257  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
258 
259  // Enumerations
260  etr_ = StringToETrustRegion(parlist.get("Trust-Region Subproblem Solver Type","Cauchy Point"));
261  esec_ = StringToESecant(parlist.get("Secant Type","Limited-Memory BFGS"));
262  // Secant Information
263  useSecantPrecond_ = parlist.get("Use Secant Preconditioning", false);
264  useSecantHessVec_ = parlist.get("Use Secant Hessian-Times-A-Vector", false);
265  // Trust-Region Parameters
266  step_state->searchSize = parlist.get("Initial Trust-Region Radius", -1.0);
267  delMax_ = parlist.get("Maximum Trust-Region Radius", 1000.0);
268  // Inexactness Information
269  useInexact_.clear();
270  useInexact_.push_back(parlist.get("Use Inexact Objective Function", false));
271  useInexact_.push_back(parlist.get("Use Inexact Gradient", false));
272  useInexact_.push_back(parlist.get("Use Inexact Hessian-Times-A-Vector", false));
273  scale0_ = parlist.get("Gradient Update Tolerance Scaling",1.e-1);
274  scale1_ = parlist.get("Gradient Update Relative Tolerance",2.0);
275 
276  // Initialize Trust Region Subproblem Solver Object
277  useProjectedGrad_ = parlist.get("Use Projected Gradient Criticality Measure", false);
278  max_fval_ = parlist.get("Maximum Number of Function Evaluations", 20);
279  alpha_init_ = parlist.get("Initial Linesearch Parameter", 1.0);
280  TrustRegionFactory(parlist);
281 
282  // Secant Parameters
283  secant_ = Teuchos::null;
284  if ( useSecantPrecond_ || useSecantHessVec_ ) {
285  int L = parlist.get("Maximum Secant Storage",10);
286  int BBtype = parlist.get("Barzilai-Borwein Type",1);
287  secant_ = getSecant<Real>(esec_,L,BBtype);
288  }
289 
290  // Changing Objective Functions
291  softUp_ = parlist.get("Variable Objective Function",false);
292 
293  // Scale for epsilon active sets
294  scaleEps_ = parlist.get("Scale for Epsilon Active Sets",1.0);
295  }
296 
306  TrustRegionStep( Teuchos::RCP<Secant<Real> > &secant, Teuchos::ParameterList &parlist )
307  : Step<Real>(), secant_(secant) {
308  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
309 
310  // Enumerations
311  etr_ = StringToETrustRegion(parlist.get("Trust-Region Subproblem Solver Type","Cauchy Point"));
312  esec_ = StringToESecant(parlist.get("Secant Type","Limited-Memory BFGS"));
313  // Secant Information
314  useSecantPrecond_ = parlist.get("Use Secant Preconditioning", false);
315  useSecantHessVec_ = parlist.get("Use Secant Hessian-Times-A-Vector", false);
316  // Trust-Region Parameters
317  step_state->searchSize = parlist.get("Initial Trust-Region Radius", -1.0);
318  delMax_ = parlist.get("Maximum Trust-Region Radius", 1000.0);
319  // Inexactness Information
320  useInexact_.clear();
321  useInexact_.push_back(parlist.get("Use Inexact Objective Function", false));
322  useInexact_.push_back(parlist.get("Use Inexact Gradient", false));
323  useInexact_.push_back(parlist.get("Use Inexact Hessian-Times-A-Vector", false));
324  scale0_ = parlist.get("Gradient Update Tolerance Scaling",1.e-1);
325  scale1_ = parlist.get("Gradient Update Relative Tolerance",2.0);
326 
327  // Initialize Trust Region Subproblem Solver Object
328  useProjectedGrad_ = parlist.get("Use Projected Gradient Criticality Measure", false);
329  max_fval_ = parlist.get("Maximum Number of Function Evaluations", 20);
330  alpha_init_ = parlist.get("Initial Linesearch Parameter", 1.0);
331  TrustRegionFactory(parlist);
332 
333  // Changing Objective Functions
334  softUp_ = parlist.get("Variable Objective Function",false);
335 
336  // Scale for epsilon active sets
337  scaleEps_ = parlist.get("Scale for Epsilon Active Sets",1.0);
338  }
339 
348  void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
349                   Objective<Real> &obj, BoundConstraint<Real> &con,
350                   AlgorithmState<Real> &algo_state ) {
351  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
352 
353  trustRegion_->initialize(x,s,g);
354 
355  algo_state.nfval = 0;
356  algo_state.ngrad = 0;
357 
358  Real htol = std::sqrt(ROL_EPSILON);
359  Real ftol = 0.1*ROL_OVERFLOW;
360 
361  step_state->descentVec = s.clone();
362  step_state->gradientVec = g.clone();
363 
364  if ( con.isActivated() ) {
365  con.project(x);
366  xnew_ = x.clone();
367  xold_ = x.clone();
368  }
369 
370  if ( con.isActivated() || secant_ != Teuchos::null ) {
371  gp_ = g.clone();
372  }
373 
374  // Update approximate gradient and approximate objective function.
375  obj.update(x,true,algo_state.iter);
376  updateGradient(x,obj,con,algo_state);
377  algo_state.snorm = 1.e10;
378  algo_state.value = obj.value(x,ftol);
379  algo_state.nfval++;
380 
381  // Evaluate Objective Function at Cauchy Point
382  if ( step_state->searchSize <= 0.0 ) {
383  Teuchos::RCP<Vector<Real> > Bg = g.clone();
384  if ( useSecantHessVec_ ) {
385  secant_->applyB(*Bg,(step_state->gradientVec)->dual(),x);
386  }
387  else {
388  obj.hessVec(*Bg,(step_state->gradientVec)->dual(),x,htol);
389  }
390  Real gBg = Bg->dot(*(step_state->gradientVec));
391  Real alpha = 1.0;
392  if ( gBg > ROL_EPSILON ) {
393  alpha = algo_state.gnorm*algo_state.gnorm/gBg;
394  }
395  // Evaluate the objective function at the Cauchy point
396  Teuchos::RCP<Vector<Real> > cp = s.clone();
397  cp->set((step_state->gradientVec)->dual());
398  cp->scale(-alpha);
399  Teuchos::RCP<Vector<Real> > xcp = x.clone();
400  xcp->set(x);
401  xcp->plus(*cp);
402  if ( con.isActivated() ) {
403  con.project(*xcp);
404  }
405  obj.update(*xcp);
406  Real fnew = obj.value(*xcp,ftol); // MUST DO SOMETHING HERE WITH FTOL
407  algo_state.nfval++;
408  // Perform cubic interpolation to determine initial trust region radius
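      // The objective restricted to the ray x - t*alpha*g is modeled by the cubic
      //   m(t) = fold + c*t + b*t^2 + a*t^3,  with  c = gs,  b = 0.5*alpha*alpha*gBg,
      //   a = fnew - fold - b - c,
      // so m'(t) = 3*a*t^2 + 2*b*t + c.  Its roots t1, t2 are tested below and the
      // one with m''(t) = 6*a*t + 2*b > 0 is the minimizer used to set the radius.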
409  Real gs = cp->dot((step_state->gradientVec)->dual());
410  Real a = fnew - algo_state.value - gs - 0.5*alpha*alpha*gBg;
411  if ( std::abs(a) < ROL_EPSILON ) {
412  // a = 0 implies the objective is quadratic in the negative gradient direction
413  step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
414  }
415  else {
416  Real b = 0.5*alpha*alpha*gBg;
417  Real c = gs;
418  if ( b*b-3.0*a*c > ROL_EPSILON ) {
419  // There is at least one critical point
420  Real t1 = (-b-std::sqrt(b*b-3.0*a*c))/(3.0*a);
421  Real t2 = (-b+std::sqrt(b*b-3.0*a*c))/(3.0*a);
422  if ( 6.0*a*t1 + 2.0*b > 0.0 ) {
423  // t1 is the minimizer
424  step_state->searchSize = std::min(t1*alpha*algo_state.gnorm,delMax_);
425  }
426  else {
427  // t2 is the minimizer
428  step_state->searchSize = std::min(t2*alpha*algo_state.gnorm,delMax_);
429  }
430  }
431  else {
432  step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
433  }
434  }
435  }
436  }
437 
447  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
448                BoundConstraint<Real> &con,
449                AlgorithmState<Real> &algo_state ) {
450  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
451 
452  Real eps = 0.0;
453  if ( con.isActivated() ) {
454  eps = scaleEps_*algo_state.gnorm;
455  }
456    ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);
457 
458  CGflag_ = 0;
459  CGiter_ = 0;
460  trustRegion_->run(s,algo_state.snorm,step_state->searchSize,CGflag_,CGiter_,
461  x,*(step_state->gradientVec),algo_state.gnorm,pObj);
462 
463  if ( con.isActivated() ) {
464  xnew_->set(x);
465  xnew_->plus(s);
466  con.project(*xnew_);
467  s.set(*xnew_);
468  s.axpy(-1.0,x);
469  }
470  }
471 
482  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
483               BoundConstraint<Real> &con,
484               AlgorithmState<Real> &algo_state ) {
485  Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
486 
487  Real tol = std::sqrt(ROL_EPSILON);
488 
489  Real eps = 0.0;
490  if ( con.isActivated() ) {
491  eps = algo_state.gnorm;
492  }
493    ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);
494 
495  // Store previous step for constraint computations
496  if ( con.isActivated() ) {
497  xold_->set(x);
498  }
499 
500  // Update trust-region information
501  TRflag_ = 0;
502  TR_nfval_ = 0;
503  TR_ngrad_ = 0;
504  Real fold = algo_state.value;
505  Real fnew = 0.0;
506  algo_state.iter++;
507  trustRegion_->update(x,fnew,state->searchSize,TR_nfval_,TR_ngrad_,TRflag_,
508  s,algo_state.snorm,fold,*(state->gradientVec),algo_state.iter,pObj);
509  algo_state.nfval += TR_nfval_;
510  algo_state.ngrad += TR_ngrad_;
511  if ( softUp_ ) {
512  pObj.update(x,true,algo_state.iter);
513  fnew = pObj.value(x,tol);
514  algo_state.nfval++;
515  }
516  algo_state.value = fnew;
517 
518  // If step is accepted ...
519  // Compute new gradient and update secant storage
520  if ( TRflag_ == 0 || TRflag_ == 1 ) {
521  // Perform line search (smoothing) to ensure decrease
522  if ( con.isActivated() ) {
523  // Compute new gradient
524  obj.gradient(*gp_,x,tol); // MUST DO SOMETHING HERE WITH TOL
525  algo_state.ngrad++;
526  // Compute smoothed step
527  Real alpha = 1.0;
528  xnew_->set(x);
529  xnew_->axpy(-alpha*alpha_init_,gp_->dual());
530  con.project(*xnew_);
531  // Compute new objective value
532  if ( softUp_ ) {
533  obj.update(*xnew_);
534  }
535  else {
536  obj.update(*xnew_,true,algo_state.iter);
537  }
538  Real ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
539  algo_state.nfval++;
540  // Perform smoothing
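        // (Armijo-type backtracking on the projected gradient step: the step
        //  length is halved each pass until the projected trial point improves
        //  on fnew by at least 1.e-4*(fnew-fold), or until max_fval_ additional
        //  objective evaluations have been used.)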
541  int cnt = 0;
542  alpha = 1.0/alpha_init_;
543  while ( (fnew-ftmp) <= 1.e-4*(fnew-fold) ) {
544  xnew_->set(x);
545  xnew_->axpy(-alpha*alpha_init_,gp_->dual());
546  con.project(*xnew_);
547  if ( softUp_ ) {
548  obj.update(*xnew_);
549  }
550  else {
551  obj.update(*xnew_,true,algo_state.iter);
552  }
553  ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
554  algo_state.nfval++;
555  if ( cnt >= max_fval_ ) {
556  break;
557  }
558  alpha *= 0.5;
559  cnt++;
560  }
561  // Store objective function and iteration information
562  fnew = ftmp;
563  x.set(*xnew_);
564  if ( softUp_ ) {
565  obj.update(x,true,algo_state.iter);
566  fnew = pObj.value(x,tol);
567  algo_state.nfval++;
568  }
569  algo_state.value = fnew;
570  }
571 
572  // Store previous gradient for secant update
573  if ( secant_ != Teuchos::null ) {
574  gp_->set(*(state->gradientVec));
575  }
576 
577  // Update objective function and approximate model
578  //obj.update(x,true,algo_state.iter);
579  updateGradient(x,obj,con,algo_state);
580 
581  // Update secant information
582  if ( secant_ != Teuchos::null ) {
583  if ( con.isActivated() ) { // Compute new constrained step
584  xnew_->set(x);
585  xnew_->axpy(-1.0,*xold_);
586  secant_->update(*(state->gradientVec),*gp_,*xnew_,algo_state.snorm,algo_state.iter+1);
587  }
588  else {
589  secant_->update(*(state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
590  }
591  }
592 
593  // Update algorithm state
594  (algo_state.iterateVec)->set(x);
595  }
596  else {
597  // If step is rejected and soft updates are performed,
598  // then update gradient.
599  if ( softUp_ ) {
600  updateGradient(x,obj,con,algo_state);
601  }
602  }
603  }
604 
609  std::string printHeader( void ) const {
610  std::stringstream hist;
611  hist << " ";
612  hist << std::setw(6) << std::left << "iter";
613  hist << std::setw(15) << std::left << "value";
614  hist << std::setw(15) << std::left << "gnorm";
615  hist << std::setw(15) << std::left << "snorm";
616  hist << std::setw(15) << std::left << "delta";
617  hist << std::setw(10) << std::left << "#fval";
618  hist << std::setw(10) << std::left << "#grad";
619  hist << std::setw(10) << std::left << "tr_flag";
620  if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
621  hist << std::setw(10) << std::left << "iterCG";
622  hist << std::setw(10) << std::left << "flagCG";
623  }
624  hist << "\n";
625  return hist.str();
626  }
627 
632  std::string printName( void ) const {
633  std::stringstream hist;
634  hist << "\n" << ETrustRegionToString(etr_) << " Trust-Region solver";
635    if ( useSecantPrecond_ || useSecantHessVec_ ) {
636      if ( useSecantPrecond_ && !useSecantHessVec_ ) {
637        hist << " with " << ESecantToString(esec_) << " preconditioning\n";
638  }
639  else if ( !useSecantPrecond_ && useSecantHessVec_ ) {
640  hist << " with " << ESecantToString(esec_) << " Hessian approximation\n";
641  }
642  else {
643  hist << " with " << ESecantToString(esec_) << " preconditioning and Hessian approximation\n";
644  }
645  }
646  else {
647  hist << "\n";
648  }
649  return hist.str();
650  }
651 
659  std::string print( AlgorithmState<Real> & algo_state, bool print_header = false ) const {
660  const Teuchos::RCP<const StepState<Real> >& step_state = Step<Real>::getStepState();
661 
662  std::stringstream hist;
663  hist << std::scientific << std::setprecision(6);
664  if ( algo_state.iter == 0 ) {
665  hist << printName();
666  }
667  if ( print_header ) {
668  hist << printHeader();
669  }
670  if ( algo_state.iter == 0 ) {
671  hist << " ";
672  hist << std::setw(6) << std::left << algo_state.iter;
673  hist << std::setw(15) << std::left << algo_state.value;
674  hist << std::setw(15) << std::left << algo_state.gnorm;
675  hist << std::setw(15) << std::left << " ";
676  hist << std::setw(15) << std::left << step_state->searchSize;
677  hist << "\n";
678  }
679  else {
680  hist << " ";
681  hist << std::setw(6) << std::left << algo_state.iter;
682  hist << std::setw(15) << std::left << algo_state.value;
683  hist << std::setw(15) << std::left << algo_state.gnorm;
684  hist << std::setw(15) << std::left << algo_state.snorm;
685  hist << std::setw(15) << std::left << step_state->searchSize;
686  hist << std::setw(10) << std::left << algo_state.nfval;
687  hist << std::setw(10) << std::left << algo_state.ngrad;
688  hist << std::setw(10) << std::left << TRflag_;
689  if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
690  hist << std::setw(10) << std::left << CGiter_;
691  hist << std::setw(10) << std::left << CGflag_;
692  }
693  hist << "\n";
694  }
695  return hist.str();
696  }
697 
698 }; // class TrustRegionStep
699 
700 } // namespace ROL
701 
702 #endif
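
For context, the sketch below shows how this step is typically driven. It is not part of the header: the objective, bound constraint, and optimization vector are supplied by the user; the ROL::StatusTest and ROL::DefaultAlgorithm classes, their header names, and the constructor/run signatures used here are assumptions about the companion classes in the same ROL release. Only the parameter-list keys are taken directly from the constructors above.

// Minimal driver sketch; StatusTest/DefaultAlgorithm usage is an assumption.
#include <iostream>
#include "Teuchos_ParameterList.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_StatusTest.hpp"        // assumed header name
#include "ROL_DefaultAlgorithm.hpp"  // assumed header name

template<class Real>
void runTrustRegion( ROL::Objective<Real>       &obj,   // user-supplied objective
                     ROL::BoundConstraint<Real> &bnd,   // user-supplied bounds
                     ROL::Vector<Real>          &x ) {  // initial guess, overwritten with the solution
  // Keys below are exactly those read by the TrustRegionStep constructors.
  Teuchos::ParameterList parlist;
  parlist.set("Trust-Region Subproblem Solver Type", "Truncated CG");
  parlist.set("Initial Trust-Region Radius",         10.0);
  parlist.set("Maximum Trust-Region Radius",         1000.0);
  parlist.set("Use Secant Hessian-Times-A-Vector",   true);
  parlist.set("Secant Type",                         "Limited-Memory BFGS");

  ROL::TrustRegionStep<Real>    step(parlist);
  ROL::StatusTest<Real>         status(1.e-8, 1.e-12, 100); // gtol, stol, maxit (assumed order)
  ROL::DefaultAlgorithm<Real>   algo(step, status, false);  // assumed constructor
  algo.run(x, obj, bnd, true, std::cout);                   // assumed bound-constrained overload
}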