ROL_FletcherStep.hpp
// @HEADER
// *****************************************************************************
// Rapid Optimization Library (ROL) Package
//
// Copyright 2014 NTESS and the ROL contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef ROL_FLETCHERSTEP_H
#define ROL_FLETCHERSTEP_H

#include "ROL_FletcherBase.hpp"
#include "ROL_Step.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_LineSearchStep.hpp"
#include "ROL_Types.hpp"
#include "ROL_ParameterList.hpp"

/** \class ROL::FletcherStep
    \brief Provides the interface to compute Fletcher steps.
*/

namespace ROL {

template <class Real>
class FletcherStep : public Step<Real> {
private:
  ROL::Ptr<Step<Real> >            step_;
  ROL::Ptr<BoundConstraint<Real> > bnd_;

  ROL::ParameterList parlist_;

  ROL::Ptr<Vector<Real> > x_;

  // Lagrange multiplier update
  Real penaltyUpdate_;
  bool modifyPenalty_;
  Real maxPenaltyParam_;
  Real minPenaltyParam_;
  // Subproblem information
  bool print_;
  std::string subStep_;

  Real delta_;
  Real deltaMin_;
  Real deltaUpdate_;
  ETrustRegion etr_;

  AlgorithmState<Real> tr_algo_state_;

  ROL::Ptr<Vector<Real> > g_;

  bool bnd_activated_;

  // For printing output
  mutable bool isDeltaChanged_;
  mutable bool isPenaltyChanged_;

  int numSuccessSteps_;

  mutable int stepHeaderLength_; // For formatting

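  // Criticality measure used to fill algo_state.gnorm: when bound constraints are
  // active this returns the norm of the projected gradient step, || P(x - g) - x ||
  // (with g taken in its dual representation); otherwise it is simply || g ||.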
  Real computeProjGradientNorm( const Vector<Real> &g, const Vector<Real> &x,
                                BoundConstraint<Real> &bnd ) {
    Real gnorm = 0.;
    // Compute norm of projected gradient
    if (bnd.isActivated()) {
      x_->set(x);
      x_->axpy(-1.,g.dual());
      bnd.project(*x_);
      x_->axpy(-1.,x);
      gnorm = x_->norm();
    }
    else {
      gnorm = g.norm();
    }
    return gnorm;
  }

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  /** \brief Constructor. */
  FletcherStep(ROL::ParameterList &parlist)
    : Step<Real>(), bnd_activated_(false), numSuccessSteps_(0),
      isDeltaChanged_(true), isPenaltyChanged_(true), stepHeaderLength_(0) {
    Real zero(0), one(1), two(2), oe8(1.e8), oe1(1.e-1), oem6(1e-6), oem8(1.e-8);

    ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Fletcher");
    Step<Real>::getState()->searchSize = sublist.get("Penalty Parameter", one);
    delta_       = sublist.get("Regularization Parameter", zero);
    deltaMin_    = sublist.get("Min Regularization Parameter", oem8);
    deltaUpdate_ = sublist.get("Regularization Parameter Decrease Factor", oe1);
    // penalty parameters
    penaltyUpdate_   = sublist.get("Penalty Parameter Growth Factor", two);
    modifyPenalty_   = sublist.get("Modify Penalty Parameter", false);
    maxPenaltyParam_ = sublist.get("Maximum Penalty Parameter", oe8);
    minPenaltyParam_ = sublist.get("Minimum Penalty Parameter", oem6);

    subStep_ = sublist.get("Subproblem Solver", "Trust Region");

    parlist_ = parlist;
  }

  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
    initialize(x,g,l,c,obj,con,*bnd_,algo_state);
  }

  /** \brief Initialize step with equality and bound constraints.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    // Determine what kind of step
    bnd_activated_ = bnd.isActivated();

    ROL::ParameterList trlist(parlist_);
    bool inexactFletcher = trlist.sublist("Step").sublist("Fletcher").get("Inexact Solves", false);
    if( inexactFletcher ) {
      trlist.sublist("General").set("Inexact Objective Value", true);
      trlist.sublist("General").set("Inexact Gradient", true);
    }
    if( bnd_activated_ ) {
      trlist.sublist("Step").sublist("Trust Region").set("Subproblem Model", "Coleman-Li");
    }

    if ( subStep_ == "Line Search" ) {
      step_ = makePtr<LineSearchStep<Real>>(trlist);
    }
    else {
      step_ = makePtr<TrustRegionStep<Real>>(trlist);
    }
    std::string solverType = parlist_.sublist("Step").sublist("Trust Region").get("Subproblem Solver", "Truncated CG");
    etr_ = StringToETrustRegion(solverType);

    // Initialize class members
    g_ = g.clone();
    x_ = x.clone();

    // Rest of initialize
    FletcherBase<Real>& fletcher = dynamic_cast<FletcherBase<Real>&>(obj);

    tr_algo_state_.iterateVec = x.clone();
    tr_algo_state_.minIterVec = x.clone();
    tr_algo_state_.lagmultVec = l.clone();

    step_->initialize(x, g, obj, bnd, tr_algo_state_);

    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    algo_state.value = fletcher.getObjectiveValue(x);
    algo_state.gnorm = computeProjGradientNorm(*(fletcher.getLagrangianGradient(x)), x, bnd);
    algo_state.aggregateGradientNorm = tr_algo_state_.gnorm;

    state->constraintVec->set(*(fletcher.getConstraintVec(x)));
    algo_state.cnorm = (state->constraintVec)->norm();
    // Update evaluation counters
    algo_state.ncval = fletcher.getNumberConstraintEvaluations();
    algo_state.nfval = fletcher.getNumberFunctionEvaluations();
    algo_state.ngrad = fletcher.getNumberGradientEvaluations();
  }

  /** \brief Compute step (equality constraint).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,*bnd_, algo_state);
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    // The subproblem step is taken on the Fletcher penalty objective; l and con are not used directly.
    step_->compute( s, x, obj, bnd, tr_algo_state_ );
  }

  /** \brief Update step, if successful (equality constraint).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,*bnd_, algo_state);
  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {

    // Resetting these print flags belongs in print(), but that does not work there
    isDeltaChanged_   = false;
    isPenaltyChanged_ = false;
    bool modified = false;

    FletcherBase<Real> &fletcher = dynamic_cast<FletcherBase<Real>&>(obj);
    ROL::Ptr<StepState<Real> > fletcherState = Step<Real>::getState();
    const ROL::Ptr<const StepState<Real> > state = step_->getStepState();

    step_->update(x,s,obj,bnd,tr_algo_state_);
    numSuccessSteps_ += (state->flag == 0);

    Real gPhiNorm = tr_algo_state_.gnorm;
    Real cnorm = (fletcherState->constraintVec)->norm();
    bool too_infeasible = cnorm > static_cast<Real>(100.)*gPhiNorm;
    bool too_feasible   = cnorm < static_cast<Real>(1e-2)*gPhiNorm;

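    // Heuristic penalty update: if the constraint violation is much larger than the
    // norm of the penalty gradient, increase the penalty parameter; if it is much
    // smaller, decrease it. This requires "Modify Penalty Parameter" to be enabled
    // and more than one successful subproblem step, and happens at most once per update.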
    if( too_infeasible && !modified && modifyPenalty_ && numSuccessSteps_ > 1 ) {
      Real penaltyParam = Step<Real>::getStepState()->searchSize;
      if( penaltyParam >= maxPenaltyParam_ ) {
        // Penalty parameter too large, exit
        algo_state.flag = true;
      }
      penaltyParam *= penaltyUpdate_;
      penaltyParam = std::min(penaltyParam, maxPenaltyParam_);
      fletcher.setPenaltyParameter(penaltyParam);
      Step<Real>::getState()->searchSize = penaltyParam;
      isPenaltyChanged_ = true;
      modified = true;
    }

    if( too_feasible && !modified && modifyPenalty_ && numSuccessSteps_ > 1 ) {
      Real penaltyParam = Step<Real>::getStepState()->searchSize;
      if( penaltyParam <= minPenaltyParam_ ) {
        // Penalty parameter too small, exit (this is unlikely)
        algo_state.flag = true;
      }
      penaltyParam /= penaltyUpdate_;
      penaltyParam = std::max(penaltyParam, minPenaltyParam_);
      fletcher.setPenaltyParameter(penaltyParam);
      Step<Real>::getState()->searchSize = penaltyParam;
      isPenaltyChanged_ = true;
      modified = true;
    }

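    // Regularization update: if nothing else was modified and the penalty-gradient
    // norm has dropped below the next candidate value, shrink the regularization
    // parameter delta_ by the decrease factor and pass it to the penalty; no further
    // decreases occur once delta_ <= deltaMin_.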
    if( delta_ > deltaMin_ && !modified ) {
      Real deltaNext = delta_ * deltaUpdate_;
      if( gPhiNorm < deltaNext ) {
        delta_ = deltaNext;
        fletcher.setDelta(deltaNext);
        isDeltaChanged_ = true;
        modified = true;
      }
    }

    if( modified ) {
      // The penalty function has changed, so its value and gradient must be recomputed
      Real tol = static_cast<Real>(1e-12);
      tr_algo_state_.value = fletcher.value(x, tol);
      fletcher.gradient(*g_, x, tol);

      tr_algo_state_.nfval++;
      tr_algo_state_.ngrad++;
      tr_algo_state_.ncval++;
      tr_algo_state_.minIter  = tr_algo_state_.iter;
      tr_algo_state_.minValue = tr_algo_state_.value;
      tr_algo_state_.gnorm = computeProjGradientNorm(*g_, x, bnd);
    }

    // Update the step and store in state
    algo_state.iterateVec->set(x);
    algo_state.iter++;

    fletcherState->descentVec->set(s);
    fletcherState->gradientVec->set(*(fletcher.getLagrangianGradient(x)));
    fletcherState->constraintVec->set(*(fletcher.getConstraintVec(x)));

    // Update objective function value
    algo_state.value = fletcher.getObjectiveValue(x);
    // Update constraint norm
    algo_state.cnorm = (fletcherState->constraintVec)->norm();
    // Update the step size
    algo_state.snorm = tr_algo_state_.snorm;
    // Compute norm of the gradient of the Lagrangian
    algo_state.gnorm = computeProjGradientNorm(*(fletcherState->gradientVec), x, bnd);
    // Compute norm of the gradient of the penalty function
    algo_state.aggregateGradientNorm = tr_algo_state_.gnorm;
    // Update evaluation counters
    algo_state.nfval = fletcher.getNumberFunctionEvaluations();
    algo_state.ngrad = fletcher.getNumberGradientEvaluations();
    algo_state.ncval = fletcher.getNumberConstraintEvaluations();
    // Update objective function and constraints
    // fletcher.update(x,true,algo_state.iter);
    // Update multipliers
    algo_state.lagmultVec->set(*(fletcher.getMultiplierVec(x)));
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;
    if( subStep_ == "Trust Region" ) {
      hist << " ";
      hist << std::setw(6)  << std::left << "iter";
      hist << std::setw(15) << std::left << "merit";
      hist << std::setw(15) << std::left << "fval";
      hist << std::setw(15) << std::left << "gpnorm";
      hist << std::setw(15) << std::left << "gLnorm";
      hist << std::setw(15) << std::left << "cnorm";
      hist << std::setw(15) << std::left << "snorm";
      hist << std::setw(15) << std::left << "tr_radius";
      hist << std::setw(10) << std::left << "tr_flag";
      if ( etr_ == TRUSTREGION_TRUNCATEDCG && subStep_ == "Trust Region") {
        hist << std::setw(10) << std::left << "iterCG";
        hist << std::setw(10) << std::left << "flagCG";
      }
      hist << std::setw(15) << std::left << "penalty";
      hist << std::setw(15) << std::left << "delta";
      hist << std::setw(10) << std::left << "#fval";
      hist << std::setw(10) << std::left << "#grad";
      hist << std::setw(10) << std::left << "#cval";
      hist << "\n";
    }
    else {
      std::string stepHeader = step_->printHeader();
      stepHeaderLength_ = stepHeader.length();
      hist << stepHeader.substr(0, stepHeaderLength_-1);
      hist << std::setw(15) << std::left << "fval";
      hist << std::setw(15) << std::left << "gLnorm";
      hist << std::setw(15) << std::left << "cnorm";
      hist << std::setw(15) << std::left << "penalty";
      hist << std::setw(15) << std::left << "delta";
      hist << std::setw(10) << std::left << "#cval";
      hist << "\n";
    }
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << " Fletcher solver : " << subStep_;
    hist << "\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::string stepHist = step_->print( tr_algo_state_, false );
    stepHist.erase(std::remove(stepHist.end()-3, stepHist.end(),'\n'), stepHist.end());
    std::string name = step_->printName();
    size_t pos = stepHist.find(name);
    if ( pos != std::string::npos ) {
      stepHist.erase(pos, name.length());
    }

    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }

    std::string penaltyString = getValueString( Step<Real>::getStepState()->searchSize, isPenaltyChanged_ );
    std::string deltaString   = getValueString( delta_, isDeltaChanged_ );

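    // For the Trust Region substep, several columns (snorm, tr_radius, tr_flag, the CG
    // iteration count and flag, #fval, #grad) are spliced out of the substep's print
    // string at fixed character offsets, so this relies on the column layout produced
    // by TrustRegionStep::print.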
    if( subStep_ == "Trust Region" ) {
      hist << " ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << tr_algo_state_.value;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << tr_algo_state_.gnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << stepHist.substr(38,15); // snorm
      hist << std::setw(15) << std::left << stepHist.substr(53,15); // tr_radius
      hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(88,10)); // tr_flag
      if ( etr_ == TRUSTREGION_TRUNCATEDCG && subStep_ == "Trust Region") {
        hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(93,10));  // iterCG
        hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(103,10)); // flagCG
      }
      hist << std::setw(15) << std::left << penaltyString;
      hist << std::setw(15) << std::left << deltaString;
      hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(68,10)); // #fval
      hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(78,10)); // #grad
      hist << std::setw(10) << std::left << algo_state.ncval;
      hist << "\n";
    } else {
      hist << std::setw(stepHeaderLength_-1) << std::left << stepHist;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << penaltyString;
      hist << std::setw(15) << std::left << deltaString;
      hist << std::setw(10) << std::left << algo_state.ncval;
      hist << "\n";
    }

    return hist.str();
  }

  std::string getValueString( const Real value, const bool print ) const {
    std::stringstream valString;
    valString << std::scientific << std::setprecision(6);
    if( print ) {
      valString << std::setw(15) << std::left << value;
    } else {
      valString << std::setw(15) << "";
    }
    return valString.str();
  }

  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {}

  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {}

}; // class FletcherStep

} // namespace ROL

#endif
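
A minimal configuration sketch (not part of the header above, purely illustrative): it populates the "Step" -> "Fletcher" sublist with the parameters read in the FletcherStep constructor, using the same names and default values that appear there, and then constructs the step. The file name, the choice of Real = double, and the standalone main() are assumptions for illustration; actually driving the step additionally requires a FletcherBase-derived penalty objective, a Constraint, and the vectors expected by the initialize/compute/update methods shown above.

// example_fletcher_config.cpp (illustrative sketch only)
#include "ROL_FletcherStep.hpp"

int main() {
  // Populate the sublist that FletcherStep reads in its constructor.
  ROL::ParameterList parlist;
  ROL::ParameterList &flist = parlist.sublist("Step").sublist("Fletcher");
  flist.set("Penalty Parameter",                        1.0);
  flist.set("Regularization Parameter",                 0.0);
  flist.set("Min Regularization Parameter",             1.e-8);
  flist.set("Regularization Parameter Decrease Factor", 1.e-1);
  flist.set("Penalty Parameter Growth Factor",          2.0);
  flist.set("Modify Penalty Parameter",                 false);
  flist.set("Maximum Penalty Parameter",                1.e8);
  flist.set("Minimum Penalty Parameter",                1.e-6);
  flist.set("Subproblem Solver",                        "Trust Region");

  // Construct the step; the subproblem solver and penalty settings are now fixed.
  ROL::FletcherStep<double> step(parlist);
  (void)step;
  return 0;
}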