ROL_InteriorPointStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_INTERIORPOINTSTEP_H
#define ROL_INTERIORPOINTSTEP_H

#include "ROL_CompositeStep.hpp"
#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_Fletcher.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_InteriorPoint.hpp"
#include "ROL_Types.hpp"


namespace ROL {

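/** \class ROL::InteriorPointStep
    \brief Implements a primal interior-point iteration: each outer step solves
           a barrier-penalized subproblem (via Composite Step, Augmented
           Lagrangian, Fletcher, or, for bound constraints, Trust Region) and
           then updates the barrier parameter by the factor rho_.
*/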
template <class Real>
class InteriorPointStep : public Step<Real> {

  typedef InteriorPoint::PenalizedObjective<Real> IPOBJ;
  typedef Constraint_Partitioned<Real>            IPCON;

private:

  ROL::Ptr<StatusTest<Real> >      status_;
  ROL::Ptr<Step<Real> >            step_;
  ROL::Ptr<Algorithm<Real> >       algo_;
  ROL::Ptr<BoundConstraint<Real> > bnd_;
  ROL::ParameterList               parlist_;

  // Storage
  ROL::Ptr<Vector<Real> > x_;
  ROL::Ptr<Vector<Real> > g_;
  ROL::Ptr<Vector<Real> > l_;
  ROL::Ptr<Vector<Real> > c_;

  Real mu_;            // Barrier parameter
  Real mumin_;         // Minimal value of barrier parameter
  Real mumax_;         // Maximal value of barrier parameter
  Real rho_;           // Barrier parameter reduction factor

  // For the subproblem
  int subproblemIter_; // Number of iterations taken by the subproblem solver

  int verbosity_;      // Adjust level of detail in printing step information
  bool print_;

  bool hasEquality_;   // Whether the problem has (composite) equality constraints

  EStep stepType_;     // Subproblem step type
  std::string stepname_;

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  InteriorPointStep(ROL::ParameterList &parlist) :
    Step<Real>(),
    status_(ROL::nullPtr),
    step_(ROL::nullPtr),
    algo_(ROL::nullPtr),
    parlist_(parlist),
    x_(ROL::nullPtr),
    g_(ROL::nullPtr),
    l_(ROL::nullPtr),
    c_(ROL::nullPtr),
    hasEquality_(false),
    stepType_(STEP_COMPOSITESTEP),
    stepname_("Composite Step") {

    using ROL::ParameterList;

    verbosity_ = parlist.sublist("General").get("Print Verbosity",0);

    // List of general Interior Point parameters
    ParameterList& iplist = parlist.sublist("Step").sublist("Interior Point");
    mu_    = iplist.get("Initial Barrier Penalty",1.0);
    mumin_ = iplist.get("Minimum Barrier Penalty",1.e-4);
    mumax_ = iplist.get("Maximum Barrier Penalty",1e8);
    rho_   = iplist.get("Barrier Penalty Reduction Factor",0.5);

    // Subproblem step information
    print_ = iplist.sublist("Subproblem").get("Print History",false);
    Real gtol = iplist.sublist("Subproblem").get("Optimality Tolerance",1e-8);
    Real ctol = iplist.sublist("Subproblem").get("Feasibility Tolerance",1e-8);
    Real stol = static_cast<Real>(1e-6)*std::min(gtol,ctol);
    int maxit = iplist.sublist("Subproblem").get("Iteration Limit",1000);
    parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
    parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
    parlist_.sublist("Status Test").set("Step Tolerance",       stol);
    parlist_.sublist("Status Test").set("Iteration Limit",      maxit);
    // Get step name from parameter list
    stepname_ = iplist.sublist("Subproblem").get("Step Type","Composite Step");
    stepType_ = StringToEStep(stepname_);
  }

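  // Illustrative parameter-list setup (a sketch; the sublist and key names are
  // exactly those read in the constructor above, the values are the defaults):
  //
  //   ROL::ParameterList parlist;
  //   ROL::ParameterList &iplist = parlist.sublist("Step").sublist("Interior Point");
  //   iplist.set("Initial Barrier Penalty",          1.0);
  //   iplist.set("Minimum Barrier Penalty",          1.e-4);
  //   iplist.set("Maximum Barrier Penalty",          1e8);
  //   iplist.set("Barrier Penalty Reduction Factor", 0.5);
  //   iplist.sublist("Subproblem").set("Step Type",  "Composite Step");
  //   InteriorPointStep<double> step(parlist);
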
  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    hasEquality_ = true;

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();

    // Initialize storage
    x_ = x.clone();
    g_ = g.clone();
    l_ = l.clone();
    c_ = c.clone();

    x_->set(x);

    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    auto& ipcon = dynamic_cast<IPCON&>(con);

    // Set initial penalty
    ipobj.updatePenalty(mu_);

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    Real zerotol = 0.0;
    obj.update(x,true,algo_state.iter);
    algo_state.value = obj.value(x,zerotol);

    obj.gradient(*g_,x,zerotol);
    algo_state.gnorm = g_->norm();

    con.value(*c_,x,zerotol);
    algo_state.cnorm = c_->norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
    algo_state.ncval += ipcon.getNumberConstraintEvaluations();

  }

  /** \brief Initialize step with equality and bound constraints.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    bnd.projectInterior(x);
    initialize(x,g,l,c,obj,con,algo_state);
  }

  /** \brief Initialize step with no equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Objective<Real> &obj, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    bnd.projectInterior(x);

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec  = x.clone();
    state->gradientVec = g.clone();

    // Initialize storage
    x_ = x.clone(); x_->set(x);
    g_ = g.clone();

    // Set initial penalty
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    ipobj.updatePenalty(mu_);

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    obj.update(x,true,algo_state.iter);
    algo_state.value = obj.value(x,zerotol);

    obj.gradient(*g_,x,zerotol);
    algo_state.gnorm = g_->norm();

    algo_state.cnorm = static_cast<Real>(0);

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
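
    // The bound is enforced through the barrier objective, so the subproblem
    // solver is handed a deactivated (trivial) bound constraint.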
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
  }


  /** \brief Compute step (equality constraints).
  */
  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                const Vector<Real> &l,
                Objective<Real> &obj,
                Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective and constraint
    //auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    //auto& ipcon = dynamic_cast<IPCON&>(con);

    Real one(1);
    // Create the penalty objective for the subproblem solver
    Ptr<Objective<Real>> penObj;
    if (stepType_ == STEP_AUGMENTEDLAGRANGIAN) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);
    }
    else if (stepType_ == STEP_FLETCHER) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);
    }
    else {
      penObj    = makePtrFromRef(obj);
      stepname_ = "Composite Step";
      stepType_ = STEP_COMPOSITESTEP;
    }
    algo_ = ROL::makePtr<Algorithm<Real>>(stepname_,parlist_,false);
    //algo_ = ROL::makePtr<Algorithm<Real>>("Composite Step",parlist_,false);

    // Run the algorithm
    x_->set(x); l_->set(l);
    algo_->run(*x_,*g_,*l_,*c_,*penObj,con,print_);
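    // The interior-point step is the difference between the subproblem solution and the current iterate.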
    s.set(*x_); s.axpy(-one,x);

    // Get number of iterations from the subproblem solve
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                const Vector<Real> &l,
                Objective<Real> &obj,
                Constraint<Real> &con,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,algo_state);
  }

  // Bound constrained
  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                Objective<Real> &obj,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);

    // Create the algorithm
    algo_ = ROL::makePtr<Algorithm<Real>>("Trust Region",parlist_,false);

    // Run the algorithm
    x_->set(x);
    algo_->run(*x_,*g_,ipobj,*bnd_,print_);
    s.set(*x_); s.axpy(-1.0,x);

    // Get number of iterations from the subproblem solve
    subproblemIter_ = (algo_->getState())->iter;
  }


  /** \brief Update step, if successful (equality constraints).
  */
  void update( Vector<Real> &x,
               Vector<Real> &l,
               const Vector<Real> &s,
               Objective<Real> &obj,
               Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective and constraint
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    auto& ipcon = dynamic_cast<IPCON&>(con);

    // If we can change the barrier parameter, do so
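    // (rho_ < 1 drives mu_ down toward mumin_; rho_ > 1 drives it up toward mumax_.)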
    if( (rho_ < 1.0 && mu_ > mumin_) || (rho_ > 1.0 && mu_ < mumax_) ) {
      mu_ *= rho_;
      ipobj.updatePenalty(mu_);
    }

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;

    // Update optimization vector
    x.plus(s);

    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;

    Real zerotol = 0.0;

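    // value() evaluates the penalized objective at the new iterate; the reported
    // value is then overwritten with getObjectiveValue(), i.e. the objective
    // value without the barrier term.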
    algo_state.value = ipobj.value(x,zerotol);
    algo_state.value = ipobj.getObjectiveValue();

    ipcon.value(*c_,x,zerotol);
    state->constraintVec->set(*c_);

    ipobj.gradient(*g_,x,zerotol);
    state->gradientVec->set(*g_);

    ipcon.applyAdjointJacobian(*g_,*l_,x,zerotol);
    state->gradientVec->plus(*g_);

    algo_state.gnorm = g_->norm();
    algo_state.cnorm = state->constraintVec->norm();
    algo_state.snorm = s.norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
    algo_state.ncval += ipcon.getNumberConstraintEvaluations();

  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x,
               Vector<Real> &l,
               const Vector<Real> &s,
               Objective<Real> &obj,
               Constraint<Real> &con,
               BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,algo_state);

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
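    // Criticality measure: gnorm = || x - P( x - gradient ) ||, where P projects onto the bounds.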
    x_->set(x);
    x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
    bnd.project(*x_);
    x_->axpy(static_cast<Real>(-1),x);
    algo_state.gnorm = x_->norm();
  }

  /** \brief Update step, if successful (bound constraints).
  */
  void update( Vector<Real> &x,
               const Vector<Real> &s,
               Objective<Real> &obj,
               BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);

    // If we can change the barrier parameter, do so
    if( (rho_ < 1.0 && mu_ > mumin_) || (rho_ > 1.0 && mu_ < mumax_) ) {
      mu_ *= rho_;
      ipobj.updatePenalty(mu_);
    }

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();

    // Update optimization vector
    x.plus(s);

    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;

    Real zerotol = std::sqrt(ROL_EPSILON<Real>());

    algo_state.value = ipobj.value(x,zerotol);
    algo_state.value = ipobj.getObjectiveValue();

    ipobj.gradient(*g_,x,zerotol);
    state->gradientVec->set(*g_);

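    // Criticality measure: gnorm = || x - P( x - gradient ) ||, where P projects onto the bounds.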
    x_->set(x);
    x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
    bnd.project(*x_);
    x_->axpy(static_cast<Real>(-1),x);

    algo_state.gnorm = x_->norm();
    algo_state.snorm = s.norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;

    if( verbosity_ > 0 ) {

      hist << std::string(116,'-') << "\n";
      hist << "Interior Point status output definitions\n\n";

      hist << "  IPiter  - Number of interior point steps taken\n";
      hist << "  SPiter  - Number of subproblem solver iterations\n";
      hist << "  penalty - Penalty parameter multiplying the barrier objective\n";
      hist << "  fval    - Objective function value\n";
      if (hasEquality_) {
        hist << "  cnorm   - Norm of the composite constraint\n";
        hist << "  gLnorm  - Norm of the Lagrangian's gradient\n";
      }
      else {
        hist << "  gnorm   - Norm of the projected objective gradient\n";
      }
      hist << "  snorm   - Norm of step (update to optimization and slack vector)\n";
      hist << "  #fval   - Number of objective function evaluations\n";
      hist << "  #grad   - Number of gradient evaluations\n";
      if (hasEquality_) {
        hist << "  #cval   - Number of composite constraint evaluations\n";
      }
      hist << std::string(116,'-') << "\n";
    }

    hist << "  ";
    hist << std::setw(9)  << std::left << "IPiter";
    hist << std::setw(9)  << std::left << "SPiter";
    hist << std::setw(15) << std::left << "penalty";
    hist << std::setw(15) << std::left << "fval";
    if (hasEquality_) {
      hist << std::setw(15) << std::left << "cnorm";
      hist << std::setw(15) << std::left << "gLnorm";
    }
    else {
      hist << std::setw(15) << std::left << "gnorm";
    }
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    if (hasEquality_) {
      hist << std::setw(8)  << std::left << "#cval";
    }

    hist << "\n";
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << "Primal Interior Point Solver\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(9)  << std::left << algo_state.iter;
      hist << std::setw(9)  << std::left << subproblemIter_;
      hist << std::setw(15) << std::left << mu_;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << "\n";
    }
    else {
      hist << "  ";
      hist << std::setw(9)  << std::left << algo_state.iter;
      hist << std::setw(9)  << std::left << subproblemIter_;
      hist << std::setw(15) << std::left << mu_;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
//      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8)  << std::left << algo_state.nfval;
      hist << std::setw(8)  << std::left << algo_state.ngrad;
      if (hasEquality_) {
        hist << std::setw(8)  << std::left << algo_state.ncval;
      }
      hist << "\n";
    }
    return hist.str();
  }

}; // class InteriorPointStep

} // namespace ROL
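
// Illustrative driver (sketch only, not part of this header): assuming a
// penalized objective `ipobj` (InteriorPoint::PenalizedObjective), a partitioned
// constraint `ipcon` (Constraint_Partitioned), an iterate x, a multiplier l, and
// a parameter list `parlist` as in the constructor above, the step can be run
// through the ROL::Algorithm driver:
//
//   auto step   = ROL::makePtr<ROL::InteriorPointStep<double>>(parlist);
//   auto status = ROL::makePtr<ROL::ConstraintStatusTest<double>>(parlist);
//   ROL::Algorithm<double> algo(step,status,false);
//   algo.run(x,l,ipobj,ipcon,true);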

#endif // ROL_INTERIORPOINTSTEP_H