ROL_AugmentedLagrangianStep.hpp
// @HEADER
// *****************************************************************************
// Rapid Optimization Library (ROL) Package
//
// Copyright 2014 NTESS and the ROL contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
#define ROL_AUGMENTEDLAGRANGIANSTEP_H

#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_Types.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_ParameterList.hpp"

// Step (bound constrained or unconstrained) includes
#include "ROL_LineSearchStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_MoreauYosidaPenaltyStep.hpp"
#include "ROL_PrimalDualActiveSetStep.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_InteriorPointStep.hpp"

// StatusTest includes
#include "ROL_StatusTest.hpp"
#include "ROL_BundleStatusTest.hpp"
/** \class ROL::AugmentedLagrangianStep
    \brief Provides the interface to compute augmented Lagrangian steps.
*/

namespace ROL {

// Forward declarations: these steps can, in turn, use the augmented
// Lagrangian step as a subproblem solver, so the step headers form a cycle.
template<class Real>
class MoreauYosidaPenaltyStep;

template<class Real>
class InteriorPointStep;

template <class Real>
class AugmentedLagrangianStep : public Step<Real> {
private:
  ROL::Ptr<StatusTest<Real>>      status_;
  ROL::Ptr<Step<Real>>            step_;
  ROL::Ptr<Algorithm<Real>>      algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;

  ROL::ParameterList parlist_;
  // Lagrange multiplier update
  bool useDefaultInitPen_;
  bool scaleLagrangian_;
  Real minPenaltyReciprocal_;
  Real minPenaltyLowerBound_;
  Real penaltyUpdate_;
  Real maxPenaltyParam_;
  // Optimality tolerance update
  Real optIncreaseExponent_;
  Real optDecreaseExponent_;
  Real optToleranceInitial_;
  Real optTolerance_;
  // Feasibility tolerance update
  Real feasIncreaseExponent_;
  Real feasDecreaseExponent_;
  Real feasToleranceInitial_;
  Real feasTolerance_;
  // Subproblem information
  bool print_;
  int maxit_;
  int subproblemIter_;
  std::string subStep_;
  Real outerOptTolerance_;
  Real outerFeasTolerance_;
  Real outerStepTolerance_;
  // Scaling information
  bool useDefaultScaling_;
  Real fscale_;
  Real cscale_;
  // Verbosity flag
  int verbosity_;

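  // Criticality measure for the subproblem: with bounds active this is the
  // norm of the projected-gradient step x - P(x - dual(g)); otherwise it is
  // the plain gradient norm. If the Lagrangian is scaled, g is first
  // multiplied by the penalty parameter mu.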
  Real computeGradient(Vector<Real> &g, const Vector<Real> &x,
                       const Real mu, Objective<Real> &obj,
                       BoundConstraint<Real> &bnd) {
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
    augLag.gradient(g,x,tol);
    if ( scaleLagrangian_ ) {
      g.scale(mu);
    }
    // Compute norm of projected gradient
    if (bnd.isActivated()) {
      x_->set(x);
      x_->axpy(static_cast<Real>(-1),g.dual());
      bnd.project(*x_);
      x_->axpy(static_cast<Real>(-1),x);
      gnorm = x_->norm();
    }
    else {
      gnorm = g.norm();
    }
    return gnorm;
  }

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  AugmentedLagrangianStep(ROL::ParameterList &parlist)
    : Step<Real>(), algo_(ROL::nullPtr),
      x_(ROL::nullPtr), parlist_(parlist), subproblemIter_(0) {
    Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
    ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Augmented Lagrangian");
    useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter",true);
    Step<Real>::getState()->searchSize = sublist.get("Initial Penalty Parameter",ten);
    // Multiplier update parameters
    scaleLagrangian_      = sublist.get("Use Scaled Augmented Lagrangian",          false);
    minPenaltyLowerBound_ = sublist.get("Penalty Parameter Reciprocal Lower Bound", p1);
    minPenaltyReciprocal_ = p1;
    penaltyUpdate_        = sublist.get("Penalty Parameter Growth Factor",          ten);
    maxPenaltyParam_      = sublist.get("Maximum Penalty Parameter",                oe8);
    // Optimality tolerance update
    optIncreaseExponent_ = sublist.get("Optimality Tolerance Update Exponent",   one);
    optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
    optToleranceInitial_ = sublist.get("Initial Optimality Tolerance",           one);
    // Feasibility tolerance update
    feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Update Exponent",   p1);
    feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p9);
    feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance",           one);
    // Subproblem information
    print_   = sublist.get("Print Intermediate Optimization History", false);
    maxit_   = sublist.get("Subproblem Iteration Limit",              1000);
    subStep_ = sublist.get("Subproblem Step Type",                    "Trust Region");
    parlist_.sublist("Step").set("Type",subStep_);
    parlist_.sublist("Status Test").set("Iteration Limit",maxit_);
    // Verbosity setting
    verbosity_ = parlist.sublist("General").get("Print Verbosity", 0);
    print_ = (verbosity_ > 0 ? true : print_);
    // Outer iteration tolerances
    outerFeasTolerance_ = parlist.sublist("Status Test").get("Constraint Tolerance", oem8);
    outerOptTolerance_  = parlist.sublist("Status Test").get("Gradient Tolerance",   oem8);
    outerStepTolerance_ = parlist.sublist("Status Test").get("Step Tolerance",       oem8);
    // Scaling
    useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
    fscale_ = sublist.get("Objective Scaling",  1.0);
    cscale_ = sublist.get("Constraint Scaling", 1.0);
  }

  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
    initialize(x,g,l,c,obj,con,*bnd_,algo_state);
  }

  /** \brief Initialize step with equality and bound constraints.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    Real one(1), TOL(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize additional storage
    x_ = x.clone();
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Update objective and constraint.
    augLag.update(x,true,algo_state.iter);
    if (useDefaultScaling_) {
      fscale_ = one/std::max(one,augLag.getObjectiveGradient(x)->norm());
      try {
        Real tol = std::sqrt(ROL_EPSILON<Real>());
        Ptr<Vector<Real>> ji = x.clone();
        Real maxji(0), normji(0);
        for (int i = 0; i < c.dimension(); ++i) {
          con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
          normji = ji->norm();
          maxji  = std::max(normji,maxji);
        }
        cscale_ = one/std::max(one,maxji);
      }
      catch (std::exception &e) {
        cscale_ = one;
      }
    }
    augLag.setScaling(fscale_,cscale_);
    algo_state.value = augLag.getObjectiveValue(x);
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
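    // Default initial penalty: roughly mu0 = 10|f(x0)| / ||c(x0)||^2 with
    // the objective/constraint scalings applied, clipped to the interval
    // [1e-8, 1e-2 * maximum penalty parameter].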
    if (useDefaultInitPen_) {
      Step<Real>::getState()->searchSize
        = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*
            std::max(one,std::abs(fscale_*algo_state.value))
            /std::max(one,std::pow(cscale_*algo_state.cnorm,2)),
          static_cast<Real>(1e-2)*maxPenaltyParam_));
    }
    // Update evaluation counters
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    // Initialize intermediate stopping tolerances
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
    optTolerance_  = std::max<Real>(TOL*outerOptTolerance_,
                       optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
    optTolerance_  = std::min<Real>(optTolerance_,TOL*algo_state.gnorm);
    feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
                       feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    if (verbosity_ > 0) {
      std::cout << std::endl;
      std::cout << "Augmented Lagrangian Initialize" << std::endl;
      std::cout << "Objective Scaling:  " << fscale_ << std::endl;
      std::cout << "Constraint Scaling: " << cscale_ << std::endl;
      std::cout << std::endl;
    }
  }

  /** \brief Compute step (equality constraint).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,*bnd_,algo_state);
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1);
    //AugmentedLagrangian<Real> &augLag
    //  = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    parlist_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
    parlist_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
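    // Build the subproblem solver. Most substeps receive the bound
    // constraint directly through Algorithm::run; the Moreau-Yosida and
    // interior-point substeps instead fold the bounds into a penalized
    // objective wrapped around the augmented Lagrangian.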
    Ptr<Objective<Real>> penObj;
    if (subStep_ == "Bundle") {
      step_   = makePtr<BundleStep<Real>>(parlist_);
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Line Search") {
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Moreau-Yosida Penalty") {
      step_   = makePtr<MoreauYosidaPenaltyStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<MoreauYosidaPenalty<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else if (subStep_ == "Primal Dual Active Set") {
      step_   = makePtr<PrimalDualActiveSetStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Trust Region") {
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Interior Point") {
      step_   = makePtr<InteriorPointStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<InteriorPoint::PenalizedObjective<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else {
      throw Exception::NotImplemented(">>> ROL::AugmentedLagrangianStep: Incompatible substep type!");
    }
    algo_ = makePtr<Algorithm<Real>>(step_,status_,false);
    //algo_ = ROL::makePtr<Algorithm<Real>>(subStep_,parlist_,false);
    x_->set(x);
    if ( bnd.isActivated() ) {
      //algo_->run(*x_,augLag,bnd,print_);
      algo_->run(*x_,*penObj,bnd,print_);
    }
    else {
      //algo_->run(*x_,augLag,print_);
      algo_->run(*x_,*penObj,print_);
    }
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Update step, if successful (equality constraint).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,*bnd_,algo_state);
  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1), oem2(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;
    // Update the step and store in state
    x.plus(s);
    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;
    // Update objective function value
    obj.update(x);
    algo_state.value = augLag.getObjectiveValue(x);
    // Update constraint value
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
    // Compute gradient of the augmented Lagrangian
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
    algo_state.gnorm /= std::min(fscale_,cscale_);
    // Update evaluation counters
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    // Update objective function and constraints
    augLag.update(x,true,algo_state.iter);
    // Update multipliers
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
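    // LANCELOT-style update: if the (scaled) constraint violation meets the
    // current feasibility tolerance, take a first-order multiplier step
    // l <- l + mu*c(x) and tighten the subproblem tolerances; otherwise
    // increase the penalty parameter and reset the tolerances.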
    if ( cscale_*algo_state.cnorm < feasTolerance_ ) {
      l.axpy(state->searchSize*cscale_,(state->constraintVec)->dual());
      if ( algo_->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
        optTolerance_ = std::max(oem2*outerOptTolerance_,
                          optTolerance_*std::pow(minPenaltyReciprocal_,optIncreaseExponent_));
      }
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasTolerance_*std::pow(minPenaltyReciprocal_,feasIncreaseExponent_));
      // Update Algorithm State
      algo_state.snorm += state->searchSize*cscale_*algo_state.cnorm;
      algo_state.lagmultVec->set(l);
    }
    else {
      state->searchSize = std::min(penaltyUpdate_*state->searchSize,maxPenaltyParam_);
      optTolerance_  = std::max(oem2*outerOptTolerance_,
                         optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    }
    augLag.reset(l,state->searchSize);
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;

    if (verbosity_ > 0) {
      hist << std::string(114,'-') << std::endl;
      hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
      hist << "  iter    - Number of iterates (steps taken)" << std::endl;
      hist << "  fval    - Objective function value" << std::endl;
      hist << "  cnorm   - Norm of the constraint violation" << std::endl;
      hist << "  gLnorm  - Norm of the gradient of the Lagrangian" << std::endl;
      hist << "  snorm   - Norm of the step" << std::endl;
      hist << "  penalty - Penalty parameter" << std::endl;
      hist << "  feasTol - Feasibility tolerance" << std::endl;
      hist << "  optTol  - Optimality tolerance" << std::endl;
      hist << "  #fval   - Number of times the objective was computed" << std::endl;
      hist << "  #grad   - Number of times the gradient was computed" << std::endl;
      hist << "  #cval   - Number of times the constraint was computed" << std::endl;
      hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
      hist << std::string(114,'-') << std::endl;
    }
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    hist << std::setw(15) << std::left << "cnorm";
    hist << std::setw(15) << std::left << "gLnorm";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(10) << std::left << "feasTol";
    hist << std::setw(10) << std::left << "optTol";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    hist << std::setw(8)  << std::left << "#cval";
    hist << std::setw(8)  << std::left << "subIter";
    hist << std::endl;
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << std::endl << " Augmented Lagrangian Solver";
    hist << std::endl;
    hist << "Subproblem Solver: " << subStep_ << std::endl;
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
      hist << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
      hist << std::endl;
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << feasTolerance_;
      hist << std::setw(10) << std::left << optTolerance_;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8)  << std::left << algo_state.nfval;
      hist << std::setw(8)  << std::left << algo_state.ngrad;
      hist << std::setw(8)  << std::left << algo_state.ncval;
      hist << std::setw(8)  << std::left << subproblemIter_;
      hist << std::endl;
    }
    return hist.str();
  }

  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {}

  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {}

}; // class AugmentedLagrangianStep

} // namespace ROL

#endif
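A minimal configuration sketch for this step (not part of the header). The parameter-list keys are exactly those read in the constructor above; the numeric tolerances and the iteration limit are illustrative values, not library defaults.

#include "ROL_AugmentedLagrangianStep.hpp"

int main() {
  // Select and configure the augmented Lagrangian step.
  ROL::ParameterList parlist;
  ROL::ParameterList &alsub
    = parlist.sublist("Step").sublist("Augmented Lagrangian");
  alsub.set("Subproblem Step Type",            "Trust Region"); // or "Line Search", "Bundle", ...
  alsub.set("Subproblem Iteration Limit",      100);
  alsub.set("Initial Penalty Parameter",       1.e1);
  alsub.set("Penalty Parameter Growth Factor", 1.e1);
  // Outer stopping tolerances are read from the "Status Test" sublist.
  parlist.sublist("Status Test").set("Gradient Tolerance",   1.e-8);
  parlist.sublist("Status Test").set("Constraint Tolerance", 1.e-8);
  parlist.sublist("Status Test").set("Step Tolerance",       1.e-12);
  // The step would then be handed to a ROL::Algorithm with a StatusTest.
  ROL::AugmentedLagrangianStep<double> step(parlist);
  return 0;
}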