ROL_AugmentedLagrangianStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
#define ROL_AUGMENTEDLAGRANGIANSTEP_H

#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_Types.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_ParameterList.hpp"

// Step (bound constrained or unconstrained) includes
#include "ROL_LineSearchStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_MoreauYosidaPenaltyStep.hpp"
#include "ROL_PrimalDualActiveSetStep.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_InteriorPointStep.hpp"

// StatusTest includes
#include "ROL_StatusTest.hpp"
#include "ROL_BundleStatusTest.hpp"

/** @ingroup step_group
    \class ROL::AugmentedLagrangianStep
    \brief Provides the interface to compute augmented Lagrangian steps.
*/
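
// Example usage (a minimal sketch; "obj", "con", "x", and "l" are placeholder
// names for a user-supplied objective, equality constraint, optimization
// vector, and Lagrange multiplier).  The step is normally driven through
// ROL::Algorithm; the string-based constructor mirrors the commented-out
// alternative inside compute() below:
//
//   ROL::ParameterList parlist;
//   parlist.sublist("Step").sublist("Augmented Lagrangian")
//          .set("Subproblem Step Type", "Trust Region");
//   ROL::Algorithm<double> algo("Augmented Lagrangian", parlist);
//   algo.run(x, l, obj, con, true); // 'true' prints iteration history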

namespace ROL {

template<class Real>
class AugmentedLagrangian;

template<class Real>
class MoreauYosidaPenalty;

template <class Real>
class AugmentedLagrangianStep : public Step<Real> {
private:
  ROL::Ptr<StatusTest<Real>>      status_;
  ROL::Ptr<Step<Real>>            step_;
  ROL::Ptr<Algorithm<Real>>       algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;

  ROL::ParameterList parlist_;
  // Lagrange multiplier update
  bool useDefaultInitPen_;
  bool scaleLagrangian_;
  Real minPenaltyReciprocal_;
  Real minPenaltyLowerBound_;
  Real penaltyUpdate_;
  Real maxPenaltyParam_;
  // Optimality tolerance update
  Real optIncreaseExponent_;
  Real optDecreaseExponent_;
  Real optToleranceInitial_;
  Real optTolerance_;
  // Feasibility tolerance update
  Real feasIncreaseExponent_;
  Real feasDecreaseExponent_;
  Real feasToleranceInitial_;
  Real feasTolerance_;
  // Subproblem information
  bool print_;
  int maxit_;
  int subproblemIter_;
  std::string subStep_;
  Real outerOptTolerance_;
  Real outerFeasTolerance_;
  Real outerStepTolerance_;
  // Scaling information
  bool useDefaultScaling_;
  Real fscale_;
  Real cscale_;
  // Verbosity flag
  int verbosity_;

  Real computeGradient(Vector<Real> &g, const Vector<Real> &x,
                       const Real mu, Objective<Real> &obj,
                       BoundConstraint<Real> &bnd) {
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
    augLag.gradient(g,x,tol);
    if ( scaleLagrangian_ ) {
      g.scale(mu);
    }
    // Compute norm of projected gradient
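    // (i.e., gnorm = || P(x - g) - x ||, where P projects onto the bound
    // constraint set; this vanishes at a stationary point of the
    // bound-constrained subproblem, while the else-branch uses || g ||
    // directly when no bounds are active)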
    if (bnd.isActivated()) {
      x_->set(x);
      x_->axpy(static_cast<Real>(-1),g.dual());
      bnd.project(*x_);
      x_->axpy(static_cast<Real>(-1),x);
      gnorm = x_->norm();
    }
    else {
      gnorm = g.norm();
    }
    return gnorm;
  }

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  AugmentedLagrangianStep(ROL::ParameterList &parlist)
    : Step<Real>(), algo_(ROL::nullPtr),
      x_(ROL::nullPtr), parlist_(parlist), subproblemIter_(0) {
    Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
    ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Augmented Lagrangian");
    useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter",true);
    Step<Real>::getState()->searchSize = sublist.get("Initial Penalty Parameter",ten);
    // Multiplier update parameters
    scaleLagrangian_      = sublist.get("Use Scaled Augmented Lagrangian", false);
    minPenaltyLowerBound_ = sublist.get("Penalty Parameter Reciprocal Lower Bound", p1);
    penaltyUpdate_        = sublist.get("Penalty Parameter Growth Factor", ten);
    maxPenaltyParam_      = sublist.get("Maximum Penalty Parameter", oe8);
    // Optimality tolerance update
    optIncreaseExponent_ = sublist.get("Optimality Tolerance Update Exponent", one);
    optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
    optToleranceInitial_ = sublist.get("Initial Optimality Tolerance", one);
    // Feasibility tolerance update
    feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Update Exponent", p1);
    feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p9);
    feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance", one);
    // Subproblem information
    print_   = sublist.get("Print Intermediate Optimization History", false);
    maxit_   = sublist.get("Subproblem Iteration Limit", 1000);
    subStep_ = sublist.get("Subproblem Step Type", "Trust Region");
    parlist_.sublist("Step").set("Type",subStep_);
    parlist_.sublist("Status Test").set("Iteration Limit",maxit_);
    // Verbosity setting
    verbosity_ = parlist.sublist("General").get("Print Verbosity", 0);
    print_ = (verbosity_ > 0 ? true : print_);
    // Outer iteration tolerances
    outerFeasTolerance_ = parlist.sublist("Status Test").get("Constraint Tolerance", oem8);
    outerOptTolerance_  = parlist.sublist("Status Test").get("Gradient Tolerance", oem8);
    outerStepTolerance_ = parlist.sublist("Status Test").get("Step Tolerance", oem8);
    // Scaling
    useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
    fscale_ = sublist.get("Objective Scaling", 1.0);
    cscale_ = sublist.get("Constraint Scaling", 1.0);
  }

  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l,
                   const Vector<Real> &c, Objective<Real> &obj,
                   Constraint<Real> &con, AlgorithmState<Real> &algo_state ) {
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
    initialize(x,g,l,c,obj,con,*bnd_,algo_state);
  }

  /** \brief Initialize step with equality and bound constraints.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l,
                   const Vector<Real> &c, Objective<Real> &obj,
                   Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    Real one(1), TOL(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize additional storage
    x_ = x.clone();
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
      bnd.update(x,true,algo_state.iter);
    }
    // Update objective and constraint.
    augLag.update(x,true,algo_state.iter);
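    // Default problem scaling: fscale_ = 1/max(1,||grad f(x)||) and
    // cscale_ = 1/max(1,max_i ||c'(x)^* e_i||), so the scaled gradient and the
    // rows of the scaled constraint Jacobian have norm at most O(1) at the
    // initial point.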
    if (useDefaultScaling_) {
      fscale_ = one/std::max(one,augLag.getObjectiveGradient(x)->norm());
      try {
        Real tol = std::sqrt(ROL_EPSILON<Real>());
        Ptr<Vector<Real>> ji = x.clone();
        Real maxji(0), normji(0);
        for (int i = 0; i < c.dimension(); ++i) {
          con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
          normji = ji->norm();
          maxji  = std::max(normji,maxji);
        }
        cscale_ = one/std::max(one,maxji);
      }
      catch (std::exception &e) {
        cscale_ = one;
      }
    }
    augLag.setScaling(fscale_,cscale_);
    algo_state.value = augLag.getObjectiveValue(x);
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
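    // Default initial penalty heuristic: mu_0 is
    // 10*max(1,|f(x)|)/max(1,||c(x)||^2) for the scaled problem, clipped to
    // the interval [1e-8, 1e-2*maxPenaltyParam_].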
    if (useDefaultInitPen_) {
      Step<Real>::getState()->searchSize
        = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*
            std::max(one,std::abs(fscale_*algo_state.value))
            /std::max(one,std::pow(cscale_*algo_state.cnorm,2)),
            static_cast<Real>(1e-2)*maxPenaltyParam_));
    }
    // Update evaluation counters
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    // Initialize intermediate stopping tolerances
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
    optTolerance_  = std::max<Real>(TOL*outerOptTolerance_,
                       optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
    optTolerance_  = std::min<Real>(optTolerance_,TOL*algo_state.gnorm);
    feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
                       feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    if (verbosity_ > 0) {
      std::cout << std::endl;
      std::cout << "Augmented Lagrangian Initialize" << std::endl;
      std::cout << "Objective Scaling: " << fscale_ << std::endl;
      std::cout << "Constraint Scaling: " << cscale_ << std::endl;
      std::cout << std::endl;
    }
  }

  /** \brief Compute step (equality constraint).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,*bnd_,algo_state);
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1);
    //AugmentedLagrangian<Real> &augLag
    //  = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    parlist_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
    parlist_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
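    // Build the subproblem solver for the requested step type.  Bundle,
    // Line Search, Primal Dual Active Set, and Trust Region minimize the
    // augmented Lagrangian directly; Moreau-Yosida Penalty and Interior
    // Point first wrap it in an additional penalization of the bound
    // constraint.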
    Ptr<Objective<Real>> penObj;
    if (subStep_ == "Bundle") {
      step_   = makePtr<BundleStep<Real>>(parlist_);
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Line Search") {
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Moreau-Yosida Penalty") {
      step_   = makePtr<MoreauYosidaPenaltyStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<MoreauYosidaPenalty<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else if (subStep_ == "Primal Dual Active Set") {
      step_   = makePtr<PrimalDualActiveSetStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Trust Region") {
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      penObj  = makePtrFromRef(obj);
    }
    else if (subStep_ == "Interior Point") {
      step_   = makePtr<InteriorPointStep<Real>>(parlist_);
      status_ = makePtr<StatusTest<Real>>(parlist_);
      Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
      penObj  = ROL::makePtr<InteriorPoint::PenalizedObjective<Real>>(raw_obj,bnd_,x,parlist_);
    }
    else {
      throw Exception::NotImplemented(">>> ROL::AugmentedLagrangianStep: Incompatible substep type!");
    }
    algo_ = makePtr<Algorithm<Real>>(step_,status_,false);
    //algo_ = ROL::makePtr<Algorithm<Real>>(subStep_,parlist_,false);
    x_->set(x);
    if ( bnd.isActivated() ) {
      //algo_->run(*x_,augLag,bnd,print_);
      algo_->run(*x_,*penObj,bnd,print_);
    }
    else {
      //algo_->run(*x_,augLag,print_);
      algo_->run(*x_,*penObj,print_);
    }
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Update step, if successful (equality constraint).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,*bnd_,algo_state);
  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
    Real one(1), oem2(1.e-2);
    AugmentedLagrangian<Real> &augLag
      = dynamic_cast<AugmentedLagrangian<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;
    // Update the step and store in state
    x.plus(s);
    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;
    // Update objective function value
    obj.update(x);
    algo_state.value = augLag.getObjectiveValue(x);
    // Update constraint value
    augLag.getConstraintVec(*(state->constraintVec),x);
    algo_state.cnorm = (state->constraintVec)->norm();
    // Compute gradient of the augmented Lagrangian
    algo_state.gnorm = computeGradient(*(state->gradientVec),x,state->searchSize,obj,bnd);
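    // Remove the problem scaling before the optimality test; dividing by the
    // smaller of the two scaling factors is the conservative choice.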
    algo_state.gnorm /= std::min(fscale_,cscale_);
    // Update evaluation counters
    algo_state.nfval += augLag.getNumberFunctionEvaluations();
    algo_state.ngrad += augLag.getNumberGradientEvaluations();
    algo_state.ncval += augLag.getNumberConstraintEvaluations();
    // Update objective function and constraints
    augLag.update(x,true,algo_state.iter);
    bnd.update(x,true,algo_state.iter);
    // Update multipliers
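    // If the iterate is sufficiently feasible, apply the first-order
    // multiplier estimate l <- l + mu*c(x) and tighten both tolerances;
    // otherwise keep the multipliers, increase the penalty parameter, and
    // reset the tolerances from their initial values.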
    minPenaltyReciprocal_ = std::min(one/state->searchSize,minPenaltyLowerBound_);
    if ( cscale_*algo_state.cnorm < feasTolerance_ ) {
      l.axpy(state->searchSize*cscale_,(state->constraintVec)->dual());
      if ( algo_->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
        optTolerance_  = std::max(oem2*outerOptTolerance_,
                           optTolerance_*std::pow(minPenaltyReciprocal_,optIncreaseExponent_));
      }
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasTolerance_*std::pow(minPenaltyReciprocal_,feasIncreaseExponent_));
      // Update Algorithm State
      algo_state.snorm += state->searchSize*cscale_*algo_state.cnorm;
      algo_state.lagmultVec->set(l);
    }
    else {
      state->searchSize = std::min(penaltyUpdate_*state->searchSize,maxPenaltyParam_);
      optTolerance_  = std::max(oem2*outerOptTolerance_,
                         optToleranceInitial_*std::pow(minPenaltyReciprocal_,optDecreaseExponent_));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
                         feasToleranceInitial_*std::pow(minPenaltyReciprocal_,feasDecreaseExponent_));
    }
    augLag.reset(l,state->searchSize);
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;

    if(verbosity_>0) {
      hist << std::string(114,'-') << std::endl;
      hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
      hist << "  iter    - Number of iterates (steps taken)" << std::endl;
      hist << "  fval    - Objective function value" << std::endl;
      hist << "  cnorm   - Norm of the constraint violation" << std::endl;
      hist << "  gLnorm  - Norm of the gradient of the Lagrangian" << std::endl;
      hist << "  snorm   - Norm of the step" << std::endl;
      hist << "  penalty - Penalty parameter" << std::endl;
      hist << "  feasTol - Feasibility tolerance" << std::endl;
      hist << "  optTol  - Optimality tolerance" << std::endl;
      hist << "  #fval   - Number of times the objective was computed" << std::endl;
      hist << "  #grad   - Number of times the gradient was computed" << std::endl;
      hist << "  #cval   - Number of times the constraint was computed" << std::endl;
      hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
      hist << std::string(114,'-') << std::endl;
    }
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    hist << std::setw(15) << std::left << "cnorm";
    hist << std::setw(15) << std::left << "gLnorm";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(10) << std::left << "feasTol";
    hist << std::setw(10) << std::left << "optTol";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    hist << std::setw(8)  << std::left << "#cval";
    hist << std::setw(8)  << std::left << "subIter";
    hist << std::endl;
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << std::endl << " Augmented Lagrangian Solver";
    hist << std::endl;
    hist << "Subproblem Solver: " << subStep_ << std::endl;
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
      hist << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
      hist << std::endl;
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::setw(10) << std::left << feasTolerance_;
      hist << std::setw(10) << std::left << optTolerance_;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8)  << std::left << algo_state.nfval;
      hist << std::setw(8)  << std::left << algo_state.ngrad;
      hist << std::setw(8)  << std::left << algo_state.ncval;
      hist << std::setw(8)  << std::left << subproblemIter_;
      hist << std::endl;
    }
    return hist.str();
  }

  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {}

  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {}

}; // class AugmentedLagrangianStep

} // namespace ROL

#endif