#ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
#define ROL_AUGMENTEDLAGRANGIANSTEP_H

#include "ROL_ParameterList.hpp"
template <class Real>
// ...
ROL::Ptr<Vector<Real>>          x_;
ROL::Ptr<BoundConstraint<Real>> bnd_;
Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
// ...
x_->axpy(static_cast<Real>(-1),g.dual());
// ...
x_->axpy(static_cast<Real>(-1),x);
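Read together with the elided lines around them (a set of x_ to the current iterate, a projection onto the bound set, and a final norm), these axpy calls appear to assemble the projected-gradient criticality measure returned by computeGradient. As a sketch, under that reading:

\[ \mathrm{gnorm} \;=\; \bigl\| \, x \;-\; P_{[\ell,u]}\bigl( x - \nabla_x L_A(x,\lambda,r) \bigr) \, \bigr\| , \]

where P_{[\ell,u]} denotes projection onto the bound constraints and L_A is the augmented Lagrangian; when the bounds are not activated this reduces to the plain gradient norm.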
Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Augmented Lagrangian");
// ...
penaltyUpdate_ = sublist.get("Penalty Parameter Growth Factor", ten);
// ...
print_   = sublist.get("Print Intermediate Optimization History", false);
maxit_   = sublist.get("Subproblem Iteration Limit", 1000);
subStep_ = sublist.get("Subproblem Step Type", "Trust Region");
// ...
verbosity_ = parlist.sublist("General").get("Print Verbosity", 0);
// ...
fscale_ = sublist.get("Objective Scaling", 1.0);
cscale_ = sublist.get("Constraint Scaling", 1.0);
// ...
bnd_ = ROL::makePtr<BoundConstraint<Real>>();
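The get() calls above read their entries from the "Augmented Lagrangian" sublist of the "Step" sublist, plus "Print Verbosity" from the "General" sublist. A minimal configuration sketch follows; the helper name makeAugLagParameters and the chosen values are illustrative placeholders, only the key strings are taken from the code above. Any key that is omitted simply falls back to the default passed to get().

#include <string>
#include "ROL_ParameterList.hpp"

// Hypothetical configuration sketch; keys mirror the get() calls above,
// values are illustrative placeholders.
ROL::ParameterList makeAugLagParameters() {
  ROL::ParameterList parlist;
  ROL::ParameterList &alsub =
    parlist.sublist("Step").sublist("Augmented Lagrangian");
  alsub.set("Penalty Parameter Growth Factor",         10.0);
  alsub.set("Print Intermediate Optimization History", false);
  alsub.set("Subproblem Iteration Limit",              1000);
  alsub.set("Subproblem Step Type",                    std::string("Trust Region"));
  alsub.set("Objective Scaling",                       1.0);
  alsub.set("Constraint Scaling",                      1.0);
  parlist.sublist("General").set("Print Verbosity", 0);
  return parlist;
}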
Real one(1), TOL(1.e-2);
// ...
state->descentVec    = x.clone();
state->gradientVec   = g.clone();
state->constraintVec = c.clone();
// ...
algo_state.nfval = 0;
algo_state.ncval = 0;
algo_state.ngrad = 0;
// ...
Real tol = std::sqrt(ROL_EPSILON<Real>());
Ptr<Vector<Real>> ji = x.clone();
Real maxji(0), normji(0);
for (int i = 0; i < c.dimension(); ++i) {
  // ...
  maxji = std::max(normji,maxji);
}
cscale_ = one/std::max(one,maxji);
// ...
catch (std::exception &e) {
  // ...
}
// ...
algo_state.cnorm = (state->constraintVec)->norm();
// ...
  = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*  // (continuation; enclosing statement not shown)
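The loop over the constraint dimension evidently takes a maximum over per-component norms (the elided body presumably applies the adjoint constraint Jacobian to each basis vector, compare the applyAdjointJacobian and basis entries in the cross references at the end of this listing), and the default constraint scaling is its safeguarded reciprocal. The truncated assignment that follows clamps a default initial penalty parameter from below by 1e-8 and scales a quantity by 10; its tail is cut off in this listing. Under those assumptions, the scaling is

\[ \mathrm{cscale} \;=\; \frac{1}{\max\bigl\{ 1,\; \max_{i} \,\| c'(x)^{*} e_i \| \bigr\}} . \]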
std::cout << std::endl;
std::cout << "Augmented Lagrangian Initialize" << std::endl;
std::cout << "Objective Scaling: "  << fscale_ << std::endl;
std::cout << "Constraint Scaling: " << cscale_ << std::endl;
std::cout << std::endl;
Ptr<Objective<Real>> penObj;
// ...
  penObj = makePtrFromRef(obj);
// ...
else if (subStep_ == "Line Search") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Moreau-Yosida Penalty") {
  // ...
  Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
  penObj = ROL::makePtr<MoreauYosidaPenalty<Real>>(raw_obj,bnd_,x,parlist_);
}
else if (subStep_ == "Primal Dual Active Set") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Trust Region") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Interior Point") {
  // ...
  Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
  penObj = ROL::makePtr<InteriorPoint::PenalizedObjective<Real>>(raw_obj,bnd_,x,parlist_);
}
Real one(1), oem2(1.e-2);
// ...
state->descentVec->set(s);
// ...
algo_state.cnorm = (state->constraintVec)->norm();
// ...
l.axpy(state->searchSize*cscale_,(state->constraintVec)->dual());
// ...
augLag.reset(l,state->searchSize);
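The axpy on the multiplier vector is the classical first-order multiplier update of the augmented Lagrangian method, applied here to the scaled constraint value through its dual representation and followed by a reset of the augmented Lagrangian objective with the new multiplier and the current penalty parameter state->searchSize (denoted r below):

\[ \lambda \;\leftarrow\; \lambda \;+\; r \, c_{\mathrm{scale}} \, c(x). \]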
std::stringstream hist;
// ...
hist << std::string(114,'-') << std::endl;
hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
hist << " iter - Number of iterates (steps taken)" << std::endl;
hist << " fval - Objective function value" << std::endl;
hist << " cnorm - Norm of the constraint violation" << std::endl;
hist << " gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
hist << " snorm - Norm of the step" << std::endl;
hist << " penalty - Penalty parameter" << std::endl;
hist << " feasTol - Feasibility tolerance" << std::endl;
hist << " optTol - Optimality tolerance" << std::endl;
hist << " #fval - Number of times the objective was computed" << std::endl;
hist << " #grad - Number of times the gradient was computed" << std::endl;
hist << " #cval - Number of times the constraint was computed" << std::endl;
hist << " subIter - Number of iterations to solve subproblem" << std::endl;
hist << std::string(114,'-') << std::endl;
// ...
hist << std::setw(6)  << std::left << "iter";
hist << std::setw(15) << std::left << "fval";
hist << std::setw(15) << std::left << "cnorm";
hist << std::setw(15) << std::left << "gLnorm";
hist << std::setw(15) << std::left << "snorm";
hist << std::setw(10) << std::left << "penalty";
hist << std::setw(10) << std::left << "feasTol";
hist << std::setw(10) << std::left << "optTol";
hist << std::setw(8)  << std::left << "#fval";
hist << std::setw(8)  << std::left << "#grad";
hist << std::setw(8)  << std::left << "#cval";
hist << std::setw(8)  << std::left << "subIter";
std::stringstream hist;
hist << std::endl << " Augmented Lagrangian Solver";
// ...
hist << "Subproblem Solver: " << subStep_ << std::endl;
std::stringstream hist;
hist << std::scientific << std::setprecision(6);
if ( algo_state.iter == 0 ) {
  // ...
}
// ...
if ( algo_state.iter == 0 ) {
  // ...
  hist << std::setw(6)  << std::left << algo_state.iter;
  hist << std::setw(15) << std::left << algo_state.value;
  hist << std::setw(15) << std::left << algo_state.cnorm;
  hist << std::setw(15) << std::left << algo_state.gnorm;
  hist << std::setw(15) << std::left << " ";
  hist << std::scientific << std::setprecision(2);
  hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
  // ...
}
// ...
hist << std::setw(6)  << std::left << algo_state.iter;
hist << std::setw(15) << std::left << algo_state.value;
hist << std::setw(15) << std::left << algo_state.cnorm;
hist << std::setw(15) << std::left << algo_state.gnorm;
hist << std::setw(15) << std::left << algo_state.snorm;
hist << std::scientific << std::setprecision(2);
hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
// ...
hist << std::scientific << std::setprecision(6);
hist << std::setw(8) << std::left << algo_state.nfval;
hist << std::setw(8) << std::left << algo_state.ngrad;
hist << std::setw(8) << std::left << algo_state.ncval;
Cross references for this listing:

ROL::AugmentedLagrangianStep - Provides the interface to compute augmented Lagrangian steps.
  AugmentedLagrangianStep(ROL::ParameterList &parlist)
  ~AugmentedLagrangianStep()
  Real computeGradient(Vector<Real> &g, const Vector<Real> &x, const Real mu, Objective<Real> &obj, BoundConstraint<Real> &bnd)
  void initialize(Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c, Objective<Real> &obj, Constraint<Real> &con, AlgorithmState<Real> &algo_state) - Initialize step with equality constraint.
  void initialize(Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c, Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state) - Initialize step with equality and bound constraints.
  void compute(Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l, Objective<Real> &obj, Constraint<Real> &con, AlgorithmState<Real> &algo_state) - Compute step (equality constraint).
  void compute(Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l, Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state) - Compute step (equality and bound constraints).
  void compute(Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state) - Compute step for bound constraints; present only to satisfy the interface requirements, does nothing, needs refactoring.
  void update(Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s, Objective<Real> &obj, Constraint<Real> &con, AlgorithmState<Real> &algo_state) - Update step, if successful (equality constraint).
  void update(Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s, Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state) - Update step, if successful (equality and bound constraints).
  void update(Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj, BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state) - Update step for bound constraints; present only to satisfy the interface requirements, does nothing, needs refactoring.
  std::string printHeader(void) const - Print iterate header.
  std::string printName(void) const - Print step name.
  std::string print(AlgorithmState<Real> &algo_state, bool pHeader=false) const - Print iterate status.
  Data members: x_, bnd_, step_, algo_, status_, parlist_, feasToleranceInitial_, feasDecreaseExponent_, feasIncreaseExponent_, optToleranceInitial_, optDecreaseExponent_, optIncreaseExponent_, minPenaltyLowerBound_, minPenaltyReciprocal_.

ROL::Vector - Defines the linear algebra or vector space interface.
  clone() - Clone to make a new (uninitialized) vector.
  dual() - Return dual representation of *this, for example, the result of applying a Riesz map, or change of basis...
  plus(x) - Compute y <- y + x, where y = *this.
  axpy(alpha, x) - Compute y <- alpha*x + y, where y = *this.
  scale(alpha) - Compute y <- alpha*y, where y = *this.
  set(x) - Set y <- x, where y = *this.
  norm() - Returns ||y||, where y = *this.
  basis(i) - Return i-th basis vector.
  dimension() - Return dimension of the vector space.

ROL::Objective - Provides the interface to evaluate objective functions.
  gradient(g, x, tol) - Compute gradient.
  update(x, flag, iter) - Update objective function.

ROL::BoundConstraint - Provides the interface to apply upper and lower bound constraints.
  isActivated() - Check if bounds are on.
  project(x) - Project optimization variables onto the bounds.
  update(x, flag, iter) - Update bounds.

ROL::Constraint - Defines the general constraint operator interface.
  applyAdjointJacobian(ajv, v, x, tol) - Apply the adjoint of the constraint Jacobian at x, c'(x)*, to the vector v.

ROL::AugmentedLagrangian - Provides the interface to evaluate the augmented Lagrangian.
  reset(multiplier, penaltyParameter), setScaling(fscale, cscale=1.0), getObjectiveValue(x), getObjectiveGradient(x), getConstraintVec(c, x), getNumberFunctionEvaluations(), getNumberGradientEvaluations(), getNumberConstraintEvaluations().

ROL::Step - Provides the interface to compute optimization steps.
  ROL::Ptr<StepState<Real>> getState(void)

ROL::AlgorithmState - State for algorithm class. Will be used for restarts.
  iterateVec, lagmultVec.

ROL::MoreauYosidaPenalty - Implements the computation of optimization steps using Moreau-Yosida regularized bound constraints...

ROL_Types.hpp - Contains definitions of custom data types in ROL.