#ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
#define ROL_AUGMENTEDLAGRANGIANSTEP_H
// ...
#include "ROL_ParameterList.hpp"
template <class Real>
// ...
ROL::Ptr<Vector<Real>>          x_;
ROL::Ptr<BoundConstraint<Real>> bnd_;
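// x_ serves as scratch storage for the projected-gradient computation below,
// while bnd_ holds the bound constraint that is either supplied by the problem
// or default-constructed (inactive) in the constructor.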
Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
// ...
x_->axpy(static_cast<Real>(-1),g.dual());
// ...
x_->axpy(static_cast<Real>(-1),x);
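// In context, these statements appear to assemble the projected-gradient
// criticality measure: starting from x, step along the negative (dual) gradient,
// project onto the bounds, subtract x, and take the norm, i.e. roughly
//   gnorm = || P(x - g) - x ||.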
Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Augmented Lagrangian");
// ...
penaltyUpdate_ = sublist.get("Penalty Parameter Growth Factor", ten);
// ...
print_   = sublist.get("Print Intermediate Optimization History", false);
maxit_   = sublist.get("Subproblem Iteration Limit", 1000);
subStep_ = sublist.get("Subproblem Step Type", "Trust Region");
// ...
verbosity_ = parlist.sublist("General").get("Print Verbosity", 0);
// ...
fscale_ = sublist.get("Objective Scaling", 1.0);
cscale_ = sublist.get("Constraint Scaling", 1.0);
// ...
bnd_ = ROL::makePtr<BoundConstraint<Real>>();
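// Illustrative sketch (not taken from this file): the keys read above suggest a
// parameter list of the following shape; the values shown are simply the
// defaults used by this constructor.
//
//   #include "ROL_AugmentedLagrangianStep.hpp"
//
//   ROL::ParameterList parlist;
//   ROL::ParameterList &alList
//     = parlist.sublist("Step").sublist("Augmented Lagrangian");
//   alList.set("Penalty Parameter Growth Factor",         1.e1);
//   alList.set("Print Intermediate Optimization History", false);
//   alList.set("Subproblem Iteration Limit",              1000);
//   alList.set("Subproblem Step Type",                    "Trust Region");
//   alList.set("Objective Scaling",                       1.0);
//   alList.set("Constraint Scaling",                      1.0);
//   parlist.sublist("General").set("Print Verbosity",     0);
//   ROL::AugmentedLagrangianStep<double> step(parlist);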
Real one(1), TOL(1.e-2);
// ...
state->descentVec    = x.clone();
state->gradientVec   = g.clone();
state->constraintVec = c.clone();
// ...
algo_state.nfval = 0;
algo_state.ncval = 0;
algo_state.ngrad = 0;
// ...
Real tol = std::sqrt(ROL_EPSILON<Real>());
Ptr<Vector<Real>> ji = x.clone();
Real maxji(0), normji(0);
for (int i = 0; i < c.dimension(); ++i) {
  // ...
  maxji = std::max(normji,maxji);
}
cscale_ = one/std::max(one,maxji);
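// The elided loop body evidently probes the size of each constraint component
// (filling normji) and tracks the largest such norm in maxji, so cscale_ ends
// up as the reciprocal of that maximum, capped at one.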
// ...
catch (std::exception &e) {
  // ...
}
// ...
algo_state.cnorm = (state->constraintVec)->norm();
// ...
  = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*
std::cout << std::endl;
std::cout << "Augmented Lagrangian Initialize" << std::endl;
std::cout << "Objective Scaling: "  << fscale_ << std::endl;
std::cout << "Constraint Scaling: " << cscale_ << std::endl;
std::cout << std::endl;
Ptr<Objective<Real>> penObj;
// ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Line Search") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Moreau-Yosida Penalty") {
  // ...
  Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
  penObj = ROL::makePtr<MoreauYosidaPenalty<Real>>(raw_obj,bnd_,x,parlist_);
}
else if (subStep_ == "Primal Dual Active Set") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Trust Region") {
  // ...
  penObj = makePtrFromRef(obj);
}
else if (subStep_ == "Interior Point") {
  // ...
  Ptr<Objective<Real>> raw_obj = makePtrFromRef(obj);
  penObj = ROL::makePtr<InteriorPoint::PenalizedObjective<Real>>(raw_obj,bnd_,x,parlist_);
}
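// Note that only the "Moreau-Yosida Penalty" and "Interior Point" subproblem
// types fold the bound constraint bnd_ into the subproblem objective through a
// penalized wrapper; the remaining step types take obj unchanged.  The elided
// lines in each branch presumably construct the matching subproblem step and
// status test (step_ and status_).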
Real one(1), oem2(1.e-2);
// ...
state->descentVec->set(s);
// ...
algo_state.cnorm = (state->constraintVec)->norm();
// ...
l.axpy(state->searchSize*cscale_,(state->constraintVec)->dual());
// ...
augLag.reset(l,state->searchSize);
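// Here state->searchSize carries the penalty parameter (it is passed to
// augLag.reset as such), so the axpy above appears to perform the first-order
// multiplier update
//   l <- l + r * cscale * c(x),
// after which the augmented Lagrangian is reset with the new multiplier and
// penalty.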
std::stringstream hist;
// ...
hist << std::string(114,'-') << std::endl;
hist << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
hist << "  iter - Number of iterates (steps taken)" << std::endl;
hist << "  fval - Objective function value" << std::endl;
hist << "  cnorm - Norm of the constraint violation" << std::endl;
hist << "  gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
hist << "  snorm - Norm of the step" << std::endl;
hist << "  penalty - Penalty parameter" << std::endl;
hist << "  feasTol - Feasibility tolerance" << std::endl;
hist << "  optTol - Optimality tolerance" << std::endl;
hist << "  #fval - Number of times the objective was computed" << std::endl;
hist << "  #grad - Number of times the gradient was computed" << std::endl;
hist << "  #cval - Number of times the constraint was computed" << std::endl;
hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
hist << std::string(114,'-') << std::endl;
// ...
hist << std::setw(6)  << std::left << "iter";
hist << std::setw(15) << std::left << "fval";
hist << std::setw(15) << std::left << "cnorm";
hist << std::setw(15) << std::left << "gLnorm";
hist << std::setw(15) << std::left << "snorm";
hist << std::setw(10) << std::left << "penalty";
hist << std::setw(10) << std::left << "feasTol";
hist << std::setw(10) << std::left << "optTol";
hist << std::setw(8)  << std::left << "#fval";
hist << std::setw(8)  << std::left << "#grad";
hist << std::setw(8)  << std::left << "#cval";
hist << std::setw(8)  << std::left << "subIter";
std::stringstream hist;
hist << std::endl << " Augmented Lagrangian Solver";
// ...
hist << "Subproblem Solver: " << subStep_ << std::endl;
std::stringstream hist;
hist << std::scientific << std::setprecision(6);
if ( algo_state.iter == 0 ) {
  // ...
}
// ...
if ( algo_state.iter == 0 ) {
  hist << std::setw(6)  << std::left << algo_state.iter;
  hist << std::setw(15) << std::left << algo_state.value;
  hist << std::setw(15) << std::left << algo_state.cnorm;
  hist << std::setw(15) << std::left << algo_state.gnorm;
  hist << std::setw(15) << std::left << " ";
  hist << std::scientific << std::setprecision(2);
  hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
  // ...
}
else {
  hist << std::setw(6)  << std::left << algo_state.iter;
  hist << std::setw(15) << std::left << algo_state.value;
  hist << std::setw(15) << std::left << algo_state.cnorm;
  hist << std::setw(15) << std::left << algo_state.gnorm;
  hist << std::setw(15) << std::left << algo_state.snorm;
  hist << std::scientific << std::setprecision(2);
  hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
  // ...
  hist << std::scientific << std::setprecision(6);
  hist << std::setw(8) << std::left << algo_state.nfval;
  hist << std::setw(8) << std::left << algo_state.ngrad;
  hist << std::setw(8) << std::left << algo_state.ncval;
  // ...
}