44 #ifndef ROL_AUGMENTEDLAGRANGIANSTEP_H
45 #define ROL_AUGMENTEDLAGRANGIANSTEP_H
52 #include "ROL_ParameterList.hpp"
143 template <
class Real>
147 ROL::Ptr<Vector<Real> >
x_;
148 ROL::Ptr<BoundConstraint<Real> >
bnd_;
188 Real gnorm = 0., tol = std::sqrt(ROL_EPSILON<Real>());
196 x_->axpy(static_cast<Real>(-1),g.
dual());
198 x_->axpy(static_cast<Real>(-1),x);
218 Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
219 ROL::ParameterList& sublist = parlist.sublist(
"Step").sublist(
"Augmented Lagrangian");
226 penaltyUpdate_ = sublist.get(
"Penalty Parameter Growth Factor", ten);
237 print_ = sublist.get(
"Print Intermediate Optimization History",
false);
238 maxit_ = sublist.get(
"Subproblem Iteration Limit", 1000);
239 subStep_ = sublist.get(
"Subproblem Step Type",
"Trust Region");
243 verbosity_ = parlist.sublist(
"General").get(
"Print Verbosity", 0);
251 fscale_ = sublist.get(
"Objective Scaling", 1.0);
252 cscale_ = sublist.get(
"Constraint Scaling", 1.0);
260 bnd_ = ROL::makePtr<BoundConstraint<Real>>();
270 Real one(1), TOL(1.e-2);
275 state->descentVec = x.
clone();
276 state->gradientVec = g.
clone();
277 state->constraintVec = c.
clone();
281 algo_state.
nfval = 0;
282 algo_state.
ncval = 0;
283 algo_state.
ngrad = 0;
294 Real tol = std::sqrt(ROL_EPSILON<Real>());
295 Ptr<Vector<Real>> ji = x.
clone();
296 Real maxji(0), normji(0);
297 for (
int i = 0; i < c.
dimension(); ++i) {
300 maxji = std::max(normji,maxji);
302 cscale_ = one/std::max(one,maxji);
304 catch (std::exception &e) {
312 algo_state.
cnorm = (state->constraintVec)->norm();
315 = std::max(static_cast<Real>(1e-8),std::min(static_cast<Real>(10)*
332 std::cout << std::endl;
333 std::cout <<
"Augmented Lagrangian Initialize" << std::endl;
334 std::cout <<
"Objective Scaling: " <<
fscale_ << std::endl;
335 std::cout <<
"Constraint Scaling: " <<
cscale_ << std::endl;
336 std::cout << std::endl;
384 Real one(1), oem2(1.e-2);
392 state->descentVec->set(s);
399 algo_state.
cnorm = (state->constraintVec)->norm();
413 l.
axpy(state->searchSize*
cscale_,(state->constraintVec)->dual());
429 augLag.
reset(l,state->searchSize);
435 std::stringstream hist;
438 hist << std::string(114,
'-') << std::endl;
439 hist <<
"Augmented Lagrangian status output definitions" << std::endl << std::endl;
440 hist <<
" iter - Number of iterates (steps taken)" << std::endl;
441 hist <<
" fval - Objective function value" << std::endl;
442 hist <<
" cnorm - Norm of the constraint violation" << std::endl;
443 hist <<
" gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
444 hist <<
" snorm - Norm of the step" << std::endl;
445 hist <<
" penalty - Penalty parameter" << std::endl;
446 hist <<
" feasTol - Feasibility tolerance" << std::endl;
447 hist <<
" optTol - Optimality tolerance" << std::endl;
448 hist <<
" #fval - Number of times the objective was computed" << std::endl;
449 hist <<
" #grad - Number of times the gradient was computed" << std::endl;
450 hist <<
" #cval - Number of times the constraint was computed" << std::endl;
451 hist <<
" subIter - Number of iterations to solve subproblem" << std::endl;
452 hist << std::string(114,
'-') << std::endl;
455 hist << std::setw(6) << std::left <<
"iter";
456 hist << std::setw(15) << std::left <<
"fval";
457 hist << std::setw(15) << std::left <<
"cnorm";
458 hist << std::setw(15) << std::left <<
"gLnorm";
459 hist << std::setw(15) << std::left <<
"snorm";
460 hist << std::setw(10) << std::left <<
"penalty";
461 hist << std::setw(10) << std::left <<
"feasTol";
462 hist << std::setw(10) << std::left <<
"optTol";
463 hist << std::setw(8) << std::left <<
"#fval";
464 hist << std::setw(8) << std::left <<
"#grad";
465 hist << std::setw(8) << std::left <<
"#cval";
466 hist << std::setw(8) << std::left <<
"subIter";
474 std::stringstream hist;
475 hist << std::endl <<
" Augmented Lagrangian Solver";
477 hist <<
"Subproblem Solver: " <<
subStep_ << std::endl;
484 std::stringstream hist;
485 hist << std::scientific << std::setprecision(6);
486 if ( algo_state.
iter == 0 ) {
492 if ( algo_state.
iter == 0 ) {
494 hist << std::setw(6) << std::left << algo_state.
iter;
495 hist << std::setw(15) << std::left << algo_state.
value;
496 hist << std::setw(15) << std::left << algo_state.
cnorm;
497 hist << std::setw(15) << std::left << algo_state.
gnorm;
498 hist << std::setw(15) << std::left <<
" ";
499 hist << std::scientific << std::setprecision(2);
500 hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
507 hist << std::setw(6) << std::left << algo_state.
iter;
508 hist << std::setw(15) << std::left << algo_state.
value;
509 hist << std::setw(15) << std::left << algo_state.
cnorm;
510 hist << std::setw(15) << std::left << algo_state.
gnorm;
511 hist << std::setw(15) << std::left << algo_state.
snorm;
512 hist << std::scientific << std::setprecision(2);
513 hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
516 hist << std::scientific << std::setprecision(6);
517 hist << std::setw(8) << std::left << algo_state.
nfval;
518 hist << std::setw(8) << std::left << algo_state.
ngrad;
519 hist << std::setw(8) << std::left << algo_state.
ncval;
Real feasDecreaseExponent_
Provides the interface to evaluate objective functions.
ROL::Ptr< Vector< Real > > x_
Provides the interface to evaluate the augmented Lagrangian.
void initialize(Vector< Real > &x, const Vector< Real > &g, Vector< Real > &l, const Vector< Real > &c, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Initialize step with equality constraint.
virtual const Vector & dual() const
Return dual representation of \(\mathtt{*this}\), for example, the result of applying a Riesz map, or change of basis, or change of memory layout.
virtual void scale(const Real alpha)=0
Compute \(y \leftarrow \alpha y\) where \(y = \mathtt{*this}\).
virtual ROL::Ptr< Vector > clone() const =0
Clone to make a new (uninitialized) vector.
virtual int dimension() const
Return dimension of the vector space.
virtual ROL::Ptr< Vector > basis(const int i) const
Return i-th basis vector.
std::string printHeader(void) const
Print iterate header.
virtual void plus(const Vector &x)=0
Compute \(y \leftarrow y + x\), where \(y = \mathtt{*this}\).
virtual void axpy(const Real alpha, const Vector &x)
Compute \(y \leftarrow \alpha x + y\) where \(y = \mathtt{*this}\).
bool isActivated(void) const
Check if bounds are on.
virtual int getNumberConstraintEvaluations(void) const
Provides the interface to compute optimization steps.
AugmentedLagrangianStep(ROL::ParameterList &parlist)
Real feasIncreaseExponent_
Real feasToleranceInitial_
Contains definitions of custom data types in ROL.
virtual void update(const Vector< Real > &x, bool flag=true, int iter=-1)
Update bounds.
virtual Real getObjectiveValue(const Vector< Real > &x)
Defines the linear algebra or vector space interface.
~AugmentedLagrangianStep()
Provides the interface to compute augmented Lagrangian steps.
std::string printName(void) const
Print step name.
Real computeGradient(Vector< Real > &g, const Vector< Real > &x, const Real mu, Objective< Real > &obj, BoundConstraint< Real > &bnd)
ROL::Ptr< BoundConstraint< Real > > bnd_
Real optIncreaseExponent_
virtual void reset(const Vector< Real > &multiplier, const Real penaltyParameter)
State for algorithm class. Will be used for restarts.
ROL::ParameterList parlist_
ROL::Ptr< Algorithm< Real > > algo_
ROL::Ptr< StepState< Real > > getState(void)
Real minPenaltyLowerBound_
virtual void gradient(Vector< Real > &g, const Vector< Real > &x, Real &tol)
Compute gradient.
Real optDecreaseExponent_
Real minPenaltyReciprocal_
ROL::Ptr< Vector< Real > > iterateVec
virtual void update(const Vector< Real > &x, bool flag=true, int iter=-1)
Update objective function.
void compute(Vector< Real > &s, const Vector< Real > &x, const Vector< Real > &l, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Compute step (equality and bound constraints).
Real optToleranceInitial_
Provides the interface to apply upper and lower bound constraints.
void initialize(Vector< Real > &x, const Vector< Real > &g, Vector< Real > &l, const Vector< Real > &c, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Initialize step with equality and bound constraints.
virtual void applyAdjointJacobian(Vector< Real > &ajv, const Vector< Real > &v, const Vector< Real > &x, Real &tol)
Apply the adjoint of the constraint Jacobian at \(x\), \(c'(x)^* \in L(\mathcal{C}^*, \mathcal{X}^*)\), to the vector \(v\).
void update(Vector< Real > &x, Vector< Real > &l, const Vector< Real > &s, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Update step, if successful (equality and bound constraints).
virtual int getNumberGradientEvaluations(void) const
std::string print(AlgorithmState< Real > &algo_state, bool pHeader=false) const
Print iterate status.
ROL::Ptr< Vector< Real > > lagmultVec
virtual int getNumberFunctionEvaluations(void) const
void setScaling(const Real fscale, const Real cscale=1.0)
const Ptr< const Vector< Real > > getObjectiveGradient(const Vector< Real > &x)
virtual void set(const Vector &x)
Set \(y \leftarrow x\) where \(y = \mathtt{*this}\).
virtual Real norm() const =0
Returns \(\|y\|\) where \(y = \mathtt{*this}\).
virtual void getConstraintVec(Vector< Real > &c, const Vector< Real > &x)
void update(Vector< Real > &x, Vector< Real > &l, const Vector< Real > &s, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Update step, if successful (equality constraint).
void update(Vector< Real > &x, const Vector< Real > &s, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Update step, for bound constraints; here only to satisfy the interface requirements, does nothing, needs refactoring.
Defines the general constraint operator interface.
virtual void project(Vector< Real > &x)
Project optimization variables onto the bounds.
void compute(Vector< Real > &s, const Vector< Real > &x, const Vector< Real > &l, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Compute step (equality constraint).
void compute(Vector< Real > &s, const Vector< Real > &x, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Compute step for bound constraints; here only to satisfy the interface requirements, does nothing, needs refactoring.