#ifndef ROL_MOREAUYOSIDAPENALTYSTEP_H
#define ROL_MOREAUYOSIDAPENALTYSTEP_H

#include "ROL_ParameterList.hpp"

class AugmentedLagrangianStep;
 
class MoreauYosidaPenaltyStep : public Step<Real> {
 
  ROL::Ptr<Algorithm<Real>>       algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<Vector<Real>>          g_;
  ROL::Ptr<Vector<Real>>          l_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;
 
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());

    myPen.gradient(*(state->gradientVec), x, zerotol);
    state->gradientVec->plus(*g_);
    gLnorm_ = (state->gradientVec)->norm();

    con.value(*(state->constraintVec),x, zerotol);
    algo_state.cnorm = (state->constraintVec)->norm();
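Before the plus(*g_) call above, g_ presumably holds the constraint contribution to the Lagrangian gradient. A hedged sketch of how that term could be formed with the constraint interface used in this header (an assumption; the corresponding lines are elided from this excerpt):

    // Assumption, not copied from the elided lines: populate g_ with the
    // adjoint-Jacobian term g_ = c'(x)^* l, so that gradientVec accumulates the
    // gradient of L(x,l) = f(x) + <l, c(x)> plus the Moreau-Yosida penalty term.
    con.applyAdjointJacobian(*g_, l, x, zerotol);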
 
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());

    myPen.gradient(*(state->gradientVec), x, zerotol);
    gLnorm_ = (state->gradientVec)->norm();

    algo_state.cnorm = static_cast<Real>(0);
 
      x_(ROL::nullPtr), g_(ROL::nullPtr), l_(ROL::nullPtr),
 
    Real ten(10), oem6(1.e-6), oem8(1.e-8);
    ROL::ParameterList& steplist = parlist.sublist("Step").sublist("Moreau-Yosida Penalty");

    tau_ = steplist.get("Penalty Parameter Growth Factor",ten);
    print_ = steplist.sublist("Subproblem").get("Print History",false);

    Real gtol = steplist.sublist("Subproblem").get("Optimality Tolerance",oem8);
    Real ctol = steplist.sublist("Subproblem").get("Feasibility Tolerance",oem8);
    Real stol = oem6*std::min(gtol,ctol);
    int maxit = steplist.sublist("Subproblem").get("Iteration Limit",1000);
    parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
    parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
    parlist_.sublist("Status Test").set("Step Tolerance",       stol);
    parlist_.sublist("Status Test").set("Iteration Limit",      maxit);

    stepname_ = steplist.sublist("Subproblem").get("Step Type","Composite Step");
 
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
 
    Ptr<Objective<Real>> penObj;

      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);

      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);

      penObj    = makePtrFromRef(obj);

    x_->set(x); l_->set(l);
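The branch conditions around the three assignments above are elided from this excerpt; judging from the objects they construct, the subproblem objective penObj is selected according to the configured "Step Type". A hedged skeleton of that dispatch (the string comparisons are an assumption, not copied from the source):

    // Assumed structure only; the actual condition is elided in this excerpt.
    if ( stepname_ == "Augmented Lagrangian" ) {
      // wrap obj and con in an AugmentedLagrangian objective (as above)
    }
    else if ( stepname_ == "Fletcher" ) {
      // wrap obj and con in a Fletcher penalty objective (as above)
    }
    else {
      penObj = makePtrFromRef(obj);  // otherwise, use the raw objective
    }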
 
    state->descentVec->set(s);
      state->searchSize *= tau_;
    algo_state.ncval += (algo_->getState())->ncval;

    state->descentVec->set(s);
      state->searchSize *= tau_;
 
    std::stringstream hist;
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
      hist << std::setw(15) << std::left << "cnorm";
    hist << std::setw(15) << std::left << "gnorm";
    hist << std::setw(15) << std::left << "ifeas";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(8) << std::left << "#fval";
    hist << std::setw(8) << std::left << "#grad";
      hist << std::setw(8) << std::left << "#cval";
    hist << std::setw(8) << std::left << "subIter";
 
    std::stringstream hist;
    hist << "\n" << " Moreau-Yosida Penalty solver";
 
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {

    if ( algo_state.iter == 0 ) {
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
        hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;

      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
        hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8) << std::left << algo_state.nfval;
      hist << std::setw(8) << std::left << algo_state.ngrad;
        hist << std::setw(8) << std::left << algo_state.ncval;
 