#ifndef ROL_INTERIORPOINTSTEP_H
#define ROL_INTERIORPOINTSTEP_H
// Forward declaration of the augmented Lagrangian step used as one of the
// subproblem solver choices.
template <class Real>
class AugmentedLagrangianStep;

template <class Real>
class InteriorPointStep : public Step<Real> {

  // Subproblem solver and bound constraint
  ROL::Ptr<Algorithm<Real> >       algo_;
  ROL::Ptr<BoundConstraint<Real> > bnd_;

  // Work vectors: primal iterate, gradient, multiplier, and constraint values
  ROL::Ptr<Vector<Real> > x_;
  ROL::Ptr<Vector<Real> > g_;
  ROL::Ptr<Vector<Real> > l_;
  ROL::Ptr<Vector<Real> > c_;
  // Constructor: InteriorPointStep(ROL::ParameterList &parlist).
  // Reads the interior-point, subproblem, and status-test options.
  using ROL::ParameterList;

  verbosity_ = parlist.sublist("General").get("Print Verbosity",0);

  ParameterList& iplist = parlist.sublist("Step").sublist("Interior Point");
  mu_    = iplist.get("Initial Barrier Penalty",1.0);
  mumin_ = iplist.get("Minimum Barrier Penalty",1.e-4);
  mumax_ = iplist.get("Maximum Barrier Penalty",1e8);
  rho_   = iplist.get("Barrier Penalty Reduction Factor",0.5);

  // Subproblem solver options; the step tolerance is derived from the
  // optimality and feasibility tolerances.
  print_     = iplist.sublist("Subproblem").get("Print History",false);
  Real gtol  = iplist.sublist("Subproblem").get("Optimality Tolerance",1e-8);
  Real ctol  = iplist.sublist("Subproblem").get("Feasibility Tolerance",1e-8);
  Real stol  = static_cast<Real>(1e-6)*std::min(gtol,ctol);
  int  maxit = iplist.sublist("Subproblem").get("Iteration Limit",1000);
  parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
  parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
  parlist_.sublist("Status Test").set("Step Tolerance",       stol);
  parlist_.sublist("Status Test").set("Iteration Limit",      maxit);

  stepname_ = iplist.sublist("Subproblem").get("Step Type","Composite Step");
  // initialize() for equality-constrained problems: allocate step-state
  // storage, recover the penalized objective and constraint, set the barrier
  // penalty, and reset the evaluation counters.
  state->descentVec    = x.clone();
  state->gradientVec   = g.clone();
  state->constraintVec = c.clone();

  auto& ipobj = dynamic_cast<IPOBJ&>(obj);
  auto& ipcon = dynamic_cast<IPCON&>(con);

  ipobj.updatePenalty(mu_);

  algo_state.nfval = 0;
  algo_state.ncval = 0;
  algo_state.ngrad = 0;

  // Roll the evaluation counts accumulated so far into the algorithm state
  algo_state.nfval += ipobj.getNumberFunctionEvaluations();
  algo_state.ngrad += ipobj.getNumberGradientEvaluations();
  algo_state.ncval += ipcon.getNumberConstraintEvaluations();
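  // Note (based on the type aliases used in this header): IPOBJ is
  // InteriorPoint::PenalizedObjective<Real> and IPCON is
  // Constraint_Partitioned<Real>, so the casts above recover the
  // barrier-penalized objective and the partitioned constraint (equalities
  // plus slack-reformulated inequalities). That is why the function,
  // gradient, and constraint evaluation counts are read back from them.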
  // initialize() for problems without equality constraints (bound-constrained
  // case): only objective storage and counters are needed.
  state->descentVec  = x.clone();
  state->gradientVec = g.clone();

  auto& ipobj = dynamic_cast<IPOBJ&>(obj);

  algo_state.nfval = 0;
  algo_state.ncval = 0;
  algo_state.ngrad = 0;

  Real zerotol = std::sqrt(ROL_EPSILON<Real>());

  // No equality constraint, so the constraint norm is identically zero
  algo_state.cnorm = static_cast<Real>(0);

  algo_state.nfval += ipobj.getNumberFunctionEvaluations();
  algo_state.ngrad += ipobj.getNumberGradientEvaluations();

  bnd_ = ROL::makePtr<BoundConstraint<Real>>();
  // Build the penalty objective handed to the subproblem solver. The choice
  // is keyed on the subproblem "Step Type": an augmented Lagrangian, a
  // Fletcher penalty, or the objective itself.
  Ptr<Objective<Real>> penObj;
  if (stepname_ == "Augmented Lagrangian") {
    Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
    Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
    penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);
  }
  else if (stepname_ == "Fletcher") {
    Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
    Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
    penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);
  }
  else {
    penObj = makePtrFromRef(obj);
  }
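  // Note: makePtrFromRef wraps an existing object in a non-owning ROL::Ptr,
  // so the penalty objective shares obj and con with the caller rather than
  // copying or taking ownership of them.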
  // compute() overloads: load the current iterate (and multiplier) into the
  // subproblem vectors, solve the barrier subproblem, and return the step.
  x_->set(x);
  l_->set(l);

  // The overload that also takes a BoundConstraint simply forwards here
  compute(s,x,l,obj,con,algo_state);

  // The bound-constrained compute() recovers the penalized objective as well
  auto& ipobj = dynamic_cast<IPOBJ&>(obj);

  s.set(*x_); s.axpy(static_cast<Real>(-1),x);
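  // The step returned to the caller is therefore s = x_sub - x: the
  // difference between the subproblem solution held in x_ once the barrier
  // subproblem has been solved and the iterate the step was computed from.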
  // update() for equality-constrained problems: record the accepted step,
  // push the current barrier penalty into the penalized objective, and
  // refresh the objective, constraint, and Lagrangian-gradient data in the
  // step state.
  auto& ipobj = dynamic_cast<IPOBJ&>(obj);
  auto& ipcon = dynamic_cast<IPCON&>(con);

  ipobj.updatePenalty(mu_);

  state->descentVec->set(s);

  // Evaluate the penalized objective (updating its counters), then record
  // the underlying objective value it reports
  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
  algo_state.value = ipobj.value(x,zerotol);
  algo_state.value = ipobj.getObjectiveValue();

  ipcon.value(*c_,x,zerotol);
  state->constraintVec->set(*c_);

  ipobj.gradient(*g_,x,zerotol);
  state->gradientVec->set(*g_);

  ipcon.applyAdjointJacobian(*g_,*l_,x,zerotol);
  state->gradientVec->plus(*g_);

  algo_state.cnorm = state->constraintVec->norm();

  algo_state.nfval += ipobj.getNumberFunctionEvaluations();
  algo_state.ngrad += ipobj.getNumberGradientEvaluations();
  algo_state.ncval += ipcon.getNumberConstraintEvaluations();
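  // Assumed reading of the gradient assembly above: after these calls,
  // state->gradientVec holds grad f_mu(x) + J(x)^T l, i.e. the gradient of
  // the barrier objective plus the adjoint constraint Jacobian applied to
  // the multiplier; its norm is what the status output labels gLnorm.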
  // update() overloads taking a bound constraint. The first delegates to the
  // equality-constrained version above and then measures optimality through
  // a projected-gradient displacement stored in x_.
  update(x,l,s,obj,con,algo_state);

  x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
  x_->axpy(static_cast<Real>(-1),x);

  // update() for problems without equality constraints: push the barrier
  // penalty into the objective, record the step and objective data, and
  // form the same projected-gradient displacement.
  auto& ipobj = dynamic_cast<IPOBJ&>(obj);

  ipobj.updatePenalty(mu_);

  state->descentVec->set(s);

  Real zerotol = std::sqrt(ROL_EPSILON<Real>());

  algo_state.value = ipobj.value(x,zerotol);
  algo_state.value = ipobj.getObjectiveValue();

  ipobj.gradient(*g_,x,zerotol);
  state->gradientVec->set(*g_);

  x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
  x_->axpy(static_cast<Real>(-1),x);
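  // Assumed reading of the x_ updates in both overloads above: x_ starts as
  // a copy of x, the dual of the gradient is subtracted, the result is
  // projected onto the bound constraint, and x is subtracted again, so the
  // optimality measure gnorm is the norm of the projected-gradient
  // displacement || P(x - grad f_mu(x)) - x ||.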
  algo_state.nfval += ipobj.getNumberFunctionEvaluations();
  algo_state.ngrad += ipobj.getNumberGradientEvaluations();
  // printHeader(): describe and lay out the columns of the status output.
  std::stringstream hist;

  hist << std::string(116,'-') << "\n";
  hist << "Interior Point status output definitions\n\n";
  hist << "  IPiter  - Number of interior point steps taken\n";
  hist << "  SPiter  - Number of subproblem solver iterations\n";
  hist << "  penalty - Penalty parameter multiplying the barrier objective\n";
  hist << "  fval    - Objective function value\n";
  hist << "  cnorm   - Norm of the composite constraint\n";
  hist << "  gLnorm  - Norm of the Lagrangian's gradient\n";
  hist << "  gnorm   - Norm of the projected objective gradient\n";
  hist << "  snorm   - Norm of step (update to optimization and slack vector)\n";
  hist << "  #fval   - Number of objective function evaluations\n";
  hist << "  #grad   - Number of gradient evaluations\n";
  hist << "  #cval   - Number of composite constraint evaluations\n";
  hist << std::string(116,'-') << "\n";

  hist << std::setw(9)  << std::left << "IPiter";
  hist << std::setw(9)  << std::left << "SPiter";
  hist << std::setw(15) << std::left << "penalty";
  hist << std::setw(15) << std::left << "fval";
  hist << std::setw(15) << std::left << "cnorm";
  hist << std::setw(15) << std::left << "gLnorm";
  hist << std::setw(15) << std::left << "gnorm";
  hist << std::setw(15) << std::left << "snorm";
  hist << std::setw(8)  << std::left << "#fval";
  hist << std::setw(8)  << std::left << "#grad";
  hist << std::setw(8)  << std::left << "#cval";
  // printName(): print the solver name.
  std::stringstream hist;
  hist << "\n" << "Primal Interior Point Solver\n";
  // print(): format one row of the iteration history.
  std::stringstream hist;
  hist << std::scientific << std::setprecision(6);

  if ( algo_state.iter == 0 ) {
    hist << printName();
  }

  if ( algo_state.iter == 0 ) {
    // Initial row: no step norm or evaluation counts are available yet
    hist << std::setw(9)  << std::left << algo_state.iter;
    hist << std::setw(15) << std::left << mu_;
    hist << std::setw(15) << std::left << algo_state.value;
    hist << std::setw(15) << std::left << algo_state.cnorm;
    hist << std::setw(15) << std::left << algo_state.gnorm;
  }
  else {
    // Subsequent rows also report the step norm and evaluation counts
    hist << std::setw(9)  << std::left << algo_state.iter;
    hist << std::setw(15) << std::left << mu_;
    hist << std::setw(15) << std::left << algo_state.value;
    hist << std::setw(15) << std::left << algo_state.cnorm;
    hist << std::setw(15) << std::left << algo_state.gnorm;
    hist << std::setw(15) << std::left << algo_state.snorm;
    hist << std::setw(8)  << std::left << algo_state.nfval;
    hist << std::setw(8)  << std::left << algo_state.ngrad;
    hist << std::setw(8)  << std::left << algo_state.ncval;
  }
#endif // ROL_INTERIORPOINTSTEP_H