#ifndef ROL_TYPEP_PROXGRADIENTALGORITHM_DEF_HPP
#define ROL_TYPEP_PROXGRADIENTALGORITHM_DEF_HPP

namespace ROL {
namespace TypeP {
template<typename Real>
ProxGradientAlgorithm<Real>::ProxGradientAlgorithm(ParameterList &list) {
  // Parse parameter list
  ParameterList &lslist = list.sublist("Step").sublist("Line Search");
  maxit_        = lslist.get("Function Evaluation Limit",                      20);
  alpha0_       = lslist.get("Initial Step Size",                             1.0);
  normAlpha_    = lslist.get("Normalize Initial Step Size",                 false);
  alpha0bnd_    = lslist.get("Lower Bound for Initial Step Size",            1e-4);
  useralpha_    = lslist.get("User Defined Initial Step Size",              false);
  usePrevAlpha_ = lslist.get("Use Previous Step Length as Initial Guess",   false);
  c1_           = lslist.get("Sufficient Decrease Tolerance",                1e-4);
  maxAlpha_     = lslist.get("Maximum Step Size",                         alpha0_);
  useAdapt_     = lslist.get("Use Adaptive Step Size Selection",             true);
  initProx_     = lslist.get("Apply Prox to Initial Guess",                 false);
  rhodec_       = lslist.sublist("Line-Search Method").get("Backtracking Rate", 0.5);
  rhoinc_       = lslist.sublist("Line-Search Method").get("Increase Rate",     2.0);
  t0_           = list.sublist("Status Test").get("Gradient Scale",             1.0);
  verbosity_    = list.sublist("General").get("Output Level",                     0);
  writeHeader_  = verbosity_ > 2;
}
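// Hedged example: a minimal ParameterList that exercises the options parsed
// above.  The key names are copied verbatim from this constructor; how the
// list is built and handed to the algorithm follows the usual ROL pattern
// and is an assumption, not part of this file.
//
//   ROL::ParameterList list;
//   ROL::ParameterList &ls = list.sublist("Step").sublist("Line Search");
//   ls.set("Initial Step Size",                1.0);
//   ls.set("Use Adaptive Step Size Selection", true);
//   ls.sublist("Line-Search Method").set("Backtracking Rate", 0.5);
//   list.sublist("General").set("Output Level", 1);
//   ROL::TypeP::ProxGradientAlgorithm<double> algo(list);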
template<typename Real>
void ProxGradientAlgorithm<Real>::initialize(Vector<Real>       &x,
                                             const Vector<Real> &g,
                                             Objective<Real>    &sobj,
                                             Objective<Real>    &nobj,
                                             Vector<Real>       &px,
                                             Vector<Real>       &dg,
                                             std::ostream       &outStream) {
  const Real one(1);
  Real ftol = std::sqrt(ROL_EPSILON<Real>());
  // Initialize data
  TypeP::Algorithm<Real>::initialize(x,g);
  // Apply the prox operator to the initial guess if requested
  if (initProx_) {
    nobj.prox(*state_->iterateVec,x,state_->searchSize,ftol); state_->nprox++;
    x.set(*state_->iterateVec);
  }
  // Evaluate the objective components and the gradient of the smooth term
  sobj.update(x,UpdateType::Initial,state_->iter);
  nobj.update(x,UpdateType::Initial,state_->iter);
  state_->svalue = sobj.value(x,ftol); state_->nsval++;
  state_->nvalue = nobj.value(x,ftol); state_->nnval++;
  state_->value  = state_->svalue + state_->nvalue;
  sobj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
  dg.set(state_->gradientVec->dual());
  // Estimate the initial step size unless the user supplied one
  if (!useralpha_) {
    bool flag = (maxAlpha_ == alpha0_);
    // Evaluate the smooth objective at the trial point prox(x - t0*dg)
    pgstep(px, *state_->stepVec, nobj, x, dg, t0_, ftol);
    state_->snorm = state_->stepVec->norm();
    Real snew = sobj.value(px,ftol); state_->nsval++;
    // Secant-type estimate of the inverse curvature along the step
    Real gs = state_->gradientVec->apply(*state_->stepVec);
    alpha0_ = (state_->snorm * state_->snorm) / std::abs(snew - state_->svalue - gs);
    alpha0_ = (alpha0_ > alpha0bnd_) ? alpha0_ : one;
    if (flag) maxAlpha_ = alpha0_;
  }
  // Normalize the initial step size by the gradient norm if requested
  if (normAlpha_)
    alpha0_ /= state_->gradientVec->norm();
  state_->searchSize = alpha0_;
  // Evaluate the proximal gradient step
  pgstep(*state_->iterateVec, *state_->stepVec, nobj, x, dg, state_->searchSize, ftol);
  state_->snorm = state_->stepVec->norm();
  state_->gnorm = state_->snorm / state_->searchSize;
}
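// Hedged note: pgstep (inherited from TypeP::Algorithm) is assumed to
// compute the proximal gradient step
//   s = prox_{t*nobj}(x - t*dg) - x,
// writing the trial point and the step into its first two arguments.  Under
// that convention, gnorm = ||s||/t above is the norm of the prox-gradient
// map, which reduces to the ordinary gradient norm when nobj = 0.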
template<typename Real>
void ProxGradientAlgorithm<Real>::run( Vector<Real>       &x,
                                       const Vector<Real> &g,
                                       Objective<Real>    &sobj,
                                       Objective<Real>    &nobj,
                                       std::ostream       &outStream ) {
  const Real one(1);
  Real tol(std::sqrt(ROL_EPSILON<Real>()));
  // Initialize proximal gradient data
  Ptr<Vector<Real>> px = x.clone(), pxP = x.clone(), dg = x.clone();
  initialize(x,g,sobj,nobj,*px,*dg,outStream);
  Real strial(0), ntrial(0), Ftrial(0), Qk(0);
  Real strialP(0), ntrialP(0), FtrialP(0), alphaP(0);
  Real snorm(state_->snorm), searchSize(state_->searchSize);
  int  ls_nfval(0);
  bool incAlpha = false, accept = true;

  // Output
  if (verbosity_ > 0) writeOutput(outStream,true);

  // Iterate proximal gradient with bidirectional line search
  while (status_->check(*state_)) {
    // Perform the line search starting from the current search size
    state_->searchSize = searchSize;
    // Compute the objective values at the trial point
    sobj.update(*state_->iterateVec,UpdateType::Trial);
    strial = sobj.value(*state_->iterateVec,tol);
    nobj.update(*state_->iterateVec,UpdateType::Trial);
    ntrial = nobj.value(*state_->iterateVec,tol);
    Ftrial = strial + ntrial;
    ls_nfval = 1;
    // Compute the decrease indicator
    Qk = state_->gradientVec->apply(*state_->stepVec) + ntrial - state_->nvalue;
    incAlpha = (Ftrial - state_->value <= c1_*Qk);
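    // Hedged note: with F = sobj + nobj and s = stepVec, the model decrease
    // used here is Qk = <grad sobj(x), s> + nobj(x+s) - nobj(x), so the test
    // F(x+s) - F(x) <= c1*Qk is the standard proximal-gradient analogue of
    // the Armijo sufficient decrease condition.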
    if (verbosity_ > 1) {
      outStream << "  In TypeP::ProxGradientAlgorithm: Line Search" << std::endl;
      outStream << "    Step size:                        " << state_->searchSize   << std::endl;
      outStream << "    Trial smooth value:               " << strial               << std::endl;
      outStream << "    Trial nonsmooth value:            " << ntrial               << std::endl;
      outStream << "    Computed reduction:               " << state_->value-Ftrial << std::endl;
      outStream << "    Dot product of gradient and step: " << Qk                   << std::endl;
      outStream << "    Sufficient decrease bound:        " << -Qk*c1_              << std::endl;
      outStream << "    Number of function evaluations:   " << ls_nfval             << std::endl;
      outStream << "    Increase alpha?:                  " << incAlpha             << std::endl;
    }
    // Adaptively increase the step size while sufficient decrease holds
    if (incAlpha && useAdapt_) {
      ntrialP = ROL_INF<Real>();
      strialP = ROL_INF<Real>();
      FtrialP = ntrialP + strialP;
      while ( Ftrial - state_->value <= c1_*Qk
           && Ftrial <= FtrialP
           && state_->searchSize < maxAlpha_
           && ls_nfval < maxit_ ) {
        // Save the current trial point, objective values, and step size
        pxP->set(*state_->iterateVec);
        strialP = strial;
        ntrialP = ntrial;
        FtrialP = Ftrial;
        alphaP  = state_->searchSize;
        // Increase the step size and recompute the proximal gradient step
        state_->searchSize *= rhoinc_;
        state_->searchSize  = std::min(state_->searchSize,maxAlpha_);
        pgstep(*state_->iterateVec, *state_->stepVec, nobj, x, *dg, state_->searchSize, tol);
        // Compute the objective values at the new trial point
        sobj.update(*state_->iterateVec,UpdateType::Trial);
        strial = sobj.value(*state_->iterateVec,tol);
        nobj.update(*state_->iterateVec,UpdateType::Trial);
        ntrial = nobj.value(*state_->iterateVec,tol);
        Ftrial = strial + ntrial;
        ls_nfval++;
        // Recompute the decrease indicator
        Qk = state_->gradientVec->apply(*state_->stepVec) + ntrial - state_->nvalue;
        if (verbosity_ > 1) {
          outStream << std::endl;
          outStream << "    Step size:                        " << state_->searchSize   << std::endl;
          outStream << "    Trial smooth value:               " << strial               << std::endl;
          outStream << "    Trial nonsmooth value:            " << ntrial               << std::endl;
          outStream << "    Computed reduction:               " << state_->value-Ftrial << std::endl;
          outStream << "    Dot product of gradient and step: " << Qk                   << std::endl;
          outStream << "    Sufficient decrease bound:        " << -Qk*c1_              << std::endl;
          outStream << "    Number of function evaluations:   " << ls_nfval             << std::endl;
        }
      }
      // Revert to the previous trial point if the last increase failed
      if (Ftrial - state_->value > c1_*Qk || Ftrial > FtrialP) {
        state_->iterateVec->set(*pxP);
        strial = strialP;
        ntrial = ntrialP;
        Ftrial = FtrialP;
        state_->searchSize = alphaP;
        state_->stepVec->set(*state_->iterateVec);
        state_->stepVec->axpy(-one,x);
      }
    }
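    // Hedged note: together, the adaptive branch above and the backtracking
    // branch below implement the "bidirectional" line search named in
    // writeName(): the step size grows by rhoinc_ while sufficient decrease
    // holds and shrinks by rhodec_ otherwise, within the evaluation budget
    // maxit_.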
    // Otherwise, backtrack until sufficient decrease is achieved
    else {
      while ( Ftrial - state_->value > c1_*Qk && ls_nfval < maxit_ ) {
        // Decrease the step size and recompute the proximal gradient step
        state_->searchSize *= rhodec_;
        pgstep(*state_->iterateVec, *state_->stepVec, nobj, x, *dg, state_->searchSize, tol);
        // Compute the objective values at the new trial point
        sobj.update(*state_->iterateVec,UpdateType::Trial);
        strial = sobj.value(*state_->iterateVec,tol);
        nobj.update(*state_->iterateVec,UpdateType::Trial);
        ntrial = nobj.value(*state_->iterateVec,tol);
        Ftrial = strial + ntrial;
        ls_nfval++;
        // Recompute the decrease indicator
        Qk = state_->gradientVec->apply(*state_->stepVec) + ntrial - state_->nvalue;
        if (verbosity_ > 1) {
          outStream << std::endl;
          outStream << "    Step size:                        " << state_->searchSize   << std::endl;
          outStream << "    Trial smooth value:               " << strial               << std::endl;
          outStream << "    Trial nonsmooth value:            " << ntrial               << std::endl;
          outStream << "    Computed reduction:               " << state_->value-Ftrial << std::endl;
          outStream << "    Dot product of gradient and step: " << Qk                   << std::endl;
          outStream << "    Sufficient decrease bound:        " << -Qk*c1_              << std::endl;
          outStream << "    Number of function evaluations:   " << ls_nfval             << std::endl;
        }
      }
    }
    state_->nsval += ls_nfval;
    state_->nnval += ls_nfval;

    // Compute the norm of the accepted step
    state_->snorm = state_->stepVec->norm();

    // Update the iterate
    state_->iter++;
    x.set(*state_->iterateVec);

    // Update the objective function values
    state_->svalue = strial;
    state_->nvalue = ntrial;
    state_->value  = Ftrial;
    sobj.update(x,UpdateType::Accept,state_->iter);
    nobj.update(x,UpdateType::Accept,state_->iter);

    // Compute the gradient of the smooth term at the new iterate
    sobj.gradient(*state_->gradientVec,x,tol); state_->ngrad++;
    dg->set(state_->gradientVec->dual());

    // Compute the proximal gradient step for the next iteration
    searchSize = state_->searchSize;
    if (!usePrevAlpha_ && !useAdapt_) searchSize = alpha0_;
    pgstep(*state_->iterateVec, *state_->stepVec, nobj, x, *dg, searchSize, tol);
    snorm = state_->stepVec->norm();
    state_->gnorm = snorm / searchSize;

    // Update output
    if (verbosity_ > 0) writeOutput(outStream,writeHeader_);
  }
  if (verbosity_ > 0) TypeP::Algorithm<Real>::writeExitStatus(outStream);
}
template<typename Real>
void ProxGradientAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  if (verbosity_ > 1) {
    os << std::string(109,'-') << std::endl;
    os << "Proximal gradient descent";
    os << " status output definitions" << std::endl << std::endl;
    os << "  iter     - Number of iterates (steps taken)" << std::endl;
    os << "  value    - Objective function value" << std::endl;
    os << "  gnorm    - Norm of the proximal gradient with parameter alpha" << std::endl;
    os << "  snorm    - Norm of the step (update to optimization vector)" << std::endl;
    os << "  alpha    - Line search step length" << std::endl;
    os << "  #sval    - Cumulative number of times the smooth objective function was evaluated" << std::endl;
    os << "  #nval    - Cumulative number of times the nonsmooth objective function was evaluated" << std::endl;
    os << "  #grad    - Cumulative number of times the gradient was computed" << std::endl;
    os << "  #nprox   - Cumulative number of times the proximal operator was computed" << std::endl;
    os << std::string(109,'-') << std::endl;
  }

  os << "  ";
  os << std::setw(6)  << std::left << "iter";
  os << std::setw(15) << std::left << "value";
  os << std::setw(15) << std::left << "gnorm";
  os << std::setw(15) << std::left << "snorm";
  os << std::setw(15) << std::left << "alpha";
  os << std::setw(10) << std::left << "#sval";
  os << std::setw(10) << std::left << "#nval";
  os << std::setw(10) << std::left << "#grad";
  os << std::setw(10) << std::left << "#nprox";
  os << std::endl;
  os.flags(osFlags);
}
template<typename Real>
void ProxGradientAlgorithm<Real>::writeName( std::ostream& os ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::endl << "Proximal Gradient Descent with Bidirectional Line Search (Type P)" << std::endl;
  os.flags(osFlags);
}
template<typename Real>
void ProxGradientAlgorithm<Real>::writeOutput( std::ostream& os, bool write_header ) const {
  std::ios_base::fmtflags osFlags(os.flags());
  os << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( write_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    os << "  ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << "---";
    os << std::setw(15) << std::left << "---";
    os << std::setw(10) << std::left << state_->nsval;
    os << std::setw(10) << std::left << state_->nnval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nprox;
    os << std::endl;
  }
  else {
    os << "  ";
    os << std::setw(6)  << std::left << state_->iter;
    os << std::setw(15) << std::left << state_->value;
    os << std::setw(15) << std::left << state_->gnorm;
    os << std::setw(15) << std::left << state_->snorm;
    os << std::setw(15) << std::left << state_->searchSize;
    os << std::setw(10) << std::left << state_->nsval;
    os << std::setw(10) << std::left << state_->nnval;
    os << std::setw(10) << std::left << state_->ngrad;
    os << std::setw(10) << std::left << state_->nprox;
    os << std::endl;
  }
  os.flags(osFlags);
}
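// Hedged usage sketch: minimizing F(x) = f(x) + n(x) with a smooth f and a
// prox-friendly n.  The objective classes shown (MySmoothObjective,
// MyL1Objective) are hypothetical stand-ins; only run()'s signature is taken
// from this file.
//
//   auto x = ROL::makePtr<ROL::StdVector<double>>(dim, 0.0);
//   MySmoothObjective<double> sobj;  // implements value() and gradient()
//   MyL1Objective<double>     nobj;  // implements value() and prox()
//   ROL::TypeP::ProxGradientAlgorithm<double> algo(list);
//   algo.run(*x, x->dual(), sobj, nobj, std::cout);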
} // namespace TypeP
} // namespace ROL

#endif