#include "MoochoPack_LineSearchWatchDog_Step.hpp"
#include "MoochoPack_MoochoAlgorithmStepNames.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "IterationPack_print_algorithm_step.hpp"
#include "ConstrainedOptPack_MeritFuncCalc1DQuadratic.hpp"
#include "ConstrainedOptPack_MeritFuncCalcNLP.hpp"
#include "ConstrainedOptPack_print_vector_change_stats.hpp"
#include "ConstrainedOptPack/src/VectorWithNorms.h"
#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixOp.hpp"
#include "DenseLinAlgPack_DVectorClass.hpp"
#include "DenseLinAlgPack_DVectorOp.hpp"
#include "DenseLinAlgPack_DVectorOut.hpp"
#include "DenseLinAlgPack_LinAlgOpPack.hpp"
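// Sentinel value for watch_k_: the step uses the normal (Armijo) line search until
// the KKT errors drop below their thresholds; watch_k_ values of 0, 1, and 2 then
// track which iteration of the watchdog procedure is currently active.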
const int NORMAL_LINE_SEARCH = -1;
namespace LinAlgOpPack {
  using AbstractLinAlgPack::Vp_StMtV;
}
LineSearchWatchDog_Step::LineSearchWatchDog_Step(
  const direct_line_search_ptr_t&   direct_line_search
  ,const merit_func_ptr_t&          merit_func
  ,value_type                       eta
  ,value_type                       opt_kkt_err_threshold
  ,value_type                       feas_kkt_err_threshold
  )
  : direct_line_search_(direct_line_search)
  , merit_func_(merit_func)
  , eta_(eta)
  , opt_kkt_err_threshold_(opt_kkt_err_threshold)
  , feas_kkt_err_threshold_(feas_kkt_err_threshold)
  , watch_k_(NORMAL_LINE_SEARCH)
{}
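// do_step(): performs the (possibly watchdog) line search for the current SQP
// iteration.  Near the solution the watchdog strategy accepts one possibly
// non-monotone full step; if the merit function has not recovered by the next
// iteration, the algorithm backs out to the saved point and line searches from there.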
bool LineSearchWatchDog_Step::do_step(Algorithm& _algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
{
  using DenseLinAlgPack::norm_inf;
  using LinAlgOpPack::V_MtV;
  using ConstrainedOptPack::print_vector_change_stats;
  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLP           &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream        &out   = algo.track().journal_out();
  out << std::boolalpha;
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }
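  // Grab references to the iteration quantities used below: the trial-point
  // quantities at k+1 are created first, then the current values at k are read.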
  DVector          &x_kp1   = s.x().set_k(+1).v();
  value_type       &f_kp1   = s.f().set_k(+1);
  DVector          &c_kp1   = s.c().set_k(+1).v();

  const value_type &f_k     = s.f().get_k(0);
  const DVector    &c_k     = s.c().get_k(0).v();
  const DVector    &x_k     = s.x().get_k(0).v();
  const DVector    &d_k     = s.d().get_k(0).v();
  value_type       &alpha_k = s.alpha().get_k(0);
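  // Directional derivative of the merit function phi along d_k at x_k.  A
  // nonnegative value means d_k is not a descent direction for phi, so no
  // line search can succeed.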
  const value_type Dphi_k = merit_func().deriv();
  if( Dphi_k >= 0 ) {
    throw LineSearchFailure( "LineSearchWatchDog_Step::do_step(...) : "
      "Error, d_k is not a descent direction for the merit function " );
  }

  // Merit function values at the trial point (k+1) and the current point (k).
  value_type &phi_kp1 = s.phi().set_k(+1) = merit_func().value( f_kp1, c_kp1 );
  value_type &phi_k   = s.phi().set_k(0)  = merit_func().value( f_k, c_k );
  // Functor that evaluates phi(f(x),c(x)) at a given x.
  MeritFuncCalcNLP phi_calc( &merit_func(), &nlp );
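  // Activate the watchdog strategy once both KKT error measures fall below
  // their user-set thresholds (i.e. once the iterates are near the solution).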
  if( watch_k_ == NORMAL_LINE_SEARCH ) {
    const value_type
      opt_kkt_err_k  = s.opt_kkt_err().get_k(0),
      feas_kkt_err_k = s.feas_kkt_err().get_k(0);
    if( opt_kkt_err_k <= opt_kkt_err_threshold() && feas_kkt_err_k <= feas_kkt_err_threshold() ) {
      if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
        out << "\nopt_kkt_err_k = " << opt_kkt_err_k << " <= opt_kkt_err_threshold = "
            << opt_kkt_err_threshold() << std::endl
            << "\nfeas_kkt_err_k = " << feas_kkt_err_k << " <= feas_kkt_err_threshold = "
            << feas_kkt_err_threshold() << std::endl
            << "\nSwitching to watchdog linesearch ...\n";
      }
      watch_k_ = 0;
    }
  }
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\nTrial point:\n"
        << "phi_k = " << phi_k << std::endl
        << "Dphi_k = " << Dphi_k << std::endl
        << "phi_kp1 = " << phi_kp1 << std::endl;
  }
  bool ls_success  = true,
       step_return = true;
  const value_type phi_cord    = phi_k + eta() * Dphi_k;
  const bool       accept_step = phi_kp1 <= phi_cord;

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n*** Zeroth watchdog iteration:\n"
        << "\nphi_kp1 = " << phi_kp1 << ( accept_step ? " <= " : " > " )
        << "phi_k + eta * Dphi_k = " << phi_cord << std::endl;
  }
  if( phi_kp1 > phi_cord ) {
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\nAccept this increase for now but watch out next iteration!\n";
    }
    nrm_co_ = norm_inf( c_k );
    // Skip the penalty parameter update next iteration by carrying mu_k forward.
    const value_type mu_k = s.mu().get_k(0);
    s.mu().set_k(+1) = mu_k;
    // Continue with the next watchdog iteration.
    watch_k_ = 1;
  }
  else {
    // The full step gives sufficient reduction, so just take it.
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\nAll is good!\n";
    }
  }
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n*** First watchdog iteration:\n"
        << "\nDo a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
  }
  const DVectorSlice xd[2] = { x_k(), d_k() };
  MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
  ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
    , &alpha_k, &phi_kp1
    , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ? &out : static_cast<std::ostream*>(0) );
  value_type phi_cord = 0;
  bool test1 = false, test2 = false;  // initialized so the journal output below is well defined
  if(    ( test1 = ( phi_k   <= phio_ ) )
      || ( test2 = ( phi_kp1 <= ( phi_cord = phio_ + eta() * Dphio_ ) ) ) )
  {
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\nphi_k = " << phi_k << ( test1 ? " <= " : " > " )
          << "phi_km1 = " << phio_ << std::endl
          << "phi_kp1 = " << phi_kp1 << ( test2 ? " <= " : " > " )
          << "phi_km1 + eta * Dphi_km1 = " << phi_cord << std::endl
          << "This is a sufficient reduction so reset watchdog.\n";
    }
    watch_k_ = 0;  // reinitialize the watchdog
  }
  else if( !( test1 = ( phi_kp1 <= phio_ ) ) ) {
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\nphi_kp1 = " << phi_kp1 << " > phi_km1 = " << phio_ << std::endl
          << "This is not a good reduction in phi so do a linesearch from x_km1\n"
          << "\n* Go back to x_km1: x_kp1 = x_k - alpha_k * d_k ...\n";
    }
    s.alpha().set_k(0) = -1.0;
    s.d().set_k(0).v() = do_;
    s.f().set_k(+1)    = fo_;
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "Output iteration k ...\n";
    }
    algo.track().output_iteration( algo );
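    // Now advance again from the restored point: the saved direction, merit value,
    // and derivative are reused so iteration k+1 can perform the line search that
    // the watchdog skipped.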
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\n* Take the step from x_k = x_km2 to x_kp1 for iteration k (k+1)\n"
          << "Find: x_kp1 = x_k + alpha_k * d_k = x_km2 + alpha_k * d_km2\n ...\n";
    }
    value_type       &alpha_k = s.alpha().set_k(0) = 1.0;
    const DVector    &x_k     = xo_;
    const DVector    &d_k     = s.d().set_k(0).v() = do_;
    const value_type &Dphi_k  = Dphio_;
    const value_type &phi_k   = s.phi().set_k(0) = phio_;

    algo.nlp().set_f( &s.f().set_k(+1) );
    algo.nlp().set_c( &s.c().set_k(+1).v() );
    phi_calc.set_nlp( algo.get_nlp() );
    DVector &x_kp1 = s.x().set_k(+1).v();
    V_VpV( &x_kp1, x_k, d_k );

    value_type &phi_kp1 = s.phi().set_k(+1) = phiop1_;

    const DVectorSlice xd[2] = { x_k(), d_k() };
    MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
    ls_success = direct_line_search().do_line_search(
      phi_calc_1d, phi_k, &alpha_k, &phi_kp1
      , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ? &out : static_cast<std::ostream*>(0) );
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "\nOutput iteration k (k+1) ...\n"
          << "Reinitialize watchdog algorithm\n";
    }
    watch_k_ = 0;  // reinitialize the watchdog
    algo.track().output_iteration( algo );
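    // Jump to the EvalNewPoint step so the algorithm continues from the newly
    // accepted point rather than from the discarded watchdog trial.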
    algo.do_step_next( EvalNewPoint_name );
    return false;
  }
  else {
    // The reduction is acceptable, but demand a real line search next iteration.
    if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
      out << "phi_kp1 = " << phi_kp1 << " <= phi_km1 = " << phio_ << std::endl
          << "\nAccept this step but do a linesearch next iteration!\n";
    }
    // Skip the penalty parameter update next iteration and move on to the
    // second watchdog iteration.
    const value_type mu_k = s.mu().get_k(0);
    s.mu().set_k(+1) = mu_k;
    watch_k_ = 2;
  }
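  // watch_k_ == 2 (second watchdog iteration) and watch_k_ == NORMAL_LINE_SEARCH
  // share the code path below; they differ only in the journal message printed.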
  case NORMAL_LINE_SEARCH:
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    if( watch_k_ == 2 ) {
      out << "\n*** Second watchdog iteration:\n"
          << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
    }
    else {
      out << "\n*** Normal linesearch:\n"
          << "Do a line search to determine x_kp1 = x_k + alpha_k * d_k ...\n";
    }
  }
  const DVectorSlice xd[2] = { x_k(), d_k() };
  MeritFuncCalc1DQuadratic phi_calc_1d( phi_calc, 1, xd, &x_kp1() );
  ls_success = direct_line_search().do_line_search( phi_calc_1d, phi_k
    , &alpha_k, &phi_kp1
    , (int)olevel >= (int)PRINT_ALGORITHM_STEPS ? &out : static_cast<std::ostream*>(0) );
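  // Echo the accepted step length and merit value (and, at higher output levels,
  // the step and the new iterate), then fail hard if the line search did not succeed.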
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nalpha = " << s.alpha().get_k(0) << "\n";
    out << "\nphi_kp1 = " << s.phi().get_k(+1) << "\n";
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nd_k = \n" << s.d().get_k(0)();
    out << "\nx_kp1 = \n" << s.x().get_k(+1)();
  }
  if( !ls_success )
    throw LineSearchFailure( "LineSearchWatchDog_Step::do_step(): Line search failure" );

  return step_return;
}
void LineSearchWatchDog_Step::print_step( const Algorithm& algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss
  , std::ostream& out, const std::string& L ) const
{
  out << L << "*** Use the Watchdog linesearch when near solution.\n"
      << L << "default: opt_kkt_err_threshold = 0.0\n"
      << L << "         feas_kkt_err_threshold = 0.0\n"
      << L << "         eta = 1.0e-4\n"
      << L << "         watch_k = NORMAL_LINE_SEARCH\n"
      << L << "begin definition of NLP merit function phi.value(f(x),c(x)):\n";

  merit_func().print_merit_func( out, L + "  " );

  out << L << "end definition\n"
      << L << "Dphi_k = phi.deriv()\n"
      << L << "if Dphi_k >= 0 then\n"
      << L << "  throw line_search_failure\n"
      << L << "phi_kp1 = phi_k.value(f_kp1,c_kp1)\n"
      << L << "phi_k = phi.value(f_k,c_k)\n"
      << L << "if watch_k == NORMAL_LINE_SEARCH then\n"
      << L << "  if opt_kkt_err <= opt_kkt_err_threshold\n"
      << L << "  and feas_kkt_err <= feas_kkt_err_threshold then\n"
      << L << "    *** Start using watchdog from now on!\n"
      << L << "    watch_k = 0\n"
      << L << "if watch_k == 0 then\n"
      << L << "  *** Zeroth watchdog iteration\n"
      << L << "  if phi_kp1 >= phi_k + eta * Dphi_k then\n"
      << L << "    *** Accept this increase for now but watch out next iteration!\n"
      << L << "    *** Save the first point\n"
      << L << "    xo = x_k\n"
      << L << "    fo = f_k\n"
      << L << "    nrm_co = norm_inf_c_k\n"
      << L << "    do = d_k\n"
      << L << "    phio = phi_k\n"
      << L << "    Dphio = Dphi_k\n"
      << L << "    phiop1 = phi_kp1\n"
      << L << "    *** Skip the update of the penalty parameter next iteration.\n"
      << L << "    mu_kp1 = mu_k\n"
      << L << "    *** Continue with next step in watchdog\n"
      << L << "    watch_k = 1\n"
      << L << "  *** This is a good step so take it!\n"
      << L << "else if watch_k == 1 then\n"
      << L << "  *** First watchdog iteration\n"
      << L << "  Do line search for: x_kp1 = x_k + alpha_k * d_k\n"
      << L << "    -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
      << L << "  if ( phi_k <= phio ) or ( phi_kp1 <= phio + eta * Dphio ) then\n"
      << L << "    *** We will accept this step and reinitialize the watchdog\n"
      << L << "    watch_k = 0\n"
      << L << "  else if ( phi_kp1 > phio ) then\n"
      << L << "    *** This reduction is no good!\n"
      << L << "    *** Go back from x_k to x_km1 for this iteration (k)\n"
      << L << "    alpha_k = -1.0\n"
      << L << "    d_k = do\n"
      << L << "    f_kp1 = fo\n"
      << L << "    Output this iteration (k)\n"
      << L << "    *** Go from x_k = x_km2 to x_kp1 for this iteration (k+1)\n"
      << L << "    alpha_k = 1\n"
      << L << "    x_k = xo\n"
      << L << "    d_k = do\n"
      << L << "    Dphi_k = Dphio\n"
      << L << "    phi_k = phio\n"
      << L << "    x_kp1 = x_k + d_k\n"
      << L << "    phi_kp1 = phiop1\n"
      << L << "    Do line search for: x_kp1 = x_k + alpha_k * d_k\n"
      << L << "      -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
      << L << "    Output this iteration (k+1)\n"
      << L << "    *** Any updates for k (k+2) should use the last updated value\n"
      << L << "    *** which was for k-2 (k) since there is not much info for k-1 (k+1).\n"
      << L << "    *** Be careful here and make sure this works with other steps.\n"
      << L << "    goto EvalNewPoint\n"
      << L << "    *** Accept this reduction but do a linesearch next iteration!\n"
      << L << "    *** Skip the update of the penalty parameter next iteration.\n"
      << L << "    mu_kp1 = mu_k\n"
      << L << "    *** Continue with next step in watchdog\n"
      << L << "    watch_k = 2\n"
      << L << "else if ( watch_k == 2 ) then\n"
      << L << "  *** Second watchdog iteration\n"
      << L << "  Do line search for: x_kp1 = x_k + alpha_k * d_k\n"
      << L << "    -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
      << L << "  *** Reset the watchdog algorithm\n"
      << L << "  watch_k = 1\n"
      << L << "else if ( watch_k == NORMAL_LINE_SEARCH ) then\n"
      << L << "  Do line search for: x_kp1 = x_k + alpha_k * d_k\n"
      << L << "    -> alpha_k, x_kp1, f_kp1, c_kp1, phi_kp1\n"
      << L << "  begin direct line search : \"" << typeName(direct_line_search()) << "\"\n";

  direct_line_search().print_algorithm( out, L + "  " );

  out << L << "  end direct line search\n"
      << L << "if the maximum number of linesearch iterations is exceeded then\n"
      << L << "  throw line_search_failure\n";
}