#include "MoochoPack_NLPAlgoConfigMamaJama.hpp"
#include "MoochoPack_NLPAlgo.hpp"
#include "MoochoPack_NLPAlgoContainer.hpp"
#include "IterationPack_AlgorithmSetOptions.hpp"
#include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp"
#include "ConstrainedOptPack_MatrixSymPosDefLBFGS.hpp"
#include "ConstrainedOptPack_VariableBoundsTester.hpp"
#include "NLPInterfacePack_NLPDirect.hpp"
#include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
#include "NLPInterfacePack_NLPVarReductPerm.hpp"
#include "ConstrainedOptPack_DirectLineSearchArmQuad_Strategy.hpp"
#include "ConstrainedOptPack_DirectLineSearchArmQuad_StrategySetOptions.hpp"
#include "ConstrainedOptPack_MeritFuncNLPL1.hpp"
#include "ConstrainedOptPack_MeritFuncNLPModL1.hpp"
#ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
#include "ConstrainedOptPack_DecompositionSystemVarReductPerm.hpp"
#endif
#include "ConstrainedOptPack_QPSolverRelaxedTester.hpp"
#include "ConstrainedOptPack_QPSolverRelaxedTesterSetOptions.hpp"
#include "ConstrainedOptPack_QPSolverRelaxedQPSchur.hpp"
#include "ConstrainedOptPack_QPSolverRelaxedQPSchurSetOptions.hpp"
#include "ConstrainedOptPack_QPSchurInitKKTSystemHessianFull.hpp"
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
#include "ConstrainedOptPack_QPSolverRelaxedQPKWIK.hpp"
#endif
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
// ... (QPOPT solver includes elided)
#endif
#include "MoochoPack_MoochoAlgorithmStepNames.hpp"
#include "MoochoPack_ReducedGradientStd_Step.hpp"
#include "MoochoPack_InitFinDiffReducedHessian_Step.hpp"
#include "MoochoPack_InitFinDiffReducedHessian_StepSetOptions.hpp"
#include "MoochoPack_ReducedHessianSerialization_Step.hpp"
#include "MoochoPack_ReducedHessianSerialization_StepSetOptions.hpp"
#include "MoochoPack_ReducedHessianSecantUpdateStd_Step.hpp"
#include "MoochoPack_ReducedHessianSecantUpdateBFGSFull_Strategy.hpp"
#include "MoochoPack_BFGSUpdate_Strategy.hpp"
#include "MoochoPack_BFGSUpdate_StrategySetOptions.hpp"
#include "MoochoPack_QuasiNormalStepStd_Step.hpp"
#include "MoochoPack_CheckDescentQuasiNormalStep_Step.hpp"
#include "MoochoPack_CheckDecompositionFromPy_Step.hpp"
#include "MoochoPack_CheckDecompositionFromRPy_Step.hpp"
#include "MoochoPack_TangentialStepWithoutBounds_Step.hpp"
#include "MoochoPack_TangentialStepWithInequStd_Step.hpp"
#include "MoochoPack_TangentialStepWithInequStd_StepSetOptions.hpp"
#include "MoochoPack_SetDBoundsStd_AddedStep.hpp"
#include "MoochoPack_QPFailureReinitReducedHessian_Step.hpp"
#include "MoochoPack_CalcDFromYPYZPZ_Step.hpp"
#include "MoochoPack_CalcDFromYPY_Step.hpp"
#include "MoochoPack_CalcDFromZPZ_Step.hpp"
#include "MoochoPack_LineSearchFailureNewDecompositionSelection_Step.hpp"
#include "MoochoPack_LineSearchFilter_Step.hpp"
#include "MoochoPack_LineSearchFilter_StepSetOptions.hpp"
#include "MoochoPack_LineSearchFullStep_Step.hpp"
#include "MoochoPack_LineSearchDirect_Step.hpp"
#include "MoochoPack_LineSearchNLE_Step.hpp"
#include "MoochoPack_CalcReducedGradLagrangianStd_AddedStep.hpp"
#include "MoochoPack_CheckConvergenceStd_AddedStep.hpp"
#include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
#include "MoochoPack_CheckSkipBFGSUpdateStd_StepSetOptions.hpp"
#include "MoochoPack_MeritFunc_DummyUpdate_Step.hpp"
#include "MoochoPack_MeritFunc_PenaltyParamUpdate_AddedStepSetOptions.hpp"
#include "MoochoPack_MeritFunc_PenaltyParamUpdateMultFree_AddedStep.hpp"
#include "MoochoPack_act_set_stats.hpp"
#include "MoochoPack_qp_solver_stats.hpp"
#include "MoochoPack_quasi_newton_stats.hpp"
#include "Teuchos_AbstractFactoryStd.hpp"
#include "Teuchos_dyn_cast.hpp"
#include "ReleaseResource_ref_count_ptr.hpp"
#include "Teuchos_Assert.hpp"
#include "OptionsFromStreamPack_StringToIntMap.hpp"
#include "OptionsFromStreamPack_StringToBool.hpp"
const double INF_BASIS_COND_CHANGE_FRAC = 1e+20;

namespace MoochoPack {
NLPAlgoConfigMamaJama::SOptionValues::SOptionValues()
  :max_basis_cond_change_frac_(-1.0)
  ,exact_reduced_hessian_(false)
  ,quasi_newton_(QN_AUTO)
  ,num_lbfgs_updates_stored_(-1)
  ,lbfgs_auto_scaling_(true)
  ,hessian_initialization_(INIT_HESS_AUTO)
  ,qp_solver_type_(QP_AUTO)
  ,reinit_hessian_on_qp_fail_(true)
  ,line_search_method_(LINE_SEARCH_AUTO)
  ,merit_function_type_(MERIT_FUNC_AUTO)
  ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
  ,full_steps_after_k_(-1)
  // ...
  ,num_pz_damp_iters_(0)
{}
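// config_algo_cntr() is the workhorse of this configuration class: given an
// NLPAlgoContainer it creates the NLPAlgo object, reads and reconciles the
// user options, sets up the state object and iteration quantities, creates
// the individual step objects, and finally assembles them into the rSQP
// algorithm.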
void NLPAlgoConfigMamaJama::config_algo_cntr(
  NLPAlgoContainer   *algo_cntr
  ,std::ostream      *trase_out
  )
{
  // ...
  if(trase_out) {
    *trase_out
      << "\n*****************************************************************\n"
      << "*** NLPAlgoConfigMamaJama Configuration                       ***\n"
      << "*** Here, summary information about how the algorithm is      ***\n"
      << "*** configured is printed so that the user can see how the    ***\n"
      << "*** properties of the NLP and the set options influence       ***\n"
      << "*** how an algorithm is configured.                           ***\n"
      << "*****************************************************************\n";
  }
  if(trase_out)
    *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
  // ... (create the NLPAlgo object and its options setter)
  opt_setter( algo.get() );
  // ...
  algo_cntr->set_algo(algo);
  algo->set_algo_cntr(algo_cntr);
  // ...
  if(trase_out)
    *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
  algo->set_nlp( algo_cntr->get_nlp().get() );
  algo->set_track( algo_cntr->get_track() );
  // Read in the options (if an OptionsFromStream object was set)
  if( options_.get() ) {
    readin_options( *options_, &uov_, trase_out );
  }
  else {
    if(trase_out)
      *trase_out
        << "\n*** Warning, no OptionsFromStream object was set so a default set"
           " of options will be used!\n";
  }
  // Get and initialize the NLP object
  NLP &nlp = algo->nlp();
  nlp.initialize(algo->algo_cntr().check_results());

  // Get the dimensions of the NLP
  // ...
    nb = nlp.num_bounded_x();

  // Determine which NLP interfaces are supported
  NLPFirstOrder   *nlp_foi = NULL;
  NLPSecondOrder  *nlp_soi = NULL;
  NLPDirect       *nlp_fod = NULL;
  bool            tailored_approach = false;
  process_nlp_and_options(
    trase_out, nlp
    ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
    );
  const int max_dof_quasi_newton_dense
    = /* ... initializer elided ... */;

  if(trase_out)
    *trase_out << "\n*** Sorting out some of the options given input options ...\n";
  if( tailored_approach ) {
    // A tailored-approach (NLPDirect) NLP forces several options
    if(trase_out)
      *trase_out
        << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
        << "merit_function_type = L1;\n"
        << "l1_penalty_parameter_update = MULT_FREE;\n"
        << "null_space_matrix = EXPLICIT;\n";
    cov_.merit_function_type_ = MERIT_FUNC_L1;
    cov_.l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
    // ...
      = DecompositionSystemStateStepBuilderStd::NULL_SPACE_MATRIX_EXPLICIT;
  }
  // Only the L1 merit function is currently supported
  if( !tailored_approach && uov_.merit_function_type_ != MERIT_FUNC_L1 ) {
    if(trase_out)
      *trase_out
        << "\nThe only merit function currently supported is:\n"
        << "merit_function_type = L1;\n";
    cov_.merit_function_type_ = MERIT_FUNC_L1;
  }
  // With no equality constraints, pick consistent line-search defaults
  if( uov_.line_search_method_ == LINE_SEARCH_NONE ) {
    if(trase_out)
      *trase_out
        << "\nThere are no equality constraints (m == 0) and line_search_method==NONE so set the following options:\n"
        << "line_search_method = NONE;\n"
        << "merit_function_type = L1;\n";
    cov_.line_search_method_ = LINE_SEARCH_NONE;
    cov_.merit_function_type_ = MERIT_FUNC_L1;
  }
  else {
    if(trase_out)
      *trase_out
        << "\nThere are no equality constraints (m == 0) and line_search_method==AUTO so set the following options:\n"
        << "line_search_method = DIRECT;\n"
        << "merit_function_type = L1;\n";
    cov_.line_search_method_ = LINE_SEARCH_DIRECT;
    cov_.merit_function_type_ = MERIT_FUNC_L1;
  }
  // Decide the type of quasi-Newton update to use
  switch( uov_.quasi_newton_ ) {
    case QN_AUTO: {
      if(trase_out)
        *trase_out
          << "\nquasi_newton == AUTO:"
          << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
      if( n - r > max_dof_quasi_newton_dense ) {
        if(trase_out)
          *trase_out
            << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
            << max_dof_quasi_newton_dense << ":\n"
            << "setting quasi_newton == LBFGS\n";
        cov_.quasi_newton_ = QN_LBFGS;
      }
      else {
        if(trase_out)
          *trase_out
            << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
            << max_dof_quasi_newton_dense << ":\n"
            << "setting quasi_newton == BFGS\n";
        cov_.quasi_newton_ = QN_BFGS;
      }
      break;
    }
    default:
      cov_.quasi_newton_ = uov_.quasi_newton_;
  }
  // Decide the QP solver type
  if( uov_.qp_solver_type_ == QP_AUTO && nb == 0 ) {
    cov_.qp_solver_type_ = QP_AUTO;
  }
  // ...

  // Set defaults for every option not set by the user or determined above
  set_default_options( uov_, &cov_, trase_out );
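  // The line-search variants that have not been brought up to date with the
  // current step interfaces are downgraded to supported ones below.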
  if( cov_.line_search_method_ == LINE_SEARCH_2ND_ORDER_CORRECT ) {
    if(trase_out)
      *trase_out
        << "\nline_search_method == 2ND_ORDER_CORRECT:\n"
           "Sorry, the second order correction line search is not updated yet!\n"
           "setting line_search_method = FILTER ...\n";
    cov_.line_search_method_ = LINE_SEARCH_FILTER;
  }
  if( cov_.line_search_method_ == LINE_SEARCH_WATCHDOG ) {
    if(trase_out)
      *trase_out
        << "\nline_search_method == WATCHDOG:\n"
           "Sorry, the watchdog line search is not updated yet!\n"
           "setting line_search_method = DIRECT ...\n";
    cov_.line_search_method_ = LINE_SEARCH_DIRECT;
  }
  // When using QPKWIK with bounds, fall back to a dense BFGS approximation
  if( cov_.qp_solver_type_ == QP_QPKWIK && nb ) {
    if(trase_out)
      *trase_out
        << "\nqp_solver == QPKWIK and nlp.num_bounded_x() == " << nb << " > 0:\n"
        << "Setting quasi_newton == BFGS...\n";
    cov_.quasi_newton_ = QN_BFGS;
  }
  // Create the decomposition system object
  decomp_sys_ptr_t decomp_sys;
  // ...
#ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
  // ...
#endif
  create_decomp_sys(
    trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
    ,&decomp_sys
    );
  // ...
#ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
  RCP<DecompositionSystemVarReductPerm>
    decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
#endif
  if(trase_out)
    *trase_out << "\n*** Creating the state object and setting up iteration quantity objects ...\n";

  // Create the state object with its vector spaces (the range- and null-space
  // vector spaces come either from the NLPDirect interface or from decomp_sys)
  // ...
    ? ( tailored_approach
        ? ( nlp_fod->var_dep().size()
            ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
            // ...
        : decomp_sys->space_range()
  // ...
    ? ( tailored_approach
        ? ( nlp_fod->var_indep().size()
            ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
            // ...
        : decomp_sys->space_null()
  // ...

  // Add the common iteration quantities to the state object
  add_iter_quantities(
    trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
    ,state
    );
  // Set up the matrix factory for the quasi-Newton reduced Hessian rHL
  if( !cov_.exact_reduced_hessian_ ) {
    // ...
    abstract_factory_rHL = Teuchos::null;
    // ...
    maintain_original = nb;
    // ...
    maintain_inverse = ( (!nb && m==r) || cov_.qp_solver_type_==QP_QPSCHUR
                         || algo->algo_cntr().check_results() );
    switch( cov_.quasi_newton_ ) {
      // ... (dense BFGS: Cholesky-factored matrix)
          MatrixSymPosDefCholFactor::PostMod(
      // ... (limited-memory BFGS)
          MatrixSymPosDefLBFGS::PostMod(
            cov_.num_lbfgs_updates_stored_
            // ...
            ,cov_.lbfgs_auto_scaling_
      // ...
    }
    state->set_iter_quant(
      // ...
      new IterQuantityAccessContiguous<MatrixSymOp>(
        // ...
        ,abstract_factory_rHL
        // ...
  }
  // Add the merit function iteration quantity (not needed for FILTER or no line search)
  if( cov_.line_search_method_ != LINE_SEARCH_NONE
    && cov_.line_search_method_ != LINE_SEARCH_FILTER )
  {
    // ...
    merit_func_factory = Teuchos::null;
    switch( cov_.merit_function_type_ ) {
      // ...
      case MERIT_FUNC_MOD_L1:
      case MERIT_FUNC_MOD_L1_INCR:
      // ...
    }
    state->set_iter_quant(
      // ...
      new IterQuantityAccessContiguous<MeritFuncNLP>(
        // ...
  }

  // The filter line search keeps its own iteration quantity
  if (cov_.line_search_method_ == LINE_SEARCH_FILTER) {
    state->set_iter_quant(
      // ...
      new IterQuantityAccessContiguous<Filter_T>(1,FILTER_IQ_STRING)
      // ...
  }

  // Additional vector iteration quantities
  state->set_iter_quant(
    // ...
    new IterQuantityAccessContiguous<VectorMutable>(
      // ...
  state->set_iter_quant(
    // ...
    new IterQuantityAccessContiguous<VectorMutable>(
      // ...

  // Active-set and QP solver statistics
  state->set_iter_quant(
    // ...
    new IterQuantityAccessContiguous<ActSetStats>( 1, act_set_stats_name ) )
    // ...
  state->set_iter_quant(
    // ...
    new IterQuantityAccessContiguous<QPSolverStats>( 1, qp_solver_stats_name ) )
    // ...
  // Resize the iteration quantities that need storage for more than one iteration
  typedef IterQuantityAccessContiguous<value_type>     IQ_scalar_cngs;
  typedef IterQuantityAccessContiguous<VectorMutable>  IQ_vector_cngs;

  dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
  dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
  if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
  // ...
  if(m && nlp_foi) state->Gc();
  // ...
#ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
    && decomp_sys_perm.get() == NULL
#endif
  // ...
  if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
  // ...
#ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
    && decomp_sys_perm.get() == NULL
#endif
  // ...
  if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
  dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
  // ...
  dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
  // ...
  dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
  dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
  dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
  // ...
  dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
  dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
  // ...
  dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
  // ...
  if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
  dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);

  algo->set_state( state );
  *trase_out << "\n*** Creating and setting the step objects ...\n";
  // EvalNewPoint step
  algo_step_ptr_t eval_new_point_step = Teuchos::null;
  // ...
  create_eval_new_point(
    trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
    ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
    );

  // QuasiNormalStep step
  algo_step_ptr_t quansi_normal_step_step = Teuchos::null;
  if( !tailored_approach ) {
    // ...
  }

  // CheckDecomposition steps
  algo_step_ptr_t check_decomp_from_py_step  = Teuchos::null;
  algo_step_ptr_t check_decomp_from_Rpy_step = Teuchos::null;
  if( new_decomp_selection_strategy.get()
    && cov_.max_basis_cond_change_frac_ < INF_BASIS_COND_CHANGE_FRAC )
  {
    // ...
        new_decomp_selection_strategy
        ,cov_.max_basis_cond_change_frac_
    // ...
        new_decomp_selection_strategy
        ,cov_.max_basis_cond_change_frac_
    // ...
  }

  // CheckDescentQuasiNormalStep step
  algo_step_ptr_t check_descent_quansi_normal_step_step = Teuchos::null;
  if( algo->algo_cntr().check_results() ) {
    // ...
  }

  // ReducedGradient step
  algo_step_ptr_t reduced_gradient_step = Teuchos::null;
  if( !tailored_approach ) {
    // ...
  }

  // CheckSkipBFGSUpdate step
  algo_step_ptr_t check_skip_bfgs_update_step = Teuchos::null;
  if(!cov_.exact_reduced_hessian_) {
    // ...
    opt_setter( step.get() );
    // ...
    check_skip_bfgs_update_step = step;
  }
  // ReducedHessian step (secant update of rHL)
  algo_step_ptr_t reduced_hessian_step = Teuchos::null;
  // ...
  secant_update_strategy = Teuchos::null;
  switch( cov_.quasi_newton_ )
  {
    // ...
    opt_setter( bfgs_strategy.get() );
    // ...
  }
  switch( cov_.quasi_newton_ ) {
    // ...
    TEUCHOS_TEST_FOR_EXCEPTION(
      true, std::logic_error
      ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
      "The quasi_newton options of PBFGS and LPBFGS have not been updated yet!" );
    // ...
  }
  // ...
  algo->state().set_iter_quant(
    quasi_newton_stats_name
    ,Teuchos::rcp(
      new IterQuantityAccessContiguous<QuasiNewtonStats>(
        // ...
        ,quasi_newton_stats_name
        // ...
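  // How the reduced Hessian is initialized is controlled by the
  // hessian_initialization option: identity, one of the finite-difference
  // scalings computed by InitFinDiffReducedHessian_Step, or deserialization
  // from a file written on a previous run (SERIALIZE).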
  // ReducedHessianInitialization step
  algo_step_ptr_t init_red_hess_step = Teuchos::null;
  if( cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_IDENTITY
    || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL
    || cov_.hessian_initialization_ == INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS )
  {
    // ...
    switch( cov_.hessian_initialization_ ) {
      case INIT_HESS_FIN_DIFF_SCALE_IDENTITY:
        init_hess = InitFinDiffReducedHessian_Step::SCALE_IDENTITY;
        break;
      case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL:
        init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL;
        break;
      case INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS:
        init_hess = InitFinDiffReducedHessian_Step::SCALE_DIAGONAL_ABS;
        break;
      // ...
    }
    // ...
    opt_setter( _init_red_hess_step.get() );
    // ...
    init_red_hess_step = _init_red_hess_step;
  }
  else if( cov_.hessian_initialization_ == INIT_HESS_SERIALIZE ) {
    // ...
    ReducedHessianSerialization_StepSetOptions
      opt_setter( _init_red_hess_step.get() );
    opt_setter.set_options( *options_ );
    // ...
    init_red_hess_step = _init_red_hess_step;
  }
  // SetDBounds and TangentialStep steps
  algo_step_ptr_t set_d_bounds_step    = Teuchos::null;
  algo_step_ptr_t tangential_step_step = Teuchos::null;
  // ...
    tangental_step_output_bounds_step
  // ...
    tangental_step_output_bounds_step->max_pz_norm(cov_.max_pz_norm_);
    tangental_step_output_bounds_step->num_pz_damp_iters(cov_.num_pz_damp_iters_);
    tangential_step_step = tangental_step_output_bounds_step;
  // Create the QP solver (only used when there are bounds or general inequalities)
  switch( cov_.qp_solver_type_ ) {
    case QP_QPSCHUR: {
      // ...
      init_kkt_sys = Teuchos::null;
      switch( cov_.quasi_newton_ ) {
        // ...
        init_kkt_sys = Teuchos::rcp(new QPSchurInitKKTSystemHessianFull());
        // ...
      }
      // ...
      _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPSchur(init_kkt_sys));
      if(options_.get()) {
        QPSolverRelaxedQPSchurSetOptions
          qp_options_setter(_qp_solver.get());
        qp_options_setter.set_options( *options_ );
      }
      qp_solver = _qp_solver;
      break;
    }
    case QP_QPKWIK: {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
      // ...
      qp_solver = _qp_solver;
#else
      TEUCHOS_TEST_FOR_EXCEPTION(
        true, std::logic_error
        ,"Error! QPKWIK interface is not supported since "
        "CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK is not defined!" );
#endif
      break;
    }
    // ...
  }

  // QP solver tester (checks the QP subproblem solutions)
  qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
  if(options_.get()) {
    QPSolverRelaxedTesterSetOptions
      opt_setter( qp_solver_tester.get() );
    opt_setter.set_options( *options_ );
  }
  // ...

  // TangentialStepWithInequStd step (uses the QP solver and its tester)
  // ...
      qp_solver, qp_solver_tester ) );
  // ...
  opt_setter( tangential_step_with_inequ_step.get() );
  // ...
  tangential_step_step = tangential_step_with_inequ_step;
  // CalcDFromYPYZPZ step
  algo_step_ptr_t calc_d_from_Ypy_Zpy_step = Teuchos::null;
  // ...

  // CalcReducedGradLagrangian step
  algo_step_ptr_t calc_reduced_grad_lagr_step = Teuchos::null;
  // ...

  // CheckConvergence step
  algo_step_ptr_t check_convergence_step = Teuchos::null;
  // ...
  opt_setter( check_convergence_strategy.get() );
  // ...
  check_convergence_step = _check_convergence_step;
  // MeritFunc_PenaltyParamUpdate step
  algo_step_ptr_t merit_func_penalty_param_update_step = Teuchos::null;
  if( cov_.line_search_method_ == LINE_SEARCH_FILTER ) {
    // ... (the filter line search does not need a merit-function penalty parameter)
  }
  else if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
    // ...
    param_update_step = Teuchos::null;
    switch( cov_.merit_function_type_ ) {
      case MERIT_FUNC_L1: {
        switch(cov_.l1_penalty_param_update_) {
          case L1_PENALTY_PARAM_WITH_MULT:
            // ...
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::logic_error
              ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
              "The l1_penalty_parameter_update option of WITH_MULT has not been updated yet!" );
            // ...
          case L1_PENALTY_PARAM_MULT_FREE:
            // ...
        }
        // ...
      }
      case MERIT_FUNC_MOD_L1:
      case MERIT_FUNC_MOD_L1_INCR:
        // ...
        TEUCHOS_TEST_FOR_EXCEPTION(
          true, std::logic_error
          ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
          "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
        // ...
    }
    if(options_.get()) {
      // ...
      ppu_options_setter( param_update_step.get() );
      // ...
    }
    merit_func_penalty_param_update_step = param_update_step;
  }
  // LineSearchFullStep step
  algo_step_ptr_t line_search_full_step_step = Teuchos::null;
  // ...

  // LineSearch step
  algo_step_ptr_t line_search_step = Teuchos::null;
  if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
    // ...
    direct_line_search = Teuchos::rcp(new DirectLineSearchArmQuad_Strategy());
    if(options_.get()) {
      // ...
      ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
      // ...
    }
    switch( cov_.line_search_method_ ) {
      case LINE_SEARCH_DIRECT: {
        // ...
      }
      case LINE_SEARCH_2ND_ORDER_CORRECT: {
        TEUCHOS_TEST_FOR_EXCEPTION(
          true, std::logic_error
          ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
          "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
        // ...
      }
      case LINE_SEARCH_WATCHDOG: {
        TEUCHOS_TEST_FOR_EXCEPTION(
          true, std::logic_error
          ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
          "The line_search_method option of WATCHDOG has not been updated yet!" );
        // ...
      }
      case LINE_SEARCH_FILTER: {
        // ...
        LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
        options_setter.set_options(*options_);
        // ...
        line_search_step = line_search_filter_step;
        break;
      }
      case LINE_SEARCH_AUTO:
      case LINE_SEARCH_NONE:
        // ...
    }
  }
  // If a new decomposition may be selected, wrap the line search so that a
  // line-search failure triggers the selection of a new decomposition.
  if( new_decomp_selection_strategy.get() ) {
    // ...
      ,new_decomp_selection_strategy
    // ...
  }
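  // Assemble the algorithm. The sequence of inserted steps depends on the
  // structure of the NLP: unconstrained, simple bounds only, a square system
  // of nonlinear equations (n == m), or a generally constrained problem.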
  // ... (branch: unconstrained or simple-bound-constrained NLP, m == 0)
  if(trase_out)
    *trase_out
      << "\nConfiguring an algorithm for an unconstrained "
      << "NLP (m == 0, num_bounded_x == 0) ...\n";
  // ...
  if(trase_out)
    *trase_out
      << "\nConfiguring an algorithm for a simple bound constrained "
      << "NLP (m == 0, num_bounded_x > 0) ...\n";
  // ...
  algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
  // ...
  algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
  // ...
  algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
  // ...
  algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
  // ...
  algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
  // ...
  if(init_red_hess_step.get()) {
    algo->insert_assoc_step(
      step_num, IterationPack::PRE_STEP, 1
      ,"ReducedHessianInitialization"
      // ...
      );
  }
  // ...
  algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
  // ...
  algo->insert_assoc_step(
    // ...
    ,IterationPack::PRE_STEP
    // ...
    );
  // ...
  algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
  // ...
  algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
  // ...
  if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
    algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
  }
  else {
    algo->insert_step( ++step_num, LineSearch_name, line_search_step );
    // ...
    algo->insert_assoc_step(
      // ...
      ,IterationPack::PRE_STEP
      // ...
      ,"LineSearchFullStep"
      ,line_search_full_step_step
      );
    // ...
    algo->insert_assoc_step(
      // ...
      ,IterationPack::PRE_STEP
      // ...
      ,"MeritFunc_DummyUpdate"
      // ...
      );
  }
  // ... (branch: a system of nonlinear equations, n == m)
  if(trase_out)
    *trase_out
      << "\nConfiguring an algorithm for a system of nonlinear equations "
      << "NLP (n == m) ...\n";
  // ...
  if(algo->state().get_iter_quant_id(merit_func_nlp_name)!=IterationPack::AlgorithmState::DOES_NOT_EXIST)
    algo->state().erase_iter_quant(merit_func_nlp_name);
  // ...
  int assoc_step_num = 0;
  // ...
  algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
  if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
  {
    algo->insert_assoc_step(
      // ...
      ,IterationPack::POST_STEP
      // ...
      ,"CheckDescentQuasiNormalStep"
      ,check_descent_quansi_normal_step_step
      );
  }
  // ...
  if( !tailored_approach ) {
    algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
    // ...
    if( check_decomp_from_py_step.get() )
      algo->insert_assoc_step(
        // ...
        ,IterationPack::POST_STEP
        // ...
        ,"CheckDecompositionFromPy"
        ,check_decomp_from_py_step
        );
    if( check_decomp_from_Rpy_step.get() )
      algo->insert_assoc_step(
        // ...
        ,IterationPack::POST_STEP
        // ...
        ,"CheckDecompositionFromRPy"
        ,check_decomp_from_Rpy_step
        );
    if( check_descent_quansi_normal_step_step.get() )
      algo->insert_assoc_step(
        // ...
        ,IterationPack::POST_STEP
        // ...
        ,"CheckDescentQuasiNormalStep"
        ,check_descent_quansi_normal_step_step
        );
  }
  // ...
  algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
  // ...
  if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
    algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
  }
  else {
    algo->insert_step( ++step_num, LineSearch_name, line_search_step );
    // ...
    algo->insert_assoc_step(
      // ...
      ,IterationPack::PRE_STEP
      // ...
      ,"LineSearchFullStep"
      ,line_search_full_step_step
      );
  }
  else if ( m > 0 || nb > 0 ) {
    // General NLP: equality constrained or generally constrained
    if( nb == 0 ) {
      if(trase_out)
        *trase_out
          << "\nConfiguring an algorithm for a nonlinear equality constrained "
          << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
    }
    else {
      if(trase_out)
        *trase_out
          << "\nConfiguring an algorithm for a nonlinear generally constrained "
          << "NLP ( num_bounded_x > 0 ) ...\n";
    }
    // ...
    int assoc_step_num = 0;
    // ...
    algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
    if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
    {
      algo->insert_assoc_step(
        // ...
        ,IterationPack::POST_STEP
        // ...
        ,"CheckDescentQuasiNormalStep"
        ,check_descent_quansi_normal_step_step
        );
    }
    // ...
    if( !tailored_approach ) {
      algo->insert_step( ++step_num, QuasiNormalStep_name, quansi_normal_step_step );
      // ...
      if( check_decomp_from_py_step.get() )
        algo->insert_assoc_step(
          // ...
          ,IterationPack::POST_STEP
          // ...
          ,"CheckDecompositionFromPy"
          ,check_decomp_from_py_step
          );
      if( check_decomp_from_Rpy_step.get() )
        algo->insert_assoc_step(
          // ...
          ,IterationPack::POST_STEP
          // ...
          ,"CheckDecompositionFromRPy"
          ,check_decomp_from_Rpy_step
          );
      if( check_descent_quansi_normal_step_step.get() )
        algo->insert_assoc_step(
          // ...
          ,IterationPack::POST_STEP
          // ...
          ,"CheckDescentQuasiNormalStep"
          ,check_descent_quansi_normal_step_step
          );
    }
    // ...
    if( !tailored_approach ) {
      algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
    }
    // ...
    algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
    // ...
    if( !tailored_approach ) {
      // ...
    }
    algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
    // ...
    algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
    // ...
    if(init_red_hess_step.get()) {
      algo->insert_assoc_step(
        step_num, IterationPack::PRE_STEP, 1
        ,"ReducedHessianInitialization"
        // ...
        );
    }
    // ...
    algo->insert_assoc_step(
      // ...
      ,IterationPack::PRE_STEP
      // ...
      ,CheckSkipBFGSUpdate_name
      ,check_skip_bfgs_update_step
      );
    // ...
    algo->insert_step( ++step_num, TangentialStep_name, tangential_step_step );
    // ...
    algo->insert_assoc_step(
      // ...
      ,IterationPack::PRE_STEP
      // ...
      );
    // ...
    algo->insert_step( ++step_num, CalcDFromYPYZPZ_name, calc_d_from_Ypy_Zpy_step );
    // ...
    algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
    // ...
    if( !tailored_approach ) {
      // ...
    }
    algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
    // ...
    if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
      algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
    }
    else {
      algo->insert_step( ++step_num, LineSearch_name, line_search_step );
      // ...
      algo->insert_assoc_step(
        // ...
        ,IterationPack::PRE_STEP
        // ...
        ,"LineSearchFullStep"
        ,line_search_full_step_step
        );
      // ...
      if(merit_func_penalty_param_update_step.get()) {
        algo->insert_assoc_step(
          // ...
          ,IterationPack::PRE_STEP
          // ...
          ,"MeritFunc_PenaltyParamUpdate"
          ,merit_func_penalty_param_update_step
          );
      }
    }
  }
  // ...
}

void NLPAlgoConfigMamaJama::init_algo(NLPAlgoInterface *_algo)
{
  // ...
  TEUCHOS_TEST_FOR_EXCEPTION(
    _algo == NULL, std::invalid_argument
    ,"NLPAlgoConfigMamaJama::init_algo(_algo) : Error, "
    "_algo can not be NULL" );
  // ...
  NLP &nlp = algo.nlp();
  // ...
    << std::setprecision(algo.algo_cntr().journal_print_digits())
  // ...
}
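// readin_options() pulls the "NLPAlgoConfigMamaJama" options group out of the
// OptionsFromStream object (if present) and translates each option string into
// the corresponding SOptionValues field.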
void NLPAlgoConfigMamaJama::readin_options(
  const OptionsFromStreamPack::OptionsFromStream  &options
  ,SOptionValues                                  *ov
  ,std::ostream                                   *trase_out
  )
{
  namespace ofsp = OptionsFromStreamPack;
  using ofsp::OptionsFromStream;
  typedef OptionsFromStream::options_group_t options_group_t;
  using ofsp::StringToIntMap;
  using ofsp::StringToBool;

  // ...
  const std::string opt_grp_name = "NLPAlgoConfigMamaJama";
  const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
  if( OptionsFromStream::options_group_exists( optgrp ) ) {
    // The recognized options in the "NLPAlgoConfigMamaJama" options group
    const int num_opts = 13;
    enum EMamaJama {
      MAX_BASIS_COND_CHANGE_FRAC
      ,EXACT_REDUCED_HESSIAN
      ,QUASI_NEWTON
      ,NUM_LBFGS_UPDATES_STORED
      ,LBFGS_AUTO_SCALING
      ,HESSIAN_INITIALIZATION
      ,QP_SOLVER
      ,REINIT_HESSIAN_ON_QP_FAIL
      ,LINE_SEARCH_METHOD
      ,MERIT_FUNCTION_TYPE
      ,L1_PENALTY_PARAM_UPDATE
      ,MAX_PZ_NORM
      ,NUM_PZ_DAMP_ITERS
    };
    const char* SMamaJama[num_opts] = {
      "max_basis_cond_change_frac"
      ,"exact_reduced_hessian"
      ,"quasi_newton"
      ,"num_lbfgs_updates_stored"
      ,"lbfgs_auto_scaling"
      ,"hessian_initialization"
      ,"qp_solver"
      ,"reinit_hessian_on_qp_fail"
      ,"line_search_method"
      ,"merit_function_type"
      ,"l1_penalty_parameter_update"
      ,"max_pz_norm"
      ,"num_pz_damp_iters"
    };
    StringToIntMap mama_jama_map( opt_grp_name, num_opts, SMamaJama );
    options_group_t::const_iterator itr = optgrp.begin();
    for( ; itr != optgrp.end(); ++itr ) {
      switch( (EMamaJama)mama_jama_map( ofsp::option_name(itr) ) ) {
        case MAX_BASIS_COND_CHANGE_FRAC:
          ov->max_basis_cond_change_frac_ = std::atof( ofsp::option_value(itr).c_str() );
          break;
        case EXACT_REDUCED_HESSIAN:
          ov->exact_reduced_hessian_ = StringToBool( "exact_reduced_hessian", ofsp::option_value(itr).c_str() );
          break;
        case QUASI_NEWTON:
        {
          const std::string &opt_val = ofsp::option_value(itr);
          if( opt_val == "AUTO" )
            ov->quasi_newton_ = QN_AUTO;
          else if( opt_val == "BFGS" )
            ov->quasi_newton_ = QN_BFGS;
          else if( opt_val == "PBFGS" )
            ov->quasi_newton_ = QN_PBFGS;
          else if( opt_val == "LBFGS" )
            ov->quasi_newton_ = QN_LBFGS;
          else if( opt_val == "LPBFGS" )
            ov->quasi_newton_ = QN_LPBFGS;
          else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"quasi_newton\"."
              " Only the options BFGS, PBFGS, LBFGS, LPBFGS and AUTO are available."
              );
          break;
        }
        case NUM_LBFGS_UPDATES_STORED:
          ov->num_lbfgs_updates_stored_ = std::atoi( ofsp::option_value(itr).c_str() );
          break;
        case LBFGS_AUTO_SCALING:
          ov->lbfgs_auto_scaling_
            = StringToBool( "lbfgs_auto_scaling", ofsp::option_value(itr).c_str() );
          break;
        case HESSIAN_INITIALIZATION:
        {
          const std::string &opt_val = ofsp::option_value(itr);
          if( opt_val == "IDENTITY" )
            ov->hessian_initialization_ = INIT_HESS_IDENTITY;
          else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
            ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_IDENTITY;
          else if( opt_val == "FINITE_DIFF_DIAGONAL" )
            ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL;
          else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
            ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
          else if( opt_val == "AUTO" )
            ov->hessian_initialization_ = INIT_HESS_AUTO;
          else if( opt_val == "SERIALIZE" )
            ov->hessian_initialization_ = INIT_HESS_SERIALIZE;
          else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"hessian_initialization\"."
              " Only the options IDENTITY, SERIALIZE, FINITE_DIFF_SCALE_IDENTITY,"
              " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
              " are available."
              );
          break;
        }
        case QP_SOLVER:
        {
          const std::string &qp_solver = ofsp::option_value(itr);
          if( qp_solver == "AUTO" ) {
            ov->qp_solver_type_ = QP_AUTO;
          } else if( qp_solver == "QPSOL" ) {
            ov->qp_solver_type_ = QP_QPSOL;
          } else if( qp_solver == "QPOPT" ) {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
            ov->qp_solver_type_ = QP_QPOPT;
#else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : QPOPT is not supported,"
              " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
#endif
          } else if( qp_solver == "QPKWIK" ) {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
            ov->qp_solver_type_ = QP_QPKWIK;
#else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : QPKWIK is not supported,"
              " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK!" );
#endif
          } else if( qp_solver == "QPSCHUR" ) {
            ov->qp_solver_type_ = QP_QPSCHUR;
          } else {
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"qp_solver\"."
              " Only the QP solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are available." );
          }
          break;
        }
        case REINIT_HESSIAN_ON_QP_FAIL:
          ov->reinit_hessian_on_qp_fail_ = StringToBool( "reinit_hessian_on_qp_fail", ofsp::option_value(itr).c_str() );
          break;
        case LINE_SEARCH_METHOD:
        {
          const std::string &option = ofsp::option_value(itr);
          if( option == "NONE" ) {
            ov->line_search_method_ = LINE_SEARCH_NONE;
          } else if( option == "DIRECT" ) {
            ov->line_search_method_ = LINE_SEARCH_DIRECT;
          } else if( option == "2ND_ORDER_CORRECT" ) {
            ov->line_search_method_ = LINE_SEARCH_2ND_ORDER_CORRECT;
          } else if( option == "WATCHDOG" ) {
            ov->line_search_method_ = LINE_SEARCH_WATCHDOG;
          } else if( option == "AUTO" ) {
            ov->line_search_method_ = LINE_SEARCH_AUTO;
          } else if( option == "FILTER" ) {
            ov->line_search_method_ = LINE_SEARCH_FILTER;
          } else {
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"line_search_method\".\n"
              "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
              "and AUTO are available." );
          }
          break;
        }
        case MERIT_FUNCTION_TYPE:
        {
          const std::string &option = ofsp::option_value(itr);
          if( option == "L1" )
            ov->merit_function_type_ = MERIT_FUNC_L1;
          else if( option == "MODIFIED_L1" )
            ov->merit_function_type_ = MERIT_FUNC_MOD_L1;
          else if( option == "MODIFIED_L1_INCR" )
            ov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
          else if( option == "AUTO" )
            ov->merit_function_type_ = MERIT_FUNC_AUTO;
          else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"merit_function_type\".\n"
              "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
              "and AUTO are available." );
          break;
        }
        case L1_PENALTY_PARAM_UPDATE:
        {
          const std::string &option = ofsp::option_value(itr);
          if( option == "WITH_MULT" )
            ov->l1_penalty_param_update_ = L1_PENALTY_PARAM_WITH_MULT;
          else if( option == "MULT_FREE" )
            ov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
          else if( option == "AUTO" )
            ov->l1_penalty_param_update_ = L1_PENALTY_PARAM_AUTO;
          else
            TEUCHOS_TEST_FOR_EXCEPTION(
              true, std::invalid_argument
              ,"NLPAlgoConfigMamaJama::readin_options(...) : "
              "Error, incorrect value for \"l1_penalty_param_update\".\n"
              "Only the options WITH_MULT, MULT_FREE and AUTO"
              " are available." );
          break;
        }
        case MAX_PZ_NORM: {
          const std::string &option = ofsp::option_value(itr);
          ov->max_pz_norm_ = std::atof(option.c_str());
          break;
        }
        case NUM_PZ_DAMP_ITERS: {
          const std::string &option = ofsp::option_value(itr);
          ov->num_pz_damp_iters_ = std::atoi(option.c_str());
          break;
        }
        // ...
      } // end switch over recognized options
    } // end for each option in the group
  }
  else {
    if(trase_out)
      *trase_out
        << "\n\n*** Warning! The options group \"NLPAlgoConfigMamaJama\" was not found.\n"
        << "Using a default set of options instead ... \n";
  }
}
void NLPAlgoConfigMamaJama::set_default_options(
  const SOptionValues  &uov
  ,SOptionValues       *cov
  ,std::ostream        *trase_out
  )
{
  if(trase_out)
    *trase_out
      << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";

  if( cov->max_basis_cond_change_frac_ < 0.0 && uov.max_basis_cond_change_frac_ < 0.0 ) {
    if(trase_out)
      *trase_out
        << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
    cov->max_basis_cond_change_frac_ = 1e+4;
  }
  else {
    cov->max_basis_cond_change_frac_ = uov.max_basis_cond_change_frac_;
  }
  cov->exact_reduced_hessian_ = uov.exact_reduced_hessian_;
  if( cov->quasi_newton_ == QN_AUTO && uov.quasi_newton_ == QN_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
    cov->quasi_newton_ = QN_BFGS;
  }
  else if(cov->quasi_newton_ == QN_AUTO) {
    cov->quasi_newton_ = uov.quasi_newton_;
  }
  if( cov->num_lbfgs_updates_stored_ < 0 && uov.num_lbfgs_updates_stored_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
    cov->num_lbfgs_updates_stored_ = 10;
  }
  else if(cov->num_lbfgs_updates_stored_ < 0) {
    cov->num_lbfgs_updates_stored_ = uov.num_lbfgs_updates_stored_;
  }
  cov->lbfgs_auto_scaling_ = uov.lbfgs_auto_scaling_;
  if( cov->hessian_initialization_ == INIT_HESS_AUTO && uov.hessian_initialization_ == INIT_HESS_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nhessian_initialization == AUTO: setting hessian_initialization = IDENTITY\n";
    cov->hessian_initialization_ = INIT_HESS_IDENTITY;
    // ...
  }
  else if(cov->hessian_initialization_ == INIT_HESS_AUTO) {
    cov->hessian_initialization_ = uov.hessian_initialization_;
  }
  if( cov->qp_solver_type_ == QP_AUTO && uov.qp_solver_type_ == QP_AUTO ) {
#ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPKWIK\n";
    cov->qp_solver_type_ = QP_QPKWIK;
#else
    if(trase_out)
      *trase_out
        << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
    cov->qp_solver_type_ = QP_QPSCHUR;
#endif
  }
  else if(cov->qp_solver_type_ == QP_AUTO) {
    cov->qp_solver_type_ = uov.qp_solver_type_;
  }
  cov->reinit_hessian_on_qp_fail_ = uov.reinit_hessian_on_qp_fail_;
  if( cov->line_search_method_ == LINE_SEARCH_AUTO && uov.line_search_method_ == LINE_SEARCH_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
    cov->line_search_method_ = LINE_SEARCH_FILTER;
  }
  else if(cov->line_search_method_ == LINE_SEARCH_AUTO) {
    cov->line_search_method_ = uov.line_search_method_;
  }
  if( cov->merit_function_type_ == MERIT_FUNC_AUTO && uov.merit_function_type_ == MERIT_FUNC_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
    cov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
  }
  else if(cov->merit_function_type_ == MERIT_FUNC_AUTO) {
    cov->merit_function_type_ = uov.merit_function_type_;
  }
  if( cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO && uov.l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO ) {
    if(trase_out)
      *trase_out
        << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
    cov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
  }
  else if(cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO) {
    cov->l1_penalty_param_update_ = uov.l1_penalty_param_update_;
  }
  if( cov->full_steps_after_k_ < 0 && uov.full_steps_after_k_ < 0 ) {
    if(trase_out)
      *trase_out
        << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
  }
  else {
    cov->full_steps_after_k_ = uov.full_steps_after_k_;
  }
  cov->max_pz_norm_ = uov.max_pz_norm_;
  cov->num_pz_damp_iters_ = uov.num_pz_damp_iters_;

  if(trase_out)
    *trase_out
      << "\n*** End setting default options\n";
}

} // end namespace MoochoPack