72 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
82 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
85 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
177 const double INF_BASIS_COND_CHANGE_FRAC = 1e+20;
180 namespace MoochoPack {
187 :max_basis_cond_change_frac_(-1.0)
188 ,exact_reduced_hessian_(false)
189 ,quasi_newton_(QN_AUTO)
190 ,num_lbfgs_updates_stored_(-1)
191 ,lbfgs_auto_scaling_(true)
192 ,hessian_initialization_(INIT_HESS_AUTO)
193 ,qp_solver_type_(QP_AUTO)
194 ,reinit_hessian_on_qp_fail_(true)
195 ,line_search_method_(LINE_SEARCH_AUTO)
196 ,merit_function_type_(MERIT_FUNC_AUTO)
197 ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
198 ,full_steps_after_k_(-1)
200 ,num_pz_damp_iters_(0)
225 ,std::ostream *trase_out
234 << "*****************************************************************\n"
235 << "*** NLPAlgoConfigMamaJama Configuration ***\n"
237 << "*** Here, summary information about how the algorithm is ***\n"
238 << "*** configured is printed so that the user can see how the ***\n"
239 << "*** properties of the NLP and the set options influence ***\n"
240 << "*** how an algorithm is configured. ***\n"
241 << "*****************************************************************\n";
251 *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
258 opt_setter( algo.get() );
261 algo_cntr->set_algo(algo);
262 algo->set_algo_cntr(algo_cntr);
271 *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
273 algo->set_nlp( algo_cntr->get_nlp().get() );
274 algo->set_track( algo_cntr->get_track() );
286 << "\n*** Warning, no OptionsFromStream object was set so a default set"
287 " of options will be used!\n";
291 NLP &nlp = algo->nlp();
292 nlp.initialize(algo->algo_cntr().check_results());
299 nb = nlp.num_bounded_x();
302 NLPFirstOrder *nlp_foi = NULL;
303 NLPSecondOrder *nlp_soi = NULL;
304 NLPDirect *nlp_fod = NULL;
305 bool tailored_approach = false;
308 ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
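For orientation, a minimal sketch of what the interface probing above amounts to. This is an assumption about the mechanism (the real work happens inside the builder's process_nlp_and_options(...)), and the rule for tailored_approach in particular is a guess; the pointer names mirror the declarations above.

// Hypothetical sketch only: probe which NLP interfaces the user's object implements.
NLPFirstOrder  *nlp_foi = dynamic_cast<NLPFirstOrder*>(&nlp);   // explicit Gc available?
NLPSecondOrder *nlp_soi = dynamic_cast<NLPSecondOrder*>(&nlp);  // explicit HL available?
NLPDirect      *nlp_fod = dynamic_cast<NLPDirect*>(&nlp);       // direct sensitivities?
// Assumption: the "tailored approach" is used when only the NLPDirect interface is supported.
bool tailored_approach = ( nlp_fod != NULL && nlp_foi == NULL );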
311 const int max_dof_quasi_newton_dense
319 << "\n*** Sorting out some of the options given input options ...\n";
322 if( tailored_approach ) {
326 << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
327 << "merit_function_type = L1;\n"
328 << "l1_penalty_parameter_update = MULT_FREE;\n"
329 << "null_space_matrix = EXPLICIT;\n"
343 << "\nThe only merit function currently supported is:\n"
344 << "merit_function_type = L1;\n"
354 << "\nThere are no equality constraints (m == 0) and line_search_method==NONE so set the following options:\n"
355 << "line_search_method = NONE;\n"
356 << "merit_function_type = L1;\n"
365 << "\nThere are no equality constraints (m == 0) and line_search_method==AUTO so set the following options:\n"
366 << "line_search_method = DIRECT;\n"
367 << "merit_function_type = L1;\n"
380 << "\nquasi_newton == AUTO:"
381 << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
382 if( n - r > max_dof_quasi_newton_dense ) {
385 << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
386 << max_dof_quasi_newton_dense << ":\n"
387 << "setting quasi_newton == LBFGS\n";
393 << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
394 << max_dof_quasi_newton_dense << ":\n"
395 << "setting quasi_newton == BFGS\n";
423 "\nline_search_method == 2ND_ORDER_CORRECT:\n"
424 "Sorry, the second order corrrection linesearch is not updated yet!\n"
425 "setting line_search_method = FILTER ...\n";
431 "\nline_search_method ==WATCHDOG:\n"
432 "Sorry, the watchdog linesearch is not updated yet!\n"
433 "setting line_search_method = DIRECT ...\n";
441 << "\nqp_solver == QPKWIK and nlp.num_bounded_x() == " << nb << " > 0:\n"
442 << "Setting quasi_newton == BFGS...\n";
450 decomp_sys_ptr_t decomp_sys;
452 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
460 trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
465 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
467 decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
475 << "\n*** Creating the state object and setting up iteration quantity objects ...\n";
490 ? ( tailored_approach
491 ? ( nlp_fod->var_dep().size()
492 ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
494 : decomp_sys->space_range()
499 ? ( tailored_approach
500 ? ( nlp_fod->var_indep().size()
501 ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
503 : decomp_sys->space_null()
515 trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
528 maintain_original = nb;
533 || algo->algo_cntr().check_results() );
538 MatrixSymPosDefCholFactor::PostMod(
549 MatrixSymPosDefLBFGS::PostMod(
562 state->set_iter_quant(
565 new IterQuantityAccessContiguous<MatrixSymOp>(
568 ,abstract_factory_rHL
598 state->set_iter_quant(
601 new IterQuantityAccessContiguous<MeritFuncNLP>(
613 state->set_iter_quant(
623 state->set_iter_quant(
626 new IterQuantityAccessContiguous<VectorMutable>(
630 state->set_iter_quant(
633 new IterQuantityAccessContiguous<VectorMutable>(
638 state->set_iter_quant(
644 state->set_iter_quant(
659 typedef IterQuantityAccessContiguous<value_type> IQ_scalar_cngs;
660 typedef IterQuantityAccessContiguous<VectorMutable> IQ_vector_cngs;
662 dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
663 dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
664 if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
666 if(m && nlp_foi) state->Gc();
669 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
670 && decomp_sys_perm.get() == NULL
673 if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
675 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
676 && decomp_sys_perm.get() == NULL
679 if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
680 dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
683 dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
690 dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
691 dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
692 dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
694 dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
695 dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
697 dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
699 if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
700 dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);
703 algo->set_state( state );
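The resize(2) calls above give each iteration quantity contiguous storage for the current and previous iterations (k and k-1). Below is an illustrative analogue of that storage pattern only; it assumes nothing about the real IterationPack interface and simply shows why two slots are enough for the updates the steps perform.

#include <map>
#include <stdexcept>

// Illustrative analogue only -- not the IterQuantityAccessContiguous API.
// It keeps values for at most the last two iterations, mirroring resize(2).
template<class T>
class TwoIterStorage {
public:
  // Obtain writable storage for iteration k; anything older than k-1 is dropped.
  T& set_k( int k )
  {
    while( !store_.empty() && store_.begin()->first < k-1 )
      store_.erase( store_.begin() );
    return store_[k];
  }
  // Read the value stored for iteration k (or k-1 with offset = -1).
  const T& get_k( int k, int offset = 0 ) const
  {
    typename std::map<int,T>::const_iterator i = store_.find( k + offset );
    if( i == store_.end() )
      throw std::logic_error( "iteration quantity not updated" );
    return i->second;
  }
private:
  std::map<int,T> store_;  // at most two live iterations at any time
};

A step that needs both x_k and x_km1, for example, can read both slots, while quantities only ever referenced at the current iteration are left with a single storage slot.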
710 *trase_out << "\n*** Creating and setting the step objects ...\n";
735 trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
736 ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
741 if( !tailored_approach ) {
751 new_decomp_selection_strategy
756 new_decomp_selection_strategy
762 algo_step_ptr_t check_descent_quansi_normal_step_step = Teuchos::null;
763 if( algo->algo_cntr().check_results() ) {
769 if( !tailored_approach ) {
780 opt_setter( step.get() );
783 check_skip_bfgs_update_step = step;
805 opt_setter( bfgs_strategy.get() );
819 true, std::logic_error
820 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
821 "The quasi_newton options of PBFGS and LPBFGS have not been updated yet!" );
835 algo->state().set_iter_quant(
837 ,Teuchos::rcp(new IterQuantityAccessContiguous<QuasiNewtonStats>(
878 opt_setter( _init_red_hess_step.get() );
881 init_red_hess_step = _init_red_hess_step;
887 ReducedHessianSerialization_StepSetOptions
888 opt_setter( _init_red_hess_step.get() );
889 opt_setter.set_options( *options_ );
891 init_red_hess_step = _init_red_hess_step;
899 tangental_step_output_bounds_step
903 tangential_step_step = tangental_step_output_bounds_step;
921 init_kkt_sys = Teuchos::rcp(new QPSchurInitKKTSystemHessianFull());
934 _qp_solver = Teuchos::rcp(new QPSolverRelaxedQPSchur(init_kkt_sys));
936 QPSolverRelaxedQPSchurSetOptions
937 qp_options_setter(_qp_solver.get());
938 qp_options_setter.set_options( *options_ );
940 qp_solver = _qp_solver;
944 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
948 qp_solver = _qp_solver;
951 true,std::logic_error,"Error! QPKWIK interface is not supported since "
952 "CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK is not defined!");
977 qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
979 QPSolverRelaxedTesterSetOptions
980 opt_setter( qp_solver_tester.get() );
981 opt_setter.set_options( *options_ );
987 qp_solver, qp_solver_tester ) );
990 opt_setter( tangential_step_with_inequ_step.get() );
993 tangential_step_step = tangential_step_with_inequ_step;
1007 algo_step_ptr_t calc_reduced_grad_lagr_step = Teuchos::null;
1023 opt_setter( check_convergence_strategy.get() );
1030 check_convergence_step = _check_convergence_step;
1034 algo_step_ptr_t merit_func_penalty_param_update_step = Teuchos::null;
1048 true, std::logic_error
1049 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
1050 "The l1_penalty_parameter_update option of MULT_FREE has not been updated yet!" );
1066 true, std::logic_error
1067 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
1068 "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
1075 ppu_options_setter( param_update_step.get() );
1078 merit_func_penalty_param_update_step = param_update_step;
1091 direct_line_search = Teuchos::rcp(new DirectLineSearchArmQuad_Strategy());
1094 ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
1105 true, std::logic_error
1106 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
1107 "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
1112 true, std::logic_error
1113 ,"NLPAlgoConfigMamaJama::config_algo_cntr(...) : Error, "
1114 "The line_search_method option of WATCHDOG has not been updated yet!" );
1125 LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
1126 options_setter.set_options(*options_);
1129 line_search_step = line_search_filter_step;
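The filter step created above decides whether a trial point is acceptable. As a quick reminder of the general filter idea, here is a textbook-style sketch; it is not the exact test or the margin constants used by LineSearchFilter_Step. A trial point must improve either the objective f or the constraint violation theta, with a small margin, against every point currently stored in the filter.

#include <utility>
#include <vector>

typedef std::pair<double,double> FilterEntry;  // (theta = constraint violation, f)

// Generic filter acceptance test (illustrative only).
bool filter_accepts( const std::vector<FilterEntry> &filter,
                     double theta_trial, double f_trial,
                     double gamma_theta = 1e-5, double gamma_f = 1e-5 )
{
  for( std::vector<FilterEntry>::const_iterator e = filter.begin();
       e != filter.end(); ++e )
  {
    const bool better_theta = theta_trial <= (1.0 - gamma_theta) * e->first;
    const bool better_f     = f_trial     <= e->second - gamma_f * e->first;
    if( !better_theta && !better_f )
      return false;  // dominated by this filter entry: reject the trial step
  }
  return true;  // acceptable to every entry; the entry may then be added to the filter
}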
1144 if( new_decomp_selection_strategy.get() ) {
1148 ,new_decomp_selection_strategy
1165 << "\nConfiguring an algorithm for an unconstrained "
1166 << "NLP (m == 0, num_bounded_x == 0) ...\n";
1174 << "\nConfiguring an algorithm for a simple bound constrained "
1175 << "NLP (m == 0, num_bounded_x > 0) ...\n";
1200 if(init_red_hess_step.get()) {
1201 algo->insert_assoc_step(
1203 ,"ReducedHessianInitialization"
1212 algo->insert_assoc_step(
1236 algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
1245 algo->insert_assoc_step(
1249 ,"LineSearchFullStep"
1250 ,line_search_full_step_step
1253 algo->insert_assoc_step(
1257 ,"MeritFunc_DummyUpdate"
1269 << "\nConfiguring an algorithm for a system of nonlinear equations "
1270 << "NLP (n == m) ...\n";
1276 int assoc_step_num = 0;
1280 if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
1282 algo->insert_assoc_step(
1286 ,"CheckDescentQuasiNormalStep"
1287 ,check_descent_quansi_normal_step_step
1292 if( !tailored_approach ) {
1295 if( check_decomp_from_py_step.get() )
1296 algo->insert_assoc_step(
1300 ,"CheckDecompositionFromPy"
1301 ,check_decomp_from_py_step
1303 if( check_decomp_from_Rpy_step.get() )
1304 algo->insert_assoc_step(
1308 ,"CheckDecompositionFromRPy"
1309 ,check_decomp_from_Rpy_step
1311 if( check_descent_quansi_normal_step_step.get() )
1312 algo->insert_assoc_step(
1316 ,"CheckDescentQuasiNormalStep"
1317 ,check_descent_quansi_normal_step_step
1329 algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
1338 algo->insert_assoc_step(
1342 ,"LineSearchFullStep"
1343 ,line_search_full_step_step
1348 else if ( m > 0 || nb > 0 ) {
1358 << "\nConfiguring an algorithm for a nonlinear equality constrained "
1359 << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
1367 << "\nConfiguring an algorithm for a nonlinear generally constrained "
1368 << "NLP ( num_bounded_x > 0 ) ...\n";
1372 int assoc_step_num = 0;
1376 if( check_descent_quansi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
1378 algo->insert_assoc_step(
1382 ,"CheckDescentQuasiNormalStep"
1383 ,check_descent_quansi_normal_step_step
1388 if( !tailored_approach ) {
1391 if( check_decomp_from_py_step.get() )
1392 algo->insert_assoc_step(
1396 ,"CheckDecompositionFromPy"
1397 ,check_decomp_from_py_step
1399 if( check_decomp_from_Rpy_step.get() )
1400 algo->insert_assoc_step(
1404 ,"CheckDecompositionFromRPy"
1405 ,check_decomp_from_Rpy_step
1407 if( check_descent_quansi_normal_step_step.get() )
1408 algo->insert_assoc_step(
1412 ,"CheckDescentQuasiNormalStep"
1413 ,check_descent_quansi_normal_step_step
1418 if( !tailored_approach ) {
1429 if( !tailored_approach ) {
1441 if(init_red_hess_step.get()) {
1442 algo->insert_assoc_step(
1444 ,"ReducedHessianInitialization"
1450 algo->insert_assoc_step(
1455 ,check_skip_bfgs_update_step
1462 algo->insert_assoc_step(
1481 if( !tailored_approach ) {
1491 algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
1500 algo->insert_assoc_step(
1504 ,"LineSearchFullStep"
1505 ,line_search_full_step_step
1508 if(merit_func_penalty_param_update_step.get()) {
1509 algo->insert_assoc_step(
1513 ,"MeritFunc_PenaltyParamUpdate"
1514 ,merit_func_penalty_param_update_step
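The insert_step / insert_assoc_step calls throughout this section all follow one composition pattern: the algorithm is an ordered list of named main steps, and named pre- and post-steps can be attached to any slot. The following is an illustrative analogue of that pattern only, assuming nothing about the real IterationPack::Algorithm interface.

#include <string>
#include <vector>

// Illustrative analogue of the step-composition pattern used above.
struct MainStep {
  std::string name;                     // e.g. LineSearch_name
  std::vector<std::string> pre_steps;   // e.g. "CheckDecompositionFromPy"
  std::vector<std::string> post_steps;  // e.g. "MeritFunc_PenaltyParamUpdate"
};

class AlgorithmSketch {
public:
  // Append a main step and return its (1-based) position.
  int insert_step( const std::string &name )
  {
    steps_.push_back( MainStep() );
    steps_.back().name = name;
    return static_cast<int>( steps_.size() );
  }
  // Attach an associated step that runs just after main step step_num.
  void insert_post_step( int step_num, const std::string &name )
  {
    steps_[step_num-1].post_steps.push_back( name );
  }
private:
  std::vector<MainStep> steps_;
};

When the algorithm runs, each main step executes with its pre-steps immediately before it and its post-steps immediately after it, which is how the decomposition checks and the merit-function updates are attached to the quasi-normal and line-search slots above.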
1532 _algo == NULL, std::invalid_argument
1533 ,"NLPAlgoConfigMamaJama::init_algo(_algo) : Error, "
1534 "_algo can not be NULL" );
1538 NLP &nlp = algo.nlp();
1548 << std::setprecision(algo.algo_cntr().journal_print_digits())
1562 , std::ostream *trase_out
1565 namespace ofsp = OptionsFromStreamPack;
1566 using ofsp::OptionsFromStream;
1567 typedef OptionsFromStream::options_group_t options_group_t;
1568 using ofsp::StringToIntMap;
1574 const std::string opt_grp_name = "NLPAlgoConfigMamaJama";
1575 const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
1576 if( OptionsFromStream::options_group_exists( optgrp ) ) {
1579 const int num_opts = 13;
1581 MAX_BASIS_COND_CHANGE_FRAC
1582 ,EXACT_REDUCED_HESSIAN
1584 ,NUM_LBFGS_UPDATES_STORED
1586 ,HESSIAN_INITIALIZATION
1588 ,REINIT_HESSIAN_ON_QP_FAIL
1590 ,MERIT_FUNCTION_TYPE
1591 ,L1_PENALTY_PARAM_UPDATE
1595 const char* SMamaJama[num_opts] = {
1596 "max_basis_cond_change_frac"
1597 ,"exact_reduced_hessian"
1599 ,"num_lbfgs_updates_stored"
1600 ,"lbfgs_auto_scaling"
1601 ,"hessian_initialization"
1603 ,"reinit_hessian_on_qp_fail"
1604 ,"line_search_method"
1605 ,"merit_function_type"
1606 ,"l1_penalty_parameter_update"
1608 ,"num_pz_damp_iters"
1610 StringToIntMap mama_jama_map( opt_grp_name, num_opts, SMamaJama );
1612 options_group_t::const_iterator itr = optgrp.begin();
1613 for( ; itr != optgrp.end(); ++itr ) {
1615 case MAX_BASIS_COND_CHANGE_FRAC:
1618 case EXACT_REDUCED_HESSIAN:
1624 if( opt_val == "AUTO" )
1626 else if( opt_val == "BFGS" )
1628 else if( opt_val == "PBFGS" )
1630 else if( opt_val == "LBFGS" )
1632 else if( opt_val == "LPBFGS" )
1636 true, std::invalid_argument
1637 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1638 "Error, incorrect value for \"quasi_newton\" "
1639 ", Only options of BFGS, PBFGS"
1640 ", LBFGS, LPBFGS and AUTO are avalible."
1644 case NUM_LBFGS_UPDATES_STORED:
1647 case LBFGS_AUTO_SCALING:
1651 case HESSIAN_INITIALIZATION:
1654 if( opt_val == "IDENTITY" )
1656 else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
1658 else if( opt_val == "FINITE_DIFF_DIAGONAL" )
1660 else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
1662 else if( opt_val == "AUTO" )
1664 else if( opt_val == "SERIALIZE" )
1668 true, std::invalid_argument
1669 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1670 "Error, incorrect value for \"hessian_initialization\" "
1671 ", Only options of IDENTITY, SERIALIZE, FINITE_DIFF_SCALE_IDENTITY,"
1672 " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
1679 if( qp_solver == "AUTO" ) {
1681 } else if( qp_solver == "QPSOL" ) {
1683 } else if( qp_solver == "QPOPT" ) {
1684 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
1688 true, std::invalid_argument
1689 ,"NLPAlgoConfigMamaJama::readin_options(...) : QPOPT is not supported,"
1690 " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
1692 } else if( qp_solver == "QPKWIK" ) {
1693 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
1697 true, std::invalid_argument
1698 ,"NLPAlgoConfigMamaJama::readin_options(...) : QPKWIK is not supported,"
1699 " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK!" );
1701 } else if( qp_solver == "QPSCHUR" ) {
1705 true, std::invalid_argument
1706 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1707 "Error, incorrect value for \"qp_solver\" "
1708 "Only qp solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are avalible." );
1712 case REINIT_HESSIAN_ON_QP_FAIL:
1715 case LINE_SEARCH_METHOD:
1718 if( option == "NONE" ) {
1720 } else if( option == "DIRECT" ) {
1722 } else if( option == "2ND_ORDER_CORRECT" ) {
1724 } else if( option == "WATCHDOG" ) {
1726 } else if( option == "AUTO" ) {
1728 } else if( option == "FILTER" ) {
1732 true, std::invalid_argument
1733 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1734 "Error, incorrect value for \"line_search_method\".\n"
1735 "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
1736 "and AUTO are avalible." );
1740 case MERIT_FUNCTION_TYPE:
1743 if( option == "L1" )
1745 else if( option == "MODIFIED_L1" )
1747 else if( option == "MODIFIED_L1_INCR" )
1749 else if( option == "AUTO" )
1753 true, std::invalid_argument
1754 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1755 "Error, incorrect value for \"merit_function_type\".\n"
1756 "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
1757 "and AUTO are avalible." );
1760 case L1_PENALTY_PARAM_UPDATE:
1763 if( option == "WITH_MULT" )
1766 else if( option == "MULT_FREE" )
1769 else if( option == "AUTO" )
1774 true, std::invalid_argument
1775 ,"NLPAlgoConfigMamaJama::readin_options(...) : "
1776 "Error, incorrect value for \"l1_penalty_param_update\".\n"
1777 "Only the options WITH_MULT, MULT_FREE and AUTO"
1786 case NUM_PZ_DAMP_ITERS: {
1800 << "\n\n*** Warning! The options group \"NLPAlgoConfigMamaJama\" was not found.\n"
1801 << "Using a default set of options instead ... \n";
1812 ,std::ostream *trase_out
1817 << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";
1822 << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
1832 << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
1841 << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
1851 << "\nhessian_initialization == AUTO: setting hessian_initialization = IDENTITY\n";
1864 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPKWIK
1867 << "\nqp_solver_type == AUTO: setting qp_solver_type = QPKWIK\n";
1872 << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
1883 << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
1892 << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
1901 << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
1910 << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
1919 << "\n*** End setting default options\n";
Checks for descent in the decomposed equality constraints with respect to the range space step Ypy us...
Directs the algorithm to reinitialize the reduced Hessian in the event of a QP failure.
Set options for CheckSkipBFGSUpdateStd_Step from an OptionsFromStream object.
Extracts options from a text stream and then allows convenient access to them.
Set options for QPSolverRelaxedQPSchur from an OptionsFromStream object.
Simply updates merit_func_nlp_k = merit_func_nlp_km1
AbstractLinAlgPack::size_type size_type
DecompositionSystemStateStepBuilderStd decomp_sys_step_builder_
Builder class for some common code.
void process_nlp_and_options(std::ostream *trase_out, NLP &nlp, NLPFirstOrder **nlp_foi, NLPSecondOrder **nlp_soi, NLPDirect **nlp_fod, bool *tailored_approach)
Process the NLP and process the options passed in from set_options(). Postconditions: ...
Solves Quadratic Programming (QP) problems using the primal-dual active-set solver QPKWIK...
EMeritFunctionType merit_function_type_
Checks if a BFGS update should be performed.
value_type max_basis_cond_change_frac_
const std::string FILTER_IQ_STRING
Teuchos::RCP< const OptionsFromStreamPack::OptionsFromStream > options_ptr_t
bool StringToBool(const char *opt_name, const char *str)
Convert a string "true" or "false" into bool #true# or #false#.
virtual void max_iter(size_t max_iter)
void create_eval_new_point(std::ostream *trase_out, NLP &nlp, NLPFirstOrder *nlp_foi, NLPSecondOrder *nlp_soi, NLPDirect *nlp_fod, bool tailored_approach, const Teuchos::RCP< DecompositionSystem > &decomp_sys, Teuchos::RCP< IterationPack::AlgorithmStep > *eval_new_point_step, Teuchos::RCP< CalcFiniteDiffProd > *calc_fd_prod, Teuchos::RCP< VariableBoundsTester > *bounds_tester, Teuchos::RCP< NewDecompositionSelection_Strategy > *new_decomp_selection_strategy)
Create the EvalNewPoint step object and allocated objects.
const std::string CheckSkipBFGSUpdate_name
const std::string qp_solver_stats_name
Name given to the QP solver statistics iteration quantity.
Initializes the reduced Hessian using a single finite difference along the null space of the constrai...
Set options for MeritFunc_PenaltyParamUpdate_AddedStep from an OptionsFromStream object.
Interface NLPAlgoContainer uses to access NLPAlgo.
Set options for TangentialStepWithInequStd_Step from an OptionsFromStream object. ...
Set options for DirectLineSearchArmQuad_Strategy from an OptionsFromStream object. ...
#define TEUCHOS_TEST_FOR_EXCEPTION(throw_exception_test, Exception, msg)
const std::string & option_name(OptionsGroup::const_iterator &itr)
void set_options(const OptionsFromStream &options)
Overridden from SetOptionsFromStream and calls setOption(...).
Computes the reduced gradient of the objective rGf_k = Z_k' * Gf_k (see the equations after this list)
const std::string CalcReducedGradLagrangian_name
SOptionValues & current_option_values()
Return the current option values being used.
int num_lbfgs_updates_stored_
rSQP Algorithm control class.
Specializes the update of the penalty parameter for a merit function as: min_mu = |(Gf_k+nu_k)'* Ypy_...
const std::string dl_name
OptionsFromStreamPack::OptionsFromStream * options
Directs the selection of a new decomposition if the line search fails.
const options_ptr_t & get_options() const
void init_algo(NLPAlgoInterface *algo)
Solves Quadratic Programming (QP) problems using QPSchur.
void add_iter_quantities(std::ostream *trase_out, NLP &nlp, NLPFirstOrder *nlp_foi, NLPSecondOrder *nlp_soi, NLPDirect *nlp_fod, bool tailored_approach, const Teuchos::RCP< DecompositionSystem > &decomp_sys, const Teuchos::RCP< NLPAlgoState > &state)
Add the common iteration quantities to the state object.
Set options for CheckConvergence_Strategy from an OptionsFromStream object.
EQuasiNewton quasi_newton_
Implementation for NLPAlgo solver.
bool reinit_hessian_on_qp_fail_
SOptionValues uov_
Options structs.
const std::string CalcDFromYPYZPZ_name
const std::string merit_func_nlp_name
static void readin_options(const OptionsFromStreamPack::OptionsFromStream &options, SOptionValues *option_values, std::ostream *trase_out)
Read in the options from a stream.
TEUCHOS_DEPRECATED RCP< T > rcp(T *p, Dealloc_T dealloc, bool owns_mem)
ELineSearchMethod line_search_method_
Takes the full step x_kp1 = x_k + d_k (d_k = Ypy_k + Zpz_k).
virtual std::ostream & journal_out() const
Return a reference to a std::ostream to be used to output debug information and the like...
options_ptr_t options_
Smart pointer to options.
EQPSolverType qp_solver_type_
static void set_default_options(const SOptionValues &user_option_values, SOptionValues *current_option_values, std::ostream *trase_out)
Set the defaults for options not set by the user.
T_To & dyn_cast(T_From &from)
const std::string QuasiNormalStep_name
Delegates the line search to a DirectLineSearch_Strategy object.
Reduced space SQP state encapsulation interface.
Computes the bounds for the QP subproblem from the NLP bounds.
Solves the unconstrained QP subproblem: min qp_grad' * pz + (1/2) * pz' * rHL * pz.
const std::string EvalNewPoint_name
virtual void max_run_time(double max_iter)
Set the maximum runtime (in minutes). The runtime is checked at the end of each iteration and if it exc...
AlgorithmTracker & track()
Updates rHL_k using a secant update.
Delegates the line search to a DirectLineSearch_Strategy object.
Serializes rHL_k to and from a file.
Set options for InitFinDiffReducedHessian_Step from an OptionsFromStream object.
Set options for Algorithm from an OptionsFromStream object.
Perform BFGS updates on full reduced Hessian.
const std::string rHL_name
Check if the decomposition is going singular and if it is select a new decomposition.
void config_algo_cntr(NLPAlgoContainer *algo_cntr, std::ostream *trase_out)
void create_decomp_sys(std::ostream *trase_out, NLP &nlp, NLPFirstOrder *nlp_foi, NLPSecondOrder *nlp_soi, NLPDirect *nlp_fod, bool tailored_approach, Teuchos::RCP< DecompositionSystem > *decomp_sys)
Create the decomposition system object.
Calculates the reduced gradient of the Lagrangian rGL = rGf + Z' * nu + GcUP' * lambda(equ_undecomp) ...
const std::string act_set_stats_name
Name given to the active set statistics iteration quantity.
EHessianInitialization hessian_initialization_
Implementation of CheckConvergence_Strategy interface.
options_group_t options_group(const std::string &options_group_name)
const std::string ReducedGradient_name
NLPAlgoState & rsqp_state()
<<std aggr>> members for algo_cntr
Calculates the range space step by solving for py = -inv(R)*c(equ_decomp), then setting Ypy = Y * py...
Strategy interface which contains the guts for a dampened BFGS update.
const std::string ReducedHessian_name
Set options for BFGSUpdate_Strategy from an OptionsFromStream object.
const std::string TangentialStep_name
void set_options(const options_ptr_t &options)
Set the OptionsFromStream object that will be used for specifying the options.
void do_step_first(Algorithm::poss_type first_step_poss)
Implementation of initial KKT system for all variables initially free and Ko = G. ...
Solves the reduced QP subproblem with bounds and/or general inequalities.
Check if the decomposition is going singular and if it is select a new decomposition.
EL1PenaltyParamUpdate l1_penalty_param_update_
bool exact_reduced_hessian_
void set_options(const options_ptr_t &options)
Set the options that will be used to configure the algorithmic objects.
const std::string du_name
const std::string CheckConvergence_name
#define TEUCHOS_TEST_FOR_EXCEPT(throw_exception_test)
Filter line-search step class.
const std::string quasi_newton_stats_name
Name given to the quasi-Newton updating statistics iteration quantity.
const std::string LineSearch_name
const std::string & option_value(OptionsGroup::const_iterator &itr)
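The formulas quoted in the one-line descriptions above, written out in the usual reduced-space SQP notation (Y and Z are the range- and null-space basis matrices and R the decomposed basis of the constraint Jacobian; the relaxation and bound handling of the actual QP steps are omitted):

\begin{align*}
  p_y &= -R^{-1} c_{(\text{equ\_decomp})}, \qquad Ypy_k = Y\,p_y
      && \text{(quasi-normal / range-space step)} \\
  d_k &= Ypy_k + Zpz_k, \qquad x_{k+1} = x_k + d_k
      && \text{(full step)} \\
  rGf_k &= Z_k^{T}\, Gf_k
      && \text{(reduced gradient of the objective)} \\
  rGL_k &= rGf_k + Z_k^{T}\nu_k + GcUP^{T}\lambda_{(\text{equ\_undecomp})}
      && \text{(reduced gradient of the Lagrangian)} \\
  \min_{pz}\;& qp\_grad^{T} pz + \tfrac{1}{2}\, pz^{T}\, rHL_k\, pz
      && \text{(tangential QP subproblem)}
\end{align*}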