#include "MoochoPack_EvalNewPointTailoredApproach_Step.hpp"
#include "MoochoPack_Exceptions.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "IterationPack_print_algorithm_step.hpp"
#include "ConstrainedOptPack_MatrixIdentConcatStd.hpp"
#include "NLPInterfacePack_NLPDirect.hpp"
#include "AbstractLinAlgPack_MatrixOpOut.hpp"
#include "AbstractLinAlgPack_VectorMutable.hpp"
#include "AbstractLinAlgPack_VectorStdOps.hpp"
#include "AbstractLinAlgPack_VectorOut.hpp"
#include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
#include "AbstractLinAlgPack_LinAlgOpPack.hpp"
#include "Teuchos_dyn_cast.hpp"
#include "Teuchos_Assert.hpp"
namespace MoochoPack {
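// Constructor: store the derivative tester, the variable-bounds tester, and the
// finite-difference derivative testing mode used by do_step(...).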
EvalNewPointTailoredApproach_Step::EvalNewPointTailoredApproach_Step(
  const deriv_tester_ptr_t    &deriv_tester
  ,const bounds_tester_ptr_t  &bounds_tester
  ,EFDDerivTesting            fd_deriv_testing
  )
  :deriv_tester_(deriv_tester)
  ,bounds_tester_(bounds_tester)
  ,fd_deriv_testing_(fd_deriv_testing)
{}

bool EvalNewPointTailoredApproach_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  using Teuchos::rcp;
  using Teuchos::dyn_cast;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::V_MtV;
  using IterationPack::print_algorithm_step;
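
  // Grab the algorithm, state, and NLP objects along with the journal output
  // stream and output levels used for printing below.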
  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLPDirect     &nlp  = dyn_cast<NLPDirect>(algo.nlp());

  EJournalOutputLevel olevel    = algo.algo_cntr().journal_output_level();
  EJournalOutputLevel ns_olevel = algo.algo_cntr().null_space_journal_output_level();
  std::ostream        &out      = algo.track().journal_out();
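
  // Print the algorithm step header if the output level calls for it.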
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }
  if(!nlp.is_initialized())
    nlp.initialize(algo.algo_cntr().check_results());
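
  // Temporarily redirect the NLP's verbose output to the journal stream at a
  // verbosity derived from olevel (the VerboseObjectTempState wrapper and its
  // variable name are assumed here).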
  Teuchos::VerboseObjectTempState<NLP>
    nlpOutputTempState(
      rcp(&nlp,false), Teuchos::getFancyOStream(rcp(&out,false))
      ,convertToVerbLevel(olevel) );

  // Problem dimensions and variable partitioning
  const size_type
    n = nlp.n(),
    m = nlp.m(),
    r = nlp.r();
  const Range1D
    var_dep   = nlp.var_dep(),
    var_indep = nlp.var_indep();
  s.var_indep(var_indep);
    ,"EvalNewPointTailoredApproach_Step::do_step(...) : Error, "
    "Undecomposed equalities are not supported yet!" );
  IterQuantityAccess<VectorMutable>
    &x_iq = s.x();
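
  // If x has never been set for any iteration k, initialize x_k from the NLP's
  // initial guess xinit.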
  if( x_iq.last_updated() == IterQuantity::NONE_UPDATED ) {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nx is not updated for any k so set x_k = nlp.xinit() ...\n";
    }
    x_iq.set_k(0) = nlp.xinit();
  }
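
  // Validate the current point: check x_k for NaN/Inf and, if the NLP has
  // variable bounds, verify that xl <= x_k <= xu.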
  if(algo.algo_cntr().check_results()) {
    assert_print_nan_inf(
      x_iq.get_k(0), "x_k", true
      ,int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
      );
    if( nlp.num_bounded_x() > 0 ) {
      if( !bounds_tester().check_in_bounds(
            int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
            ,int(olevel) >= int(PRINT_VECTORS)                 // print_all_warnings
            ,int(olevel) >= int(PRINT_ITERATION_QUANTITIES)    // print_vectors
            ,nlp.xl(), "xl"
            ,nlp.xu(), "xu"
            ,x_iq.get_k(0), "x_k"
            ) )
      {
        TEUCHOS_TEST_FOR_EXCEPTION(
          true, TestFailed
          ,"EvalNewPointTailoredApproach_Step::do_step(...) : Error, "
          "the variable bounds xl <= x_k <= xu were violated!" );
      }
    }
  }
  Vector &x = x_iq.get_k(0);
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\n||x_k||inf = " << x.norm_inf();
    if( var_dep.size() )
      out << "\n||x(var_dep)_k||inf = " << x.sub_view(var_dep)->norm_inf();
    if( var_indep.size() )
      out << "\n||x(var_indep)_k||inf = " << x.sub_view(var_indep)->norm_inf();
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nx_k = \n" << x;
    if( var_dep.size() )
      out << "\nx(var_dep)_k = \n" << *x.sub_view(var_dep);
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if( var_indep.size() )
      out << "\nx(var_indep)_k = \n" << *x.sub_view(var_indep);
  }
  bool recalc_c = true;
  if( !s.c().updated_k(0) ) {
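
  // Get references to the iteration matrices to be updated: the null-space
  // matrix Z, the range-space matrix Y, and (when m > r) the undecomposed
  // blocks Uz and Uy.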
  MatrixOp
    &Z_k  = s.Z().set_k(0),
    &Y_k  = s.Y().set_k(0),
    *Uz_k = (m > r) ? &s.Uz().set_k(0) : NULL,
    *Uy_k = (m > r) ? &s.Uy().set_k(0) : NULL;
  MatrixIdentConcatStd
    &cZ_k = dyn_cast<MatrixIdentConcatStd>(Z_k);
  bool reconstruct_Z_D = ( cZ_k.rows() != n || cZ_k.cols() != n-r || cZ_k.D_ptr().total_count() > 1 );
  MatrixIdentConcatStd::D_ptr_t
    D_ptr = Teuchos::null;
  if( reconstruct_Z_D )
    D_ptr = nlp.factory_D()->create();
  else
    D_ptr = cZ_k.D_ptr();
  const bool supports_Gf = nlp.supports_Gf();
  // GcU holds the gradients of the undecomposed equality constraints (only allocated when m > r)
    GcU = (m > r) ? nlp.factory_GcU()->create() : Teuchos::null;
  VectorMutable
    &py_k = s.py().set_k(0);
  nlp.unset_quantities();
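
  // Selected arguments of the NLPDirect point calculation: f_k is computed only
  // if not already updated, Gf_k only if the NLP supports it, and D = -inv(C)*N
  // is filled in place through D_ptr.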
    ,!s.f().updated_k(0) ? &s.f().set_k(0) : NULL
    ,supports_Gf ? &s.Gf().set_k(0) : NULL
    ,const_cast<MatrixOp*>(D_ptr.get())
  s.equ_decomp( nlp.con_decomp() );
  s.equ_undecomp( nlp.con_undecomp() );
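
  // Echo the newly computed quantities at the appropriate journal output levels.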
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nQuantities computed directly from NLPDirect nlp object ...\n";
    out << "\nf_k = " << s.f().get_k(0);
    out << "\n||c_k||inf = " << s.c().get_k(0).norm_inf();
    if( supports_Gf ) {
      const Vector &Gf = s.Gf().get_k(0);
      out << "\n||Gf_k||inf = " << Gf.norm_inf();
      if( var_dep.size() )
        out << "\n||Gf(var_dep)_k||inf = " << Gf.sub_view(var_dep)->norm_inf();
      if( var_indep.size() )
        out << "\n||Gf(var_indep)_k||inf = " << Gf.sub_view(var_indep)->norm_inf();
    }
    out << "\n||py_k||inf = " << s.py().get_k(0).norm_inf();
    out << "\n||rGf_k||inf = " << s.rGf().get_k(0).norm_inf();
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nrGf_k = \n" << s.rGf().get_k(0);
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nc_k = \n" << s.c().get_k(0);
    if( supports_Gf )
      out << "\nGf_k = \n" << s.Gf().get_k(0);
    out << "\npy_k = \n" << s.py().get_k(0);
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
    out << "\nD = -inv(C)*N = \n" << *D_ptr;
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
    out << "Printing column norms of D = -inv(C)*N:\n";
    VectorSpace::vec_mut_ptr_t
      e_i = D_ptr->space_rows().create_member(),
      D_i = D_ptr->space_cols().create_member();
    *e_i = 0.0;
    for( int i = 1; i <= (n-r); ++i ) {
      // Extract column i as D_i = D * e_i using a unit vector e_i
      e_i->set_ele(i,1.0);
      V_MtV( D_i.get(), *D_ptr, BLAS_Cpp::no_trans, *e_i );
      out << "  ||D(:,"<<i<<")||_2 = " << D_i->norm_2() << "\n";
      e_i->set_ele(i,0.0);
    }
  }
  if(algo.algo_cntr().check_results()) {
    assert_print_nan_inf(s.f().get_k(0),   "f_k",   true, &out);
    assert_print_nan_inf(s.c().get_k(0),   "c_k",   true, &out);
    if( supports_Gf )
      assert_print_nan_inf(s.Gf().get_k(0),  "Gf_k",  true, &out);
    assert_print_nan_inf(s.py().get_k(0),  "py_k",  true, &out);
    assert_print_nan_inf(s.rGf().get_k(0), "rGf_k", true, &out);
  }
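
  // Optionally check the computed derivatives against finite differences:
  // always when fd_deriv_testing == FD_TEST, and in FD_DEFAULT mode whenever
  // check_results is enabled.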
  if( fd_deriv_testing() == FD_TEST
    || ( fd_deriv_testing() == FD_DEFAULT && algo.algo_cntr().check_results() ) )
  {
    if( olevel >= PRINT_ALGORITHM_STEPS ) {
      out << "\n*** Checking derivatives by finite differences\n";
    }
    const bool has_bounds = nlp.num_bounded_x() > 0;
    const bool nlp_passed = deriv_tester().finite_diff_check(
      ,has_bounds ? &nlp.xl() : (const Vector*)NULL
      ,has_bounds ? &nlp.xu() : (const Vector*)NULL
      ,supports_Gf ? &s.Gf().get_k(0) : NULL
      ,olevel >= PRINT_VECTORS
      ,( olevel >= PRINT_ALGORITHM_STEPS ) ? &out : (std::ostream*)NULL
      );
    TEUCHOS_TEST_FOR_EXCEPTION(
      !nlp_passed, TestFailed
      ,"EvalNewPointTailoredApproach_Step::do_step(...) : Error, "
      "the tests of the nlp derivatives failed!" );
  }
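
  // If Z had to be reconstructed, reinitialize cZ_k as Z = [ D ; I ] using the
  // independent-variable subspace (the lines below are presumably arguments to
  // that reinitialization).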
  if( reconstruct_Z_D ) {
      ,nlp.space_x()->sub_space(nlp.var_indep())->clone()
      ,MatrixIdentConcatStd::TOP
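
  // Let the subclass compute the final py, Y, and Uy given D (see calc_py_Y_Uy()).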
  if( olevel >= PRINT_ALGORITHM_STEPS )
    out << "\nUpdating py_k, Y_k, and Uy_k given D_k ...\n";
  calc_py_Y_Uy( nlp, D_ptr, &py_k, &Y_k, Uy_k, olevel, out );
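
  // Form Ypy_k = Y_k * py_k as documented in print_step() (the V_MtV call below
  // is an assumed rendering of this step).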
  VectorMutable &Ypy_k = s.Ypy().set_k(0);
  V_MtV( &Ypy_k, Y_k, BLAS_Cpp::no_trans, py_k );
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nQuantities after computing final py and Ypy ...\n";
    out << "\n||py_k||inf = " << s.py().get_k(0).norm_inf();
    out << "\n||Ypy_k||inf = " << s.Ypy().get_k(0).norm_inf();
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\npy_k = \n" << s.py().get_k(0);
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if( var_indep.size() )
      out << "\nYpy(var_indep)_k = \n" << *s.Ypy().get_k(0).sub_view(var_indep);
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if( var_dep.size() )
      out << "\nYpy(var_dep)_k = \n" << *s.Ypy().get_k(0).sub_view(var_dep);
    out << "\nYpy_k = \n" << s.Ypy().get_k(0);
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
    out << "\nZ_k = \n" << s.Z().get_k(0);
    out << "\nY_k = \n" << s.Y().get_k(0);
  }

  return true;
}
void EvalNewPointTailoredApproach_Step::print_step(
  const Algorithm& algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss, std::ostream& out, const std::string& L
  ) const
{
  // Print a textual outline of what do_step(...) computes, prefixed by L.
  out
    << L << "*** Evaluate the new point for the \"Tailored Approach\"\n"
    << L << "if nlp is not initialized then initialize the nlp\n"
    << L << "if x is not updated for any k then set x_k = nlp.xinit\n"
    << L << "if Gf is supported then set Gf_k = Gf(x_k) <: space_x\n"
    << L << "For Gc(:,equ_decomp) = [ C' ; N' ] <: space_x|space_c(equ_decomp) compute:\n"
    << L << " py_k = -inv(C)*c_k\n"
    << L << " D = -inv(C)*N <: R^(n x (n-m))\n"
    << L << " rGf_k = D'*Gf_k(var_dep) + Gf_k(var_indep)\n"
    << L << " Z_k = [ D ; I ] <: R^(n x (n-m))\n"
    << L << " if m > r then\n"
    << L << " Uz_k = Gc(var_indep,equ_undecomp)' + Gc(var_dep,equ_undecomp)'*D\n"
    << L << "if ( (fd_deriv_testing==FD_TEST)\n"
    << L << " or (fd_deriv_testing==FD_DEFAULT and check_results==true)\n"
    << L << " check Gf_k, py_k, rGf_k, D, Uz (if m > r) and Vz (if mI > 0) by finite differences.\n"
    << L << "Ypy_k = Y_k * py_k\n"
    << L << "if c_k is not updated c_k = c(x_k) <: space_c\n"
    << L << "if mI > 0 and h_k is not updated h_k = h(x_k) <: space_h\n"
    << L << "if f_k is not updated f_k = f(x_k) <: REAL\n"