ReducedHessianSecantUpdateStd_Step::ReducedHessianSecantUpdateStd_Step(
  const secant_update_ptr_t&   secant_update
  )
  :secant_update_(secant_update)
  ,num_basis_(NO_BASIS_UPDATED_YET)
  ,iter_k_rHL_init_ident_(-1)   // -1 marks "not a valid iteration yet"
{}
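// Construction sketch (editor's illustration, not part of this file): the
// step is normally handed a concrete ReducedHessianSecantUpdate_Strategy
// object. The strategy class named below is assumed from the surrounding
// MoochoPack library; any type accepted by secant_update_ptr_t could be
// substituted:
//
//   ReducedHessianSecantUpdateStd_Step
//     rHL_update_step(
//       Teuchos::rcp(new ReducedHessianSecantUpdateBFGSFull_Strategy()) );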
bool ReducedHessianSecantUpdateStd_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  // ... (journal stream out and the output levels olevel, ns_olevel are set up here) ...

  bool return_val = true;

  // Get references to the iteration quantities
  IterQuantityAccess<index_type>
    &num_basis_iq = s.num_basis();
  IterQuantityAccess<VectorMutable>
    &pz_iq  = s.pz(),
    &rGf_iq = s.rGf(),
    &w_iq   = s.w();
  IterQuantityAccess<MatrixOp>
    &Z_iq = s.Z();
  IterQuantityAccess<MatrixSymOp>
    &rHL_iq = s.rHL();

  // Determine the number of independent (null-space) variables nind = n-r
  const NLP &nlp = algo.nlp();
  const size_type
    m    = nlp.m(),
    nind = m ? Z_iq.get_k(Z_iq.last_updated()).cols() : 0;
  // See if a new basis has been selected
  bool new_basis = false;
  {
    const int last_updated_k = num_basis_iq.last_updated();
    if( last_updated_k != IterQuantity::NONE_UPDATED ) {
      const index_type num_basis_last = num_basis_iq.get_k(last_updated_k);
      if( num_basis_ == NO_BASIS_UPDATED_YET )
        num_basis_ = num_basis_last;
      else if( num_basis_ != num_basis_last ) {
        new_basis  = true;
        num_basis_ = num_basis_last;
      }
    }
  }
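  // Note: the num_basis iteration quantity changes whenever a new basis is
  // selected, so comparing it against the value remembered in num_basis_
  // (initialized to NO_BASIS_UPDATED_YET by the constructor) is what detects
  // that the variable partition, and hence the null-space matrix Z, changed.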
  // If rHL has already been updated for this iteration then just leave it alone.
  if( !rHL_iq.updated_k(0) ) {

    if( new_basis ) {
      // Transition rHL to the new basis by just starting over.
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
        out << "\nBasis changed. Reinitializing rHL_k = eye(n-r) ...\n";
      dyn_cast<MatrixSymInitDiag>(rHL_iq.set_k(0)).init_identity(
        Z_iq.get_k(0).space_rows()
        );
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
        if( algo.algo_cntr().calc_matrix_norms() )
          out << "\n||rHL_k||inf = " << rHL_iq.get_k(0).calc_norm(MatrixOp::MAT_NORM_INF).value << std::endl;
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) )
        out << "\nrHL_k = \n" << rHL_iq.get_k(0);
      quasi_newton_stats_(s).set_k(0).set_updated_stats(
        QuasiNewtonStats::REINITIALIZED );
      iter_k_rHL_init_ident_ = s.k();  // Remember the iteration of this reset.
    }
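    // Why start over: the old rHL approximates Z_old' * HL * Z_old, which has
    // no meaning with respect to the newly selected basis, so the cheapest
    // consistent choice is the identity. Note the sizing: Z_iq.get_k(0).space_rows()
    // is the domain space of Z, whose dimension is the null-space size n-r,
    // so init_identity() produces an (n-r) x (n-r) identity matrix.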
    else {
      // Determine if rHL_km1 and rGf_km1 are updated.
      if( rHL_iq.updated_k(-1) && rGf_iq.updated_k(-1) ) {
        // We should have the information to perform a BFGS update.
        if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
          out << "\nPerforming Secant update ...\n";
        const Vector
          &rGf_k   = rGf_iq.get_k(0),
          &rGf_km1 = rGf_iq.get_k(-1),
          &pz_km1  = pz_iq.get_k(-1);
        const value_type
          alpha_km1 = s.alpha().get_k(-1);
        VectorSpace::vec_mut_ptr_t
          y_bfgs = rGf_k.space().create_member(),
          s_bfgs = pz_km1.space().create_member();
        // y_bfgs = rGf_k - rGf_km1
        V_VmV( y_bfgs.get(), rGf_k, rGf_km1 );
        if( w_iq.updated_k(-1) )
          // y_bfgs += -alpha_km1 * w_km1
          Vp_StV( y_bfgs.get(), -alpha_km1, w_iq.get_k(-1) );
        // s_bfgs = alpha_km1 * pz_km1
        V_StV( s_bfgs.get(), alpha_km1, pz_iq.get_k(-1) );
        if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
          out << "\n||y_bfgs||inf = " << y_bfgs->norm_inf() << std::endl;
          out << "\n||s_bfgs||inf = " << s_bfgs->norm_inf() << std::endl;
        }
        if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
          out << "\ny_bfgs =\n" << *y_bfgs;
          out << "\ns_bfgs =\n" << *s_bfgs;
        }
        // Start the update from the last iteration's matrix: rHL_k = rHL_km1.
        MatrixSymOp
          &rHL_k = rHL_iq.set_k(0,-1);
        // Perform the secant update through the pluggable strategy object.
        if( !secant_update().perform_update(
          s_bfgs.get(), y_bfgs.get()
          ,iter_k_rHL_init_ident_ == s.k() - 1   // first update after a reset?
          ,out, ns_olevel, &algo, &s, &rHL_k
          ))
        {
          return_val = false;  // Redirect control of the algorithm.
        }
      }
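        // The third argument flags the first update after rHL was reset to
        // the identity (iter_k_rHL_init_ident_ == k-1); a strategy may use
        // this, for example, to rescale the identity before updating, though
        // that behavior is strategy-specific. A false return from
        // perform_update() redirects control of the algorithm, which is why
        // it must be propagated through return_val.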
      else {
        // We have no information with which to perform a BFGS update, so
        // either reuse a previously updated rHL or reinitialize it.
        int k_last_offset = rHL_iq.last_updated();
        bool set_current = false;
        if( k_last_offset != IterQuantity::NONE_UPDATED && k_last_offset < 0 ) {
          const MatrixSymOp &rHL_k_last = rHL_iq.get_k(k_last_offset);
          const size_type nind_last = rHL_k_last.rows();
          if( nind_last != nind ) {
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
              out
                << "No new basis was selected.\n"
                << "The previous matrix rHL_k(" << k_last_offset << ") was found but its dimension\n"
                << "rHL_k(" << k_last_offset << ").rows() = " << nind_last
                << " != n-r = " << nind << std::endl;
          }
          else {
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
              out
                << "No new basis was selected so using previously updated ...\n"
                << "rHL_k = rHL_k(" << k_last_offset << ")\n";
            rHL_iq.set_k(0) = rHL_k_last;
            quasi_newton_stats_(s).set_k(0).set_updated_stats(
              QuasiNewtonStats::SKIPED );  // stats: update skipped
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) )
              rHL_iq.get_k(0).output( out << "\nrHL_k = \n" );
            set_current = true;
          }
        }
        if( !set_current ) {
          if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) )
            out
              << "\nInitializing rHL = eye(n-r) "
              << "(k = " << algo.state().k() << ") ...\n";
          dyn_cast<MatrixSymInitDiag>(rHL_iq.set_k(0)).init_identity(
            Z_iq.get_k(0).space_rows() );
          iter_k_rHL_init_ident_ = s.k();  // Remember the iteration of this reset.
          quasi_newton_stats_(s).set_k(0).set_updated_stats(
            QuasiNewtonStats::REINITIALIZED );
        }
      }
    }
    // Print the norm and/or condition number of the updated rHL_k if requested.
    MatrixOp::EMatNormType mat_nrm_inf = MatrixOp::MAT_NORM_INF;
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      if( algo.algo_cntr().calc_matrix_norms() )
        out << "\n||rHL_k||inf = " << rHL_iq.get_k(0).calc_norm(mat_nrm_inf).value << std::endl;
      if( algo.algo_cntr().calc_conditioning() ) {
        const MatrixSymOpNonsing
          *rHL_ns_k = dynamic_cast<const MatrixSymOpNonsing*>(&rHL_iq.get_k(0));
        if( rHL_ns_k )
          out << "\ncond_inf(rHL_k) = " << rHL_ns_k->calc_cond_num(mat_nrm_inf).value << std::endl;
      }
    }
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) )
      out << "\nrHL_k = \n" << rHL_iq.get_k(0);
  }
  else {
    if( static_cast<int>(olevel) > static_cast<int>(PRINT_NOTHING) )
      out << "\nThe matrix rHL_k has already been updated so leave it.\n";
  }

  return return_val;
}
void ReducedHessianSecantUpdateStd_Step::print_step(
  const Algorithm& algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss, std::ostream& out, const std::string& L
  ) const
{
  out
    << L << "*** Calculate the reduced Hessian of the Lagrangian rHL = Z' * HL * Z\n"
    << L << "default: num_basis_remembered = NO_BASIS_UPDATED_YET\n"
    << L << "         iter_k_rHL_init_ident = -1\n"
    << L << "if num_basis_remembered == NO_BASIS_UPDATED_YET then\n"
    << L << "    num_basis_remembered = num_basis\n"
    << L << "end\n"
    << L << "if num_basis_remembered != num_basis then\n"
    << L << "    num_basis_remembered = num_basis\n"
    << L << "    new_basis = true\n"
    << L << "end\n"
    << L << "if rHL_k is not updated then\n"
    << L << "    if new_basis == true then\n"
    << L << "        *** Transition rHL to the new basis by just starting over.\n"
    << L << "        rHL_k = eye(n-r) *** must support MatrixSymInitDiag interface\n"
    << L << "        iter_k_rHL_init_ident = k\n"
    << L << "        goto next step\n"
    << L << "    end\n"
    << L << "    if rHL_km1 and rGf_km1 are updated then\n"
    << L << "        *** We should have the information to perform a BFGS update.\n"
    << L << "        y = rGf_k - rGf_km1 - alpha_km1 * w_km1 (w term only if w_km1 is updated)\n"
    << L << "        s = alpha_km1 * pz_km1\n"
    << L << "        if k - 1 == iter_k_rHL_init_ident then\n"
    << L << "            first_update = true\n"
    << L << "        else\n"
    << L << "            first_update = false\n"
    << L << "        end\n"
    << L << "        rHL_k = rHL_km1\n"
    << L << "        begin secant update\n"
    << L << "        (" << typeName(secant_update()) << ")\n";
  secant_update().print_step( out, L + "        " );
  out
    << L << "        end secant update\n"
    << L << "    else\n"
    << L << "        *** We have no information with which to perform a BFGS update.\n"
    << L << "        k_last_offset = last iteration rHL was updated for\n"
    << L << "        if k_last_offset does not exist then\n"
    << L << "            *** We are left with no choice but to initialize rHL.\n"
    << L << "            rHL_k = eye(n-r) *** must support MatrixSymInitDiag interface\n"
    << L << "            iter_k_rHL_init_ident = k\n"
    << L << "        else\n"
    << L << "            *** No new basis has been selected so we may as well\n"
    << L << "            *** just use the last rHL that was updated.\n"
    << L << "            rHL_k = rHL_k(k_last_offset)\n"
    << L << "        end\n"
    << L << "    end\n"
    << L << "end\n";
}
Referenced members:

AbstractLinAlgPack::size_type size_type
std::string typeName(const T &t)
RTOp_index_type index_type
ReducedHessianSecantUpdateStd_Step(const secant_update_ptr_t &secant_update=Teuchos::null)
void Vt_S(VectorMutable *v_lhs, const value_type &alpha)
    v_lhs *= alpha
void Vp_StV(VectorMutable *v_lhs, const value_type &alpha, const Vector &v_rhs)
    v_lhs = alpha * v_rhs + v_lhs
MoochoPack::NLPAlgo
    rSQP Algorithm control class.
void print_step(const Algorithm &algo, poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss, std::ostream &out, const std::string &leading_str) const
void V_StV(VectorMutable *v_lhs, value_type alpha, const V &V_rhs)
    v_lhs = alpha * V_rhs
bool do_step(Algorithm &algo, poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
virtual std::ostream & journal_out() const
    Return a reference to a std::ostream to be used to output debug information and the like.
EJournalOutputLevel
    enum for journal output.
T_To & dyn_cast(T_From &from)
void V_VpV(VectorMutable *v_lhs, const V1 &V1_rhs1, const V2 &V2_rhs2)
    v_lhs = V1_rhs1 + V2_rhs2
MoochoPack::NLPAlgoState
    Reduced space SQP state encapsulation interface.
void print_algorithm_step(const Algorithm &algo, Algorithm::poss_type step_poss, EDoStepType type, Algorithm::poss_type assoc_step_poss, std::ostream &out)
    Prints to 'out' the algorithm step.
AlgorithmTracker & track()
value_type dot(const Vector &v_rhs1, const Vector &v_rhs2)
    result = v_rhs1' * v_rhs2
void V_MtV(VectorMutable *v_lhs, const MatrixOp &M_rhs1, BLAS_Cpp::Transp trans_rhs1, const V &V_rhs2)
    v_lhs = op(M_rhs1) * V_rhs2
AbstractLinAlgPack::value_type value_type
IterationPack::Algorithm
    Acts as the central hub for an iterative algorithm.
NLPAlgoState & rsqp_state()
«std aggr» members for algo_cntr
size_type cols(size_type rows, size_type cols, BLAS_Cpp::Transp _trans)
    Return columns of a possible transposed matrix.
void V_VmV(VectorMutable *v_lhs, const V1 &V1_rhs1, const V2 &V2_rhs2)
    v_lhs = V1_rhs1 - V2_rhs2
NLPAlgo & rsqp_algo(Algorithm &algo)
    Convert from an Algorithm to a NLPAlgo.
void Vp_V(VectorMutable *v_lhs, const V &V_rhs)
    v_lhs += V_rhs