MOOCHO (Single Doxygen Collection) Version of the Day

MoochoPack_CheckConvergenceStd_Strategy.cpp
// @HEADER
// ***********************************************************************
//
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
// Copyright (2003) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include <assert.h>

#include <ostream>
#include <limits>
#include <sstream>

#include "MoochoPack_CheckConvergenceStd_Strategy.hpp"
#include "MoochoPack_moocho_algo_conversion.hpp"
#include "AbstractLinAlgPack_VectorMutable.hpp"
#include "AbstractLinAlgPack_VectorAuxiliaryOps.hpp"
#include "AbstractLinAlgPack_assert_print_nan_inf.hpp"
#include "Teuchos_dyn_cast.hpp"
#include "Teuchos_Assert.hpp"

namespace MoochoPack {

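// Constructor: simply forwards the option values to the CheckConvergence_Strategy base class.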
CheckConvergenceStd_Strategy::CheckConvergenceStd_Strategy(
  EOptErrorCheck         opt_error_check
  ,EScaleKKTErrorBy      scale_opt_error_by
  ,EScaleKKTErrorBy      scale_feas_error_by
  ,EScaleKKTErrorBy      scale_comp_error_by
  ,bool                  scale_opt_error_by_Gf
  )
  :
  CheckConvergence_Strategy(
    opt_error_check,
    scale_opt_error_by,
    scale_feas_error_by,
    scale_comp_error_by,
    scale_opt_error_by_Gf
    )
{}

bool CheckConvergenceStd_Strategy::Converged(
  Algorithm& _algo
  )
{
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::combined_nu_comp_err;

  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLP           &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream        &out   = algo.track().journal_out();

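  // Problem dimensions: n = number of variables, m = number of (equality)
  // constraints, nb = number of variables with finite bounds.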
  const size_type
    n  = nlp.n(),
    m  = nlp.m(),
    nb = nlp.num_bounded_x();

  // Get the iteration quantities
  IterQuantityAccess<value_type>
    &opt_kkt_err_iq  = s.opt_kkt_err(),
    &feas_kkt_err_iq = s.feas_kkt_err(),
    &comp_kkt_err_iq = s.comp_kkt_err();

  IterQuantityAccess<VectorMutable>
    &x_iq   = s.x(),
    &d_iq   = s.d(),
    &Gf_iq  = s.Gf(),
    *c_iq   = m     ? &s.c()   : NULL,
    *rGL_iq = n > m ? &s.rGL() : NULL,
    *GL_iq  = n > m ? &s.GL()  : NULL,
    *nu_iq  = n > m ? &s.nu()  : NULL;

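  // c is only defined when there are general constraints (m > 0); rGL, GL and nu
  // are only defined when there are remaining degrees of freedom (n > m).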
  // opt_err = (||rGL_k||inf or ||GL_k||inf) / (1 + ||Gf_k||inf)
  // (the ||Gf_k||inf term is only included if scale_opt_error_by_Gf is true)
  value_type
    norm_inf_Gf_k    = 0.0,
    norm_inf_GLrGL_k = 0.0;

  if( n > m && scale_opt_error_by_Gf() && Gf_iq.updated_k(0) ) {
    assert_print_nan_inf(
      norm_inf_Gf_k = Gf_iq.get_k(0).norm_inf(),
      "||Gf_k||inf", true, &out
      );
  }

  // NOTE:
  // The strategy object CheckConvergenceIP_Strategy assumes that this will
  // always be the gradient of the Lagrangian of the original problem, not the
  // gradient of the Lagrangian for psi (don't use augmented NLP info here).
  if( n > m ) {
    if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) {
      assert_print_nan_inf( norm_inf_GLrGL_k = rGL_iq->get_k(0).norm_inf(),
        "||rGL_k||inf", true, &out );
    }
    else {
      assert_print_nan_inf( norm_inf_GLrGL_k = GL_iq->get_k(0).norm_inf(),
        "||GL_k||inf", true, &out );
    }
  }

  const value_type
    opt_scale_factor = 1.0 + norm_inf_Gf_k,
    opt_err          = norm_inf_GLrGL_k / opt_scale_factor;

  // feas_err
  const value_type feas_err = ( m ? c_iq->get_k(0).norm_inf() : 0.0 );

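  // comp_err estimates the complementarity error
  // max( nu(i)*(xu(i)-x(i)), -nu(i)*(x(i)-xl(i)) ) over the bounded variables
  // (see the description printed by print_step() below).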
  // comp_err
  value_type comp_err = 0.0;
  if( n > m ) {
    if( nb > 0 ) {
      comp_err = combined_nu_comp_err( nu_iq->get_k(0), x_iq.get_k(0), nlp.xl(), nlp.xu() );
    }
    if( m ) {
      assert_print_nan_inf( feas_err, "||c_k||inf", true, &out );
    }
  }

  // scaling factors
  const value_type
    scale_opt_factor  = CalculateScalingFactor( s, scale_opt_error_by()  ),
    scale_feas_factor = CalculateScalingFactor( s, scale_feas_error_by() ),
    scale_comp_factor = CalculateScalingFactor( s, scale_comp_error_by() );

  // kkt_err
  const value_type
    opt_kkt_err_k  = opt_err  / scale_opt_factor,
    feas_kkt_err_k = feas_err / scale_feas_factor,
    comp_kkt_err_k = comp_err / scale_comp_factor;

  // update the iteration quantities
  if( n > m ) opt_kkt_err_iq.set_k(0) = opt_kkt_err_k;
  feas_kkt_err_iq.set_k(0) = feas_kkt_err_k;
  comp_kkt_err_iq.set_k(0) = comp_kkt_err_k;

  // step_err
  value_type step_err = 0.0;
  if( d_iq.updated_k(0) ) {
    step_err = AbstractLinAlgPack::max_rel_step( x_iq.get_k(0), d_iq.get_k(0) );
    assert_print_nan_inf( step_err, "max(d(i)/max(1,x(i)),i=1...n)", true, &out );
  }

  const value_type
    opt_tol  = algo.algo_cntr().opt_tol(),
    feas_tol = algo.algo_cntr().feas_tol(),
    comp_tol = algo.algo_cntr().comp_tol(),
    step_tol = algo.algo_cntr().step_tol();

  const bool found_solution =
    opt_kkt_err_k < opt_tol
    && feas_kkt_err_k < feas_tol
    && comp_kkt_err_k < comp_tol
    && step_err < step_tol;

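  // Convergence is declared only when all four scaled error measures fall below
  // their corresponding tolerances at the same iteration.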
  if( int(olevel) >= int(PRINT_ALGORITHM_STEPS) || ( int(olevel) >= int(PRINT_BASIC_ALGORITHM_INFO) && found_solution ) )
  {
    out
      << "\nscale_opt_factor = " << scale_opt_factor
      << " (scale_opt_error_by = " << ( scale_opt_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : ( scale_opt_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_feas_factor = " << scale_feas_factor
      << " (scale_feas_error_by = " << ( scale_feas_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : ( scale_feas_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_comp_factor = " << scale_comp_factor
      << " (scale_comp_error_by = " << ( scale_comp_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
        : ( scale_comp_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
        : "SCALE_BY_NORM_2_X" ) ) << ")"
      << "\nopt_scale_factor = " << opt_scale_factor
      << " (scale_opt_error_by_Gf = " << ( scale_opt_error_by_Gf() ? "true" : "false" ) << ")"
      << "\nopt_kkt_err_k = " << opt_kkt_err_k << ( opt_kkt_err_k < opt_tol ? " < " : " > " )
      << "opt_tol = " << opt_tol
      << "\nfeas_kkt_err_k = " << feas_kkt_err_k << ( feas_kkt_err_k < feas_tol ? " < " : " > " )
      << "feas_tol = " << feas_tol
      << "\ncomp_kkt_err_k = " << comp_kkt_err_k << ( comp_kkt_err_k < comp_tol ? " < " : " > " )
      << "comp_tol = " << comp_tol
      << "\nstep_err = " << step_err << ( step_err < step_tol ? " < " : " > " )
      << "step_tol = " << step_tol
      << std::endl;
  }

  return found_solution;

}

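// print_step() writes a plain-text description of the convergence test performed
// by Converged() to the journal output stream.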
void CheckConvergenceStd_Strategy::print_step( const Algorithm& _algo, std::ostream& out, const std::string& L ) const
{
  out
    << L << "*** Check to see if the KKT error is small enough for convergence\n"
    << L << "if scale_(opt|feas|comp)_error_by == SCALE_BY_ONE then\n"
    << L << "  scale_(opt|feas|comp)_factor = 1.0\n"
    << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_2_X then\n"
    << L << "  scale_(opt|feas|comp)_factor = 1.0 + norm_2(x_k)\n"
    << L << "else if scale_(opt|feas|comp)_error_by == SCALE_BY_NORM_INF_X then\n"
    << L << "  scale_(opt|feas|comp)_factor = 1.0 + norm_inf(x_k)\n"
    << L << "end\n"
    << L << "if scale_opt_error_by_Gf == true then\n"
    << L << "  opt_scale_factor = 1.0 + norm_inf(Gf_k)\n"
    << L << "else\n"
    << L << "  opt_scale_factor = 1.0\n"
    << L << "end\n";
  if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR )
  {
    out
      << L << "opt_err = norm_inf(rGL_k)/opt_scale_factor\n";
  }
  else
  {
    out
      << L << "opt_err = norm_inf(GL_k)/opt_scale_factor\n";
  }

  out
    << L << "feas_err = norm_inf(c_k)\n"
    << L << "comp_err = max(i, nu(i)*(xu(i)-x(i)), -nu(i)*(x(i)-xl(i)))\n"
    << L << "opt_kkt_err_k = opt_err/scale_opt_factor\n"
    << L << "feas_kkt_err_k = feas_err/scale_feas_factor\n"
    << L << "comp_kkt_err_k = comp_err/scale_comp_factor\n"
    << L << "if d_k is updated then\n"
    << L << "  step_err = max( |d_k(i)|/(1+|x_k(i)|), i=1..n )\n"
    << L << "else\n"
    << L << "  step_err = 0\n"
    << L << "end\n"
    << L << "if opt_kkt_err_k < opt_tol\n"
    << L << "   and feas_kkt_err_k < feas_tol\n"
    << L << "   and comp_kkt_err_k < comp_tol\n"
    << L << "   and step_err < step_tol then\n"
    << L << "  report optimal x_k, lambda_k and nu_k to the nlp\n"
    << L << "  terminate, the solution has been found!\n"
    << L << "end\n";
}

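// Returns the scaling denominator selected by the EScaleKKTErrorBy option:
// 1.0, 1.0 + ||x_k||_2, or 1.0 + ||x_k||_inf.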
value_type
CheckConvergenceStd_Strategy::CalculateScalingFactor( NLPAlgoState& state, EScaleKKTErrorBy scale_by ) const
{
  // scale_kkt_factor
  value_type scale_factor = 1.0;
  switch( scale_by )
  {
    case SCALE_BY_ONE:
      scale_factor = 1.0;
      break;
    case SCALE_BY_NORM_2_X:
      scale_factor = 1.0 + state.x().get_k(0).norm_2();
      break;
    case SCALE_BY_NORM_INF_X:
      scale_factor = 1.0 + state.x().get_k(0).norm_inf();
      break;
    default:
      TEUCHOS_TEST_FOR_EXCEPT(true); // Should never be called
  }

  return scale_factor;
}

} // end namespace MoochoPack