MoochoPack_CalcLambdaIndepStd_AddedStep.cpp
#if 0

// @HEADER
// ***********************************************************************
//
// Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
// Copyright (2003) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include <ostream>

// ... (several project #include directives are not shown in this listing) ...
#include "ConstrainedOptPack/src/VectorWithNorms.h"
#include "AbstractLinAlgPack/src/AbstractLinAlgPack_MatrixOp.hpp"

namespace LinAlgOpPack {
  // ... (using declarations not shown in this listing)
}

bool MoochoPack::CalcLambdaIndepStd_AddedStep::do_step( Algorithm& _algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
{

  using BLAS_Cpp::no_trans;
  using BLAS_Cpp::trans;
  // ... (further using declarations, e.g. for the V_StMtV, Vp_StMtV, norm_inf and
  //      min_abs operations used below, are not shown in this listing)
  using LinAlgOpPack::Vp_V;

  NLPAlgo      &algo = rsqp_algo(_algo);
  NLPAlgoState &s    = algo.rsqp_state();
  Range1D      decomp = s.equ_decomp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream&       out    = algo.track().journal_out();

  // Print the step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // Compute:
  //   lambda(decomp) = inv(Gc(decomp)'*Y)' * ( - Y'*(Gf + nu) - U'*lambda(undecomp) )
  // where U = Gc(undecomp)'*Y.
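  //
  // Why this formula: at a stationary (KKT) point the gradient of the Lagrangian
  // vanishes,
  //   Gf + nu + Gc(:,decomp)*lambda(decomp) + Gc(:,undecomp)*lambda(undecomp) = 0.
  // Premultiplying by Y' and using Y'*Gc(:,decomp) = (Gc(decomp)'*Y)' and
  // Y'*Gc(:,undecomp) = U' gives
  //   (Gc(decomp)'*Y)' * lambda(decomp) = - Y'*(Gf + nu) - U'*lambda(undecomp),
  // which is the linear system solved below for lambda(decomp).
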
  // lambda must be resized here explicitly since only a region of it is updated below.
  // If lambda(undecomp) has already been updated then lambda will already have been
  // resized, but lambda(decomp) will not have been initialized yet.
  if( !s.lambda().updated_k(0) ) s.lambda().set_k(0).v().resize( algo.nlp().m() );

  DVectorSlice lambda_decomp = s.lambda().get_k(0).v()(decomp);

  // lambda_decomp_tmp1 = - Y' * (Gf + nu)
  if( algo.nlp().has_bounds() ) {
    // _tmp = Gf + nu
    DVector _tmp = s.Gf().get_k(0)();
    DVectorSlice _vs_tmp = _tmp; // only create this DVectorSlice once
    Vp_V( &_vs_tmp, s.nu().get_k(0)() );
    // lambda_decomp_tmp1 = - Y' * _tmp
    V_StMtV( &lambda_decomp, -1.0, s.Y().get_k(0), trans, _vs_tmp );
  }
  else {
    // lambda_decomp_tmp1 = - Y' * Gf
    V_StMtV( &lambda_decomp, -1.0, s.Y().get_k(0), trans, s.Gf().get_k(0)() );
  }

  // lambda_decomp_tmp2 = lambda_decomp_tmp1 - U' * lambda(undecomp)
  if( algo.nlp().r() < algo.nlp().m() ) {
    Range1D undecomp = s.equ_undecomp();
    Vp_StMtV( &lambda_decomp, -1.0, s.U().get_k(0), trans, s.lambda().get_k(0).v()(undecomp) );
  }
  // else lambda_decomp_tmp2 = lambda_decomp_tmp1

  // lambda_decomp = inv(Gc(decomp)'*Y)' * lambda_decomp_tmp2
  s.decomp_sys().solve_transAtY( lambda_decomp, trans, &lambda_decomp );

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nmax(|lambda_k(equ_decomp)(i)|) = " << norm_inf(lambda_decomp)
        << "\nmin(|lambda_k(equ_decomp)(i)|) = " << min_abs(lambda_decomp) << std::endl;
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nlambda_k(equ_decomp) = \n" << lambda_decomp;
  }

  return true;
}

void MoochoPack::CalcLambdaIndepStd_AddedStep::print_step( const Algorithm& algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss
  , std::ostream& out, const std::string& L ) const
{
  out
    << L << "*** Calculate the Lagrange multipliers for the decomposed constraints\n"
    << L << "lambda_k(equ_decomp) = - inv(Gc_k(:,equ_decomp)'*Y_k)\n"
    << L << "    * (Y_k'*(Gf_k + nu_k) + U_k'*lambda_k(equ_undecomp))\n";
}

#endif // 0
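
For reference, the computation carried out by do_step() above amounts to one small linear solve per iteration: lambda(decomp) = inv(Gc(decomp)'*Y)' * ( - Y'*(Gf + nu) - U'*lambda(undecomp) ). The stand-alone sketch below reproduces that formula on a tiny dense 2x2 example using plain std::array arithmetic instead of the MOOCHO vector and matrix classes. The helper names (MtV, MtM, solve2x2) and the sample data are made up for this illustration and are not part of the MoochoPack interface; the U'*lambda(undecomp) term is dropped because the example has r == m (no undecomposed constraints).

// Illustrative sketch only: lambda(decomp) = inv(Gc(decomp)'*Y)' * ( - Y'*(Gf + nu) )
// for a 2-variable problem with two decomposed equality constraints.
// All helper names here are hypothetical, not part of MoochoPack.

#include <array>
#include <iostream>

using Vec2 = std::array<double, 2>;
using Mat2 = std::array<std::array<double, 2>, 2>;  // row-major 2x2

// y = M' * x  (transposed matrix-vector product)
Vec2 MtV(const Mat2 &M, const Vec2 &x) {
  return { M[0][0]*x[0] + M[1][0]*x[1],
           M[0][1]*x[0] + M[1][1]*x[1] };
}

// C = M' * N  (transposed matrix-matrix product)
Mat2 MtM(const Mat2 &M, const Mat2 &N) {
  Mat2 C{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      C[i][j] = M[0][i]*N[0][j] + M[1][i]*N[1][j];
  return C;
}

// Solve the 2x2 system A*x = b by Cramer's rule (fine for an illustration).
Vec2 solve2x2(const Mat2 &A, const Vec2 &b) {
  const double det = A[0][0]*A[1][1] - A[0][1]*A[1][0];
  return { ( A[1][1]*b[0] - A[0][1]*b[1] ) / det,
           ( A[0][0]*b[1] - A[1][0]*b[0] ) / det };
}

int main() {
  // Sample data: columns of Gc_decomp are the decomposed constraint gradients.
  const Mat2 Gc_decomp = {{ { 2.0, 0.0 },
                            { 1.0, 3.0 } }};
  const Mat2 Y         = {{ { 1.0, 0.0 },
                            { 0.0, 1.0 } }};  // range-space basis (identity here)
  const Vec2 Gf        = { 1.0, -2.0 };       // objective gradient
  const Vec2 nu        = { 0.5,  0.0 };       // bound-constraint multipliers

  // rhs = - Y' * (Gf + nu)
  const Vec2 g   = { Gf[0] + nu[0], Gf[1] + nu[1] };
  const Vec2 Ytg = MtV(Y, g);
  const Vec2 rhs = { -Ytg[0], -Ytg[1] };

  // Coefficient matrix (Gc(decomp)'*Y)' = Y'*Gc(decomp); solve for lambda(decomp).
  const Mat2 A = MtM(Y, Gc_decomp);
  const Vec2 lambda_decomp = solve2x2(A, rhs);

  std::cout << "lambda(decomp) = [ " << lambda_decomp[0]
            << ", " << lambda_decomp[1] << " ]\n";  // [ -0.75, 0.916667 ]
  return 0;
}

In do_step() itself this solve is delegated to s.decomp_sys().solve_transAtY(), so no explicit inverse of Gc(decomp)'*Y is ever formed.
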
Referenced declarations (Doxygen cross-references for this file):

- bool do_step(Algorithm &algo, poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
- void print_step(const Algorithm &algo, poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss, std::ostream &out, const std::string &leading_str) const
- NLPAlgo& rsqp_algo(Algorithm &algo) : converts an Algorithm to an NLPAlgo.
- void print_algorithm_step(const Algorithm &algo, Algorithm::poss_type step_poss, EDoStepType type, Algorithm::poss_type assoc_step_poss, std::ostream &out) : prints the algorithm step to 'out'.
- EJournalOutputLevel : enum for journal output.
- DenseLinAlgPack::VectorSliceTmpl<value_type> DVectorSlice
- BLAS_Cpp::trans / BLAS_Cpp::no_trans : transposed / not transposed.
- void Vp_V(VectorMutable *v_lhs, const V &V_rhs) : v_lhs += V_rhs.
- void Vp_StV(VectorMutable *v_lhs, const value_type &alpha, const Vector &v_rhs) : v_lhs = alpha * v_rhs + v_lhs.
- void V_StMtV(VectorMutable *v_lhs, value_type alpha, const MatrixOp &M_rhs1, BLAS_Cpp::Transp trans_rhs1, const V &V_rhs2) : v_lhs = alpha * op(M_rhs1) * V_rhs2.
- void Vp_MtV(VectorMutable *v_lhs, const MatrixOp &M_rhs1, BLAS_Cpp::Transp trans_rhs1, const V &V_rhs2) : v_lhs += op(M_rhs1) * V_rhs2.
- void Vp_StMtV(VectorMutable *v_lhs, value_type alpha, const MatrixOp &M_rhs1, BLAS_Cpp::Transp trans_rhs1, const Vector &v_rhs2, value_type beta = 1.0) : v_lhs = alpha * op(M_rhs1) * v_rhs2 + beta * v_lhs (BLAS xGEMV).
- void Vt_S(DVectorSlice *vs_lhs, value_type alpha) : vs_lhs *= alpha (BLAS xSCAL); alpha == 0.0 is handled as vs_lhs = 0.0.
- value_type norm_inf(const DVectorSlice &vs_rhs) : result = ||vs_rhs||inf (BLAS IxAMAX).
- value_type norm_inf(const SparseVectorSlice<T_Ele> &sv_rhs) : result = ||sv_rhs||inf (BLAS IxAMAX).
- value_type min_abs(const DVectorSlice &mu) : minimum |mu(i)|.