ROL_MoreauYosidaPenaltyStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_MOREAUYOSIDAPENALTYSTEP_H
#define ROL_MOREAUYOSIDAPENALTYSTEP_H

#include "ROL_MoreauYosidaPenalty.hpp"
#include "ROL_Types.hpp"
#include "ROL_CompositeStep.hpp"
#include "ROL_FletcherStep.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_LineSearchStep.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_ConstraintStatusTest.hpp"
#include "ROL_BundleStatusTest.hpp"
#include "ROL_ParameterList.hpp"

namespace ROL {

template<class Real>
class AugmentedLagrangianStep;

/** \class ROL::MoreauYosidaPenaltyStep
    \brief Implements the computation of optimization steps using Moreau-Yosida
           regularized bound constraints.

    The bound constraint is folded into the objective through a Moreau-Yosida
    penalty (see MoreauYosidaPenalty); each outer iteration solves the resulting
    subproblem with a step type selected at run time.
*/
template <class Real>
class MoreauYosidaPenaltyStep : public Step<Real> {
private:
  ROL::Ptr<StatusTest<Real>>      status_;
  ROL::Ptr<Step<Real>>            step_;
  ROL::Ptr<Algorithm<Real>>       algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<Vector<Real>>          g_;
  ROL::Ptr<Vector<Real>>          l_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;

  Real compViolation_;   // complementarity violation, set in updateState
  Real gLnorm_;          // norm of the Lagrangian gradient, set in updateState
  Real tau_;             // penalty parameter growth factor
  bool print_;           // print subproblem iteration history
  bool updatePenalty_;   // grow the penalty parameter after each step

  ROL::ParameterList parlist_;
  int  subproblemIter_;  // iteration count of the most recent subproblem solve
  bool hasEquality_;     // true if an equality constraint is present

  EStep stepType_;       // subproblem step type
  std::string stepname_;

  // Recompute merit value, Lagrangian gradient norm, constraint norm and
  // complementarity violation at (x,l).
  void updateState(const Vector<Real> &x, const Vector<Real> &l,
                   Objective<Real> &obj, Constraint<Real> &con,
                   BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    // Update objective and constraint.
    myPen.update(x,true,algo_state.iter);
    con.update(x,true,algo_state.iter);
    // Compute norm of the gradient of the Lagrangian
    algo_state.value = myPen.value(x, zerotol);
    myPen.gradient(*(state->gradientVec), x, zerotol);
    con.applyAdjointJacobian(*g_,l,x,zerotol);
    state->gradientVec->plus(*g_);
    gLnorm_ = (state->gradientVec)->norm();
    // Compute constraint violation
    con.value(*(state->constraintVec),x, zerotol);
    algo_state.cnorm = (state->constraintVec)->norm();
    // Compute complementarity violation for the bound constraint
    compViolation_ = myPen.testComplementarity(x);
    algo_state.gnorm = std::max(gLnorm_,compViolation_);
    // Update state
    algo_state.nfval++;
    algo_state.ngrad++;
    algo_state.ncval++;
  }

  // Recompute merit value, gradient norm and complementarity violation at x
  // when no equality constraint is present.
  void updateState(const Vector<Real> &x,
                   Objective<Real> &obj, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    // Update objective and constraint.
    myPen.update(x,true,algo_state.iter);
    // Compute norm of the gradient of the Lagrangian
    algo_state.value = myPen.value(x, zerotol);
    myPen.gradient(*(state->gradientVec), x, zerotol);
    gLnorm_ = (state->gradientVec)->norm();
    // Compute constraint violation
    algo_state.cnorm = static_cast<Real>(0);
    // Compute complementarity violation for the bound constraint
    compViolation_ = myPen.testComplementarity(x);
    algo_state.gnorm = std::max(gLnorm_,compViolation_);
    // Update state
    algo_state.nfval++;
    algo_state.ngrad++;
  }

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  MoreauYosidaPenaltyStep(ROL::ParameterList &parlist)
    : Step<Real>(), algo_(ROL::nullPtr),
      x_(ROL::nullPtr), g_(ROL::nullPtr), l_(ROL::nullPtr),
      tau_(10), print_(false), parlist_(parlist), subproblemIter_(0),
      hasEquality_(false) {
    // Parse parameters
    Real ten(10), oem6(1.e-6), oem8(1.e-8);
    ROL::ParameterList& steplist = parlist.sublist("Step").sublist("Moreau-Yosida Penalty");
    Step<Real>::getState()->searchSize = steplist.get("Initial Penalty Parameter",ten);
    tau_           = steplist.get("Penalty Parameter Growth Factor",ten);
    updatePenalty_ = steplist.get("Update Penalty",true);
    print_         = steplist.sublist("Subproblem").get("Print History",false);
    // Set parameters for step subproblem
    Real gtol = steplist.sublist("Subproblem").get("Optimality Tolerance",oem8);
    Real ctol = steplist.sublist("Subproblem").get("Feasibility Tolerance",oem8);
    Real stol = oem6*std::min(gtol,ctol);
    int maxit = steplist.sublist("Subproblem").get("Iteration Limit",1000);
    parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
    parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
    parlist_.sublist("Status Test").set("Step Tolerance",       stol);
    parlist_.sublist("Status Test").set("Iteration Limit",      maxit);
    // Get step name from parameter list
    stepname_ = steplist.sublist("Subproblem").get("Step Type","Composite Step");
    stepType_ = StringToEStep(stepname_);
  }

  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    hasEquality_ = true;
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize additional storage
    x_ = x.clone();
    g_ = g.clone();
    l_ = l.clone();
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    updateState(x,l,obj,con,bnd,algo_state);
  }

  /** \brief Initialize step without equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Objective<Real> &obj, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec  = x.clone();
    state->gradientVec = g.clone();
    // Initialize additional storage
    x_ = x.clone();
    g_ = g.clone();
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    updateState(x,obj,bnd,algo_state);
    // The bound constraint is handled by the penalty, so the subproblem
    // is solved with a deactivated bound constraint.
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    //MoreauYosidaPenalty<Real> &myPen
    //  = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    Real one(1);
    Ptr<Objective<Real>> penObj;
    if (stepType_ == STEP_AUGMENTEDLAGRANGIAN) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);
      step_  = makePtr<AugmentedLagrangianStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_FLETCHER) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);
      step_  = makePtr<FletcherStep<Real>>(parlist_);
    }
    else {
      penObj    = makePtrFromRef(obj);
      stepname_ = "Composite Step";
      stepType_ = STEP_COMPOSITESTEP;
      step_     = makePtr<CompositeStep<Real>>(parlist_);
    }
    status_ = makePtr<ConstraintStatusTest<Real>>(parlist_);
    algo_   = ROL::makePtr<Algorithm<Real>>(step_,status_,false);
    x_->set(x); l_->set(l);
    algo_->run(*x_,*l_,*penObj,con,print_);
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Compute step for bound constraints.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x,
                Objective<Real> &obj, BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    Real one(1);
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    if (stepType_ == STEP_BUNDLE) {
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      step_   = makePtr<BundleStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_LINESEARCH) {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
    }
    else {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
    }
    algo_ = ROL::makePtr<Algorithm<Real>>(step_,status_,false);
    x_->set(x);
    algo_->run(*x_,myPen,*bnd_,print_);
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;
    state->descentVec->set(s);
    // Update iterate and Lagrange multiplier
    x.plus(s);
    l.set(*l_);
    // Update objective and constraint
    algo_state.iter++;
    con.update(x,true,algo_state.iter);
    myPen.update(x,true,algo_state.iter);
    // Update state
    updateState(x,l,obj,con,bnd,algo_state);
    // Update penalty parameter and multipliers
    if (updatePenalty_) {
      state->searchSize *= tau_;
    }
    myPen.updateMultipliers(state->searchSize,x);
    algo_state.nfval += myPen.getNumberFunctionEvaluations() + ((algo_->getState())->nfval);
    algo_state.ngrad += myPen.getNumberGradientEvaluations() + ((algo_->getState())->ngrad);
    algo_state.ncval += (algo_->getState())->ncval;
    algo_state.snorm = s.norm();
    algo_state.iterateVec->set(x);
    algo_state.lagmultVec->set(l);
  }

  /** \brief Update step, for bound constraints.
  */
  void update( Vector<Real> &x, const Vector<Real> &s,
               Objective<Real> &obj, BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec->set(s);
    // Update iterate
    x.plus(s);
    // Update objective and constraint
    algo_state.iter++;
    myPen.update(x,true,algo_state.iter);
    // Update state
    updateState(x,obj,bnd,algo_state);
    // Update penalty parameter and multipliers
    if (updatePenalty_) {
      state->searchSize *= tau_;
    }
    myPen.updateMultipliers(state->searchSize,x);
    algo_state.nfval += myPen.getNumberFunctionEvaluations() + ((algo_->getState())->nfval);
    algo_state.ngrad += myPen.getNumberGradientEvaluations() + ((algo_->getState())->ngrad);
    algo_state.snorm = s.norm();
    algo_state.iterateVec->set(x);
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    if (hasEquality_) {
      hist << std::setw(15) << std::left << "cnorm";
    }
    hist << std::setw(15) << std::left << "gnorm";
    hist << std::setw(15) << std::left << "ifeas";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    if (hasEquality_) {
      hist << std::setw(8) << std::left << "#cval";
    }
    hist << std::setw(8)  << std::left << "subIter";
    hist << "\n";
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << " Moreau-Yosida Penalty solver";
    hist << "\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << compViolation_;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << "\n";
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << compViolation_;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8)  << std::left << algo_state.nfval;
      hist << std::setw(8)  << std::left << algo_state.ngrad;
      if (hasEquality_) {
        hist << std::setw(8) << std::left << algo_state.ncval;
      }
      hist << std::setw(8)  << std::left << subproblemIter_;
      hist << "\n";
    }
    return hist.str();
  }

}; // class MoreauYosidaPenaltyStep

} // namespace ROL

#endif
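
The constructor above reads all of its settings from the "Moreau-Yosida Penalty" sublist of the supplied parameter list. The following is a minimal, illustrative sketch of how such a list might be populated and handed to the step: the key names mirror the get() calls in the constructor, and the numeric values simply restate the defaults used in this file rather than tuned recommendations.

  // Illustrative sketch only: keys match the constructor's get() calls above;
  // values restate the defaults used in this file.
  ROL::ParameterList parlist;
  ROL::ParameterList &mylist
    = parlist.sublist("Step").sublist("Moreau-Yosida Penalty");
  mylist.set("Initial Penalty Parameter",       1.e1);
  mylist.set("Penalty Parameter Growth Factor", 1.e1);
  mylist.set("Update Penalty",                  true);
  ROL::ParameterList &sublist = mylist.sublist("Subproblem");
  sublist.set("Optimality Tolerance",  1.e-8);
  sublist.set("Feasibility Tolerance", 1.e-8);
  sublist.set("Iteration Limit",       1000);
  sublist.set("Print History",         false);
  sublist.set("Step Type",             "Composite Step");

  // The step can then be constructed directly from the list.
  ROL::MoreauYosidaPenaltyStep<double> step(parlist);

In practice this list is more commonly handed to a driver such as ROL::Algorithm rather than used to construct the step by hand; the fragment above only exercises the constructor shown in this file.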