ROL_TypeB_LSecantBAlgorithm_Def.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_TYPEB_LSecantBALGORITHM_DEF_HPP
#define ROL_TYPEB_LSecantBALGORITHM_DEF_HPP

#include "unistd.h"

namespace ROL {
namespace TypeB {

template<typename Real>
LSecantBAlgorithm<Real>::LSecantBAlgorithm(ParameterList &list,
                                           const Ptr<Secant<Real>> &secant) {
  // Set status test
  status_->reset();
  status_->add(makePtr<StatusTest<Real>>(list));

  ParameterList &trlist = list.sublist("Step").sublist("Line Search");
  state_->searchSize = static_cast<Real>(1);
  // Krylov Parameters
  maxit_ = list.sublist("General").sublist("Krylov").get("Iteration Limit",    20);
  tol1_  = list.sublist("General").sublist("Krylov").get("Absolute Tolerance", 1e-4);
  tol2_  = list.sublist("General").sublist("Krylov").get("Relative Tolerance", 1e-2);
  // Algorithm-Specific Parameters
  ROL::ParameterList &lmlist = trlist.sublist("Quasi-Newton").sublist("L-Secant-B");
  mu0_       = lmlist.get("Sufficient Decrease Parameter", 1e-2);
  spexp_     = lmlist.get("Relative Tolerance Exponent",   1.0);
  spexp_     = std::max(static_cast<Real>(1),std::min(spexp_,static_cast<Real>(2)));
  redlim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Reduction Steps", 10);
  explim_    = lmlist.sublist("Cauchy Point").get("Maximum Number of Expansion Steps", 10);
  alpha_     = lmlist.sublist("Cauchy Point").get("Initial Step Size",                 1.0);
  normAlpha_ = lmlist.sublist("Cauchy Point").get("Normalize Initial Step Size",       false);
  interpf_   = lmlist.sublist("Cauchy Point").get("Reduction Rate",                    0.1);
  extrapf_   = lmlist.sublist("Cauchy Point").get("Expansion Rate",                    10.0);
  qtol_      = lmlist.sublist("Cauchy Point").get("Decrease Tolerance",                1e-8);
  interpfPS_ = trlist.sublist("Line-Search Method").get("Backtracking Rate",           0.5);
  // Output Parameters
  verbosity_   = list.sublist("General").get("Output Level",0);
  writeHeader_ = verbosity_ > 2;
  // Secant Information
  useSecantPrecond_ = true;
  useSecantHessVec_ = true;
  ESecantMode mode = SECANTMODE_BOTH;
  if (secant == nullPtr) {
    esec_   = StringToESecant(list.sublist("General").sublist("Secant").get("Type","Limited-Memory Secant"));
    secant_ = SecantFactory<Real>(list,mode);
  }
}
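
// For reference, the ParameterList entries read by the constructor above.
// Indentation shows sublist nesting; values are the defaults used when a key
// is absent.
//
//   General
//     Output Level          = 0
//     Krylov
//       Iteration Limit     = 20
//       Absolute Tolerance  = 1e-4
//       Relative Tolerance  = 1e-2
//     Secant
//       Type                = "Limited-Memory Secant"
//   Step
//     Line Search
//       Line-Search Method
//         Backtracking Rate = 0.5
//       Quasi-Newton
//         L-Secant-B
//           Sufficient Decrease Parameter = 1e-2
//           Relative Tolerance Exponent   = 1.0
//           Cauchy Point
//             Maximum Number of Reduction Steps = 10
//             Maximum Number of Expansion Steps = 10
//             Initial Step Size                 = 1.0
//             Normalize Initial Step Size       = false
//             Reduction Rate                    = 0.1
//             Expansion Rate                    = 10.0
//             Decrease Tolerance                = 1e-8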

template<typename Real>
void LSecantBAlgorithm<Real>::initialize(Vector<Real> &x,
                                         const Vector<Real> &g,
                                         Objective<Real> &obj,
                                         BoundConstraint<Real> &bnd,
                                         std::ostream &outStream) {
  const Real one(1);
  hasEcon_ = true;
  if (proj_ == nullPtr) {
    proj_ = makePtr<PolyhedralProjection<Real>>(makePtrFromRef(bnd));
    hasEcon_ = false;
  }
  // Initialize data
  TypeB::Algorithm<Real>::initialize(x,g);
  // Update approximate gradient and approximate objective function.
  Real ftol = static_cast<Real>(0.1)*ROL_OVERFLOW<Real>();
  proj_->project(x,outStream); state_->nproj++;
  state_->iterateVec->set(x);
  obj.update(x,UpdateType::Initial,state_->iter);
  state_->value = obj.value(x,ftol); state_->nfval++;
  obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
  // Projected-gradient criticality measure: gnorm = ||P(x - g) - x||
  state_->stepVec->set(x);
  state_->stepVec->axpy(-one,state_->gradientVec->dual());
  proj_->project(*state_->stepVec,outStream); state_->nproj++;
  state_->stepVec->axpy(-one,x);
  state_->gnorm = state_->stepVec->norm();
  state_->snorm = ROL_INF<Real>();
  // Normalize initial CP step length
  if (normAlpha_) {
    alpha_ /= state_->gradientVec->norm();
  }
  // Initialize null space projection
  if (hasEcon_) {
    rcon_ = makePtr<ReducedLinearConstraint<Real>>(proj_->getLinearConstraint(),
                                                   makePtrFromRef(bnd),
                                                   makePtrFromRef(x));
    ns_   = makePtr<NullSpaceOperator<Real>>(rcon_,x,
                                             *proj_->getResidual());
  }
}

template<typename Real>
void LSecantBAlgorithm<Real>::run( Vector<Real>          &x,
                                   const Vector<Real>    &g,
                                   Objective<Real>       &obj,
                                   BoundConstraint<Real> &bnd,
                                   std::ostream          &outStream ) {
  const Real zero(0), one(1);
  Real tol0 = ROL_EPSILON<Real>(), ftol = ROL_EPSILON<Real>();
  Real gfnorm(0), gs(0), ftrial(0), q(0), tol(0), stol(0), snorm(0);
  // Initialize algorithm data
  initialize(x,g,obj,bnd,outStream);
  Ptr<Vector<Real>> s = x.clone();
  Ptr<Vector<Real>> gmod = g.clone(), gfree = g.clone();
  Ptr<Vector<Real>> pwa1 = x.clone(), pwa2 = x.clone(), pwa3 = x.clone();
  Ptr<Vector<Real>> dwa1 = g.clone(), dwa2 = g.clone(), dwa3 = g.clone();

  // Output
  if (verbosity_ > 0) writeOutput(outStream,true);

  while (status_->check(*state_)) {
    /**** SOLVE LINEARLY CONSTRAINED QUADRATIC SUBPROBLEM ****/
    // Compute Cauchy point
    snorm = dcauchy(*s,
                    alpha_,
                    q,
                    x,
                    state_->gradientVec->dual(),
                    *secant_,
                    *dwa1,
                    *dwa2,
                    outStream); // Approximately solve 1D optimization problem for alpha
    x.plus(*s);                 // Set x = proj(x[0] - alpha*g)

    // Model gradient at s = x[1] - x[0]
    gmod->set(*dwa1); // hessVec from Cauchy point computation
    gmod->plus(*state_->gradientVec);
    gfree->set(*gmod);
    //bnd.pruneActive(*gfree,x,zero);
    pwa1->set(gfree->dual());
    bnd.pruneActive(*pwa1,x,zero);
    gfree->set(pwa1->dual());
    if (hasEcon_) {
      applyFreePrecond(*pwa1,*gfree,x,*secant_,bnd,tol0,*dwa1,*pwa2);
      gfnorm = pwa1->norm();
    }
    else {
      gfnorm = gfree->norm();
    }
    SPiter_ = 0; SPflag_ = 0;
    if (verbosity_ > 1) {
      outStream << "  Norm of free gradient components: " << gfnorm << std::endl;
      outStream << std::endl;
    }

    // Linearly constrained quadratic subproblem solve
    // Run CG for subspace minimization
    tol  = std::min(tol1_,tol2_*std::pow(gfnorm,spexp_));
    stol = tol; //zero;
    if (gfnorm > zero) {
      snorm = dpcg(*s, SPflag_, SPiter_, *gfree, x, *secant_, bnd,
                   tol, stol, maxit_,
                   *pwa1, *dwa1, *pwa2, *dwa2, *pwa3, *dwa3,
                   outStream);
      if (verbosity_ > 1) {
        outStream << "  Computation of CG step"          << std::endl;
        outStream << "    CG step length:          " << snorm   << std::endl;
        outStream << "    Number of CG iterations: " << SPiter_ << std::endl;
        outStream << "    CG flag:                 " << SPflag_ << std::endl;
        outStream << std::endl;
      }
      x.plus(*s);
      // Project to ensure that step is feasible
      // May lose feasibility because of numerical errors
      proj_->project(x,outStream); state_->nproj++;
    }
    state_->stepVec->set(x);
    state_->stepVec->axpy(-one,*state_->iterateVec);
    gs = state_->gradientVec->apply(*state_->stepVec);

    // Projected search
    state_->snorm = dsrch(*state_->iterateVec,*state_->stepVec,ftrial,state_->searchSize,
                          state_->value,gs,obj,*pwa1,outStream);
    x.set(*state_->iterateVec);

    // Update algorithm state
    state_->iter++;
    state_->value = ftrial;
    obj.update(x,UpdateType::Accept,state_->iter);
    dwa1->set(*state_->gradientVec);
    obj.gradient(*state_->gradientVec,x,ftol); state_->ngrad++;
    state_->gnorm = TypeB::Algorithm<Real>::optimalityCriterion(x,*state_->gradientVec,*pwa1,outStream);

    // Update secant information
    secant_->updateStorage(x,*state_->gradientVec,*dwa1,*state_->stepVec,
                           state_->snorm,state_->iter);

    // Update Output
    if (verbosity_ > 0) writeOutput(outStream,writeHeader_);
  }
  if (verbosity_ > 0) TypeB::Algorithm<Real>::writeExitStatus(outStream);
}

template<typename Real>
Real LSecantBAlgorithm<Real>::dgpstep(Vector<Real> &s, const Vector<Real> &w,
                                      const Vector<Real> &x, const Real alpha,
                                      std::ostream &outStream) const {
  const Real one(1);
  // Compute the projected step s = P(x + alpha*w) - x and return its norm
  s.set(x); s.axpy(alpha,w);
  proj_->project(s,outStream); state_->nproj++;
  s.axpy(-one,x);
  return s.norm();
}

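// dcauchy searches along the projected-gradient path s(alpha) = P(x - alpha*g) - x
// for an approximate Cauchy point. It evaluates the secant model
// q(s) = <g,s> + 0.5*<s,Bs> and accepts alpha once the sufficient decrease
// test q <= mu0_*<g,s> holds, backtracking by interpf_ (at most redlim_ times)
// or extrapolating by extrapf_ (at most explim_ times).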
template<typename Real>
Real LSecantBAlgorithm<Real>::dcauchy(Vector<Real> &s,
                                      Real &alpha,
                                      Real &q,
                                      const Vector<Real> &x,
                                      const Vector<Real> &g,
                                      Secant<Real> &secant,
                                      Vector<Real> &dwa, Vector<Real> &dwa1,
                                      std::ostream &outStream) {
  const Real half(0.5);
  bool interp = false;
  Real gs(0), snorm(0);
  // Compute s = P(x[0] - alpha g[0])
  snorm = dgpstep(s,g,x,-alpha,outStream);
  secant.applyB(dwa,s);
  gs = s.dot(g);
  q  = half * s.apply(dwa) + gs;
  interp = (q > mu0_*gs);
  // Either increase or decrease alpha to find approximate Cauchy point
  int cnt = 0;
  if (interp) { // Reduce alpha until the decrease condition holds
    bool search = true;
    while (search) {
      alpha *= interpf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      secant.applyB(dwa,s);
      gs = s.dot(g);
      q  = half * s.apply(dwa) + gs;
      search = (q > mu0_*gs) && (cnt < redlim_);
      cnt++;
    }
  }
  else { // Expand alpha while the model decrease keeps improving
    bool search = true;
    Real alphas = alpha;
    Real qs = q;
    dwa1.set(dwa);
    while (search) {
      alpha *= extrapf_;
      snorm = dgpstep(s,g,x,-alpha,outStream);
      if (cnt < explim_) {
        secant.applyB(dwa,s);
        gs = s.dot(g);
        q  = half * s.apply(dwa) + gs;
        if (q <= mu0_*gs && std::abs(q-qs) > qtol_*std::abs(qs)) {
          dwa1.set(dwa);
          search = true;
          alphas = alpha;
          qs     = q;
        }
        else {
          q = qs;
          dwa.set(dwa1);
          search = false;
        }
      }
      else {
        search = false;
      }
      cnt++;
    }
    alpha = alphas;
    snorm = dgpstep(s,g,x,-alpha,outStream);
  }
  if (verbosity_ > 1) {
    outStream << "  Cauchy point"                        << std::endl;
    outStream << "    Step length (alpha):           " << alpha << std::endl;
    outStream << "    Step length (alpha*g):         " << snorm << std::endl;
    outStream << "    Model decrease:                " << -q    << std::endl;
    if (!interp) {
      outStream << "    Number of extrapolation steps: " << cnt << std::endl;
    }
  }
  return snorm;
}

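// dsrch performs a backtracking search along the full step s: starting from
// beta = 1, it accepts once fnew <= fold + mu0_*beta*gs (an Armijo-type
// condition using the directional derivative estimate gs) and otherwise
// multiplies beta by interpfPS_.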
template<typename Real>
Real LSecantBAlgorithm<Real>::dsrch(Vector<Real> &x, Vector<Real> &s, Real &fnew, Real &beta,
                                    Real fold, Real gs, Objective<Real> &obj,
                                    Vector<Real> &pwa, std::ostream &outStream) {
  const Real one(1);
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Real snorm(0);
  int nsteps = 0;
  // Reduce beta until sufficient decrease is satisfied
  bool search = true;
  beta = one;
  while (search) {
    nsteps++;
    pwa.set(x); pwa.axpy(beta,s);
    obj.update(pwa,UpdateType::Trial);
    fnew = obj.value(pwa,tol); state_->nfval++;
    if (fnew <= fold + mu0_*beta*gs) search = false;
    else                             beta *= interpfPS_;
  }
  s.scale(beta);
  x.plus(s);
  snorm = s.norm();
  if (verbosity_ > 1) {
    outStream << std::endl;
    outStream << "  Line search"                        << std::endl;
    outStream << "    Step length (beta):        " << beta   << std::endl;
    outStream << "    Step length (beta*s):      " << snorm  << std::endl;
    outStream << "    New objective value:       " << fnew   << std::endl;
    outStream << "    Old objective value:       " << fold   << std::endl;
    outStream << "    Descent verification (gs): " << gs     << std::endl;
    outStream << "    Number of steps:           " << nsteps << std::endl;
  }
  return snorm;
}

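// dpcg runs a preconditioned conjugate gradient iteration for the quadratic
// model restricted to the free (inactive) variables, using the secant operator
// as Hessian and its inverse (or a null-space projection when linear equality
// constraints are present) as preconditioner. If a trial iterate violates the
// bounds, a bisection on the step length recovers the largest feasible step
// and the iteration stops with iflag = 2; iflag = 1 signals that the iteration
// limit was reached and iflag = 0 signals convergence to the requested tolerance.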
template<typename Real>
Real LSecantBAlgorithm<Real>::dpcg(Vector<Real> &w, int &iflag, int &iter,
                                   const Vector<Real> &g, const Vector<Real> &x,
                                   Secant<Real> &secant,
                                   BoundConstraint<Real> &bnd,
                                   const Real tol, const Real stol, const int itermax,
                                   Vector<Real> &p, Vector<Real> &q, Vector<Real> &r,
                                   Vector<Real> &t, Vector<Real> &pwa, Vector<Real> &dwa,
                                   std::ostream &outStream) const {
  // p = step (primal)
  // q = hessian applied to step p (dual)
  // t = gradient (dual)
  // r = preconditioned gradient (primal)
  Real tol0 = ROL_EPSILON<Real>(), tolB(0);
  const Real minIntSize = ROL_EPSILON<Real>();
  const Real zero(0), half(0.5), one(1), two(2);
  Real rho(0), kappa(0), beta(0), sigma(0), alpha(0), alphatmp(0);
  Real rtr(0), tnorm(0), sMs(0), pMp(0), sMp(0);
  iter = 0; iflag = 0;
  // Initialize step
  w.zero();
  // Compute residual
  t.set(g); t.scale(-one);
  // Preconditioned residual
  applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
  //rho = r.dot(t.dual());
  rho = r.apply(t);
  // Initialize direction
  p.set(r);
  pMp = (!hasEcon_ ? rho : p.dot(p)); // If no equality constraint, use preconditioned norm
  // Iterate CG
  for (iter = 0; iter < itermax; ++iter) {
    // Apply Hessian to direction dir
    applyFreeHessian(q,p,x,secant,bnd,tol0,pwa);
    // Compute step length
    kappa = p.apply(q);
    alpha = rho/kappa;
    // Check if iterate is feasible
    pwa.set(x); pwa.plus(w); pwa.axpy(alpha,p);
    if (!bnd.isFeasible(pwa)) { // Bisection to find the largest feasible step size
      sigma = zero;
      tolB  = minIntSize * alpha;
      while (alpha-sigma > tolB) {
        alphatmp = sigma + half * (alpha - sigma);
        pwa.set(x); pwa.plus(w); pwa.axpy(alphatmp,p);
        if (!bnd.isFeasible(pwa)) alpha = alphatmp;
        else                      sigma = alphatmp;
      }
      w.axpy(sigma,p);
      t.axpy(-sigma,q);
      sMs   = sMs + two*sigma*sMp + sigma*sigma*pMp;
      iflag = 2;
      break;
    }
    // Update iterate and residual
    w.axpy(alpha,p);
    t.axpy(-alpha,q);
    applyFreePrecond(r,t,x,secant,bnd,tol0,dwa,pwa);
    // Exit if residual tolerance is met
    //rtr = r.dot(t.dual());
    rtr   = r.apply(t);
    tnorm = t.norm();
    if (rtr <= stol*stol || tnorm <= tol) {
      sMs   = sMs + two*alpha*sMp + alpha*alpha*pMp;
      iflag = 0;
      break;
    }
    // Compute p = r + beta * p
    beta = rtr/rho;
    p.scale(beta); p.plus(r);
    rho = rtr;
    // Update dot products
    //   sMs = <s, inv(M)s> or <s, s> if equality constraint present
    //   sMp = <s, inv(M)p> or <s, p> if equality constraint present
    //   pMp = <p, inv(M)p> or <p, p> if equality constraint present
    sMs = sMs + two*alpha*sMp + alpha*alpha*pMp;
    sMp = beta*(sMp + alpha*pMp);
    pMp = (!hasEcon_ ? rho : p.dot(p)) + beta*beta*pMp;
  }
  // Check iteration count
  if (iter == itermax) iflag = 1;
  if (iflag != 1)      iter++;
  return std::sqrt(sMs); // w.norm();
}

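// applyFreeHessian applies the secant approximation of the Hessian restricted
// to the free variables: active components of the input are pruned, B is
// applied, and active components of the result are pruned again.
// applyFreePrecond applies the corresponding inverse secant operator on the
// free variables or, when linear equality constraints are present, a null
// space projection built from the reduced linear constraint.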
template<typename Real>
void LSecantBAlgorithm<Real>::applyFreeHessian(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &pwa) const {
  const Real zero(0);
  pwa.set(v);
  bnd.pruneActive(pwa,x,zero);
  secant.applyB(hv,pwa);
  pwa.set(hv.dual());
  bnd.pruneActive(pwa,x,zero);
  hv.set(pwa.dual());
}

template<typename Real>
void LSecantBAlgorithm<Real>::applyFreePrecond(Vector<Real> &hv,
                                               const Vector<Real> &v,
                                               const Vector<Real> &x,
                                               Secant<Real> &secant,
                                               BoundConstraint<Real> &bnd,
                                               Real &tol,
                                               Vector<Real> &dwa,
                                               Vector<Real> &pwa) const {
  if (!hasEcon_) {
    const Real zero(0);
    pwa.set(v.dual());
    bnd.pruneActive(pwa,x,zero);
    dwa.set(pwa.dual());
    secant.applyH(hv,dwa);
    bnd.pruneActive(hv,x,zero);
  }
  else {
    // Perform null space projection
    rcon_->setX(makePtrFromRef(x));
    ns_->update(x);
    pwa.set(v.dual());
    ns_->apply(hv,pwa,tol);
  }
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::stringstream hist;
  if (verbosity_ > 1) {
    hist << std::string(114,'-') << std::endl;
    hist << " L-Secant-B line search method status output definitions" << std::endl << std::endl;
    hist << "  iter   - Number of iterates (steps taken)"                     << std::endl;
    hist << "  value  - Objective function value"                             << std::endl;
    hist << "  gnorm  - Norm of the gradient"                                 << std::endl;
    hist << "  snorm  - Norm of the step (update to optimization vector)"     << std::endl;
    hist << "  LSpar  - Line-search parameter"                                << std::endl;
    hist << "  #fval  - Number of times the objective function was evaluated" << std::endl;
    hist << "  #grad  - Number of times the gradient was computed"            << std::endl;
    hist << "  #proj  - Number of times the projection was applied"           << std::endl;
    hist << "  iterCG - Number of truncated CG iterations"                    << std::endl << std::endl;
    hist << "  flagCG - Truncated CG termination flag"                        << std::endl;
    hist << "    0 - Converged"                                               << std::endl;
    hist << "    1 - Iteration Limit Exceeded"                                << std::endl;
    hist << "    2 - Bounds Exceeded"                                         << std::endl;
    hist << std::string(114,'-') << std::endl;
  }
  hist << "  ";
  hist << std::setw(6)  << std::left << "iter";
  hist << std::setw(15) << std::left << "value";
  hist << std::setw(15) << std::left << "gnorm";
  hist << std::setw(15) << std::left << "snorm";
  hist << std::setw(15) << std::left << "LSpar";
  hist << std::setw(10) << std::left << "#fval";
  hist << std::setw(10) << std::left << "#grad";
  hist << std::setw(10) << std::left << "#proj";
  hist << std::setw(10) << std::left << "iterCG";
  hist << std::setw(10) << std::left << "flagCG";
  hist << std::endl;
  os << hist.str();
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeName( std::ostream& os ) const {
  std::stringstream hist;
  hist << std::endl << "L-Secant-B Line-Search Method (Type B, Bound Constraints)" << std::endl;
  os << hist.str();
}

template<typename Real>
void LSecantBAlgorithm<Real>::writeOutput( std::ostream& os, bool write_header ) const {
  std::stringstream hist;
  hist << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( write_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    hist << "  ";
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << "---";
    hist << std::setw(15) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << state_->nfval;
    hist << std::setw(10) << std::left << state_->ngrad;
    hist << std::setw(10) << std::left << state_->nproj;
    hist << std::setw(10) << std::left << "---";
    hist << std::setw(10) << std::left << "---";
    hist << std::endl;
  }
  else {
    hist << "  ";
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << state_->snorm;
    hist << std::setw(15) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << state_->nfval;
    hist << std::setw(10) << std::left << state_->ngrad;
    hist << std::setw(10) << std::left << state_->nproj;
    hist << std::setw(10) << std::left << SPiter_;
    hist << std::setw(10) << std::left << SPflag_;
    hist << std::endl;
  }
  os << hist.str();
}

} // namespace TypeB
} // namespace ROL

#endif
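
A minimal usage sketch for this class is shown below. It is illustrative only: the top-level include "ROL_TypeB_LSecantBAlgorithm.hpp" is an assumed header name, and QuadObjective is a small objective written for this sketch (a user would supply their own ROL::Objective); vectors and bounds use the standard ROL::StdVector and ROL::Bounds utilities.

#include <iostream>
#include <vector>
#include "ROL_Objective.hpp"
#include "ROL_StdVector.hpp"
#include "ROL_Bounds.hpp"
#include "ROL_TypeB_LSecantBAlgorithm.hpp"  // assumed header providing ROL::TypeB::LSecantBAlgorithm

// Illustrative objective f(x) = 0.5*||x - c||^2 with gradient g = x - c.
template<typename Real>
class QuadObjective : public ROL::Objective<Real> {
private:
  ROL::Ptr<ROL::Vector<Real>> c_;  // target point
public:
  QuadObjective(const ROL::Ptr<ROL::Vector<Real>> &c) : c_(c) {}
  Real value(const ROL::Vector<Real> &x, Real &tol) override {
    ROL::Ptr<ROL::Vector<Real>> d = x.clone();
    d->set(x); d->axpy(static_cast<Real>(-1), *c_);
    return static_cast<Real>(0.5) * d->dot(*d);
  }
  void gradient(ROL::Vector<Real> &g, const ROL::Vector<Real> &x, Real &tol) override {
    g.set(x); g.axpy(static_cast<Real>(-1), *c_);
  }
};

int main() {
  using Real = double;
  const int dim = 10;

  // Parameters read by the constructor above; defaults are used if omitted.
  ROL::ParameterList list;
  list.sublist("General").set("Output Level", 1);

  // Optimization vector, elementwise bounds -1 <= x <= 1, and target c = 2,
  // so the bound-constrained minimizer sits on the upper bound.
  ROL::Ptr<ROL::StdVector<Real>> x = ROL::makePtr<ROL::StdVector<Real>>(
      ROL::makePtr<std::vector<Real>>(dim, 0.0));
  ROL::Ptr<ROL::Vector<Real>> lo = x->clone(); lo->setScalar(-1.0);
  ROL::Ptr<ROL::Vector<Real>> up = x->clone(); up->setScalar( 1.0);
  ROL::Ptr<ROL::Vector<Real>> c  = x->clone(); c->setScalar(  2.0);
  ROL::Bounds<Real> bnd(lo, up);

  QuadObjective<Real> obj(c);
  ROL::TypeB::LSecantBAlgorithm<Real> algo(list);
  algo.run(*x, obj, bnd, std::cout);  // bound-constrained solve
  return 0;
}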