ROL_TypeE_CompositeStepAlgorithm_Def.hpp
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_TYPEE_COMPOSITESTEPALGORITHM_DEF_H
45 #define ROL_TYPEE_COMPOSITESTEPALGORITHM_DEF_H
46 
47 namespace ROL {
48 namespace TypeE {
49 
50 template<typename Real>
51 CompositeStepAlgorithm<Real>::CompositeStepAlgorithm( ParameterList &list )
52  : TypeE::Algorithm<Real>::Algorithm(), list_(list) {
53  // Set status test
54  status_->reset();
55  status_->add(makePtr<ConstraintStatusTest<Real>>(list));
56 
57  flagCG_ = 0;
58  flagAC_ = 0;
59  iterCG_ = 0;
60 
61  Real one(1), two(2), p8(0.8), zero(0), oem8(1.e-8);
62  ParameterList& cslist = list_.sublist("Step").sublist("Composite Step");
63 
64  //maxiterOSS_ = cslist.sublist("Optimality System Solver").get("Iteration Limit", 50);
65  tolOSS_ = cslist.sublist("Optimality System Solver").get("Nominal Relative Tolerance", 1e-8);
66  tolOSSfixed_ = cslist.sublist("Optimality System Solver").get("Fix Tolerance", true);
67  maxiterCG_ = cslist.sublist("Tangential Subproblem Solver").get("Iteration Limit", 20);
68  tolCG_ = cslist.sublist("Tangential Subproblem Solver").get("Relative Tolerance", 1e-2);
69  Delta_ = cslist.get("Initial Radius", 1e2);
70  useConHess_ = cslist.get("Use Constraint Hessian", true);
71  verbosity_ = list_.sublist("General").get("Output Level", 0);
72  printHeader_ = (verbosity_ > 2);
73 
74  lmhtol_ = tolOSS_;
75  qntol_ = tolOSS_;
76  pgtol_ = tolOSS_;
77  projtol_ = tolOSS_;
78  tangtol_ = tolOSS_;
79  tntmax_ = two;
80 
81  zeta_ = p8;
82  penalty_ = one;
83  eta_ = oem8;
84 
85  snorm_ = zero;
86  nnorm_ = zero;
87  tnorm_ = zero;
88 
89  infoALL_ = false;
90  if (verbosity_ > 1) {
91  infoALL_ = true;
92  }
93  infoQN_ = false;
94  infoLM_ = false;
95  infoTS_ = false;
96  infoAC_ = false;
97  infoLS_ = false;
98  infoQN_ = infoQN_ || infoALL_;
99  infoLM_ = infoLM_ || infoALL_;
100  infoTS_ = infoTS_ || infoALL_;
101  infoAC_ = infoAC_ || infoALL_;
102  infoLS_ = infoLS_ || infoALL_;
103 
104  totalIterCG_ = 0;
105  totalProj_ = 0;
106  totalNegCurv_ = 0;
107  totalRef_ = 0;
108  totalCallLS_ = 0;
109  totalIterLS_ = 0;
110 }
111 
112 
115 template<typename Real>
116 void CompositeStepAlgorithm<Real>::computeTrial(Vector<Real> &s,
117  const Vector<Real> &x,
118  const Vector<Real> &l,
119  Objective<Real> &obj,
120  Constraint<Real> &con) {
121 
122  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
123  Real f = 0.0;
124  ROL::Ptr<Vector<Real> > n = xvec_->clone();
125  ROL::Ptr<Vector<Real> > c = cvec_->clone();
126  ROL::Ptr<Vector<Real> > t = xvec_->clone();
127  ROL::Ptr<Vector<Real> > tCP = xvec_->clone();
128  ROL::Ptr<Vector<Real> > g = gvec_->clone();
129  ROL::Ptr<Vector<Real> > gf = gvec_->clone();
130  ROL::Ptr<Vector<Real> > Wg = xvec_->clone();
131  ROL::Ptr<Vector<Real> > ajl = gvec_->clone();
132 
133  Real f_new = 0.0;
134  ROL::Ptr<Vector<Real> > l_new = lvec_->clone();
135  ROL::Ptr<Vector<Real> > c_new = cvec_->clone();
136  ROL::Ptr<Vector<Real> > g_new = gvec_->clone();
137  ROL::Ptr<Vector<Real> > gf_new = gvec_->clone();
138 
139  // Evaluate objective ... should have been stored.
140  f = obj.value(x, zerotol);
141  state_->nfval++;
142  // Compute gradient of objective ... should have been stored.
143  obj.gradient(*gf, x, zerotol);
144  // Evaluate constraint ... should have been stored.
145  con.value(*c, x, zerotol);
146 
147  // Compute quasi-normal step.
148  computeQuasinormalStep(*n, *c, x, zeta_*Delta_, con);
149 
150  // Compute gradient of Lagrangian ... should have been stored.
151  con.applyAdjointJacobian(*ajl, l, x, zerotol);
152  g->set(*gf);
153  g->plus(*ajl);
154  state_->ngrad++;
155 
156  // Solve tangential subproblem.
157  solveTangentialSubproblem(*t, *tCP, *Wg, x, *g, *n, l, Delta_, obj, con);
158  totalIterCG_ += iterCG_;
159 
160  // Check acceptance of subproblem solutions, adjust merit function penalty parameter, ensure global convergence.
161  accept(s, *n, *t, f_new, *c_new, *gf_new, *l_new, *g_new, x, l, f, *gf, *c, *g, *tCP, *Wg, obj, con);
162 }
163 
164 
165 template<typename Real>
166 void CompositeStepAlgorithm<Real>::computeLagrangeMultiplier(Vector<Real> &l,
167  const Vector<Real> &x,
168  const Vector<Real> &gf,
169  Constraint<Real> &con) {
170 
171  Real one(1);
172  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
173  std::vector<Real> augiters;
174 
175  if (infoLM_) {
176  std::stringstream hist;
177  hist << "\n Lagrange multiplier step\n";
178  std::cout << hist.str();
179  }
180 
181  /* Apply adjoint of constraint Jacobian to current multiplier. */
182  Ptr<Vector<Real> > ajl = gvec_->clone();
183  con.applyAdjointJacobian(*ajl, l, x, zerotol);
184 
185  /* Form right-hand side of the augmented system. */
186  Ptr<Vector<Real> > b1 = gvec_->clone();
187  Ptr<Vector<Real> > b2 = cvec_->clone();
188  // b1 is the negative gradient of the Lagrangian
189  b1->set(gf); b1->plus(*ajl); b1->scale(-one);
190  // b2 is zero
191  b2->zero();
192 
193  /* Declare left-hand side of augmented system. */
194  Ptr<Vector<Real> > v1 = xvec_->clone();
195  Ptr<Vector<Real> > v2 = lvec_->clone();
196 
197  /* Compute linear solver tolerance. */
198  Real b1norm = b1->norm();
199  Real tol = setTolOSS(lmhtol_*b1norm);
200 
201  /* Solve augmented system. */
202  augiters = con.solveAugmentedSystem(*v1, *v2, *b1, *b2, x, tol);
203  totalCallLS_++;
204  totalIterLS_ = totalIterLS_ + augiters.size();
205  printInfoLS(augiters);
206 
207  /* Return updated Lagrange multiplier. */
208  // v2 is the multiplier update
209  l.plus(*v2);
210 
211 } // computeLagrangeMultiplier
212 
213 
214 template<typename Real>
215 void CompositeStepAlgorithm<Real>::computeQuasinormalStep(Vector<Real> &n,
216  const Vector<Real> &c,
217  const Vector<Real> &x,
218  Real delta,
219  Constraint<Real> &con) {
220 
221  if (infoQN_) {
222  std::stringstream hist;
223  hist << "\n Quasi-normal step\n";
224  std::cout << hist.str();
225  }
226 
227  Real zero(0);
228  Real one(1);
229  Real zerotol = std::sqrt(ROL_EPSILON<Real>()); //zero;
230  std::vector<Real> augiters;
231 
232  /* Compute Cauchy step nCP. */
233  Ptr<Vector<Real> > nCP = xvec_->clone();
234  Ptr<Vector<Real> > nCPdual = gvec_->clone();
235  Ptr<Vector<Real> > nN = xvec_->clone();
236  Ptr<Vector<Real> > ctemp = cvec_->clone();
237  Ptr<Vector<Real> > dualc0 = lvec_->clone();
238  dualc0->set(c.dual());
239  con.applyAdjointJacobian(*nCPdual, *dualc0, x, zerotol);
240  nCP->set(nCPdual->dual());
241  con.applyJacobian(*ctemp, *nCP, x, zerotol);
242 
243  Real normsquare_ctemp = ctemp->dot(*ctemp);
244  if (normsquare_ctemp != zero) {
245  nCP->scale( -(nCP->dot(*nCP))/normsquare_ctemp );
246  }
247 
248  /* If the Cauchy step nCP is outside the trust region,
249  return the scaled Cauchy step. */
250  Real norm_nCP = nCP->norm();
251  if (norm_nCP >= delta) {
252  n.set(*nCP);
253  n.scale( delta/norm_nCP );
254  if (infoQN_) {
255  std::stringstream hist;
256  hist << " taking partial Cauchy step\n";
257  std::cout << hist.str();
258  }
259  return;
260  }
261 
262  /* Compute 'Newton' step, for example, by solving a problem
263  related to finding the minimum norm solution of min || c(x_k)*s + c ||^2. */
264  // Compute tolerance for linear solver.
265  con.applyJacobian(*ctemp, *nCP, x, zerotol);
266  ctemp->plus(c);
267  Real tol = setTolOSS(qntol_*ctemp->norm());
268  // Form right-hand side.
269  ctemp->scale(-one);
270  nCPdual->set(nCP->dual());
271  nCPdual->scale(-one);
272  // Declare left-hand side of augmented system.
273  Ptr<Vector<Real> > dn = xvec_->clone();
274  Ptr<Vector<Real> > y = lvec_->clone();
275  // Solve augmented system.
276  augiters = con.solveAugmentedSystem(*dn, *y, *nCPdual, *ctemp, x, tol);
277  totalCallLS_++;
278  totalIterLS_ = totalIterLS_ + augiters.size();
279  printInfoLS(augiters);
280 
281  nN->set(*dn);
282  nN->plus(*nCP);
283 
284  /* Either take full or partial Newton step, depending on
285  the trust-region constraint. */
286  Real norm_nN = nN->norm();
287  if (norm_nN <= delta) {
288  // Take full feasibility step.
289  n.set(*nN);
290  if (infoQN_) {
291  std::stringstream hist;
292  hist << " taking full Newton step\n";
293  std::cout << hist.str();
294  }
295  }
296  else {
297  // Take convex combination n = nCP+tau*(nN-nCP),
298  // so that ||n|| = delta. In other words, solve
299  // scalar quadratic equation: ||nCP+tau*(nN-nCP)||^2 = delta^2.
300  Real aa = dn->dot(*dn);
301  Real bb = dn->dot(*nCP);
302  Real cc = norm_nCP*norm_nCP - delta*delta;
303  Real tau = (-bb+sqrt(bb*bb-aa*cc))/aa;
304  n.set(*nCP);
305  n.axpy(tau, *dn);
306  if (infoQN_) {
307  std::stringstream hist;
308  hist << " taking dogleg step\n";
309  std::cout << hist.str();
310  }
311  }
312 
313 } // computeQuasinormalStep
314 
315 
316 template<typename Real>
317 void CompositeStepAlgorithm<Real>::solveTangentialSubproblem(Vector<Real> &t,
318  Vector<Real> &tCP,
319  Vector<Real> &Wg,
320  const Vector<Real> &x,
321  const Vector<Real> &g,
322  const Vector<Real> &n,
323  const Vector<Real> &l,
324  Real delta,
325  Objective<Real> &obj,
326  Constraint<Real> &con) {
327 
328  /* Initialization of the CG step. */
329  bool orthocheck = true; // set to true to check orthogonality
330  // of Wr and r, otherwise set to false
331  Real tol_ortho = 0.5; // orthogonality measure; represents a bound on norm( \hat{S}, 2), where
332  // \hat{S} is defined in Heinkenschloss/Ridzal, "A Matrix-Free Trust-Region SQP Method"
333  Real S_max = 1.0; // another orthogonality measure; norm(S) needs to be bounded by
334  // a modest constant; norm(S) is small if the approximation of
335  // the null space projector is good
336  Real zero(0);
337  Real one(1);
338  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
339  std::vector<Real> augiters;
340  iterCG_ = 1;
341  flagCG_ = 0;
342  t.zero();
343  tCP.zero();
344  Ptr<Vector<Real> > r = gvec_->clone();
345  Ptr<Vector<Real> > pdesc = xvec_->clone();
346  Ptr<Vector<Real> > tprev = xvec_->clone();
347  Ptr<Vector<Real> > Wr = xvec_->clone();
348  Ptr<Vector<Real> > Hp = gvec_->clone();
349  Ptr<Vector<Real> > xtemp = xvec_->clone();
350  Ptr<Vector<Real> > gtemp = gvec_->clone();
351  Ptr<Vector<Real> > ltemp = lvec_->clone();
352  Ptr<Vector<Real> > czero = cvec_->clone();
353  czero->zero();
354  r->set(g);
355  obj.hessVec(*gtemp, n, x, zerotol);
356  r->plus(*gtemp);
357  if (useConHess_) {
358  con.applyAdjointHessian(*gtemp, l, n, x, zerotol);
359  r->plus(*gtemp);
360  }
361  Real normg = r->norm();
362  Real normWg = zero;
363  Real pHp = zero;
364  Real rp = zero;
365  Real alpha = zero;
366  Real normp = zero;
367  Real normr = zero;
368  Real normt = zero;
369  std::vector<Real> normWr(maxiterCG_+1, zero);
370 
371  std::vector<Ptr<Vector<Real > > > p; // stores search directions
372  std::vector<Ptr<Vector<Real > > > Hps; // stores duals of hessvec's applied to p's
373  std::vector<Ptr<Vector<Real > > > rs; // stores duals of residuals
374  std::vector<Ptr<Vector<Real > > > Wrs; // stores duals of projected residuals
375 
376  Real rptol(1e-12);
377 
378  if (infoTS_) {
379  std::stringstream hist;
380  hist << "\n Tangential subproblem\n";
381  hist << std::setw(6) << std::right << "iter" << std::setw(18) << "||Wr||/||Wr0||" << std::setw(15) << "||s||";
382  hist << std::setw(15) << "delta" << std::setw(15) << "||c'(x)s||" << "\n";
383  std::cout << hist.str();
384  }
385 
386  if (normg == 0) {
387  if (infoTS_) {
388  std::stringstream hist;
389  hist << " >>> Tangential subproblem: Initial gradient is zero! \n";
390  std::cout << hist.str();
391  }
392  iterCG_ = 0; Wg.zero(); flagCG_ = 0;
393  return;
394  }
395 
396  /* Start CG loop. */
397  while (iterCG_ < maxiterCG_) {
398 
399  // Store tangential Cauchy point (which is the current iterate in the second iteration).
400  if (iterCG_ == 2) {
401  tCP.set(t);
402  }
403 
404  // Compute (inexact) projection W*r.
405  if (iterCG_ == 1) {
406  // Solve augmented system.
407  Real tol = setTolOSS(pgtol_);
408  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
409  totalCallLS_++;
410  totalIterLS_ = totalIterLS_ + augiters.size();
411  printInfoLS(augiters);
412 
413  Wg.set(*Wr);
414  normWg = Wg.norm();
415  if (orthocheck) {
416  Wrs.push_back(xvec_->clone());
417  (Wrs[iterCG_-1])->set(*Wr);
418  }
419  // Check if done (small initial projected residual).
420  if (normWg == zero) {
421  flagCG_ = 0;
422  iterCG_--;
423  if (infoTS_) {
424  std::stringstream hist;
425  hist << " Initial projected residual is close to zero! \n";
426  std::cout << hist.str();
427  }
428  return;
429  }
430  // Set first residual to projected gradient.
431  // change r->set(Wg);
432  r->set(Wg.dual());
433  if (orthocheck) {
434  rs.push_back(xvec_->clone());
435  // change (rs[0])->set(*r);
436  (rs[0])->set(r->dual());
437  }
438  }
439  else {
440  // Solve augmented system.
441  Real tol = setTolOSS(projtol_);
442  augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
443  totalCallLS_++;
444  totalIterLS_ = totalIterLS_ + augiters.size();
445  printInfoLS(augiters);
446 
447  if (orthocheck) {
448  Wrs.push_back(xvec_->clone());
449  (Wrs[iterCG_-1])->set(*Wr);
450  }
451  }
452 
453  normWr[iterCG_-1] = Wr->norm();
454 
455  if (infoTS_) {
456  Ptr<Vector<Real> > ct = cvec_->clone();
457  con.applyJacobian(*ct, t, x, zerotol);
458  Real linc = ct->norm();
459  std::stringstream hist;
460  hist << std::scientific << std::setprecision(6);
461  hist << std::setw(6) << std::right << iterCG_-1 << std::setw(18) << normWr[iterCG_-1]/normWg << std::setw(15) << t.norm();
462  hist << std::setw(15) << delta << std::setw(15) << linc << "\n";
463  std::cout << hist.str();
464  }
465 
466  // Check if done (small relative residual).
467  if (normWr[iterCG_-1]/normWg < tolCG_) {
468  flagCG_ = 0;
469  iterCG_ = iterCG_-1;
470  if (infoTS_) {
471  std::stringstream hist;
472  hist << " || W(g + H*(n+s)) || <= cgtol*|| W(g + H*n)|| \n";
473  std::cout << hist.str();
474  }
475  return;
476  }
477 
478  // Check nonorthogonality, one-norm of (WR*R/diag^2 - I)
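  // The check forms the scaled Gram matrix T = D^{-1} (Wr'*r) D^{-1} with D = diag(||Wr_i||);
  // if ||T - I||_1 exceeds tol_ortho, T is inverted via LAPACK and ||T^{-1} - I||_1 is
  // compared against S_max, terminating CG (flagCG_ = 4) when the projection is too inexact.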
479  if (orthocheck) {
480  LA::Matrix<Real> Wrr(iterCG_,iterCG_); // holds matrix Wrs'*rs
481  LA::Matrix<Real> T(iterCG_,iterCG_); // holds matrix T=(1/diag)*Wrs'*rs*(1/diag)
482  LA::Matrix<Real> Tm1(iterCG_,iterCG_); // holds matrix Tm1=T-I
483  for (int i=0; i<iterCG_; i++) {
484  for (int j=0; j<iterCG_; j++) {
485  Wrr(i,j) = (Wrs[i])->dot(*rs[j]);
486  T(i,j) = Wrr(i,j)/(normWr[i]*normWr[j]);
487  Tm1(i,j) = T(i,j);
488  if (i==j) {
489  Tm1(i,j) = Tm1(i,j) - one;
490  }
491  }
492  }
493  if (Tm1.normOne() >= tol_ortho) {
494  LAPACK<int,Real> lapack;
495  std::vector<int> ipiv(iterCG_);
496  int info;
497  std::vector<Real> work(3*iterCG_);
498  // compute inverse of T
499  lapack.GETRF(iterCG_, iterCG_, T.values(), T.stride(), &ipiv[0], &info);
500  lapack.GETRI(iterCG_, T.values(), T.stride(), &ipiv[0], &work[0], 3*iterCG_, &info);
501  Tm1 = T;
502  for (int i=0; i<iterCG_; i++) {
503  Tm1(i,i) = Tm1(i,i) - one;
504  }
505  if (Tm1.normOne() > S_max) {
506  flagCG_ = 4;
507  if (infoTS_) {
508  std::stringstream hist;
509  hist << " large nonorthogonality in W(R)'*R detected \n";
510  std::cout << hist.str();
511  }
512  return;
513  }
514  }
515  }
516 
517  // Full orthogonalization.
518  p.push_back(xvec_->clone());
519  (p[iterCG_-1])->set(*Wr);
520  (p[iterCG_-1])->scale(-one);
521  for (int j=1; j<iterCG_; j++) {
522  Real scal = (p[iterCG_-1])->dot(*(Hps[j-1])) / (p[j-1])->dot(*(Hps[j-1]));
523  Ptr<Vector<Real> > pj = xvec_->clone();
524  pj->set(*p[j-1]);
525  pj->scale(-scal);
526  (p[iterCG_-1])->plus(*pj);
527  }
528 
529  // change Hps.push_back(gvec_->clone());
530  Hps.push_back(xvec_->clone());
531  // change obj.hessVec(*(Hps[iterCG_-1]), *(p[iterCG_-1]), x, zerotol);
532  obj.hessVec(*Hp, *(p[iterCG_-1]), x, zerotol);
533  if (useConHess_) {
534  con.applyAdjointHessian(*gtemp, l, *(p[iterCG_-1]), x, zerotol);
535  // change (Hps[iterCG_-1])->plus(*gtemp);
536  Hp->plus(*gtemp);
537  }
538  // "Preconditioning" step.
539  (Hps[iterCG_-1])->set(Hp->dual());
540 
541  pHp = (p[iterCG_-1])->dot(*(Hps[iterCG_-1]));
542  // change rp = (p[iterCG_-1])->dot(*r);
543  rp = (p[iterCG_-1])->dot(*(rs[iterCG_-1]));
544 
545  normp = (p[iterCG_-1])->norm();
546  normr = r->norm();
547 
548  // Negative curvature stopping condition.
549  if (pHp <= 0) {
550  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
551  if ((std::abs(rp) >= rptol*normp*normr) && (sgn(rp) == 1)) {
552  pdesc->scale(-one); // -p is the descent direction
553  }
554  flagCG_ = 2;
555  Real a = pdesc->dot(*pdesc);
556  Real b = pdesc->dot(t);
557  Real c = t.dot(t) - delta*delta;
558  // Positive root of a*theta^2 + 2*b*theta + c = 0.
559  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
560  xtemp->set(*(p[iterCG_-1]));
561  xtemp->scale(theta);
562  t.plus(*xtemp);
563  // Store as tangential Cauchy point if terminating in first iteration.
564  if (iterCG_ == 1) {
565  tCP.set(t);
566  }
567  if (infoTS_) {
568  std::stringstream hist;
569  hist << " negative curvature detected \n";
570  std::cout << hist.str();
571  }
572  return;
573  }
574 
575  // Want to enforce nonzero alpha's.
576  if (std::abs(rp) < rptol*normp*normr) {
577  flagCG_ = 5;
578  if (infoTS_) {
579  std::stringstream hist;
580  hist << " Zero alpha due to inexactness. \n";
581  std::cout << hist.str();
582  }
583  return;
584  }
585 
586  alpha = - rp/pHp;
587 
588  // Iterate update.
589  tprev->set(t);
590  xtemp->set(*(p[iterCG_-1]));
591  xtemp->scale(alpha);
592  t.plus(*xtemp);
593 
594  // Trust-region stopping condition.
595  normt = t.norm();
596  if (normt >= delta) {
597  pdesc->set(*(p[iterCG_-1])); // p is the descent direction
598  if (sgn(rp) == 1) {
599  pdesc->scale(-one); // -p is the descent direction
600  }
601  Real a = pdesc->dot(*pdesc);
602  Real b = pdesc->dot(*tprev);
603  Real c = tprev->dot(*tprev) - delta*delta;
604  // Positive root of a*theta^2 + 2*b*theta + c = 0.
605  Real theta = (-b + std::sqrt(b*b - a*c)) / a;
606  xtemp->set(*(p[iterCG_-1]));
607  xtemp->scale(theta);
608  t.set(*tprev);
609  t.plus(*xtemp);
610  // Store as tangential Cauchy point if terminating in first iteration.
611  if (iterCG_ == 1) {
612  tCP.set(t);
613  }
614  flagCG_ = 3;
615  if (infoTS_) {
616  std::stringstream hist;
617  hist << " trust-region condition active \n";
618  std::cout << hist.str();
619  }
620  return;
621  }
622 
623  // Residual update.
624  xtemp->set(*(Hps[iterCG_-1]));
625  xtemp->scale(alpha);
626  // change r->plus(*gtemp);
627  r->plus(xtemp->dual());
628  if (orthocheck) {
629  // change rs.push_back(gvec_->clone());
630  rs.push_back(xvec_->clone());
631  // change (rs[iterCG_])->set(*r);
632  (rs[iterCG_])->set(r->dual());
633  }
634 
635  iterCG_++;
636 
637  } // while (iterCG_ < maxiterCG_)
638 
639  flagCG_ = 1;
640  if (infoTS_) {
641  std::stringstream hist;
642  hist << " maximum number of iterations reached \n";
643  std::cout << hist.str();
644  }
645 
646 } // solveTangentialSubproblem
647 
648 
649 template<typename Real>
650 void CompositeStepAlgorithm<Real>::accept(Vector<Real> &s, Vector<Real> &n, Vector<Real> &t, Real f_new, Vector<Real> &c_new,
651  Vector<Real> &gf_new, Vector<Real> &l_new, Vector<Real> &g_new,
652  const Vector<Real> &x, const Vector<Real> &l, Real f, const Vector<Real> &gf, const Vector<Real> &c,
653  const Vector<Real> &g, Vector<Real> &tCP, Vector<Real> &Wg,
654  Objective<Real> &obj, Constraint<Real> &con) {
655 
656  Real beta = 1e-8; // predicted reduction parameter
657  Real tol_red_tang = 1e-3; // internal reduction factor for tangtol
658  Real tol_red_all = 1e-1; // internal reduction factor for qntol, lmhtol, pgtol, projtol, tangtol
659  //bool glob_refine = true; // true - if subsolver tolerances are adjusted in this routine, keep adjusted values globally
660  // false - if subsolver tolerances are adjusted in this routine, discard adjusted values
661  Real tol_fdiff = 1e-12; // relative objective function difference for ared computation
662  int ct_max = 10; // maximum number of globalization tries
663  Real mintol = 1e-16; // smallest projection tolerance value
664 
665  // Determines max value of |rpred|/pred.
666  Real rpred_over_pred = 0.5*(1-eta_);
667 
668  if (infoAC_) {
669  std::stringstream hist;
670  hist << "\n Composite step acceptance\n";
671  std::cout << hist.str();
672  }
673 
674  Real zero = 0.0;
675  Real one = 1.0;
676  Real two = 2.0;
677  Real half = one/two;
678  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
679  std::vector<Real> augiters;
680 
681  Real pred = zero;
682  Real ared = zero;
683  Real rpred = zero;
684  Real part_pred = zero;
685  Real linc_preproj = zero;
686  Real linc_postproj = zero;
687  Real tangtol_start = zero;
688  Real tangtol = tangtol_;
689  //Real projtol = projtol_;
690  bool flag = false;
691  int num_proj = 0;
692  bool try_tCP = false;
693  Real fdiff = zero;
694 
695  Ptr<Vector<Real> > xtrial = xvec_->clone();
696  Ptr<Vector<Real> > Jl = gvec_->clone();
697  Ptr<Vector<Real> > gfJl = gvec_->clone();
698  Ptr<Vector<Real> > Jnc = cvec_->clone();
699  Ptr<Vector<Real> > t_orig = xvec_->clone();
700  Ptr<Vector<Real> > t_dual = gvec_->clone();
701  Ptr<Vector<Real> > Jt_orig = cvec_->clone();
702  Ptr<Vector<Real> > t_m_tCP = xvec_->clone();
703  Ptr<Vector<Real> > ltemp = lvec_->clone();
704  Ptr<Vector<Real> > xtemp = xvec_->clone();
705  Ptr<Vector<Real> > rt = cvec_->clone();
706  Ptr<Vector<Real> > Hn = gvec_->clone();
707  Ptr<Vector<Real> > Hto = gvec_->clone();
708  Ptr<Vector<Real> > cxxvec = gvec_->clone();
709  Ptr<Vector<Real> > czero = cvec_->clone();
710  czero->zero();
711  Real Jnc_normsquared = zero;
712  Real c_normsquared = zero;
713 
714  // Compute and store some quantities for later use. Necessary
715  // because of the function and constraint updates below.
716  con.applyAdjointJacobian(*Jl, l, x, zerotol);
717  con.applyJacobian(*Jnc, n, x, zerotol);
718  Jnc->plus(c);
719  Jnc_normsquared = Jnc->dot(*Jnc);
720  c_normsquared = c.dot(c);
721 
722  for (int ct=0; ct<ct_max; ct++) {
723 
724  try_tCP = true;
725  t_m_tCP->set(t);
726  t_m_tCP->scale(-one);
727  t_m_tCP->plus(tCP);
728  if (t_m_tCP->norm() == zero) {
729  try_tCP = false;
730  }
731 
732  t_orig->set(t);
733  con.applyJacobian(*Jt_orig, *t_orig, x, zerotol);
734  linc_preproj = Jt_orig->norm();
735  pred = one;
736  rpred = two*rpred_over_pred*pred;
737  flag = false;
738  num_proj = 1;
739  tangtol_start = tangtol;
740 
741  while (std::abs(rpred)/pred > rpred_over_pred) {
742  // Compute projected tangential step.
743  if (flag) {
744  tangtol = tol_red_tang*tangtol;
745  num_proj++;
746  if (tangtol < mintol) {
747  if (infoAC_) {
748  std::stringstream hist;
749  hist << "\n The projection of the tangential step cannot be done with sufficient precision.\n";
750  hist << " Is the quasi-normal step very small? Continuing with no global convergence guarantees.\n";
751  std::cout << hist.str();
752  }
753  break;
754  }
755  }
756  // Solve augmented system.
757  Real tol = setTolOSS(tangtol);
758  // change augiters = con.solveAugmentedSystem(t, *ltemp, *t_orig, *czero, x, tol);
759  t_dual->set(t_orig->dual());
760  augiters = con.solveAugmentedSystem(t, *ltemp, *t_dual, *czero, x, tol);
761  totalCallLS_++;
762  totalIterLS_ = totalIterLS_ + augiters.size();
763  printInfoLS(augiters);
764  totalProj_++;
765  con.applyJacobian(*rt, t, x, zerotol);
766  linc_postproj = rt->norm();
767 
768  // Compute composite step.
769  s.set(t);
770  s.plus(n);
771 
772  // Compute some quantities before updating the objective and the constraint.
773  obj.hessVec(*Hn, n, x, zerotol);
774  if (useConHess_) {
775  con.applyAdjointHessian(*cxxvec, l, n, x, zerotol);
776  Hn->plus(*cxxvec);
777  }
778  obj.hessVec(*Hto, *t_orig, x, zerotol);
779  if (useConHess_) {
780  con.applyAdjointHessian(*cxxvec, l, *t_orig, x, zerotol);
781  Hto->plus(*cxxvec);
782  }
783 
784  // Compute objective, constraint, etc. values at the trial point.
785  xtrial->set(x);
786  xtrial->plus(s);
787  obj.update(*xtrial,UpdateType::Trial,state_->iter);
788  con.update(*xtrial,UpdateType::Trial,state_->iter);
789  f_new = obj.value(*xtrial, zerotol);
790  obj.gradient(gf_new, *xtrial, zerotol);
791  con.value(c_new, *xtrial, zerotol);
792  l_new.set(l);
793  computeLagrangeMultiplier(l_new, *xtrial, gf_new, con);
794 
795  // Penalty parameter update.
796  part_pred = - Wg.dot(*t_orig);
797  gfJl->set(gf);
798  gfJl->plus(*Jl);
799  // change part_pred -= gfJl->dot(n);
800  //part_pred -= n.dot(gfJl->dual());
801  part_pred -= n.apply(*gfJl);
802  // change part_pred -= half*Hn->dot(n);
803  //part_pred -= half*n.dot(Hn->dual());
804  part_pred -= half*n.apply(*Hn);
805  // change part_pred -= half*Hto->dot(*t_orig);
806  //part_pred -= half*t_orig->dot(Hto->dual());
807  part_pred -= half*t_orig->apply(*Hto);
808  ltemp->set(l_new);
809  ltemp->axpy(-one, l);
810  // change part_pred -= Jnc->dot(*ltemp);
811  //part_pred -= Jnc->dot(ltemp->dual());
812  part_pred -= Jnc->apply(*ltemp);
813 
814  if ( part_pred < -half*penalty_*(c_normsquared-Jnc_normsquared) ) {
815  penalty_ = ( -two * part_pred / (c_normsquared-Jnc_normsquared) ) + beta;
816  }
817 
818  pred = part_pred + penalty_*(c_normsquared-Jnc_normsquared);
819 
820  // Computation of rpred.
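  // rpred accounts for the residual rt = c'(x)*t left by the inexact projection above;
  // the enclosing while loop tightens tangtol until |rpred|/pred <= 0.5*(1-eta_).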
821  // change rpred = - ltemp->dot(*rt) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
822  //rpred = - rt->dot(ltemp->dual()) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
823  rpred = - rt->apply(*ltemp) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
824  // change Ptr<Vector<Real> > lrt = lvec_->clone();
825  //lrt->set(*rt);
826  //rpred = - ltemp->dot(*rt) - penalty_ * std::pow(rt->norm(), 2) - two * penalty_ * lrt->dot(*Jnc);
827  flag = 1;
828 
829  } // while (std::abs(rpred)/pred > rpred_over_pred)
830 
831  tangtol = tangtol_start;
832 
833  // Check if the solution of the tangential subproblem is
834  // disproportionally large compared to total trial step.
835  xtemp->set(n);
836  xtemp->plus(t);
837  if ( t_orig->norm()/xtemp->norm() < tntmax_ ) {
838  break;
839  }
840  else {
841  t_m_tCP->set(*t_orig);
842  t_m_tCP->scale(-one);
843  t_m_tCP->plus(tCP);
844  if ((t_m_tCP->norm() > 0) && try_tCP) {
845  if (infoAC_) {
846  std::stringstream hist;
847  hist << " ---> now trying tangential Cauchy point\n";
848  std::cout << hist.str();
849  }
850  t.set(tCP);
851  }
852  else {
853  if (infoAC_) {
854  std::stringstream hist;
855  hist << " ---> recomputing quasi-normal step and re-solving tangential subproblem\n";
856  std::cout << hist.str();
857  }
858  totalRef_++;
859  // Reset global quantities.
860  obj.update(x, UpdateType::Trial, state_->iter);
861  con.update(x, UpdateType::Trial, state_->iter);
862  /*lmhtol = tol_red_all*lmhtol;
863  qntol = tol_red_all*qntol;
864  pgtol = tol_red_all*pgtol;
865  projtol = tol_red_all*projtol;
866  tangtol = tol_red_all*tangtol;
867  if (glob_refine) {
868  lmhtol_ = lmhtol;
869  qntol_ = qntol;
870  pgtol_ = pgtol;
871  projtol_ = projtol;
872  tangtol_ = tangtol;
873  }*/
874  if (!tolOSSfixed_) {
875  lmhtol_ *= tol_red_all;
876  qntol_ *= tol_red_all;
877  pgtol_ *= tol_red_all;
878  projtol_ *= tol_red_all;
879  tangtol_ *= tol_red_all;
880  }
881  // Recompute the quasi-normal step.
882  computeQuasinormalStep(n, c, x, zeta_*Delta_, con);
883  // Solve tangential subproblem.
884  solveTangentialSubproblem(t, tCP, Wg, x, g, n, l, Delta_, obj, con);
885  totalIterCG_ += iterCG_;
886  if (flagCG_ == 1) {
887  totalNegCurv_++;
888  }
889  }
890  } // else w.r.t. ( t_orig->norm()/xtemp->norm() < tntmax )
891 
892  } // for (int ct=0; ct<ct_max; ct++)
893 
894  // Compute actual reduction;
895  fdiff = f - f_new;
896  // Heuristic 1: If fdiff is very small relative to f, replace it with a tiny
897  // positive value, in order to prevent machine precision issues.
898  Real em24(1e-24);
899  Real em14(1e-14);
900  if (std::abs(fdiff / (f+em24)) < tol_fdiff) {
901  fdiff = em14;
902  }
903  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
904  // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(std::pow(c.norm(),2) - std::pow(c_new.norm(),2));
905  //ared = fdiff + (c.dot(l.dual()) - c_new.dot(l_new.dual())) + penalty_*(c.dot(c) - c_new.dot(c_new));
906  ared = fdiff + (c.apply(l) - c_new.apply(l_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
907 
908  // Store actual and predicted reduction.
909  ared_ = ared;
910  pred_ = pred;
911 
912  // Store step and vector norms.
913  snorm_ = s.norm();
914  nnorm_ = n.norm();
915  tnorm_ = t.norm();
916 
917  // Print diagnostics.
918  if (infoAC_) {
919  std::stringstream hist;
920  hist << "\n Trial step info ...\n";
921  hist << " n_norm = " << nnorm_ << "\n";
922  hist << " t_norm = " << tnorm_ << "\n";
923  hist << " s_norm = " << snorm_ << "\n";
924  hist << " xtrial_norm = " << xtrial->norm() << "\n";
925  hist << " f_old = " << f << "\n";
926  hist << " f_trial = " << f_new << "\n";
927  hist << " f_old-f_trial = " << f-f_new << "\n";
928  hist << " ||c_old|| = " << c.norm() << "\n";
929  hist << " ||c_trial|| = " << c_new.norm() << "\n";
930  hist << " ||Jac*t_preproj|| = " << linc_preproj << "\n";
931  hist << " ||Jac*t_postproj|| = " << linc_postproj << "\n";
932  hist << " ||t_tilde||/||t|| = " << t_orig->norm() / t.norm() << "\n";
933  hist << " ||t_tilde||/||n+t|| = " << t_orig->norm() / snorm_ << "\n";
934  hist << " # projections = " << num_proj << "\n";
935  hist << " penalty param = " << penalty_ << "\n";
936  hist << " ared = " << ared_ << "\n";
937  hist << " pred = " << pred_ << "\n";
938  hist << " ared/pred = " << ared_/pred_ << "\n";
939  std::cout << hist.str();
940  }
941 
942 } // accept
943 
944 template<typename Real>
945 void CompositeStepAlgorithm<Real>::updateRadius(Vector<Real> &x,
946  Vector<Real> &l,
947  const Vector<Real> &s,
948  Objective<Real> &obj,
949  Constraint<Real> &con) {
950  Real zero(0);
951  Real one(1);
952  Real two(2);
953  Real seven(7);
954  Real half(0.5);
955  Real zp9(0.9);
956  Real zp8(0.8);
957  Real em12(1e-12);
958  Real zerotol = std::sqrt(ROL_EPSILON<Real>()); //zero;
959  Real ratio(zero);
960 
961  Ptr<Vector<Real> > g = gvec_->clone();
962  Ptr<Vector<Real> > ajl = gvec_->clone();
963  Ptr<Vector<Real> > gl = gvec_->clone();
964  Ptr<Vector<Real> > c = cvec_->clone();
965 
966  // Determine if the step gives sufficient reduction in the merit function,
967  // update the trust-region radius.
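  // ared_/pred_ >= eta_ accepts the step; ratios above 0.9 (0.8) enlarge the radius to at
  // least 7x (2x) the step norm, while rejection sets it to half of max(||n||,||t||) and
  // reverts the objective and constraint updates.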
968  ratio = ared_/pred_;
969  if ((std::abs(ared_) < em12) && std::abs(pred_) < em12) {
970  ratio = one;
971  }
972  if (ratio >= eta_) {
973  x.plus(s);
974  if (ratio >= zp9) {
975  Delta_ = std::max(seven*snorm_, Delta_);
976  }
977  else if (ratio >= zp8) {
978  Delta_ = std::max(two*snorm_, Delta_);
979  }
980  obj.update(x,UpdateType::Accept,state_->iter);
981  con.update(x,UpdateType::Accept,state_->iter);
982  flagAC_ = 1;
983  }
984  else {
985  Delta_ = half*std::max(nnorm_, tnorm_);
986  obj.update(x,UpdateType::Revert,state_->iter);
987  con.update(x,UpdateType::Revert,state_->iter);
988  flagAC_ = 0;
989  } // if (ratio >= eta)
990 
991  Real val = obj.value(x, zerotol);
992  state_->nfval++;
993  obj.gradient(*g, x, zerotol);
994  computeLagrangeMultiplier(l, x, *g, con);
995  con.applyAdjointJacobian(*ajl, l, x, zerotol);
996  gl->set(*g); gl->plus(*ajl);
997  state_->ngrad++;
998  con.value(*c, x, zerotol);
999 
1000  state_->gradientVec->set(*gl);
1001  state_->constraintVec->set(*c);
1002 
1003  state_->value = val;
1004  state_->gnorm = gl->norm();
1005  state_->cnorm = c->norm();
1006  state_->iter++;
1007  state_->snorm = snorm_;
1008 
1009  // Update algorithm state
1010  //(state_->iterateVec)->set(x);
1011 }
1012 
1013 
1014 template<typename Real>
1015 void CompositeStepAlgorithm<Real>::initialize(Vector<Real> &x,
1016  const Vector<Real> &g,
1017  Vector<Real> &l,
1018  const Vector<Real> &c,
1019  Objective<Real> &obj,
1020  Constraint<Real> &con,
1021  std::ostream &outStream ) {
1022  Real zerotol = std::sqrt(ROL_EPSILON<Real>());
1023  TypeE::Algorithm<Real>::initialize(x,g,l,c);
1024 
1025  // Initialize the algorithm state.
1026  state_->nfval = 0;
1027  state_->ncval = 0;
1028  state_->ngrad = 0;
1029 
1030  xvec_ = x.clone();
1031  gvec_ = g.clone();
1032  lvec_ = l.clone();
1033  cvec_ = c.clone();
1034 
1035  Ptr<Vector<Real> > ajl = gvec_->clone();
1036  Ptr<Vector<Real> > gl = gvec_->clone();
1037 
1038  // Update objective and constraint.
1039  obj.update(x,UpdateType::Initial,state_->iter);
1040  state_->value = obj.value(x, zerotol);
1041  state_->nfval++;
1042  con.update(x,UpdateType::Initial,state_->iter);
1043  con.value(*cvec_, x, zerotol);
1044  state_->cnorm = cvec_->norm();
1045  state_->ncval++;
1046  obj.gradient(*gvec_, x, zerotol);
1047 
1048  // Compute gradient of Lagrangian at new multiplier guess.
1049  computeLagrangeMultiplier(l, x, *gvec_, con);
1050  con.applyAdjointJacobian(*ajl, l, x, zerotol);
1051  gl->set(*gvec_); gl->plus(*ajl);
1052  state_->ngrad++;
1053  state_->gnorm = gl->norm();
1054 }
1055 
1056 
1057 template<typename Real>
1058 void CompositeStepAlgorithm<Real>::run( Vector<Real> &x,
1059  const Vector<Real> &g,
1060  Objective<Real> &obj,
1061  Constraint<Real> &econ,
1062  Vector<Real> &emul,
1063  const Vector<Real> &eres,
1064  std::ostream &outStream ) {
1065 
1066  initialize(x,g,emul,eres,obj,econ,outStream);
1067 
1068  // Output.
1069  if (verbosity_ > 0) writeOutput(outStream,true);
1070 
1071  // Step vector.
1072  Ptr<Vector<Real> > s = x.clone();
1073 
1074  while (status_->check(*state_)) {
1075  computeTrial(*s, x, emul, obj, econ);
1076  updateRadius(x, emul, *s, obj, econ);
1077 
1078  // Update output.
1079  if (verbosity_ > 0) writeOutput(outStream,printHeader_);
1080  }
1081 
1082  if (verbosity_ > 0) TypeE::Algorithm<Real>::writeExitStatus(outStream);
1083 }
1084 
1085 
1086 template<typename Real>
1087 void CompositeStepAlgorithm<Real>::writeHeader(std::ostream& os) const {
1088  std::stringstream hist;
1089  if (verbosity_>1) {
1090  hist << std::string(144,'-') << std::endl;
1091  hist << "Composite Step status output definitions" << std::endl << std::endl;
1092  hist << " iter - Number of iterates (steps taken)" << std::endl;
1093  hist << " fval - Objective function value" << std::endl;
1094  hist << " cnorm - Norm of the constraint violation" << std::endl;
1095  hist << " gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
1096  hist << " snorm - Norm of the step" << std::endl;
1097  hist << " delta - Trust-region radius" << std::endl;
1098  hist << " nnorm - Norm of the quasinormal step" << std::endl;
1099  hist << " tnorm - Norm of the tangential step" << std::endl;
1100  hist << " #fval - Number of times the objective was computed" << std::endl;
1101  hist << " #grad - Number of times the gradient was computed" << std::endl;
1102  hist << " iterCG - Number of projected CG iterations" << std::endl;
1103  hist << " flagCG - Flag returned by projected CG" << std::endl;
1104  hist << " accept - Acceptance flag for the trial step" << std::endl;
1105  hist << " linsys - Number of augmented solver calls/iterations" << std::endl;
1106  hist << std::string(144,'-') << std::endl;
1107  }
1108  hist << " ";
1109  hist << std::setw(6) << std::left << "iter";
1110  hist << std::setw(15) << std::left << "fval";
1111  hist << std::setw(15) << std::left << "cnorm";
1112  hist << std::setw(15) << std::left << "gLnorm";
1113  hist << std::setw(15) << std::left << "snorm";
1114  hist << std::setw(10) << std::left << "delta";
1115  hist << std::setw(10) << std::left << "nnorm";
1116  hist << std::setw(10) << std::left << "tnorm";
1117  hist << std::setw(8) << std::left << "#fval";
1118  hist << std::setw(8) << std::left << "#grad";
1119  hist << std::setw(8) << std::left << "iterCG";
1120  hist << std::setw(8) << std::left << "flagCG";
1121  hist << std::setw(8) << std::left << "accept";
1122  hist << std::setw(8) << std::left << "linsys";
1123  hist << std::endl;
1124  os << hist.str();
1125 }
1126 
1127 
1128 template<typename Real>
1129 void CompositeStepAlgorithm<Real>::writeName(std::ostream& os) const {
1130  std::stringstream hist;
1131  hist << std::endl << "Composite-Step Trust-Region Solver (Type E, Equality Constraints)";
1132  hist << std::endl;
1133  os << hist.str();
1134 }
1135 
1136 
1137 template<typename Real>
1138 void CompositeStepAlgorithm<Real>::writeOutput(std::ostream& os, const bool print_header) const {
1139  std::stringstream hist;
1140  hist << std::scientific << std::setprecision(6);
1141  if (state_->iter == 0) writeName(os);
1142  if (print_header) writeHeader(os);
1143  if (state_->iter == 0 ) {
1144  hist << " ";
1145  hist << std::setw(6) << std::left << state_->iter;
1146  hist << std::setw(15) << std::left << state_->value;
1147  hist << std::setw(15) << std::left << state_->cnorm;
1148  hist << std::setw(15) << std::left << state_->gnorm;
1149  hist << std::setw(15) << std::left << "---";
1150  hist << std::setw(10) << std::left << "---";
1151  hist << std::setw(10) << std::left << "---";
1152  hist << std::setw(10) << std::left << "---";
1153  hist << std::setw(8) << std::left << "---";
1154  hist << std::setw(8) << std::left << "---";
1155  hist << std::setw(8) << std::left << "---";
1156  hist << std::setw(8) << std::left << "---";
1157  hist << std::setw(8) << std::left << "---";
1158  hist << std::setw(8) << std::left << "---";
1159  hist << std::endl;
1160  }
1161  else {
1162  hist << " ";
1163  hist << std::setw(6) << std::left << state_->iter;
1164  hist << std::setw(15) << std::left << state_->value;
1165  hist << std::setw(15) << std::left << state_->cnorm;
1166  hist << std::setw(15) << std::left << state_->gnorm;
1167  hist << std::setw(15) << std::left << state_->snorm;
1168  hist << std::scientific << std::setprecision(2);
1169  hist << std::setw(10) << std::left << Delta_;
1170  hist << std::setw(10) << std::left << nnorm_;
1171  hist << std::setw(10) << std::left << tnorm_;
1172  hist << std::scientific << std::setprecision(6);
1173  hist << std::setw(8) << std::left << state_->nfval;
1174  hist << std::setw(8) << std::left << state_->ngrad;
1175  hist << std::setw(8) << std::left << iterCG_;
1176  hist << std::setw(8) << std::left << flagCG_;
1177  hist << std::setw(8) << std::left << flagAC_;
1178  hist << std::left << totalCallLS_ << "/" << totalIterLS_;
1179  hist << std::endl;
1180  }
1181  os << hist.str();
1182 }
1183 
1184 
1185 template<typename Real>
1186 template<typename T>
1187 int CompositeStepAlgorithm<Real>::sgn(T val) const {
1188  return (T(0) < val) - (val < T(0));
1189 }
1190 
1191 
1192 template<typename Real>
1193 void CompositeStepAlgorithm<Real>::printInfoLS(const std::vector<Real> &res) const {
1194  if (infoLS_) {
1195  std::stringstream hist;
1196  hist << std::scientific << std::setprecision(8);
1197  hist << std::endl << " Augmented System Solver:" << std::endl;
1198  hist << " True Residual" << std::endl;
1199  for (unsigned j=0; j<res.size(); j++) {
1200  hist << " " << std::left << std::setw(14) << res[j] << std::endl;
1201  }
1202  hist << std::endl;
1203  std::cout << hist.str();
1204  }
1205 }
1206 
1207 
1208 template<typename Real>
1209 Real CompositeStepAlgorithm<Real>::setTolOSS(const Real intol) const {
1210  return tolOSSfixed_ ? tolOSS_ : intol;
1211 }
1212 
1213 } // namespace TypeE
1214 } // namespace ROL
1215 
1216 #endif
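
A minimal usage sketch follows, driving the run() overload defined in this file. The SketchObjective and SketchConstraint classes, the problem data, and the parameter values are hypothetical illustrations (not part of ROL or of this header); only the ROL interfaces referenced in the listing above (StdVector, Objective, Constraint, ParameterList, run) are assumed.

// Hypothetical sketch: minimize 0.5*<x,x> subject to sum_i x_i = 1.
#include "ROL_TypeE_CompositeStepAlgorithm.hpp"
#include "ROL_StdVector.hpp"
#include "ROL_Objective.hpp"
#include "ROL_Constraint.hpp"
#include "ROL_ParameterList.hpp"
#include <iostream>
#include <vector>

template<typename Real>
class SketchObjective : public ROL::Objective<Real> {
public:
  // f(x) = 0.5*<x,x>
  Real value(const ROL::Vector<Real> &x, Real &tol) override {
    return static_cast<Real>(0.5)*x.dot(x);
  }
  // grad f(x) = x (identity Riesz map for StdVector)
  void gradient(ROL::Vector<Real> &g, const ROL::Vector<Real> &x, Real &tol) override {
    g.set(x);
  }
};

template<typename Real>
class SketchConstraint : public ROL::Constraint<Real> {
public:
  // c(x) = sum_i x_i - 1  (single equality constraint)
  void value(ROL::Vector<Real> &c, const ROL::Vector<Real> &x, Real &tol) override {
    auto cp = dynamic_cast<ROL::StdVector<Real>&>(c).getVector();
    auto xp = dynamic_cast<const ROL::StdVector<Real>&>(x).getVector();
    Real sum(0);
    for (const auto &xi : *xp) sum += xi;
    (*cp)[0] = sum - static_cast<Real>(1);
  }
  // c'(x)v = sum_i v_i
  void applyJacobian(ROL::Vector<Real> &jv, const ROL::Vector<Real> &v,
                     const ROL::Vector<Real> &x, Real &tol) override {
    auto jvp = dynamic_cast<ROL::StdVector<Real>&>(jv).getVector();
    auto vp  = dynamic_cast<const ROL::StdVector<Real>&>(v).getVector();
    Real sum(0);
    for (const auto &vi : *vp) sum += vi;
    (*jvp)[0] = sum;
  }
  // c'(x)'u = u[0]*ones
  void applyAdjointJacobian(ROL::Vector<Real> &ajv, const ROL::Vector<Real> &v,
                            const ROL::Vector<Real> &x, Real &tol) override {
    auto ajvp = dynamic_cast<ROL::StdVector<Real>&>(ajv).getVector();
    auto vp   = dynamic_cast<const ROL::StdVector<Real>&>(v).getVector();
    for (auto &ai : *ajvp) ai = (*vp)[0];
  }
};

int main() {
  using RealT = double;
  const int dim = 5;

  // Optimization vector, gradient workspace, multiplier, and constraint residual.
  ROL::StdVector<RealT> x(ROL::makePtr<std::vector<RealT>>(dim, 2.0));
  ROL::StdVector<RealT> g(ROL::makePtr<std::vector<RealT>>(dim, 0.0));
  ROL::StdVector<RealT> l(ROL::makePtr<std::vector<RealT>>(1, 0.0));
  ROL::StdVector<RealT> c(ROL::makePtr<std::vector<RealT>>(1, 0.0));

  SketchObjective<RealT>  obj;
  SketchConstraint<RealT> con;

  // Parameters read by the constructor above (illustrative values).
  ROL::ParameterList list;
  list.sublist("Step").sublist("Composite Step").set("Initial Radius", 1e2);
  list.sublist("General").set("Output Level", 1);

  ROL::TypeE::CompositeStepAlgorithm<RealT> algo(list);
  algo.run(x, g, obj, con, l, c, std::cout);  // overload defined in this file
  return 0;
}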