KASKADE 7 development version
composite_step_solvers.hh
#ifndef COMPOSITE_STEP_SOLVERS
#define COMPOSITE_STEP_SOLVERS

#include <memory> // std::unique_ptr
#include <vector>

#include <boost/timer/timer.hpp>

#include "linalg/triplet.hh"
#include "linalg/tcg.hh"

namespace Kaskade
{
  namespace Bridge
  {
    template<class DirectSolver>
    class DirectInnerSolver
    {
    public:
      DirectInnerSolver(int numberOfBlocks_, bool stabilization_=true) : numberOfBlocks(numberOfBlocks_), stabilization(stabilization_) {}

      // on exit: sol = (dx, p), where dx is the normal step and p the least squares update of the Lagrange multiplier
      void solveAdjAndNormal(std::vector<double>& sol, SparseLinearSystem const& lin)
      {
        lin.getMatrix(mat);
        // mat.print();
        if(stabilization)
        {
          // stabilization: cf. Conn/Gould/Toint, p. 110, (5.4.7)
          AT.flush();
          lin.getMatrixBlocks(AT,0,numberOfBlocks,numberOfBlocks,lin.nColBlocks());
          // shift c'(x)^* to the block matrix (0 c'(x)^*)
          AT.shiftIndices(0,lin.cols(0,numberOfBlocks));
          // AT is (0 c'(x)^*)
          ATx.resize(0);
        }
        solver.reset(new DirectSolver(mat.nrows(),
                                      2,
                                      mat.ridx,
                                      mat.cidx,
                                      mat.data,
                                      ...));
        std::vector<double> r, s, soladj;
        sz = lin.size();
        lin.getRHSBlocks(r,0,numberOfBlocks);
        lin.getRHSBlocks(s,numberOfBlocks,lin.nRowBlocks());

        std::vector<double> rg(sz,0.0), rc(sz,0.0);
        for(int i=0; i<r.size(); ++i) rg[i]=r[i];
        for(int i=0; i<s.size(); ++i) rc[i+r.size()]=s[i];

        solver->solve(rg,soladj);
        solver->solve(rc,sol);
        for(int i=r.size(); i<sol.size(); ++i) sol[i]=soladj[i];
      }

      // Application of the "preconditioner"
      // stabilization: cf. Conn/Gould/Toint, p. 110, (5.4.7)
      void ax(std::vector<double>& sol, std::vector<double> const& r) const
      {
        std::vector<double> r1(sz,0.0), x;
        if(stabilization && ATx.size()!=0)
          for(int i=0; i<r.size(); ++i)
            r1[i]=r[i]-ATx[i];
        else
          for(int i=0; i<r.size(); ++i)
            r1[i]=r[i];
        solver->solve(r1,x);
        if(stabilization) AT.ax(ATx,x);
        for(int i=0; i<sol.size(); ++i) sol[i]=x[i];
      }

      void resolveNormal(std::vector<double>& sol, SparseLinearSystem const& lin)
      {
        std::vector<double> s, r2(sz,0.0);
        lin.getRHSBlocks(s,numberOfBlocks,lin.nRowBlocks());
        for(int i=0; i<s.size(); ++i) r2[i+lin.rows(0,numberOfBlocks)]=s[i];
        solver->solve(r2,sol);
        for(int i=lin.rows(0,numberOfBlocks); i<sol.size(); ++i) sol[i]=0.0;
      }

    private:
      MatrixAsTriplet<double> mat, AT;
      mutable std::vector<double> ATx;
      std::unique_ptr<DirectSolver> solver;
      int numberOfBlocks, sz;
      bool stabilization;
    };
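
The bookkeeping in solveAdjAndNormal is easy to miss: the same factorization is applied twice, once to the right hand side built from the gradient blocks f'(x) and once to the one built from the constraint blocks, and the two solutions are then merged into sol = (dx, p). The following minimal sketch (plain std::vector; the helper name combine and the parameter primalSize are introduced here only for illustration and are not part of this header) shows the merge rule used on exit:

    #include <cstddef>
    #include <vector>

    // sol     : solution for the constraint right hand side; its primal block is the normal step dx
    // soladj  : solution for the gradient right hand side; its dual block is the multiplier update p
    // primalSize corresponds to r.size() in solveAdjAndNormal
    std::vector<double> combine(std::vector<double> sol, std::vector<double> const& soladj,
                                std::size_t primalSize)
    {
      for (std::size_t i = primalSize; i < sol.size(); ++i)
        sol[i] = soladj[i];   // keep dx in the primal block, take p from the adjoint solve
      return sol;             // = (dx, p)
    }
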

    template <class VectorImpl, class InnerSolver>
    class ProjTCGSolver
    {
    public:
      virtual void setRelativeAccuracy(double accuracy) { acc=accuracy; }
      virtual double getRelativeAccuracy() { return acc; }
      virtual double getAbsoluteAccuracy() { return acc; }
      virtual bool improvementPossible() { return true; }
      virtual int nSolutionVectors() const { return 2; }

      ProjTCGSolver(InnerSolver& solver_, int numberOfBlocks_) : solver(solver_), numberOfBlocks(numberOfBlocks_) {}
      virtual ~ProjTCGSolver() {}

    private:
      double acc;

      virtual int doSolve(std::vector<AbstractVector*>& correction,
                          AbstractLinearization& linT, AbstractLinearization& linN, int start, double nu0)
      {
        SparseLinearSystem const& lT = dynamic_cast<SparseLinearSystem const&>(linT);
        SparseLinearSystem const& lN = dynamic_cast<SparseLinearSystem const&>(linN);

        boost::timer::cpu_timer timer;

        std::vector<double> r(lN.rows(0,numberOfBlocks),0.0);

        // r = f'(x)
        lN.getRHSBlocks(r,0,numberOfBlocks);

        double rsum(0.0);
        for(int i=0; i<r.size(); ++i)
          rsum += fabs(r[i]);

        std::cout << "r:" << rsum << " " << r.size() << " " << numberOfBlocks << std::endl;

        int dimx = lT.rows(0,numberOfBlocks);
        std::vector<double> x, p, normalstep;
        x.reserve(lT.size());
        p.reserve(lT.size());

        if(start >= 1)
          dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start-1])).write(normalstep);

        dynamic_cast<Bridge::Vector<VectorImpl> const&>(linN.getOrigin()).write(x);

        MatrixAsTriplet<double> H;
        // H = c'(x)^*
        lN.getMatrixBlocks(H,0,numberOfBlocks,numberOfBlocks,lN.nColBlocks());
        H.shiftIndices(0,lN.cols(0,numberOfBlocks));
        H.axpy(r,x);
        H.flush();
        // r = f'(x) + c'(x)^* p
        lT.getMatrixBlocks(H,0,numberOfBlocks,0,numberOfBlocks);
        // r = f'(x) + c'(x)^* p + nu_0 L_xx delta n
        H.axpy(r,normalstep,nu0);

        for(int i=0; i<r.size(); ++i)
        {
          r[i] *= -1.0;
          x[i] = 0.0;
        }

        // min <Hx,x> + <r,x>
        int exit = projectedtcg(x,p,H,solver,r,1e-6,100);

        for(int i=0; i<dimx; ++i) x[i] *= -1.0;
        for(int i=dimx; i<lT.size(); ++i) x.push_back(0.0);
        dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start])).read(x);

        std::cout << " ProjTCG:" << (double)(timer.elapsed().user)/1e9 << std::endl;

        if(exit==2)
        {
          for(int i=0; i<dimx; ++i) p[i] *= -1.0;
          for(int i=dimx; i<lT.size(); ++i) p.push_back(0.0);
          dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start+1])).read(p);
          return 2;
        }
        return 1;
      }

      InnerSolver& solver;
      int numberOfBlocks;
    };
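
In ProjTCGSolver::doSolve the right hand side r = f'(x) + c'(x)^* p + nu_0 L_xx delta n is assembled, negated, and the model <Hx,x> + <r,x> is approximately minimized by projected truncated CG; the resulting primal iterate is then negated again and the multiplier block zero-padded before it is written back into the correction vector. A small self-contained sketch of this post-processing (the function and parameter names below are introduced here for illustration only):

    #include <vector>

    // x        : primal iterate returned by the projected CG loop (size dimx on entry)
    // dimx     : number of primal unknowns, lT.rows(0,numberOfBlocks) above
    // fullSize : total number of unknowns, lT.size() above
    void postprocess(std::vector<double>& x, int dimx, int fullSize)
    {
      for (int i = 0; i < dimx; ++i)
        x[i] *= -1.0;          // undo the sign convention of the quadratic model
      for (int i = dimx; i < fullSize; ++i)
        x.push_back(0.0);      // no update of the Lagrange multiplier block here
    }
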

    template <class VectorImpl, class InnerSolver>
    class TCGSolver
    {
    public:
      virtual void setRelativeAccuracy(double accuracy) { acc=accuracy; }
      virtual double getRelativeAccuracy() { return acc; }
      virtual double getAbsoluteAccuracy() { return acc; }
      virtual bool improvementPossible() { return true; }
      virtual int nSolutionVectors() const { return 2; }

      TCGSolver(InnerSolver& solver_, int numberOfBlocks_) : solver(solver_), numberOfBlocks(numberOfBlocks_) {}
      virtual ~TCGSolver() {}

    private:
      double acc;

      virtual int doSolve(std::vector<AbstractVector*>& correction,
                          AbstractLinearization& linT, AbstractLinearization& linN, int start,
                          double d1, double d2, double d3, double d4, double nu0)
      {
        SparseLinearSystem const& lT = dynamic_cast<SparseLinearSystem const&>(linT);
        SparseLinearSystem const& lN = dynamic_cast<SparseLinearSystem const&>(linN);

        int dimx = lT.rows(0,numberOfBlocks);
        std::vector<double> x, p, normalstep;
        if(start >= 1)
          dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start-1])).write(normalstep);

        solver.solveTCG(x,p,lT,lN,normalstep,nu0);

        for(int i=0; i<dimx; ++i) x[i] *= -1.0;
        for(int i=dimx; i<x.size(); ++i) x[i] *= 0.0;
        dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start])).read(x);
        if(p.size() != 0)
        {
          for(int i=0; i<dimx; ++i) p[i] *= -1.0;
          for(int i=dimx; i<p.size(); ++i) p[i] *= 0.0;
          dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start+1])).read(p);
          return 2;
        }
        return 1;
      }

      InnerSolver& solver;
      int numberOfBlocks;
    };
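
TCGSolver delegates the truncated CG iteration entirely to its InnerSolver. Judging only from the single call in doSolve above, the inner solver has to provide a member of roughly the following shape; this archetype is a sketch written down for orientation, the authoritative signature is that of the concrete inner solver actually used:

    #include <vector>

    struct InnerSolverArchetype
    {
      // On exit, x holds the (to be negated) tangential step and p an optional multiplier
      // update; if p stays empty, TCGSolver::doSolve returns 1 instead of 2.
      void solveTCG(std::vector<double>& x, std::vector<double>& p,
                    SparseLinearSystem const& lT, SparseLinearSystem const& lN,
                    std::vector<double> const& normalstep, double nu0);
    };
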

    template <class VectorImpl, class InnerSolver>
    class PINVSolver
    {
    public:
      virtual void setRelativeAccuracy(double accuracy) { acc=accuracy; }
      virtual double getRelativeAccuracy() { return acc; }
      virtual double getAbsoluteAccuracy() { return acc; }
      virtual bool improvementPossible() { return true; }
      virtual ~PINVSolver() {}

      PINVSolver(InnerSolver& solver_, int numberOfBlocks_) : solver(solver_), numberOfBlocks(numberOfBlocks_) {}

      void computeCorrectionAndAdjointCorrection(Kaskade::AbstractVector&, Kaskade::AbstractVector&, Kaskade::AbstractLinearization&) { assert("not implemented"); }
      void computeSimplifiedCorrection(Kaskade::AbstractVector&, const Kaskade::AbstractLinearization&) const { assert("not implemented"); }

    private:
      double acc;

      virtual void doSolve(AbstractVector& correction,
                           AbstractVector& iterate,
                           AbstractLinearization& lin)
      {
        SparseLinearSystem const& l = dynamic_cast<SparseLinearSystem const&>(lin);
        int dimx = l.rows(0,numberOfBlocks);
        if(dimx==l.size()) return;
        std::vector<double> xcor(l.size(),0.0);
        solver.solveAdjAndNormal(xcor,l);
        for(int i=0; i<xcor.size(); ++i) xcor[i] *= -1.0;

        std::vector<double> xiterate;
        dynamic_cast<Bridge::Vector<VectorImpl>&>(iterate).write(xiterate);

        // update the dual variables of the iterate
        for(int i=dimx; i<xcor.size(); ++i)
        {
          xiterate[i] = xcor[i];
          xcor[i] = 0.0;
        }

        // read the data into correction and iterate
        dynamic_cast<Bridge::Vector<VectorImpl>&>(correction).read(xcor);
        dynamic_cast<Bridge::Vector<VectorImpl>&>(iterate).read(xiterate);
      }

      virtual void doResolve(AbstractVector& correction,
                             AbstractLinearization const& lin) const
      {
        SparseLinearSystem const& l = dynamic_cast<SparseLinearSystem const&>(lin);
        int dimx = l.rows(0,numberOfBlocks);
        if(l.size()==dimx) return;
        std::vector<double> x(l.size(),0.0);
        solver.resolveNormal(x,l);
        for(int i=0; i<dimx; ++i) x[i] *= -1.0;
        for(int i=dimx; i<x.size(); ++i) x[i] = 0.0;

        dynamic_cast<Bridge::Vector<VectorImpl>&>(correction).read(x);
      }

      MatrixAsTriplet<double> m;
      mutable MatrixAsTriplet<double> P;
      InnerSolver& solver;
      int numberOfBlocks;
    };
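
The essential step in PINVSolver::doSolve is how the combined solution returned by the inner solver is split: the dual block replaces the Lagrange multiplier of the iterate, while only the primal block survives as correction. A minimal sketch of that split (the helper name splitCorrection is introduced here for illustration; it is not part of this header):

    #include <cstddef>
    #include <vector>

    // xcor     : negated solution of solveAdjAndNormal, layout (dx, p)
    // xiterate : coefficient vector of the current iterate
    // dimx     : number of primal unknowns
    void splitCorrection(std::vector<double>& xcor, std::vector<double>& xiterate, std::size_t dimx)
    {
      for (std::size_t i = dimx; i < xcor.size(); ++i)
      {
        xiterate[i] = xcor[i];   // the multiplier is replaced, not corrected
        xcor[i]     = 0.0;       // the correction acts on the primal variables only
      }
    }
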

    // template<class LinImpl, class VectorImpl, class Preconditioner>
    // class TCGWithPreconditioner : public AbstractTangentialSolver
    // {
    // public:
    //   virtual void setRelativeAccuracy(double accuracy) { acc=accuracy; };
    //   virtual double getRelativeAccuracy() { return acc; }
    //   virtual double getAbsoluteAccuracy() { return acc; }
    //   virtual bool improvementPossible() { return true; }
    //   virtual bool localConvergenceLikely() { return lCl; }

    //   virtual int nSolutionVectors() const { return 2; }

    //   TCGWithPreconditioner(int primalblocks_) :
    //     Me(3), Ae(4), addreg(0.0), primalblocks(primalblocks_), tcgfile("tcg.log",std::ios::out)
    //   {}

    //   virtual ~TCGWithPreconditioner() {};

    //   virtual bool getNormInfo(std::vector<double>& M, std::vector<double>& A) const
    //   {
    //     M=Me;
    //     A=Ae;
    //     return true;
    //   }

    // private:

    //   std::vector<double> Me, Ae;
    //   double acc;
    //   double addreg;
    //   bool lCl;

    //   virtual int doSolve(std::vector<AbstractVector*>& correction,
    //                       AbstractLinearization& linT, AbstractLinearization& linN, int start,
    //                       double ThetaAim, double omegaC, double omegaL, double omegaH, double nu0)
    //   {
    //     SparseLinearSystem& lT = dynamic_cast<SparseLinearSystem&>(linT);
    //     SparseLinearSystem& lN = dynamic_cast<SparseLinearSystem&>(linN);

    //     MatrixAsTriplet<double> NM;

    //     lN.getMatrix(NM);

    //     boost::timer timer;

    //     std::vector<double> r(lN.rows(0,primalblocks),0.0);

    //     // r = f'(x)
    //     lN.getRHSBlocks(r,0,primalblocks);
    //     int dimx=lT.rows(0,primalblocks);

    //     if(primalblocks < lT.nColBlocks())
    //     {
    //       // if equality constraints are present, then r += C'(x)^T p
    //       // this is for numerical stability
    //       std::vector<double> x;
    //       dynamic_cast<Bridge::Vector<VectorImpl> const&>(linN.getOrigin()).write(x);
    //       MatrixAsTriplet<double> CPrimeTransposed;
    //       lT.getMatrixBlocks(CPrimeTransposed,0,primalblocks,primalblocks,lT.nColBlocks());
    //       CPrimeTransposed.shiftIndices(0,lN.cols(0,primalblocks));
    //       CPrimeTransposed.axpy(r,x);
    //     }

    //     MatrixAsTriplet<double> Hessian;
    //     lT.getMatrixBlocks(Hessian,0,primalblocks,0,primalblocks);

    //     if(start >= 1)
    //     {
    //       // if normal step was taken, then r += nu_0 H delta n
    //       // this guarantees second order approximation
    //       std::vector<double> normalstep;
    //       dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start-1])).write(normalstep);
    //       Hessian.axpy(r,normalstep,nu0);
    //     }

    //     std::vector<double> x(lT.size(),0.0), p(lT.size(),0.0);

    //     // Preconditioner is only created at the beginning of the whole algorithm
    //     // if(!(prec_ptr.get()))
    //     prec_ptr.reset(new Preconditioner(lN));

    //     int maxiter=1000;
    //     int exit(2);
    //     double negm(1e300), posdefalpha(1.0);

    //     // for unregularized tcg:
    //     addreg = 0.0;

    //     int ntrial(0);

    //     do
    //     {
    //       PrecWrapperForStdVector<Preconditioner> prec(*prec_ptr);
    //       maxiter=1000;
    //       std::cout << "Regularization " << addreg << std::endl;
    //       exit=tcgRegForCubic(x,p,Hessian,prec,r,acc,omegaL,omegaH,addreg,maxiter,ntrial,Me,Ae,tcgfile);
    //       if(exit==-1)
    //         prec_ptr.reset(new Preconditioner(lN));

    //       std::cout << "Negative Curvature: " << Ae[3] << std::endl;
    //       negm=std::min(negm,Ae[3]);
    //       if(addreg > 0) posdefalpha=0.0;
    //       ntrial++;
    //       lCl = (addreg == 0.0);
    //     } while(exit != 1);

    //     addreg=std::max(0.0,-negm);

    //     // if(posdefalpha>0.0)
    //     //   prec_ptr.reset(new Preconditioner(lT));
    //     // Preconditioner preconditioner(lN);

    //     std::vector<double> Mx(x.size());

    //     NM.ax(Mx,x);
    //     double sum(0.0);
    //     for(int i=0; i<x.size(); ++i)
    //       sum += x[i]*Mx[i];

    //     std::cout << "xMx:" << sum << " " << Me[0] << " ";

    //     sum=0.0;
    //     for(int i=0; i<x.size(); ++i)
    //       sum += p[i]*Mx[i];

    //     std::cout << "pMx:" << sum << " " << Me[2] << " ";

    //     NM.ax(Mx,p);
    //     sum=0.0;
    //     for(int i=0; i<x.size(); ++i)
    //       sum += p[i]*Mx[i];

    //     std::cout << "pMp:" << sum << " " << Me[1] << std::endl;

    //     // no update of lagrangian multiplier
    //     for(int i=dimx; i<lT.size(); ++i) x.push_back(0.0);
    //     dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start])).read(x);

    //     std::cout << " ProjTCG: exit:" << exit << " t:" << timer.elapsed() << " it:" << maxiter << std::endl;

    //     if(exit==2)
    //     {
    //       for(int i=dimx; i<lT.size(); ++i) p.push_back(0.0);
    //       dynamic_cast<Bridge::Vector<VectorImpl>&>(*(correction[start+1])).read(p);
    //       return 2;
    //     }
    //     return 1;
    //   }

    //   int primalblocks;
    //   std::ofstream tcgfile;
    //   std::unique_ptr<Preconditioner> prec_ptr;
    // };

  } // namespace Bridge
} // namespace Kaskade

#endif
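
How these solvers are wired together is not visible from this header alone. As an orientation only, a hypothetical setup might look as follows; SomeFactorization and SomeVectorImpl are placeholders for a concrete direct factorization and vector implementation, and the block counts depend on the variable setting of the problem:

    using Inner = Kaskade::Bridge::DirectInnerSolver<SomeFactorization>;

    Inner inner(/*numberOfBlocks=*/1);                                          // shared factorization-based inner solver
    Kaskade::Bridge::PINVSolver<SomeVectorImpl,Inner>    normal(inner,1);       // normal step and multiplier update
    Kaskade::Bridge::ProjTCGSolver<SomeVectorImpl,Inner> tangential(inner,1);   // tangential step via projected CG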