g2o
Loading...
Searching...
No Matches
block_solver.hpp
Go to the documentation of this file.
1// g2o - General Graph Optimization
2// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright notice,
10// this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
18// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#include <Eigen/LU>
28#include <cassert>
29#include <fstream>
30#include <iomanip>
31
32#include "g2o/stuff/logger.h"
33#include "g2o/stuff/macros.h"
34#include "g2o/stuff/misc.h"
35#include "g2o/stuff/timeutil.h"
36#include "sparse_optimizer.h"
37
38namespace g2o {
39
40template <typename Traits>
41BlockSolver<Traits>::BlockSolver(std::unique_ptr<LinearSolverType> linearSolver)
42 : BlockSolverBase(), _linearSolver(std::move(linearSolver)) {
43 // workspace
44 _xSize = 0;
45 _numPoses = 0;
46 _numLandmarks = 0;
47 _sizePoses = 0;
49 _doSchur = true;
50}
51
52template <typename Traits>
53void BlockSolver<Traits>::resize(int* blockPoseIndices, int numPoseBlocks,
54 int* blockLandmarkIndices,
55 int numLandmarkBlocks, int s) {
56 deallocate();
57
58 resizeVector(s);
59
60 if (_doSchur) {
61 // the following two are only used in schur
62 assert(_sizePoses > 0 && "allocating with wrong size");
63 _coefficients.reset(allocate_aligned<double>(s));
64 _bschur.reset(allocate_aligned<double>(_sizePoses));
65 }
66
67 _Hpp = std::make_unique<PoseHessianType>(blockPoseIndices, blockPoseIndices,
68 numPoseBlocks, numPoseBlocks);
69 if (_doSchur) {
70 _Hschur = std::make_unique<PoseHessianType>(
71 blockPoseIndices, blockPoseIndices, numPoseBlocks, numPoseBlocks);
72 _Hll = std::make_unique<LandmarkHessianType>(
73 blockLandmarkIndices, blockLandmarkIndices, numLandmarkBlocks,
74 numLandmarkBlocks);
75 _DInvSchur =
76 std::make_unique<SparseBlockMatrixDiagonal<LandmarkMatrixType>>(
77 _Hll->colBlockIndices());
78 _Hpl = std::make_unique<PoseLandmarkHessianType>(
79 blockPoseIndices, blockLandmarkIndices, numPoseBlocks,
80 numLandmarkBlocks);
81 _HplCCS = std::make_unique<SparseBlockMatrixCCS<PoseLandmarkMatrixType>>(
82 _Hpl->rowBlockIndices(), _Hpl->colBlockIndices());
83 _HschurTransposedCCS =
84 std::make_unique<SparseBlockMatrixCCS<PoseMatrixType>>(
85 _Hschur->colBlockIndices(), _Hschur->rowBlockIndices());
86#ifdef G2O_OPENMP
87 _coefficientsMutex.resize(numPoseBlocks);
88#endif
89 }
90}
91
92template <typename Traits>
94 _Hpp.reset();
95 _Hll.reset();
96 _Hpl.reset();
97 _Hschur.reset();
98 _DInvSchur.reset();
99 _coefficients.reset();
100 _bschur.reset();
101
102 _HplCCS.reset();
103 _HschurTransposedCCS.reset();
104}
105
106template <typename Traits>
108
109template <typename Traits>
111 assert(_optimizer);
112
113 size_t sparseDim = 0;
114 _numPoses = 0;
115 _numLandmarks = 0;
116 _sizePoses = 0;
117 _sizeLandmarks = 0;
118 int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
119 int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];
120
121 for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
122 OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
123 int dim = v->dimension();
124 if (!v->marginalized()) {
125 v->setColInHessian(_sizePoses);
126 _sizePoses += dim;
127 blockPoseIndices[_numPoses] = _sizePoses;
128 ++_numPoses;
129 } else {
130 v->setColInHessian(_sizeLandmarks);
131 _sizeLandmarks += dim;
132 blockLandmarkIndices[_numLandmarks] = _sizeLandmarks;
133 ++_numLandmarks;
134 }
135 sparseDim += dim;
136 }
137 resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks,
138 sparseDim);
139 delete[] blockLandmarkIndices;
140 delete[] blockPoseIndices;
141
142 // allocate the diagonal on Hpp and Hll
143 int poseIdx = 0;
144 int landmarkIdx = 0;
145 for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
146 OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
147 if (!v->marginalized()) {
148 // assert(poseIdx == v->hessianIndex());
149 PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
150 if (zeroBlocks) m->setZero();
151 v->mapHessianMemory(m->data());
152 ++poseIdx;
153 } else {
154 LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
155 if (zeroBlocks) m->setZero();
156 v->mapHessianMemory(m->data());
157 ++landmarkIdx;
158 }
159 }
160 assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);
161
162 // temporary structures for building the pattern of the Schur complement
163 SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
164 if (_doSchur) {
165 schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(
166 _Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
167 schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
168 }
169
170 // here we assume that the landmark indices start after the pose ones
171 // create the structure in Hpp, Hll and in Hpl
172 for (SparseOptimizer::EdgeContainer::const_iterator it =
173 _optimizer->activeEdges().begin();
174 it != _optimizer->activeEdges().end(); ++it) {
175 OptimizableGraph::Edge* e = *it;
176
177 for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
180 int ind1 = v1->hessianIndex();
181 if (ind1 == -1) continue;
182 int indexV1Bak = ind1;
183 for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
186 int ind2 = v2->hessianIndex();
187 if (ind2 == -1) continue;
188 ind1 = indexV1Bak;
189 bool transposedBlock = ind1 > ind2;
190 if (transposedBlock) { // make sure, we allocate the upper triangle
191 // block
192 std::swap(ind1, ind2);
193 }
194 if (!v1->marginalized() && !v2->marginalized()) {
195 PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
196 if (zeroBlocks) m->setZero();
197 e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
198 if (_Hschur) { // assume this is only needed in case we solve with
199 // the schur complement
200 schurMatrixLookup->addBlock(ind1, ind2);
201 }
202 } else if (v1->marginalized() && v2->marginalized()) {
203 // RAINER hmm.... should we ever reach this here????
205 _Hll->block(ind1 - _numPoses, ind2 - _numPoses, true);
206 if (zeroBlocks) m->setZero();
207 e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
208 } else {
209 if (v1->marginalized()) {
210 PoseLandmarkMatrixType* m = _Hpl->block(
211 v2->hessianIndex(), v1->hessianIndex() - _numPoses, true);
212 if (zeroBlocks) m->setZero();
214 m->data(), viIdx, vjIdx,
215 true); // transpose the block before writing to it
216 } else {
217 PoseLandmarkMatrixType* m = _Hpl->block(
218 v1->hessianIndex(), v2->hessianIndex() - _numPoses, true);
219 if (zeroBlocks) m->setZero();
220 e->mapHessianMemory(m->data(), viIdx, vjIdx,
221 false); // directly the block
222 }
223 }
224 }
225 }
226 }
227
228 if (!_doSchur) {
229 delete schurMatrixLookup;
230 return true;
231 }
232
233 _DInvSchur->diagonal().resize(landmarkIdx);
234 _Hpl->fillSparseBlockMatrixCCS(*_HplCCS);
235
236 for (OptimizableGraph::Vertex* v : _optimizer->indexMapping()) {
237 if (v->marginalized()) {
238 const HyperGraph::EdgeSet& vedges = v->edges();
239 for (HyperGraph::EdgeSet::const_iterator it1 = vedges.begin();
240 it1 != vedges.end(); ++it1) {
241 for (size_t i = 0; i < (*it1)->vertices().size(); ++i) {
243 (OptimizableGraph::Vertex*)(*it1)->vertex(i);
244 if (v1->hessianIndex() == -1 || v1 == v) continue;
245 for (HyperGraph::EdgeSet::const_iterator it2 = vedges.begin();
246 it2 != vedges.end(); ++it2) {
247 for (size_t j = 0; j < (*it2)->vertices().size(); ++j) {
249 (OptimizableGraph::Vertex*)(*it2)->vertex(j);
250 if (v2->hessianIndex() == -1 || v2 == v) continue;
251 int i1 = v1->hessianIndex();
252 int i2 = v2->hessianIndex();
253 if (i1 <= i2) {
254 schurMatrixLookup->addBlock(i1, i2);
255 }
256 }
257 }
258 }
259 }
260 }
261 }
262
263 _Hschur->takePatternFromHash(*schurMatrixLookup);
264 delete schurMatrixLookup;
265 _Hschur->fillSparseBlockMatrixCCSTransposed(*_HschurTransposedCCS);
266
267 return true;
268}
269
270template <typename Traits>
272 const std::vector<HyperGraph::Vertex*>& vset,
273 const HyperGraph::EdgeSet& edges) {
274 for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin();
275 vit != vset.end(); ++vit) {
276 OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
277 int dim = v->dimension();
278 if (!v->marginalized()) {
279 v->setColInHessian(_sizePoses);
280 _sizePoses += dim;
281 _Hpp->rowBlockIndices().push_back(_sizePoses);
282 _Hpp->colBlockIndices().push_back(_sizePoses);
283 _Hpp->blockCols().push_back(
285 ++_numPoses;
286 int ind = v->hessianIndex();
287 PoseMatrixType* m = _Hpp->block(ind, ind, true);
288 v->mapHessianMemory(m->data());
289 } else {
290 G2O_ERROR("updateStructure(): Schur not supported");
291 abort();
292 }
293 }
294 resizeVector(_sizePoses + _sizeLandmarks);
295
296 for (HyperGraph::EdgeSet::const_iterator it = edges.begin();
297 it != edges.end(); ++it) {
298 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
299
300 for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
303 int ind1 = v1->hessianIndex();
304 int indexV1Bak = ind1;
305 if (ind1 == -1) continue;
306 for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
309 int ind2 = v2->hessianIndex();
310 if (ind2 == -1) continue;
311 ind1 = indexV1Bak;
312 bool transposedBlock = ind1 > ind2;
313 if (transposedBlock) // make sure, we allocate the upper triangular
314 // block
315 std::swap(ind1, ind2);
316
317 if (!v1->marginalized() && !v2->marginalized()) {
318 PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
319 e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
320 } else {
321 G2O_ERROR("{}: not supported", __PRETTY_FUNCTION__);
322 }
323 }
324 }
325 }
326
327 return true;
328}
329
330template <typename Traits>
332 if (!_doSchur) {
333 double t = get_monotonic_time();
334 bool ok = _linearSolver->solve(*_Hpp, _x, _b);
336 if (globalStats) {
337 globalStats->timeLinearSolver = get_monotonic_time() - t;
338 globalStats->hessianDimension = globalStats->hessianPoseDimension =
339 _Hpp->cols();
340 }
341 return ok;
342 }
343
344 // schur thing
345
346 // backup the coefficient matrix
347 double t = get_monotonic_time();
348
349 // _Hschur = _Hpp, but keeping the pattern of _Hschur
350 _Hschur->clear();
351 _Hpp->add(*_Hschur);
352
353 //_DInvSchur->clear();
354 memset(_coefficients.get(), 0, _sizePoses * sizeof(double));
355#ifdef G2O_OPENMP
356#pragma omp parallel for default(shared) schedule(dynamic, 10)
357#endif
358 for (int landmarkIndex = 0;
359 landmarkIndex < static_cast<int>(_Hll->blockCols().size());
360 ++landmarkIndex) {
362 marginalizeColumn = _Hll->blockCols()[landmarkIndex];
363 assert(marginalizeColumn.size() == 1 &&
364 "more than one block in _Hll column");
365
366 // calculate inverse block for the landmark
367 const LandmarkMatrixType* D = marginalizeColumn.begin()->second;
368 assert(D && D->rows() == D->cols() && "Error in landmark matrix");
369 LandmarkMatrixType& Dinv = _DInvSchur->diagonal()[landmarkIndex];
370 Dinv = D->inverse();
371
372 LandmarkVectorType db(D->rows());
373 for (int j = 0; j < D->rows(); ++j) {
374 db[j] = _b[_Hll->rowBaseOfBlock(landmarkIndex) + _sizePoses + j];
375 }
376 db = Dinv * db;
377
378 assert((size_t)landmarkIndex < _HplCCS->blockCols().size() &&
379 "Index out of bounds");
381 landmarkColumn = _HplCCS->blockCols()[landmarkIndex];
382
383 for (typename SparseBlockMatrixCCS<
384 PoseLandmarkMatrixType>::SparseColumn::const_iterator it_outer =
385 landmarkColumn.begin();
386 it_outer != landmarkColumn.end(); ++it_outer) {
387 int i1 = it_outer->row;
388
389 const PoseLandmarkMatrixType* Bi = it_outer->block;
390 assert(Bi);
391
392 PoseLandmarkMatrixType BDinv = (*Bi) * (Dinv);
393 assert(_HplCCS->rowBaseOfBlock(i1) < _sizePoses && "Index out of bounds");
394 typename PoseVectorType::MapType Bb(
395 &_coefficients[_HplCCS->rowBaseOfBlock(i1)], Bi->rows());
396#ifdef G2O_OPENMP
397 ScopedOpenMPMutex mutexLock(&_coefficientsMutex[i1]);
398#endif
399 Bb.noalias() += (*Bi) * db;
400
401 assert(i1 >= 0 &&
402 i1 < static_cast<int>(_HschurTransposedCCS->blockCols().size()) &&
403 "Index out of bounds");
405 targetColumnIt = _HschurTransposedCCS->blockCols()[i1].begin();
406
408 0);
409 typename SparseBlockMatrixCCS<
410 PoseLandmarkMatrixType>::SparseColumn::const_iterator it_inner =
411 lower_bound(landmarkColumn.begin(), landmarkColumn.end(), aux);
412 for (; it_inner != landmarkColumn.end(); ++it_inner) {
413 int i2 = it_inner->row;
414 const PoseLandmarkMatrixType* Bj = it_inner->block;
415 assert(Bj);
416 while (targetColumnIt->row < i2 /*&& targetColumnIt != _HschurTransposedCCS->blockCols()[i1].end()*/)
417 ++targetColumnIt;
418 assert(targetColumnIt != _HschurTransposedCCS->blockCols()[i1].end() &&
419 targetColumnIt->row == i2 &&
420 "invalid iterator, something wrong with the matrix structure");
421 PoseMatrixType* Hi1i2 = targetColumnIt->block; //_Hschur->block(i1,i2);
422 assert(Hi1i2);
423 (*Hi1i2).noalias() -= BDinv * Bj->transpose();
424 }
425 }
426 }
427
428 // _bschur = _b for calling solver, and not touching _b
429 memcpy(_bschur.get(), _b, _sizePoses * sizeof(double));
430 for (int i = 0; i < _sizePoses; ++i) {
431 _bschur[i] -= _coefficients[i];
432 }
433
435 if (globalStats) {
436 globalStats->timeSchurComplement = get_monotonic_time() - t;
437 }
438
439 t = get_monotonic_time();
440 bool solvedPoses = _linearSolver->solve(*_Hschur, _x, _bschur.get());
441 if (globalStats) {
442 globalStats->timeLinearSolver = get_monotonic_time() - t;
443 globalStats->hessianPoseDimension = _Hpp->cols();
444 globalStats->hessianLandmarkDimension = _Hll->cols();
445 globalStats->hessianDimension = globalStats->hessianPoseDimension +
446 globalStats->hessianLandmarkDimension;
447 }
448
449 if (!solvedPoses) return false;
450
451 // _x contains the solution for the poses, now applying it to the landmarks to
452 // get the new part of the solution;
453 double* xp = _x;
454 double* cp = _coefficients.get();
455
456 double* xl = _x + _sizePoses;
457 double* cl = _coefficients.get() + _sizePoses;
458 double* bl = _b + _sizePoses;
459
460 // cp = -xp
461 for (int i = 0; i < _sizePoses; ++i) cp[i] = -xp[i];
462
463 // cl = bl
464 memcpy(cl, bl, _sizeLandmarks * sizeof(double));
465
466 // cl = bl - Bt * xp
467 // Bt->multiply(cl, cp);
468 _HplCCS->rightMultiply(cl, cp);
469
470 // xl = Dinv * cl
471 memset(xl, 0, _sizeLandmarks * sizeof(double));
472 _DInvSchur->multiply(xl, cl);
473 //_DInvSchur->rightMultiply(xl,cl);
474
475 return true;
476}
477
478template <typename Traits>
481 const std::vector<std::pair<int, int>>& blockIndices) {
482 double t = get_monotonic_time();
483 bool ok = _linearSolver->solvePattern(spinv, blockIndices, *_Hpp);
485 if (globalStats) {
486 globalStats->timeMarginals = get_monotonic_time() - t;
487 }
488 return ok;
489}
490
491template <typename Traits>
493 // clear b vector
494#ifdef G2O_OPENMP
495#pragma omp parallel for default( \
496 shared) if (_optimizer->indexMapping().size() > 1000)
497#endif
498 for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size());
499 ++i) {
500 OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
501 assert(v);
503 }
504 _Hpp->clear();
505 if (_doSchur) {
506 _Hll->clear();
507 _Hpl->clear();
508 }
509
510 // resetting the terms for the pairwise constraints
511 // built up the current system by storing the Hessian blocks in the edges and
512 // vertices
513#ifndef G2O_OPENMP
514 // no threading, we do not need to copy the workspace
515 JacobianWorkspace& jacobianWorkspace = _optimizer->jacobianWorkspace();
516#else
517 // if running with threads need to produce copies of the workspace for each
518 // thread
519 JacobianWorkspace jacobianWorkspace = _optimizer->jacobianWorkspace();
520#pragma omp parallel for default(shared) firstprivate( \
521 jacobianWorkspace) if (_optimizer->activeEdges().size() > 100)
522#endif
523 for (int k = 0; k < static_cast<int>(_optimizer->activeEdges().size()); ++k) {
524 OptimizableGraph::Edge* e = _optimizer->activeEdges()[k];
526 jacobianWorkspace); // jacobian of the nodes' oplus (manifold)
528#ifndef NDEBUG
529 for (size_t i = 0; i < e->vertices().size(); ++i) {
530 const OptimizableGraph::Vertex* v =
531 static_cast<const OptimizableGraph::Vertex*>(e->vertex(i));
532 if (!v->fixed()) {
533 bool hasANan = arrayHasNaN(jacobianWorkspace.workspaceForVertex(i),
534 e->dimension() * v->dimension());
535 if (hasANan) {
536 G2O_WARN(
537 "buildSystem(): NaN within Jacobian for edge {} for vertex {}",
538 static_cast<void*>(e), i);
539 break;
540 }
541 }
542 }
543#endif
544 }
545
546 // flush the current system in a sparse block matrix
547#ifdef G2O_OPENMP
548#pragma omp parallel for default( \
549 shared) if (_optimizer->indexMapping().size() > 1000)
550#endif
551 for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size());
552 ++i) {
553 OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
554 int iBase = v->colInHessian();
555 if (v->marginalized()) iBase += _sizePoses;
556 v->copyB(_b + iBase);
557 }
558
559 return false;
560}
561
562template <typename Traits>
563bool BlockSolver<Traits>::setLambda(double lambda, bool backup) {
564 if (backup) {
565 _diagonalBackupPose.resize(_numPoses);
566 _diagonalBackupLandmark.resize(_numLandmarks);
567 }
568#ifdef G2O_OPENMP
569#pragma omp parallel for default(shared) if (_numPoses > 100)
570#endif
571 for (int i = 0; i < _numPoses; ++i) {
572 PoseMatrixType* b = _Hpp->block(i, i);
573 if (backup) _diagonalBackupPose[i] = b->diagonal();
574 b->diagonal().array() += lambda;
575 }
576#ifdef G2O_OPENMP
577#pragma omp parallel for default(shared) if (_numLandmarks > 100)
578#endif
579 for (int i = 0; i < _numLandmarks; ++i) {
580 LandmarkMatrixType* b = _Hll->block(i, i);
581 if (backup) _diagonalBackupLandmark[i] = b->diagonal();
582 b->diagonal().array() += lambda;
583 }
584 return true;
585}
586
587template <typename Traits>
589 assert((int)_diagonalBackupPose.size() == _numPoses &&
590 "Mismatch in dimensions");
591 assert((int)_diagonalBackupLandmark.size() == _numLandmarks &&
592 "Mismatch in dimensions");
593 for (int i = 0; i < _numPoses; ++i) {
594 PoseMatrixType* b = _Hpp->block(i, i);
595 b->diagonal() = _diagonalBackupPose[i];
596 }
597 for (int i = 0; i < _numLandmarks; ++i) {
598 LandmarkMatrixType* b = _Hll->block(i, i);
599 b->diagonal() = _diagonalBackupLandmark[i];
600 }
601}
602
603template <typename Traits>
604bool BlockSolver<Traits>::init(SparseOptimizer* optimizer, bool online) {
605 _optimizer = optimizer;
606 if (!online) {
607 if (_Hpp) _Hpp->clear();
608 if (_Hpl) _Hpl->clear();
609 if (_Hll) _Hll->clear();
610 }
611 _linearSolver->init();
612 return true;
613}
614
615template <typename Traits>
617 _linearSolver->setWriteDebug(writeDebug);
618}
619
620template <typename Traits>
621bool BlockSolver<Traits>::saveHessian(const std::string& fileName) const {
622 return _Hpp->writeOctave(fileName.c_str(), true);
623}
624
625} // namespace g2o
base for the block solvers with some basic function interfaces
void resize(int *blockPoseIndices, int numPoseBlocks, int *blockLandmarkIndices, int numLandmarkBlocks, int totalDim)
virtual bool computeMarginals(SparseBlockMatrix< MatrixX > &spinv, const std::vector< std::pair< int, int > > &blockIndices)
virtual bool buildStructure(bool zeroBlocks=false)
Traits::LandmarkVectorType LandmarkVectorType
virtual void setWriteDebug(bool writeDebug)
virtual void restoreDiagonal()
virtual bool buildSystem()
virtual bool saveHessian(const std::string &fileName) const
write the hessian to disk using the specified file name
virtual bool solve()
virtual bool init(SparseOptimizer *optimizer, bool online=false)
Traits::PoseMatrixType PoseMatrixType
Traits::PoseLandmarkMatrixType PoseLandmarkMatrixType
BlockSolver(std::unique_ptr< LinearSolverType > linearSolver)
virtual bool updateStructure(const std::vector< HyperGraph::Vertex * > &vset, const HyperGraph::EdgeSet &edges)
virtual bool setLambda(double lambda, bool backup=false)
Traits::LandmarkMatrixType LandmarkMatrixType
const VertexContainer & vertices() const
const Vertex * vertex(size_t i) const
std::set< Edge * > EdgeSet
provide memory workspace for computing the Jacobians
double * workspaceForVertex(int vertexIndex)
int dimension() const
returns the dimensions of the error function
virtual void linearizeOplus(JacobianWorkspace &jacobianWorkspace)=0
virtual void mapHessianMemory(double *d, int i, int j, bool rowMajor)=0
virtual void constructQuadraticForm()=0
A general case Vertex for optimization.
virtual void mapHessianMemory(double *d)=0
bool marginalized() const
true => this node is marginalized out during the optimization
int colInHessian() const
get the column of this vertex in the Hessian
virtual void clearQuadraticForm()=0
int dimension() const
dimension of the estimated state belonging to this node
bool fixed() const
true => this node is fixed during the optimization
void setColInHessian(int c)
set the column of this vertex in the Hessian
virtual int copyB(double *b_) const =0
lock a mutex within a scope
size_t _xSize
Definition solver.h:142
Sparse matrix which uses blocks.
std::vector< RowBlock > SparseColumn
const std::vector< SparseColumn > & blockCols() const
the block matrices per block-column
Sparse matrix which uses blocks based on hash structures.
MatrixType * addBlock(int r, int c, bool zeroBlock=false)
const std::vector< SparseColumn > & blockCols() const
the block matrices per block-column
Sparse matrix which uses blocks.
std::map< int, SparseMatrixBlock * > IntBlockMap
#define G2O_ERROR(...)
Definition logger.h:89
#define G2O_WARN(...)
Definition logger.h:88
#define __PRETTY_FUNCTION__
Definition macros.h:90
some general case utility functions
bool arrayHasNaN(const double *array, int size, int *nanIndex=0)
Definition misc.h:168
double get_monotonic_time()
Definition timeutil.cpp:43
Definition jet.h:876
statistics about the optimization
Definition batch_stats.h:40
size_t hessianLandmarkDimension
dimension of the landmark matrix in Schur
Definition batch_stats.h:74
static G2OBatchStatistics * globalStats()
Definition batch_stats.h:77
size_t hessianDimension
rows / cols of the Hessian
Definition batch_stats.h:71
double timeSchurComplement
compute schur complement (0 if not done)
Definition batch_stats.h:54
double timeLinearSolver
time for solving, excluding Schur setup
Definition batch_stats.h:61
size_t hessianPoseDimension
dimension of the pose matrix in Schur
Definition batch_stats.h:72
utility functions for handling time related stuff