40template <
typename Traits>
52template <
typename Traits>
54 int* blockLandmarkIndices,
55 int numLandmarkBlocks,
int s) {
62 assert(_sizePoses > 0 &&
"allocating with wrong size");
63 _coefficients.reset(allocate_aligned<double>(s));
64 _bschur.reset(allocate_aligned<double>(_sizePoses));
67 _Hpp = std::make_unique<PoseHessianType>(blockPoseIndices, blockPoseIndices,
68 numPoseBlocks, numPoseBlocks);
70 _Hschur = std::make_unique<PoseHessianType>(
71 blockPoseIndices, blockPoseIndices, numPoseBlocks, numPoseBlocks);
72 _Hll = std::make_unique<LandmarkHessianType>(
73 blockLandmarkIndices, blockLandmarkIndices, numLandmarkBlocks,
76 std::make_unique<SparseBlockMatrixDiagonal<LandmarkMatrixType>>(
77 _Hll->colBlockIndices());
78 _Hpl = std::make_unique<PoseLandmarkHessianType>(
79 blockPoseIndices, blockLandmarkIndices, numPoseBlocks,
81 _HplCCS = std::make_unique<SparseBlockMatrixCCS<PoseLandmarkMatrixType>>(
82 _Hpl->rowBlockIndices(), _Hpl->colBlockIndices());
83 _HschurTransposedCCS =
84 std::make_unique<SparseBlockMatrixCCS<PoseMatrixType>>(
85 _Hschur->colBlockIndices(), _Hschur->rowBlockIndices());
87 _coefficientsMutex.resize(numPoseBlocks);
92template <
typename Traits>
99 _coefficients.reset();
103 _HschurTransposedCCS.reset();
106template <
typename Traits>
109template <
typename Traits>
113 size_t sparseDim = 0;
118 int* blockPoseIndices =
new int[_optimizer->indexMapping().size()];
119 int* blockLandmarkIndices =
new int[_optimizer->indexMapping().size()];
121 for (
size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
127 blockPoseIndices[_numPoses] = _sizePoses;
131 _sizeLandmarks += dim;
132 blockLandmarkIndices[_numLandmarks] = _sizeLandmarks;
137 resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks,
139 delete[] blockLandmarkIndices;
140 delete[] blockPoseIndices;
145 for (
size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
150 if (zeroBlocks) m->setZero();
155 if (zeroBlocks) m->setZero();
160 assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);
166 _Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
167 schurMatrixLookup->
blockCols().resize(_Hschur->blockCols().size());
172 for (SparseOptimizer::EdgeContainer::const_iterator it =
173 _optimizer->activeEdges().begin();
174 it != _optimizer->activeEdges().end(); ++it) {
177 for (
size_t viIdx = 0; viIdx < e->
vertices().size(); ++viIdx) {
181 if (ind1 == -1)
continue;
182 int indexV1Bak = ind1;
183 for (
size_t vjIdx = viIdx + 1; vjIdx < e->
vertices().size(); ++vjIdx) {
187 if (ind2 == -1)
continue;
189 bool transposedBlock = ind1 > ind2;
190 if (transposedBlock) {
192 std::swap(ind1, ind2);
196 if (zeroBlocks) m->setZero();
200 schurMatrixLookup->
addBlock(ind1, ind2);
205 _Hll->block(ind1 - _numPoses, ind2 - _numPoses,
true);
206 if (zeroBlocks) m->setZero();
212 if (zeroBlocks) m->setZero();
214 m->data(), viIdx, vjIdx,
219 if (zeroBlocks) m->setZero();
229 delete schurMatrixLookup;
233 _DInvSchur->diagonal().resize(landmarkIdx);
234 _Hpl->fillSparseBlockMatrixCCS(*_HplCCS);
237 if (v->marginalized()) {
239 for (HyperGraph::EdgeSet::const_iterator it1 = vedges.begin();
240 it1 != vedges.end(); ++it1) {
241 for (
size_t i = 0; i < (*it1)->vertices().size(); ++i) {
245 for (HyperGraph::EdgeSet::const_iterator it2 = vedges.begin();
246 it2 != vedges.end(); ++it2) {
247 for (
size_t j = 0; j < (*it2)->vertices().size(); ++j) {
254 schurMatrixLookup->
addBlock(i1, i2);
263 _Hschur->takePatternFromHash(*schurMatrixLookup);
264 delete schurMatrixLookup;
265 _Hschur->fillSparseBlockMatrixCCSTransposed(*_HschurTransposedCCS);
270template <
typename Traits>
272 const std::vector<HyperGraph::Vertex*>& vset,
274 for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin();
275 vit != vset.end(); ++vit) {
281 _Hpp->rowBlockIndices().push_back(_sizePoses);
282 _Hpp->colBlockIndices().push_back(_sizePoses);
283 _Hpp->blockCols().push_back(
290 G2O_ERROR(
"updateStructure(): Schur not supported");
294 resizeVector(_sizePoses + _sizeLandmarks);
296 for (HyperGraph::EdgeSet::const_iterator it = edges.begin();
297 it != edges.end(); ++it) {
300 for (
size_t viIdx = 0; viIdx < e->
vertices().size(); ++viIdx) {
304 int indexV1Bak = ind1;
305 if (ind1 == -1)
continue;
306 for (
size_t vjIdx = viIdx + 1; vjIdx < e->
vertices().size(); ++vjIdx) {
310 if (ind2 == -1)
continue;
312 bool transposedBlock = ind1 > ind2;
315 std::swap(ind1, ind2);
330template <
typename Traits>
334 bool ok = _linearSolver->solve(*_Hpp, _x, _b);
354 memset(_coefficients.get(), 0, _sizePoses *
sizeof(
double));
356#pragma omp parallel for default(shared) schedule(dynamic, 10)
358 for (
int landmarkIndex = 0;
359 landmarkIndex < static_cast<int>(_Hll->blockCols().size());
362 marginalizeColumn = _Hll->blockCols()[landmarkIndex];
363 assert(marginalizeColumn.size() == 1 &&
364 "more than one block in _Hll column");
368 assert(D && D->rows() == D->cols() &&
"Error in landmark matrix");
373 for (
int j = 0; j < D->rows(); ++j) {
374 db[j] = _b[_Hll->rowBaseOfBlock(landmarkIndex) + _sizePoses + j];
378 assert((
size_t)landmarkIndex < _HplCCS->blockCols().size() &&
379 "Index out of bounds");
381 landmarkColumn = _HplCCS->blockCols()[landmarkIndex];
385 landmarkColumn.begin();
386 it_outer != landmarkColumn.end(); ++it_outer) {
387 int i1 = it_outer->row;
393 assert(_HplCCS->rowBaseOfBlock(i1) < _sizePoses &&
"Index out of bounds");
394 typename PoseVectorType::MapType Bb(
395 &_coefficients[_HplCCS->rowBaseOfBlock(i1)], Bi->rows());
399 Bb.noalias() += (*Bi) * db;
402 i1 <
static_cast<int>(_HschurTransposedCCS->blockCols().size()) &&
403 "Index out of bounds");
405 targetColumnIt = _HschurTransposedCCS->
blockCols()[i1].begin();
411 lower_bound(landmarkColumn.begin(), landmarkColumn.end(), aux);
412 for (; it_inner != landmarkColumn.end(); ++it_inner) {
413 int i2 = it_inner->row;
416 while (targetColumnIt->row < i2 )
418 assert(targetColumnIt != _HschurTransposedCCS->
blockCols()[i1].end() &&
419 targetColumnIt->row == i2 &&
420 "invalid iterator, something wrong with the matrix structure");
423 (*Hi1i2).noalias() -= BDinv * Bj->transpose();
429 memcpy(_bschur.get(), _b, _sizePoses *
sizeof(
double));
430 for (
int i = 0; i < _sizePoses; ++i) {
431 _bschur[i] -= _coefficients[i];
440 bool solvedPoses = _linearSolver->solve(*_Hschur, _x, _bschur.get());
449 if (!solvedPoses)
return false;
454 double* cp = _coefficients.get();
456 double* xl = _x + _sizePoses;
457 double* cl = _coefficients.get() + _sizePoses;
458 double* bl = _b + _sizePoses;
461 for (
int i = 0; i < _sizePoses; ++i) cp[i] = -xp[i];
464 memcpy(cl, bl, _sizeLandmarks *
sizeof(
double));
468 _HplCCS->rightMultiply(cl, cp);
471 memset(xl, 0, _sizeLandmarks *
sizeof(
double));
472 _DInvSchur->multiply(xl, cl);
478template <
typename Traits>
481 const std::vector<std::pair<int, int>>& blockIndices) {
483 bool ok = _linearSolver->solvePattern(spinv, blockIndices, *_Hpp);
491template <
typename Traits>
495#pragma omp parallel for default( \
496 shared) if (_optimizer->indexMapping().size() > 1000)
498 for (
int i = 0; i < static_cast<int>(_optimizer->indexMapping().size());
520#pragma omp parallel for default(shared) firstprivate( \
521 jacobianWorkspace) if (_optimizer->activeEdges().size() > 100)
523 for (
int k = 0; k < static_cast<int>(_optimizer->activeEdges().size()); ++k) {
529 for (
size_t i = 0; i < e->
vertices().size(); ++i) {
537 "buildSystem(): NaN within Jacobian for edge {} for vertex {}",
538 static_cast<void*
>(e), i);
548#pragma omp parallel for default( \
549 shared) if (_optimizer->indexMapping().size() > 1000)
551 for (
int i = 0; i < static_cast<int>(_optimizer->indexMapping().size());
556 v->
copyB(_b + iBase);
562template <
typename Traits>
565 _diagonalBackupPose.resize(_numPoses);
566 _diagonalBackupLandmark.resize(_numLandmarks);
569#pragma omp parallel for default(shared) if (_numPoses > 100)
571 for (
int i = 0; i < _numPoses; ++i) {
573 if (backup) _diagonalBackupPose[i] = b->diagonal();
574 b->diagonal().array() += lambda;
577#pragma omp parallel for default(shared) if (_numLandmarks > 100)
579 for (
int i = 0; i < _numLandmarks; ++i) {
581 if (backup) _diagonalBackupLandmark[i] = b->diagonal();
582 b->diagonal().array() += lambda;
587template <
typename Traits>
589 assert((
int)_diagonalBackupPose.size() == _numPoses &&
590 "Mismatch in dimensions");
591 assert((
int)_diagonalBackupLandmark.size() == _numLandmarks &&
592 "Mismatch in dimensions");
593 for (
int i = 0; i < _numPoses; ++i) {
595 b->diagonal() = _diagonalBackupPose[i];
597 for (
int i = 0; i < _numLandmarks; ++i) {
599 b->diagonal() = _diagonalBackupLandmark[i];
603template <
typename Traits>
605 _optimizer = optimizer;
607 if (_Hpp) _Hpp->
clear();
608 if (_Hpl) _Hpl->clear();
609 if (_Hll) _Hll->clear();
611 _linearSolver->init();
615template <
typename Traits>
617 _linearSolver->setWriteDebug(writeDebug);
620template <
typename Traits>
622 return _Hpp->writeOctave(fileName.c_str(),
true);
Base class for the block solvers, providing the basic function interfaces.
void resize(int *blockPoseIndices, int numPoseBlocks, int *blockLandmarkIndices, int numLandmarkBlocks, int totalDim)
virtual bool computeMarginals(SparseBlockMatrix< MatrixX > &spinv, const std::vector< std::pair< int, int > > &blockIndices)
virtual bool buildStructure(bool zeroBlocks=false)
Traits::LandmarkVectorType LandmarkVectorType
virtual void setWriteDebug(bool writeDebug)
virtual void restoreDiagonal()
virtual bool buildSystem()
virtual bool saveHessian(const std::string &fileName) const
write the hessian to disk using the specified file name
virtual bool init(SparseOptimizer *optimizer, bool online=false)
Traits::PoseMatrixType PoseMatrixType
Traits::PoseLandmarkMatrixType PoseLandmarkMatrixType
BlockSolver(std::unique_ptr< LinearSolverType > linearSolver)
virtual bool updateStructure(const std::vector< HyperGraph::Vertex * > &vset, const HyperGraph::EdgeSet &edges)
virtual bool setLambda(double lambda, bool backup=false)
Traits::LandmarkMatrixType LandmarkMatrixType
const VertexContainer & vertices() const
const Vertex * vertex(size_t i) const
std::set< Edge * > EdgeSet
provide memory workspace for computing the Jacobians
double * workspaceForVertex(int vertexIndex)
int dimension() const
returns the dimensions of the error function
virtual void linearizeOplus(JacobianWorkspace &jacobianWorkspace)=0
virtual void mapHessianMemory(double *d, int i, int j, bool rowMajor)=0
virtual void constructQuadraticForm()=0
A general case Vertex for optimization.
virtual void mapHessianMemory(double *d)=0
bool marginalized() const
true => this node is marginalized out during the optimization
int colInHessian() const
get the column of this vertex in the Hessian
virtual void clearQuadraticForm()=0
int dimension() const
dimension of the estimated state belonging to this node
bool fixed() const
true => this node is fixed during the optimization
void setColInHessian(int c)
set the column of this vertex in the Hessian
virtual int copyB(double *b_) const =0
lock a mutex within a scope
Sparse matrix which uses blocks.
std::vector< RowBlock > SparseColumn
const std::vector< SparseColumn > & blockCols() const
the block matrices per block-column
Sparse matrix which uses blocks based on hash structures.
MatrixType * addBlock(int r, int c, bool zeroBlock=false)
const std::vector< SparseColumn > & blockCols() const
the block matrices per block-column
Sparse matrix which uses blocks.
std::map< int, SparseMatrixBlock * > IntBlockMap
#define __PRETTY_FUNCTION__
some general case utility functions
bool arrayHasNaN(const double *array, int size, int *nanIndex=0)
double get_monotonic_time()
statistics about the optimization
size_t hessianLandmarkDimension
dimension of the landmark matrix in Schur
static G2OBatchStatistics * globalStats()
size_t hessianDimension
rows / cols of the Hessian
double timeSchurComplement
compute schur complement (0 if not done)
double timeLinearSolver
time for solving, excluding Schur setup
size_t hessianPoseDimension
dimension of the pose matrix in Schur
utility functions for handling time related stuff