optimization_algorithm_dogleg.cpp
// g2o - General Graph Optimization
// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "optimization_algorithm_dogleg.h"

#include <cassert>
#include <iostream>

#include "batch_stats.h"
#include "block_solver.h"
#include "g2o/stuff/logger.h"
#include "g2o/stuff/timeutil.h"
#include "solver.h"
#include "sparse_optimizer.h"
using namespace std;

namespace g2o {

OptimizationAlgorithmDogleg::OptimizationAlgorithmDogleg(
    std::unique_ptr<BlockSolverBase> solver)
    : OptimizationAlgorithmWithHessian(*solver.get()),
      m_solver{std::move(solver)} {
  _userDeltaInit =
      _properties.makeProperty<Property<double>>("initialDelta", (double)1e4);
  _maxTrialsAfterFailure =
      _properties.makeProperty<Property<int>>("maxTrialsAfterFailure", 100);
  _initialLambda =
      _properties.makeProperty<Property<double>>("initialLambda", (double)1e-7);
  _lamdbaFactor =
      _properties.makeProperty<Property<double>>("lambdaFactor", 10.);
  _delta = _userDeltaInit->value();
  _lastStep = STEP_UNDEFINED;
  _wasPDInAllIterations = true;
  _lastNumTries = 0;
  _currentLambda = 0.;
}

OptimizationAlgorithmDogleg::~OptimizationAlgorithmDogleg() = default;

OptimizationAlgorithm::SolverResult OptimizationAlgorithmDogleg::solve(
    int iteration, bool online) {
  assert(_optimizer && "_optimizer not set");
  assert(_solver.optimizer() == _optimizer &&
         "underlying linear solver operates on different graph");

  BlockSolverBase& blockSolver = static_cast<BlockSolverBase&>(_solver);

  if (iteration == 0 &&
      !online) {  // build up the CCS structure here for easy time measurement
    bool ok = _solver.buildStructure();
    if (!ok) {
      G2O_WARN("{}: Failure while building CCS structure", __PRETTY_FUNCTION__);
      return OptimizationAlgorithm::Fail;
    }

    // init some members to the current size of the problem
    _hsd.resize(_solver.vectorSize());
    _hdl.resize(_solver.vectorSize());
    _auxVector.resize(_solver.vectorSize());
    _delta = _userDeltaInit->value();
    _currentLambda = _initialLambda->value();
    _wasPDInAllIterations = true;
  }

  double t = get_monotonic_time();
  _optimizer->computeActiveErrors();
  G2OBatchStatistics* globalStats = G2OBatchStatistics::globalStats();
  if (globalStats) {
    globalStats->timeResiduals = get_monotonic_time() - t;
    t = get_monotonic_time();
  }

  double currentChi = _optimizer->activeRobustChi2();

  _solver.buildSystem();
  if (globalStats) {
    globalStats->timeQuadraticForm = get_monotonic_time() - t;
  }

  VectorX::ConstMapType b(_solver.b(), _solver.vectorSize());

  // compute alpha
  _auxVector.setZero();
  blockSolver.multiplyHessian(_auxVector.data(), _solver.b());
  double bNormSquared = b.squaredNorm();
  double alpha = bNormSquared / _auxVector.dot(b);

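  // _hsd is the steepest-descent (Cauchy) step: the quadratic model decreases
  // fastest along b, and alpha = b^T b / (b^T H b), computed above, is the
  // step length that minimizes the model along that direction.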
  _hsd = alpha * b;
  double hsdNorm = _hsd.norm();
  double hgnNorm = -1.;

  bool solvedGaussNewton = false;
  bool goodStep = false;
  int& numTries = _lastNumTries;
  numTries = 0;
  do {
    ++numTries;

    if (!solvedGaussNewton) {
      const double minLambda = cst(1e-12);
      const double maxLambda = cst(1e3);
      solvedGaussNewton = true;
      // if the matrix appeared not to be positive definite in at least one
      // previous iteration, apply a damping factor to obtain a positive
      // definite Hessian before solving the Gauss-Newton system
      bool solverOk = false;
      while (!solverOk) {
        if (!_wasPDInAllIterations)
          _solver.setLambda(_currentLambda,
                            true);  // add _currentLambda to the diagonal
        solverOk = _solver.solve();
        if (!_wasPDInAllIterations) _solver.restoreDiagonal();
        _wasPDInAllIterations = _wasPDInAllIterations && solverOk;
        if (!_wasPDInAllIterations) {
          // simple strategy to control the damping factor
          if (solverOk) {
            _currentLambda =
                std::max(minLambda,
                         _currentLambda / (cst(0.5) * _lamdbaFactor->value()));
          } else {
            _currentLambda *= _lamdbaFactor->value();
            if (_currentLambda > maxLambda) {
              _currentLambda = maxLambda;
              return Fail;
            }
          }
        }
      }
      hgnNorm = VectorX::ConstMapType(_solver.x(), _solver.vectorSize()).norm();
    }

    VectorX::ConstMapType hgn(_solver.x(), _solver.vectorSize());
    assert(hgnNorm >= 0. && "Norm of the GN step is not computed");

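    // Select the dogleg step for the current trust region of radius _delta:
    // take the full Gauss-Newton step if it already lies inside the region,
    // the truncated steepest-descent step if even the Cauchy point falls
    // outside, and otherwise blend the two along the dogleg path below.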
    if (hgnNorm < _delta) {
      _hdl = hgn;
      _lastStep = STEP_GN;
    } else if (hsdNorm > _delta) {
      _hdl = _delta / hsdNorm * _hsd;
      _lastStep = STEP_SD;
    } else {
      _auxVector = hgn - _hsd;  // b - a
      double c = _hsd.dot(_auxVector);
      double bmaSquaredNorm = _auxVector.squaredNorm();
      double beta;
      if (c <= 0.)
        beta = (-c + sqrt(c * c + bmaSquaredNorm *
                                      (_delta * _delta - _hsd.squaredNorm()))) /
               bmaSquaredNorm;
      else {
        double hsdSqrNorm = _hsd.squaredNorm();
        beta =
            (_delta * _delta - hsdSqrNorm) /
            (c + sqrt(c * c + bmaSquaredNorm * (_delta * _delta - hsdSqrNorm)));
      }
      assert(beta > 0. && beta < 1 && "Error while computing beta");
      _hdl = _hsd + beta * (hgn - _hsd);
      _lastStep = STEP_DL;
      assert(_hdl.norm() < _delta + 1e-5 &&
             "Computed step does not correspond to the trust region");
    }

    // compute the linear gain
    _auxVector.setZero();
    blockSolver.multiplyHessian(_auxVector.data(), _hdl.data());
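    // Decrease of the chi^2 predicted by the quadratic model for the step
    // h_dl: linearGain = 2 b^T h_dl - h_dl^T H h_dl, with _auxVector = H h_dl.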
    double linearGain = -1 * (_auxVector.dot(_hdl)) + 2 * (b.dot(_hdl));

    // apply the update and see what happens
    _optimizer->push();
    _optimizer->update(_hdl.data());
    _optimizer->computeActiveErrors();
    double newChi = _optimizer->activeRobustChi2();
    double nonLinearGain = currentChi - newChi;
    if (fabs(linearGain) < 1e-12) linearGain = cst(1e-12);
    double rho = nonLinearGain / linearGain;
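    // rho is the trust-region gain ratio: the achieved chi^2 reduction divided
    // by the reduction predicted by the quadratic model. A positive rho means
    // the step actually reduced the error and is accepted below.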
    // cerr << PVAR(nonLinearGain) << " " << PVAR(linearGain) << " " <<
    // PVAR(rho) << endl;
    if (rho > 0) {  // step is good and will be accepted
      _optimizer->discardTop();
      goodStep = true;
    } else {  // recover previous state
      _optimizer->pop();
    }

    // update trust region based on the step quality
    if (rho > 0.75)
      _delta = std::max<double>(_delta, 3 * _hdl.norm());
    else if (rho < 0.25)
      _delta *= 0.5;
  } while (!goodStep && numTries < _maxTrialsAfterFailure->value());
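  // The loop above retries with a shrinking trust region until a step is
  // accepted; if no improving step was found within maxTrialsAfterFailure
  // attempts, ask the optimizer to terminate.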
  if (numTries == _maxTrialsAfterFailure->value() || !goodStep)
    return Terminate;
  return OK;
}

void OptimizationAlgorithmDogleg::printVerbose(std::ostream& os) const {
  os << "\t Delta= " << _delta << "\t step= " << stepType2Str(_lastStep)
     << "\t tries= " << _lastNumTries;
  if (!_wasPDInAllIterations) os << "\t lambda= " << _currentLambda;
}

const char* OptimizationAlgorithmDogleg::stepType2Str(int stepType) {
  switch (stepType) {
    case STEP_SD:
      return "Descent";
    case STEP_GN:
      return "GN";
    case STEP_DL:
      return "Dogleg";
    default:
      return "Undefined";
  }
}

}  // namespace g2o
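
// Minimal usage sketch (not part of the original file): one possible way to
// plug the dogleg algorithm into a SparseOptimizer. The linear solver and
// block solver types below are illustrative choices, not the only options.
//
//   #include <g2o/core/block_solver.h>
//   #include <g2o/core/optimization_algorithm_dogleg.h>
//   #include <g2o/core/sparse_optimizer.h>
//   #include <g2o/solvers/eigen/linear_solver_eigen.h>
//
//   auto linearSolver = std::make_unique<
//       g2o::LinearSolverEigen<g2o::BlockSolverX::PoseMatrixType>>();
//   auto blockSolver =
//       std::make_unique<g2o::BlockSolverX>(std::move(linearSolver));
//   auto* algorithm =
//       new g2o::OptimizationAlgorithmDogleg(std::move(blockSolver));
//
//   g2o::SparseOptimizer optimizer;
//   optimizer.setAlgorithm(algorithm);
//   // ... add vertices and edges to the graph ...
//   optimizer.initializeOptimization();
//   optimizer.optimize(10);  // run up to 10 dogleg iterations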