g2o
Loading...
Searching...
No Matches
graph_optimizer_sparse_incremental.cpp
Go to the documentation of this file.
1// g2o - General Graph Optimization
2// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
3//
4// g2o is free software: you can redistribute it and/or modify
5// it under the terms of the GNU General Public License as published
6// by the Free Software Foundation, either version 3 of the License, or
7// (at your option) any later version.
8//
9// g2o is distributed in the hope that it will be useful,
10// but WITHOUT ANY WARRANTY; without even the implied warranty of
11// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12// GNU General Public License for more details.
13//
14// You should have received a copy of the GNU General Public License
15// along with this program. If not, see <http://www.gnu.org/licenses/>.
16
18
19#include <cassert>
20
25#include "g2o/stuff/macros.h"
26
27using namespace std;
28using namespace Eigen;
29
30namespace g2o {
31
32namespace {
33
34template <int p, int l>
35std::unique_ptr<g2o::Solver> AllocateCholmodSolver() {
36 std::cerr << "# Using CHOLMOD online poseDim " << p << " landMarkDim " << l
37 << " blockordering 1" << std::endl;
38
39 return std::make_unique<BlockSolverPL<p, l>>(
40 std::make_unique<LinearSolverCholmodOnline<
41 typename BlockSolverPL<p, l>::PoseMatrixType>>());
42}
43
47struct VertexBackup {
48 int hessianIndex;
49 OptimizableGraph::Vertex* vertex;
50 double* hessianData;
51 bool operator<(const VertexBackup& other) const {
52 return hessianIndex < other.hessianIndex;
53 }
54};
55} // namespace
56
// Constructor body of SparseOptimizerIncremental.
// NOTE(review): this listing came from a Doxygen page; the signature line and
// several hyperlinked lines were lost (the fused leading numbers are the
// original source line numbers — gaps mark dropped lines). The
// cholmod_allocate_triplet call below is cut mid-argument-list, and the
// member initializations between lines 70 and 75 are missing — recover them
// from the original file before editing.
// Initializes the CHOLMOD workspace used by the incremental updates.
60 cholmod_start(&_cholmodCommon);
61
62 // setup ordering strategy to not permute the matrix
63 _cholmodCommon.nmethods = 1;
64 _cholmodCommon.method[0].ordering = CHOLMOD_NATURAL;
65 _cholmodCommon.postorder = 0;
66 _cholmodCommon.supernodal = CHOLMOD_SIMPLICIAL;
67
// Pre-allocates the triplet buffer used to hold the permuted update
// (remaining arguments of the call were lost by the extraction).
68 _permutedUpdate = cholmod_allocate_triplet(1000, 1000, 1024, 0, CHOLMOD_REAL,
70 _L = 0;
73
75}
76
// Destructor body of SparseOptimizerIncremental (signature line lost by the
// Doxygen extraction; the fused numbers are original source line numbers).
// Releases the update matrix, the CHOLMOD sparse/factor/triplet buffers and
// shuts down the CHOLMOD workspace.
79 _updateMat.clear(true);
80 delete _cholmodSparse;
81 if (_cholmodFactor) {
82 cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
84 }
85 cholmod_free_triplet(&_permutedUpdate, &_cholmodCommon);
86 cholmod_finish(&_cholmodCommon);
87}
88
// Performs a single optimization step, either as a full (batch) step or as an
// incremental update of the right-hand side. Returns 1 on success, 0 on
// failure. The `iterations` argument is ignored — only one step is done.
//
// NOTE(review): recovered from a Doxygen listing; hyperlinked lines were lost
// (gaps in the fused original line numbers), e.g. the declaration of `solver`
// before line 92, the assignment to `ok` before line 100, and the estimate
// copies at lines 111/116. Comments below describe only what the remaining
// code shows.
89int SparseOptimizerIncremental::optimize(int iterations, bool online) {
90 (void)iterations; // we only do one iteration anyhow
92 solver->init(online);
93
94 bool ok = true;
95
// Batch path: rebuild structures and solve the full system.
96 if (!online || batchStep) {
97 // cerr << "performing batch step" << endl;
98 if (!online) {
100 if (!ok) {
101 cerr << __PRETTY_FUNCTION__ << ": Failure while building CCS structure"
102 << endl;
103 return 0;
104 }
105 }
106
107 // copy over the updated estimate as new linearization point
// slamDimension selects between 2D (SE2) and 3D (SE3) vertex types; the
// actual estimate assignment lines were lost by the extraction.
108 if (slamDimension == 3) {
109 for (size_t i = 0; i < indexMapping().size(); ++i) {
110 OnlineVertexSE2* v = static_cast<OnlineVertexSE2*>(indexMapping()[i]);
112 }
113 } else if (slamDimension == 6) {
114 for (size_t i = 0; i < indexMapping().size(); ++i) {
115 OnlineVertexSE3* v = static_cast<OnlineVertexSE3*>(indexMapping()[i]);
117 }
118 }
119
121 // SparseOptimizer::linearizeSystem();
123
124 // mark vertices to be sorted as last
// _cmember is a constraint-membership vector for the ordering: vertices
// touched by the 20 most recent vertices' edges get group 1.
125 int numBlocksRequired = _ivMap.size();
126 if (_cmember.size() < numBlocksRequired) {
127 _cmember.resize(2 * numBlocksRequired);
128 }
129 memset(_cmember.data(), 0, numBlocksRequired * sizeof(int));
130 if (_ivMap.size() > 100) {
131 for (size_t i = _ivMap.size() - 20; i < _ivMap.size(); ++i) {
132 const HyperGraph::EdgeSet& eset = _ivMap[i]->edges();
133 for (HyperGraph::EdgeSet::const_iterator it = eset.begin();
134 it != eset.end(); ++it) {
135 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
// NOTE(review): the declarations of v1/v2 (original lines 136/138) were
// lost; only the initializer expressions remain below.
137 static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
139 static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
140 if (v1->hessianIndex() >= 0) _cmember(v1->hessianIndex()) = 1;
141 if (v2->hessianIndex() >= 0) _cmember(v2->hessianIndex()) = 1;
142 }
143 }
144 // OptimizableGraph::Vertex* lastPose = _ivMap.back();
145 //_cmember(lastPose->hessianIndex()) = 2;
146 }
147
148 ok = _underlyingSolver->solve();
149
150 // get the current cholesky factor along with the permutation
// _perm is filled with the inverse permutation of L (perm[p[i]] = i).
151 _L = _solverInterface->L();
152 if (_perm.size() < (int)_L->n) _perm.resize(2 * _L->n);
153 int* p = static_cast<int*>(_L->Perm);
154 for (size_t i = 0; i < _L->n; ++i) _perm[p[i]] = i;
155
156 } else {
157 // update the b vector
// Incremental path: only refresh b for the touched vertices (the
// declaration of `v`, original line 160, was lost by the extraction).
158 for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin();
159 it != _touchedVertices.end(); ++it) {
161 int iBase = v->colInHessian();
162 v->copyB(_underlyingSolver->b() + iBase);
163 }
165 }
166
168
169 if (verbose()) {
171 cerr << "nodes = " << vertices().size()
172 << "\t edges= " << _activeEdges.size()
173 << "\t chi2= " << FIXED(activeChi2()) << endl;
174 }
175
177
178 if (!ok) return 0;
179 return 1;
180}
181
// Incorporates a set of new vertices (vset) and edges (eset) into the already
// optimized system and feeds the resulting Hessian blocks into the Cholesky
// factor as an incremental update.
// NOTE(review): the function signature line was lost by the Doxygen
// extraction; from the member declarations in the trailing tooltips this is
// SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet&,
// HyperGraph::EdgeSet&) — confirm against the original file. Gaps in the
// fused line numbers mark other dropped lines (several declarations of
// local variables such as v, v1, v2).
184 if (batchStep) {
186 }
187
188 for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end();
189 ++it) {
191 v->clearQuadraticForm(); // be sure that b is zero for this vertex
192 }
193
194 // get the touched vertices
// Every non-fixed endpoint of a new edge participates in the update.
195 _touchedVertices.clear();
196 for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end();
197 ++it) {
198 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
200 static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
202 static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
203 if (!v1->fixed()) _touchedVertices.insert(v1);
204 if (!v2->fixed()) _touchedVertices.insert(v2);
205 }
206 // cerr << PVAR(_touchedVertices.size()) << endl;
207
208 // updating the internal structures
209 std::vector<HyperGraph::Vertex*> newVertices;
210 newVertices.reserve(vset.size());
211 _activeVertices.reserve(_activeVertices.size() + vset.size());
212 _activeEdges.reserve(_activeEdges.size() + eset.size());
213 for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
214 _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
215 // cerr << "updating internal done." << endl;
216
217 // update the index mapping
// New non-fixed vertices are appended at the end of the index mapping;
// marginalized vertices are not supported and abort().
218 size_t next = _ivMap.size();
219 for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end();
220 ++it) {
222 if (!v->fixed()) {
223 if (!v->marginalized()) {
224 v->setHessianIndex(next);
225 _ivMap.push_back(v);
226 newVertices.push_back(v);
227 _activeVertices.push_back(v);
228 next++;
229 } else // not supported right now
230 abort();
231 } else {
232 v->setHessianIndex(-1);
233 }
234 }
235 // cerr << "updating index mapping done." << endl;
236
237 // backup the tempindex and prepare sorting structure
// VLA on non-MSVC; heap array on MSVC. The memset relies on VertexBackup
// being a plain aggregate.
238#ifdef _MSC_VER
239 VertexBackup* backupIdx = new VertexBackup[_touchedVertices.size()];
240#else
241 VertexBackup backupIdx[_touchedVertices.size()];
242#endif
243 memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
244 int idx = 0;
245 for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin();
246 it != _touchedVertices.end(); ++it) {
248 backupIdx[idx].hessianIndex = v->hessianIndex();
249 backupIdx[idx].vertex = v;
250 backupIdx[idx].hessianData = v->hessianData();
251 ++idx;
252 }
// NOTE(review): the middle of this sort call (original line 255, the
// container whose .size() is taken) was lost by the extraction.
253 sort(backupIdx,
254 backupIdx +
256 .size()); // sort according to the hessianIndex which is the
257 // same order as used later by the optimizer
// Temporarily re-index the touched vertices 0..idx-1 for the local update
// matrix; restored from backupIdx further below.
258 for (int i = 0; i < idx; ++i) {
259 backupIdx[i].vertex->setHessianIndex(i);
260 }
261 // cerr << "backup tempindex done." << endl;
262
263 // building the structure of the update
264 _updateMat.clear(true); // get rid of the old matrix structure
265 _updateMat.rowBlockIndices().clear();
266 _updateMat.colBlockIndices().clear();
267 _updateMat.blockCols().clear();
268
269 // placing the current stuff in _updateMat
270 MatrixXd* lastBlock = 0;
271 int sizePoses = 0;
272 for (int i = 0; i < idx; ++i) {
273 OptimizableGraph::Vertex* v = backupIdx[i].vertex;
274 int dim = v->dimension();
275 sizePoses += dim;
276 _updateMat.rowBlockIndices().push_back(sizePoses);
277 _updateMat.colBlockIndices().push_back(sizePoses);
// NOTE(review): the argument of this push_back (original line 279) was
// lost by the extraction.
278 _updateMat.blockCols().push_back(
280 int ind = v->hessianIndex();
281 // cerr << PVAR(ind) << endl;
282 if (ind >= 0) {
283 MatrixXd* m = _updateMat.block(ind, ind, true);
284 v->mapHessianMemory(m->data());
285 lastBlock = m;
286 }
287 }
288 if (lastBlock) {
289 lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0
290 }
291
// Map each edge's Hessian block into _updateMat; blocks are stored upper
// triangular, hence the index swap for transposed blocks.
292 for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end();
293 ++it) {
294 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
297
298 int ind1 = v1->hessianIndex();
299 if (ind1 == -1) continue;
300 int ind2 = v2->hessianIndex();
301 if (ind2 == -1) continue;
302 bool transposedBlock = ind1 > ind2;
303 if (transposedBlock) // make sure, we allocate the upper triangular block
304 swap(ind1, ind2);
305
306 MatrixXd* m = _updateMat.block(ind1, ind2, true);
307 e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
308 }
309
310 // build the system into _updateMat
// (the linearize/constructQuadraticForm calls around original lines
// 319-320 were lost by the extraction)
311 for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end();
312 ++it) {
313 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
314 e->computeError();
315 }
316 for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end();
317 ++it) {
318 OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
321 }
322
323 // restore the original data for the vertex
324 for (int i = 0; i < idx; ++i) {
325 backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
326 if (backupIdx[i].hessianData)
327 backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
328 }
329
330 // update the structure of the real block matrix
331 bool solverStatus = _algorithm->updateStructure(newVertices, eset);
332
// Factor the local update matrix with CHOLMOD (natural ordering).
333 bool updateStatus = computeCholeskyUpdate();
334 if (!updateStatus) {
335 cerr << "Error while computing update" << endl;
336 }
337
338 cholmod_sparse* updateAsSparseFactor =
339 cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);
340
341 // convert CCS update by permuting back to the permutation of L
342 if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
343 // cerr << "realloc _permutedUpdate" << endl;
// NOTE(review): the tail of this cholmod_reallocate_triplet call
// (original line 345) was lost by the extraction.
344 cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate,
346 }
347 _permutedUpdate->nnz = 0;
348 _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
// Re-index every entry of the factored update from the local (backupIdx)
// ordering into the global ordering of L via _perm, keeping the lower
// triangle (row >= col).
349 {
350 int* Ap = static_cast<int*>(updateAsSparseFactor->p);
351 int* Ai = static_cast<int*>(updateAsSparseFactor->i);
352 double* Ax = static_cast<double*>(updateAsSparseFactor->x);
353 int* Bj = static_cast<int*>(_permutedUpdate->j);
354 int* Bi = static_cast<int*>(_permutedUpdate->i);
355 double* Bx = static_cast<double*>(_permutedUpdate->x);
356 for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
357 const int& rbeg = Ap[c];
358 const int& rend = Ap[c + 1];
359 int cc = c / slamDimension;
360 int coff = c % slamDimension;
361 const int& cbase = backupIdx[cc].vertex->colInHessian();
362 const int& ccol = _perm(cbase + coff);
363 for (int j = rbeg; j < rend; j++) {
364 const int& r = Ai[j];
365 const double& val = Ax[j];
366
367 int rr = r / slamDimension;
368 int roff = r % slamDimension;
369 const int& rbase = backupIdx[rr].vertex->colInHessian();
370
371 int row = _perm(rbase + roff);
372 int col = ccol;
373 if (col > row) // lower triangular entry
374 swap(col, row);
375 Bi[_permutedUpdate->nnz] = row;
376 Bj[_permutedUpdate->nnz] = col;
377 Bx[_permutedUpdate->nnz] = val;
378 ++_permutedUpdate->nnz;
379 }
380 }
381 }
382 cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);
383#ifdef _MSC_VER
384 delete[] backupIdx;
385#endif
386
// Disabled variant converts via cholmod_triplet_to_sparse; the active
// branch (original lines 393-394, lost by the extraction) presumably
// performs the equivalent conversion and cholesky update.
387#if 0
388 cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
389 //writeCCSMatrix("update-permuted.txt", updatePermuted->nrow, updatePermuted->ncol, (int*)updatePermuted->p, (int*)updatePermuted->i, (double*)updatePermuted->x, false);
390 _solverInterface->choleskyUpdate(updatePermuted);
391 cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
392#else
395#endif
396
397 return solverStatus;
398}
399
// Factors the current update matrix (_updateMat, converted to CCS in
// _cholmodSparse) with CHOLMOD and stores the result in _cholmodFactor.
// Returns false if the matrix is not positive definite or the factor cannot
// be converted to the simplicial real-valued form.
// NOTE(review): the function signature line and several lines (e.g. the
// reference binding `A` to _updateMat around original line 406, and the
// column-buffer reallocation around lines 410-418) were lost by the Doxygen
// extraction; gaps in the fused numbers mark them.
401 if (_cholmodFactor) {
402 cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
403 _cholmodFactor = 0;
404 }
405
407 size_t m = A.rows();
408 size_t n = A.cols();
409
// Grow the column-pointer array if needed (condition line lost; the
// doubling strategy below avoids frequent reallocation).
411 // std::cerr << __PRETTY_FUNCTION__ << ": reallocating columns" <<
412 // std::endl;
415 ? n
416 : 2 * n; // pre-allocate more space if re-allocating
417 delete[] static_cast<int*>(_cholmodSparse->p);
419 }
420 size_t nzmax = A.nonZeros();
421 if (_cholmodSparse->nzmax < nzmax) {
422 // std::cerr << __PRETTY_FUNCTION__ << ": reallocating row + values" <<
423 // std::endl;
424 _cholmodSparse->nzmax =
425 _cholmodSparse->nzmax == 0
426 ? nzmax
427 : 2 * nzmax; // pre-allocate more space if re-allocating
428 delete[] static_cast<double*>(_cholmodSparse->x);
429 delete[] static_cast<int*>(_cholmodSparse->i);
430 _cholmodSparse->i = new int[_cholmodSparse->nzmax];
431 _cholmodSparse->x = new double[_cholmodSparse->nzmax];
432 }
433 _cholmodSparse->ncol = n;
434 _cholmodSparse->nrow = m;
435
// Fill the CHOLMOD matrix with the upper triangle of the update matrix.
436 A.fillCCS(static_cast<int*>(_cholmodSparse->p),
437 static_cast<int*>(_cholmodSparse->i),
438 static_cast<double*>(_cholmodSparse->x), true);
439 // writeCCSMatrix("updatesparse.txt", _cholmodSparse->nrow,
440 // _cholmodSparse->ncol, (int*)_cholmodSparse->p, (int*)_cholmodSparse->i,
441 // (double*)_cholmodSparse->x, true);
442
// Analyze + factorize; the common is configured for natural ordering,
// so the factor's permutation should be the identity (debug check below).
443 _cholmodFactor = cholmod_analyze(_cholmodSparse, &_cholmodCommon);
444 cholmod_factorize(_cholmodSparse, _cholmodFactor, &_cholmodCommon);
445
446#if 0
447 int* p = (int*)_cholmodFactor->Perm;
448 for (int i = 0; i < (int)n; ++i)
449 if (i != p[i])
450 cerr << "wrong permutation" << i << " -> " << p[i] << endl;
451#endif
452
453 if (_cholmodCommon.status == CHOLMOD_NOT_POSDEF) {
454 // std::cerr << "Cholesky failure, writing debug.txt (Hessian loadable by
455 // Octave)" << std::endl; writeCCSMatrix("debug.txt", _cholmodSparse->nrow,
456 // _cholmodSparse->ncol, (int*)_cholmodSparse->p, (int*)_cholmodSparse->i,
457 // (double*)_cholmodSparse->x, true);
458 return false;
459 }
460
461 // change to the specific format we need to have a pretty normal L
// NOTE(review): the trailing arguments of this call (original line 463,
// the factor and common pointers) were lost by the extraction.
462 int change_status = cholmod_change_factor(CHOLMOD_REAL, 1, 0, 1, 1,
464 if (!change_status) {
465 return false;
466 }
467
468 return true;
469}
470
// Factory mapping a solver name to a Gauss-Newton algorithm wrapping the
// matching CHOLMOD-online block solver. Supported names: "fix3_2_cholmod"
// (2D SLAM) and "fix6_3_cholmod" (3D SLAM); any other name yields an
// algorithm holding a null solver.
// NOTE(review): the declaration line of `gaussNewton` (original line 480)
// was lost by the Doxygen extraction; only its initializer remains below.
471static OptimizationAlgorithm* createSolver(const std::string& solverName) {
472 std::unique_ptr<g2o::Solver> s;
473
474 if (solverName == "fix3_2_cholmod") {
475 s = AllocateCholmodSolver<3, 2>();
476 } else if (solverName == "fix6_3_cholmod") {
477 s = AllocateCholmodSolver<6, 3>();
478 }
479
481 new OptimizationAlgorithmGaussNewton(std::move(s));
482 return gaussNewton;
483}
484
// Sets up the incremental solver for the given SLAM dimension (3 for 2D,
// otherwise the 6/3 configuration) and stores the batch interval on the
// solver interface. Returns false if solver allocation failed.
// NOTE(review): many hyperlinked lines were lost by the Doxygen extraction
// (declarations/casts for `gaussNewton` and `bs`, the assignments around
// lines 490-502 and 505-518); only fragments of those statements remain.
485bool SparseOptimizerIncremental::initSolver(int dimension, int batchEveryN) {
486 // cerr << __PRETTY_FUNCTION__ << endl;
487 slamDimension = dimension;
488 if (dimension == 3) {
489 setAlgorithm(createSolver("fix3_2_cholmod"));
492 assert(gaussNewton);
495 &gaussNewton->solver());
496 assert(bs && "Unable to get internal block solver");
500 bs->setSchur(false);
503 } else {
504 setAlgorithm(createSolver("fix6_3_cholmod"));
507 assert(gaussNewton);
510 &gaussNewton->solver());
511 assert(bs && "Unable to get internal block solver");
514 &bs->linearSolver());
516 bs->setSchur(false);
519 }
521 _solverInterface->batchEveryN = batchEveryN;
522 if (!solver()) {
523 cerr << "Error allocating solver. Allocating CHOLMOD solver failed!"
524 << endl;
525 return false;
526 }
527 return true;
528}
529
// Converts the triplet-form update (_permutedUpdate) into compressed-column
// form (_permutedUpdateAsSparse) using the classic two-pass counting
// algorithm (count per-column entries, prefix-sum into column pointers,
// then scatter the entries).
// NOTE(review): the function signature line was lost by the Doxygen
// extraction, as were several reallocation statements below (gaps in the
// fused line numbers, e.g. the nzmax assignment before line 539 and the
// column-pointer reallocation around lines 545-549).
531 // re-allocate the memory
532 if (_tripletWorkspace.size() < (int)_permutedUpdate->ncol) {
533 _tripletWorkspace.resize(_permutedUpdate->ncol * 2);
534 }
535
536 // reallocate num-zeros
537 if (_permutedUpdateAsSparse->nzmax < _permutedUpdate->nzmax) {
539 delete[] static_cast<int*>(_permutedUpdateAsSparse->i);
540 delete[] static_cast<double*>(_permutedUpdateAsSparse->x);
541 _permutedUpdateAsSparse->x = new double[_permutedUpdateAsSparse->nzmax];
543 }
544
547 delete[] static_cast<int*>(_permutedUpdateAsSparse->p);
550 }
551
554
// w counts entries per column, then doubles as the insertion cursor.
555 int* w = _tripletWorkspace.data();
556 memset(w, 0, sizeof(int) * _permutedUpdate->ncol);
557
558 int* Ti = static_cast<int*>(_permutedUpdate->i);
559 int* Tj = static_cast<int*>(_permutedUpdate->j);
560 double* Tx = static_cast<double*>(_permutedUpdate->x);
561
562 int* Cp = static_cast<int*>(_permutedUpdateAsSparse->p);
563 int* Ci = static_cast<int*>(_permutedUpdateAsSparse->i);
564 double* Cx = static_cast<double*>(_permutedUpdateAsSparse->x);
565
566 for (size_t k = 0; k < _permutedUpdate->nnz; ++k) /* column counts */
567 w[Tj[k]]++;
568
569 /* column pointers */
570 const int n = _permutedUpdate->ncol;
571 int nz = 0;
572 for (int i = 0; i < n; i++) {
573 Cp[i] = nz;
574 nz += w[i];
575 w[i] = Cp[i];
576 }
577 Cp[n] = nz;
578 assert((size_t)nz == _permutedUpdate->nnz);
579
// Scatter pass: place each triplet at its column's next free slot.
580 for (size_t k = 0; k < _permutedUpdate->nnz; ++k) {
581 int p = w[Tj[k]]++;
582 Ci[p] = Ti[k]; /* A(i,j) is the pth entry in C */
583 Cx[p] = Tx[k];
584 }
585}
586
587} // namespace g2o
void setEstimate(const EstimateType &et)
set the estimate for the vertex also calls updateCache()
Implementation of a solver operating on the blocks of the Hessian.
virtual void setSchur(bool s)
LinearSolver< PoseMatrixType > & linearSolver() const
const VertexContainer & vertices() const
std::set< Edge * > EdgeSet
std::set< Vertex * > VertexSet
const VertexIDMap & vertices() const
virtual cholmod_factor * L() const =0
virtual bool solve(double *x, double *b)=0
virtual int choleskyUpdate(cholmod_sparse *update)=0
linear solver which allows to update the cholesky factor
VertexSE2::EstimateType updatedEstimate
VertexSE3::EstimateType updatedEstimate
virtual void linearizeOplus(JacobianWorkspace &jacobianWorkspace)=0
virtual void computeError()=0
virtual void mapHessianMemory(double *d, int i, int j, bool rowMajor)=0
virtual void constructQuadraticForm()=0
A general case Vertex for optimization.
virtual void mapHessianMemory(double *d)=0
bool marginalized() const
true => this node is marginalized out during the optimization
void setHessianIndex(int ti)
set the temporary index of the vertex in the parameter blocks
int colInHessian() const
get the column of this vertex in the Hessian
virtual void clearQuadraticForm()=0
int dimension() const
dimension of the estimated state belonging to this node
bool fixed() const
true => this node is fixed during the optimization
virtual double * hessianData()=0
virtual int copyB(double *b_) const =0
Implementation of the Gauss Newton Algorithm.
Solver & solver()
return the underlying solver used to solve the linear system
Generic interface for a non-linear solver operating on a graph.
virtual bool updateStructure(const std::vector< HyperGraph::Vertex * > &vset, const HyperGraph::EdgeSet &edges)=0
virtual bool init(bool online=false)=0
double * b()
return b, the right hand side of the system
Definition solver.h:101
virtual bool buildStructure(bool zeroBlocks=false)=0
double * x()
return x, the solution vector
Definition solver.h:98
virtual bool solve()=0
virtual bool buildSystem()=0
void setAdditionalVectorSpace(size_t s)
Definition solver.cpp:82
Sparse matrix which uses blocks.
size_t nonZeros() const
number of non-zero elements
int fillCCS(int *Cp, int *Ci, double *Cx, bool upperTriangle=false) const
const std::vector< IntBlockMap > & blockCols() const
the block matrices per block-column
const std::vector< int > & colBlockIndices() const
indices of the column blocks
std::map< int, SparseMatrixBlock * > IntBlockMap
int rows() const
rows of the matrix
SparseMatrixBlock * block(int r, int c, bool alloc=false)
int cols() const
columns of the matrix
const std::vector< int > & rowBlockIndices() const
indices of the row blocks
void clear(bool dealloc=false)
SparseBlockMatrix< Eigen::MatrixXd > _updateMat
int optimize(int iterations, bool online=false)
LinearSolverCholmodOnlineInterface * _solverInterface
virtual bool initSolver(int dimension, int batchEveryN)
virtual bool updateInitialization(HyperGraph::VertexSet &vset, HyperGraph::EdgeSet &eset)
virtual bool updateInitialization(HyperGraph::VertexSet &vset, HyperGraph::EdgeSet &eset)
EdgeContainer _activeEdges
sorted according to EdgeIDCompare
void setAlgorithm(OptimizationAlgorithm *algorithm)
OptimizationAlgorithm * _algorithm
VertexContainer _activeVertices
sorted according to VertexIDCompare
bool verbose() const
verbose information during optimization
const VertexContainer & indexMapping() const
the index mapping of the vertices
OptimizationAlgorithm * solver()
#define __PRETTY_FUNCTION__
Definition macros.h:90
Definition jet.h:938
bool operator<(const HyperDijkstra::AdjacencyMapEntry &a, const HyperDijkstra::AdjacencyMapEntry &b)
static OptimizationAlgorithm * createSolver(const std::string &solverName)
Definition jet.h:876
JacobianWorkspace & jacobianWorkspace()
the workspace for storing the Jacobians of the graph
Our extension of the CHOLMOD matrix struct.
Definition cholmod_ext.h:38