optimizer.h
// -*- lsst-c++ -*-
/*
 * LSST Data Management System
 * Copyright 2008-2013 LSST Corporation.
 *
 * This product includes software developed by the
 * LSST Project (http://www.lsst.org/).
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the LSST License Statement and
 * the GNU General Public License along with this program. If not,
 * see <http://www.lsstcorp.org/LegalNotices/>.
 */

#ifndef LSST_MEAS_MODELFIT_optimizer_h_INCLUDED
#define LSST_MEAS_MODELFIT_optimizer_h_INCLUDED

#include "ndarray.h"

#include "lsst/base.h"
#include "lsst/pex/config.h"
#include "lsst/afw/table/Schema.h"
#include "lsst/meas/modelfit/common.h"  // Scalar, Vector, Matrix typedefs

namespace lsst { namespace meas { namespace modelfit {

class Likelihood;
class Prior;
class Optimizer;

/// Base class for objective functions for Optimizer.
class OptimizerObjective {
public:

    int const dataSize;
    int const parameterSize;

    /// Return a concrete Objective object built from a Likelihood and Prior.
    static std::shared_ptr<OptimizerObjective> makeFromLikelihood(
        std::shared_ptr<Likelihood> likelihood,
        std::shared_ptr<Prior> prior = std::shared_ptr<Prior>()
    );

    /// Base class constructor; must be called by all subclasses.
    OptimizerObjective(int dataSize_, int parameterSize_) :
        dataSize(dataSize_), parameterSize(parameterSize_)
    {}

    /// Evaluate the Objective on a 1-d grid of parameter vectors.
    void fillObjectiveValueGrid(
        ndarray::Array<Scalar const,2,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & output
    ) const;

    /// Evaluate the residuals of the model for a given parameter vector.
    virtual void computeResiduals(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & residuals
    ) const = 0;

    /// Evaluate analytic derivatives of the model, or signal (by returning false)
    /// that they are not available.
    virtual bool differentiateResiduals(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,2,-2> const & derivatives
    ) const {
        return false;
    }

    /// Return true if the Objective has a Bayesian prior as well as a likelihood.
    virtual bool hasPrior() const { return false; }

    /// Compute the value of the Bayesian prior for the given parameter vector.
    virtual Scalar computePrior(ndarray::Array<Scalar const,1,1> const & parameters) const { return 1.0; }

    /// Compute the first and second derivatives of the Bayesian prior with respect to the parameters.
    virtual void differentiatePrior(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,1> const & hessian
    ) const {
        gradient.deep() = 0.0;
        hessian.deep() = 0.0;
    }

    virtual ~OptimizerObjective() {}
};
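
/*
 *  A minimal sketch of a concrete OptimizerObjective subclass, shown only to
 *  illustrate the interface above.  The quadratic model and the names used here
 *  (QuadraticObjective, _x, _y) are hypothetical and not part of this package;
 *  only computeResiduals() is overridden, so analytic derivatives and the prior
 *  hooks keep their default (numeric / flat-prior) behavior.
 *
 *  @code
 *  class QuadraticObjective : public OptimizerObjective {
 *  public:
 *      // Fit y = a*x^2 + b*x + c to (x, y) data: 3 parameters, one residual per datum.
 *      QuadraticObjective(ndarray::Array<Scalar,1,1> const & x,
 *                         ndarray::Array<Scalar,1,1> const & y) :
 *          OptimizerObjective(y.getSize<0>(), 3), _x(x), _y(y)
 *      {}
 *
 *      void computeResiduals(
 *          ndarray::Array<Scalar const,1,1> const & parameters,
 *          ndarray::Array<Scalar,1,1> const & residuals
 *      ) const override {
 *          for (int i = 0; i < dataSize; ++i) {
 *              Scalar model = parameters[0]*_x[i]*_x[i] + parameters[1]*_x[i] + parameters[2];
 *              residuals[i] = _y[i] - model;
 *          }
 *      }
 *
 *  private:
 *      ndarray::Array<Scalar,1,1> _x;
 *      ndarray::Array<Scalar,1,1> _y;
 *  };
 *  @endcode
 */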

/// Configuration object for Optimizer.
class OptimizerControl {
public:

    LSST_CONTROL_FIELD(
        noSR1Term, bool,
        "If true, ignore the SR1 update term in the Hessian, resulting in a Levenberg-Marquardt-like method"
    );

    LSST_CONTROL_FIELD(
        skipSR1UpdateThreshold, double,
        "Skip the SR1 update if |v.s| / (|v||s|) is less than this threshold"
    );

    LSST_CONTROL_FIELD(
        minTrustRadiusThreshold, double,
        "If the trust radius falls below this threshold, consider the algorithm converged"
    );

    LSST_CONTROL_FIELD(
        gradientThreshold, double,
        "If the maximum of the gradient falls below this threshold, consider the algorithm converged"
    );

    LSST_CONTROL_FIELD(
        numDiffRelStep, double,
        "relative step size used for numerical derivatives (added to other steps)"
    );

    LSST_CONTROL_FIELD(
        numDiffAbsStep, double,
        "absolute step size used for numerical derivatives (added to other steps)"
    );

    LSST_CONTROL_FIELD(
        numDiffTrustRadiusStep, double,
        "step size (in units of trust radius) used for numerical derivatives (added to relative step)"
    );

    LSST_CONTROL_FIELD(
        stepAcceptThreshold, double,
        "steps with reduction ratio greater than this are accepted"
    );

    LSST_CONTROL_FIELD(
        trustRegionInitialSize, double,
        "the initial trust region will be set to this value"
    );

    LSST_CONTROL_FIELD(
        trustRegionGrowReductionRatio, double,
        "steps with reduction ratio greater than this may increase the trust radius"
    );

    LSST_CONTROL_FIELD(
        trustRegionGrowStepFraction, double,
        "steps with length this fraction of the trust radius may increase the trust radius"
    );

    LSST_CONTROL_FIELD(
        trustRegionGrowFactor, double,
        "when increasing the trust region size, multiply the radius by this factor"
    );

    LSST_CONTROL_FIELD(
        trustRegionShrinkReductionRatio, double,
        "steps with reduction ratio less than this will decrease the trust radius"
    );

    LSST_CONTROL_FIELD(
        trustRegionShrinkFactor, double,
        "when reducing the trust region size, multiply the radius by this factor"
    );

    LSST_CONTROL_FIELD(
        trustRegionSolverTolerance, double,
        "value passed as the tolerance to solveTrustRegion"
    );

    LSST_CONTROL_FIELD(
        maxInnerIterations, int,
        "maximum number of iterations (i.e. function evaluations and trust region subproblems) per step"
    );

    LSST_CONTROL_FIELD(
        maxOuterIterations, int,
        "maximum number of steps"
    );

    LSST_CONTROL_FIELD(
        doSaveIterations, bool,
        "whether to save all iterations for debugging purposes"
    );

    OptimizerControl() :
        noSR1Term(false), skipSR1UpdateThreshold(1E-8),
        // (initializers for several intervening fields are elided in this listing)
        gradientThreshold(1E-5),
        stepAcceptThreshold(0.0),
        trustRegionShrinkFactor(1.0/3.0),
        maxInnerIterations(20),
        maxOuterIterations(500),
        doSaveIterations(false)
    {}
};
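
/*
 *  A brief sketch of adjusting the control object before constructing an Optimizer.
 *  The particular values shown are illustrative, not recommended defaults.
 *
 *  @code
 *  OptimizerControl ctrl;                 // start from the built-in defaults
 *  ctrl.maxOuterIterations = 1000;        // allow more steps for a difficult fit
 *  ctrl.gradientThreshold = 1E-6;         // tighten the gradient convergence test
 *  ctrl.doSaveIterations = true;          // keep per-iteration state for debugging
 *  @endcode
 */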

class OptimizerHistoryRecorder {
public:

    OptimizerHistoryRecorder(
        afw::table::Schema & schema,
        std::shared_ptr<Model> model,
        bool doRecordDerivatives
    );

    OptimizerHistoryRecorder(afw::table::Schema const & schema);

    void apply(
        int outerIterCount,
        int innerIterCount,
        afw::table::BaseCatalog & history,
        Optimizer const & optimizer
    ) const;

    void unpackDerivatives(
        ndarray::Array<Scalar const,1,1> const & nested,
        Vector & gradient,
        Matrix & hessian
    ) const;

    void unpackDerivatives(
        afw::table::BaseRecord const & record,
        Vector & gradient,
        Matrix & hessian
    ) const;

    void unpackDerivatives(
        ndarray::Array<Scalar const,1,1> const & nested,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,2> const & hessian
    ) const;

    void unpackDerivatives(
        afw::table::BaseRecord const & record,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,2> const & hessian
    ) const;

    void fillObjectiveModelGrid(
        afw::table::BaseRecord const & record,
        ndarray::Array<Scalar const,2,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & output
    ) const;

    // (key member declarations are elided in this listing)
};

/// A numerical optimizer customized for least-squares problems with Bayesian priors.
class Optimizer {
public:

    typedef OptimizerObjective Objective;
    typedef OptimizerControl Control;
    typedef OptimizerHistoryRecorder HistoryRecorder;

    enum StateFlags {
        // (additional enumerators are elided in this listing)
        FAILED_NAN = 0x0080,
    };

    Optimizer(
        std::shared_ptr<Objective const> objective,
        ndarray::Array<Scalar const,1,1> const & parameters,
        Control const & ctrl
    );

    std::shared_ptr<Objective const> getObjective() const { return _objective; }

    Control const & getControl() const { return _ctrl; }

    bool step() { return _stepImpl(0); }

    bool step(HistoryRecorder const & recorder, afw::table::BaseCatalog & history) {
        return _stepImpl(0, &recorder, &history);
    }

    int run() { return _runImpl(); }

    int run(HistoryRecorder const & recorder, afw::table::BaseCatalog & history) {
        return _runImpl(&recorder, &history);
    }

    int getState() const { return _state; }

    Scalar getObjectiveValue() const { return _current.objectiveValue; }

    ndarray::Array<Scalar const,1,1> getParameters() const { return _current.parameters; }

    ndarray::Array<Scalar const,1,1> getResiduals() const { return _current.residuals; }

    ndarray::Array<Scalar const,1,1> getGradient() const { return _gradient; }

    ndarray::Array<Scalar const,2,2> getHessian() const { return _hessian; }

    /// Remove the symmetric-rank-1 secant term from the Hessian, making it just (J^T J).
    void removeSR1Term();

private:

    struct IterationData {
        Scalar objectiveValue;
        Scalar priorValue;
        ndarray::Array<Scalar,1,1> parameters;
        ndarray::Array<Scalar,1,1> residuals;

        IterationData(int dataSize, int parameterSize);

        void swap(IterationData & other);
    };

    bool _stepImpl(
        int outerIterCount,
        HistoryRecorder const * recorder=NULL,
        afw::table::BaseCatalog * history=NULL
    );

    int _runImpl(HistoryRecorder const * recorder=NULL, afw::table::BaseCatalog * history=NULL);

    void _computeDerivatives();

    int _state;
    std::shared_ptr<Objective const> _objective;
    Control _ctrl;
    double _trustRadius;
    IterationData _current;
    IterationData _next;
    ndarray::Array<Scalar,1,1> _step;
    ndarray::Array<Scalar,1,1> _gradient;
    ndarray::Array<Scalar,2,2> _hessian;
    ndarray::Array<Scalar,2,-2> _residualDerivative;
    Matrix _sr1b;
    Vector _sr1v;
    Vector _sr1jtr;
};
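
/*
 *  A minimal sketch of driving the optimizer, assuming an Objective instance
 *  (for example one built via OptimizerObjective::makeFromLikelihood) and an
 *  initial parameter vector are already in hand.  The function name fitModel
 *  and its error handling are illustrative only.
 *
 *  @code
 *  int fitModel(std::shared_ptr<OptimizerObjective const> objective,
 *               ndarray::Array<Scalar const,1,1> const & initialParameters) {
 *      OptimizerControl ctrl;                           // default configuration
 *      Optimizer optimizer(objective, initialParameters, ctrl);
 *      int nSteps = optimizer.run();                    // iterate until convergence or failure
 *      if (optimizer.getState() & Optimizer::FAILED_NAN) {
 *          throw std::runtime_error("fit produced NaNs");
 *      }
 *      ndarray::Array<Scalar const,1,1> best = optimizer.getParameters();  // fitted parameters
 *      return nSteps;
 *  }
 *  @endcode
 */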

/// Solve a symmetric quadratic matrix equation with a ball constraint.
void solveTrustRegion(
    ndarray::Array<Scalar,1,1> const & x,
    ndarray::Array<Scalar const,2,1> const & F, ndarray::Array<Scalar const,1,1> const & g,
    double r, double tolerance
);
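
/*
 *  For reference, the "symmetric quadratic matrix equation with a ball constraint"
 *  above is the standard trust-region subproblem; this reading of the arguments is
 *  an interpretation of the declaration, not taken from the original documentation:
 *
 *  \f[
 *      \min_{x} \; \tfrac{1}{2} x^T F x + g^T x
 *      \quad \text{subject to} \quad \|x\| \le r ,
 *  \f]
 *
 *  where @c F is the symmetric Hessian approximation, @c g the gradient, @c r the
 *  trust radius, and @c tolerance controls the accuracy of the constrained solve;
 *  the solution is written into @c x.
 */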

}}} // namespace lsst::meas::modelfit

#endif // !LSST_MEAS_MODELFIT_optimizer_h_INCLUDED