#ifndef LSST_MEAS_MODELFIT_optimizer_h_INCLUDED
#define LSST_MEAS_MODELFIT_optimizer_h_INCLUDED

// (include directives elided in this excerpt)
//
// Typedefs used below (defined elsewhere in meas_modelfit):
//   Scalar -- double; used for probability and parameter values
//   Vector -- Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
//   Matrix -- Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
//
// Types referenced below and defined elsewhere in meas_modelfit:
//   Likelihood -- base class for optimizer/sampler likelihood functions that compute likelihood at a point
//   Prior      -- base class for Bayesian priors
//   Model      -- abstract base class and concrete factories that define multi-shapelet galaxy models

namespace lsst { namespace meas { namespace modelfit {
/**
 *  @brief Base class for objective functions for Optimizer.
 */
class OptimizerObjective {
public:

    /// Return a concrete Objective object built from a Likelihood and Prior.
    static boost::shared_ptr<OptimizerObjective> makeFromLikelihood(
        boost::shared_ptr<Likelihood> likelihood,
        boost::shared_ptr<Prior> prior = boost::shared_ptr<Prior>()
    );

    /// Base class constructor; must be called by all subclasses.
    OptimizerObjective(int dataSize_, int parameterSize_);

    /// Evaluate the Objective on a 1-d grid.
    void fillObjectiveValueGrid(
        ndarray::Array<Scalar const,2,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & output
    ) const;

    /// Evaluate the residuals of the model for a given parameter vector.
    virtual void computeResiduals(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & residuals
    ) const = 0;

    /// Evaluate analytic derivatives of the model, or signal that they are not available.
    virtual bool differentiateResiduals(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,2,-2> const & derivatives
    ) const;

    /// Return true if the Objective has a Bayesian prior as well as a likelihood.
    virtual bool hasPrior() const;

    /// Compute the value of the Bayesian prior for the given parameter vector.
    virtual Scalar computePrior(ndarray::Array<Scalar const,1,1> const & parameters) const { return 1.0; }

    /// Compute the first and second derivatives of the Bayesian prior with respect to the parameters.
    virtual void differentiatePrior(
        ndarray::Array<Scalar const,1,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,1> const & hessian
    ) const {
        gradient.deep() = 0.0;
        hessian.deep() = 0.0;
    }

    virtual ~OptimizerObjective() {}
};
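/*
 *  Usage sketch (not part of the original header): a minimal concrete Objective
 *  for a straight-line model y = a + b*x.  The class name and data members below
 *  are hypothetical; only computeResiduals is overridden, so the optimizer would
 *  fall back to numerical derivatives and a flat prior.
 *
 *  @code
 *  class LineObjective : public OptimizerObjective {
 *  public:
 *      LineObjective(
 *          ndarray::Array<Scalar const,1,1> const & x,
 *          ndarray::Array<Scalar const,1,1> const & y
 *      ) : OptimizerObjective(x.getSize<0>(), 2), _x(x), _y(y) {}
 *
 *      // residuals[i] = (a + b*x[i]) - y[i], with parameters = (a, b)
 *      virtual void computeResiduals(
 *          ndarray::Array<Scalar const,1,1> const & parameters,
 *          ndarray::Array<Scalar,1,1> const & residuals
 *      ) const {
 *          int const n = _x.getSize<0>();
 *          for (int i = 0; i < n; ++i) {
 *              residuals[i] = parameters[0] + parameters[1]*_x[i] - _y[i];
 *          }
 *      }
 *
 *  private:
 *      ndarray::Array<Scalar const,1,1> _x;
 *      ndarray::Array<Scalar const,1,1> _y;
 *  };
 *  @endcode
 */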
191 "If true, ignore the SR1 update term in the Hessian, resulting in a Levenberg-Marquardt-like method"
196 "Skip the SR1 update if |v||s| / (|v||s|) is less than this threshold"
201 "If the trust radius falls below this threshold, consider the algorithm converged"
206 "If the maximum of the gradient falls below this threshold, consider the algorithm converged"
211 "relative step size used for numerical derivatives (added to other steps)"
216 "absolute step size used for numerical derivatives (added to other steps)"
221 "step size (in units of trust radius) used for numerical derivatives (added to relative step)"
226 "steps with reduction ratio greater than this are accepted"
231 "the initial trust region will be set to this value"
236 "steps with reduction radio greater than this may increase the trust radius"
241 "steps with length this fraction of the trust radius may increase the trust radius"
246 "when increase the trust region size, multiply the radius by this factor"
251 "steps with reduction radio less than this will decrease the trust radius"
256 "when reducing the trust region size, multiply the radius by this factor"
261 "value passed as the tolerance to solveTrustRegion"
266 "maximum number of iterations (i.e. function evaluations and trust region subproblems) per step"
271 "maximum number of steps"
276 "whether to save all iterations for debugging purposes"
// Helper that defines the history table schema and records the optimizer's state
// into an afw::table::BaseCatalog at each iteration.
class OptimizerHistoryRecorder {
public:

    OptimizerHistoryRecorder(
        afw::table::Schema & schema,
        boost::shared_ptr<Model> model,
        bool doRecordDerivatives
    );

    OptimizerHistoryRecorder(afw::table::Schema const & schema);

    void apply(
        int outerIterCount, int innerIterCount,
        afw::table::BaseCatalog & history, Optimizer const & optimizer
    ) const;

    void unpackDerivatives(
        ndarray::Array<Scalar const,1,1> const & nested,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,2> const & hessian
    ) const;

    void unpackDerivatives(
        afw::table::BaseRecord const & record,
        ndarray::Array<Scalar,1,1> const & gradient,
        ndarray::Array<Scalar,2,2> const & hessian
    ) const;

    void unpackDerivatives(
        ndarray::Array<Scalar const,1,1> const & nested,
        Vector & gradient, Matrix & hessian
    ) const;

    void unpackDerivatives(
        afw::table::BaseRecord const & record,
        Vector & gradient, Matrix & hessian
    ) const;

    void fillObjectiveModelGrid(
        afw::table::BaseRecord const & record,
        ndarray::Array<Scalar const,2,1> const & parameters,
        ndarray::Array<Scalar,1,1> const & output
    ) const;

    // Keys for the iteration counters and optimizer state recorded in each history record.
    afw::table::Key<int> outer;
    afw::table::Key<int> inner;
    afw::table::Key<int> state;
};
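/*
 *  Usage sketch (not part of the original header): recording the optimizer's
 *  per-iteration history.  Assumes an Objective `objective`, an initial parameter
 *  array `initialParameters`, a control object `ctrl`, and a
 *  boost::shared_ptr<Model> `model` are already available; these names are
 *  hypothetical.
 *
 *  @code
 *  afw::table::Schema schema;
 *  OptimizerHistoryRecorder recorder(schema, model, true);  // true: also record derivatives
 *  afw::table::BaseCatalog history(schema);
 *  Optimizer optimizer(objective, initialParameters, ctrl);
 *  optimizer.run(recorder, history);  // appends one record per recorded iteration
 *  @endcode
 */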
/**
 *  @brief A numerical optimizer customized for least-squares problems with Bayesian priors.
 */
class Optimizer {
public:

    typedef OptimizerObjective Objective;
    typedef OptimizerControl Control;
    typedef OptimizerHistoryRecorder HistoryRecorder;

    // Failure flags reported by the optimizer include FAILED_MAX_INNER_ITERATIONS
    // and FAILED_MAX_OUTER_ITERATIONS (other state flags are elided in this excerpt).

    Optimizer(
        boost::shared_ptr<Objective const> objective,
        ndarray::Array<Scalar const,1,1> const & parameters,
        Control const & ctrl
    );

    boost::shared_ptr<Objective const> getObjective() const;

    Control const & getControl() const;

    /// Take a single step, without or with history recording.
    bool step() { return _stepImpl(0); }
    bool step(HistoryRecorder const & recorder, afw::table::BaseCatalog & history) {
        return _stepImpl(0, &recorder, &history);
    }

    /// Run until convergence or failure, without or with history recording.
    int run() { return _runImpl(); }
    int run(HistoryRecorder const & recorder, afw::table::BaseCatalog & history) {
        return _runImpl(&recorder, &history);
    }

    Scalar getObjectiveValue() const;

    ndarray::Array<Scalar const,1,1> getParameters() const { return _current.parameters; }
    ndarray::Array<Scalar const,1,1> getResiduals() const { return _current.residuals; }
    ndarray::Array<Scalar const,1,1> getGradient() const { return _gradient; }
    ndarray::Array<Scalar const,2,2> getHessian() const { return _hessian; }

    /// Remove the symmetric-rank-1 secant term from the Hessian, making it just (J^T J).
    void removeSR1Term();

private:

    struct IterationData {
        ndarray::Array<Scalar,1,1> parameters;
        ndarray::Array<Scalar,1,1> residuals;

        IterationData(int dataSize, int parameterSize);
    };

    void _computeDerivatives();

    // (declarations of _stepImpl, _runImpl, and other private members are elided in this excerpt)

    IterationData _current;
    ndarray::Array<Scalar,1,1> _step;
    ndarray::Array<Scalar,1,1> _gradient;
    ndarray::Array<Scalar,2,2> _hessian;
    ndarray::Array<Scalar,2,-2> _residualDerivative;
};
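/*
 *  Usage sketch (not part of the original header): a fit driven entirely by run().
 *  Assumes a boost::shared_ptr<Likelihood> `likelihood` and a boost::shared_ptr<Prior>
 *  `prior` already exist; `nParameters` and the starting point are hypothetical
 *  placeholders.
 *
 *  @code
 *  boost::shared_ptr<OptimizerObjective> objective =
 *      OptimizerObjective::makeFromLikelihood(likelihood, prior);
 *  OptimizerControl ctrl;
 *  ndarray::Array<Scalar,1,1> initial = ndarray::allocate(nParameters);
 *  initial.deep() = 0.0;  // problem-specific starting point
 *  Optimizer optimizer(objective, initial, ctrl);
 *  int status = optimizer.run();  // interpretation of the return value is not shown in this excerpt
 *  Scalar bestObjective = optimizer.getObjectiveValue();
 *  ndarray::Array<Scalar const,1,1> bestParameters = optimizer.getParameters();
 *  @endcode
 */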
/**
 *  @brief Solve a symmetric quadratic matrix equation with a ball constraint.
 */
void solveTrustRegion(
    ndarray::Array<Scalar,1,1> const & x,
    ndarray::Array<Scalar const,2,1> const & F,
    ndarray::Array<Scalar const,1,1> const & g,
    double r,
    double tolerance
);
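/*
 *  Usage sketch (not part of the original header): calling solveTrustRegion on a
 *  small 2x2 problem.  This assumes the conventional trust-region subproblem,
 *  i.e. minimize 0.5*x^T F x + g^T x subject to |x| <= r, with the solution
 *  written into x.
 *
 *  @code
 *  ndarray::Array<Scalar,1,1> x = ndarray::allocate(2);
 *  ndarray::Array<Scalar,2,2> F = ndarray::allocate(2, 2);
 *  ndarray::Array<Scalar,1,1> g = ndarray::allocate(2);
 *  F.deep() = 0.0;
 *  F[0][0] = 4.0;  F[1][1] = 1.0;  // symmetric (here diagonal) quadratic term
 *  g[0] = -1.0;    g[1] = -2.0;    // linear term
 *  solveTrustRegion(x, F, g, 0.5, 1e-8);  // radius r = 0.5, tolerance 1e-8
 *  @endcode
 */

}}} // namespace lsst::meas::modelfit

#endif // !LSST_MEAS_MODELFIT_optimizer_h_INCLUDED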