ScaledPolynomialTransformFitter.cc
// -*- LSST-C++ -*-

/*
 * LSST Data Management System
 * Copyright 2016 LSST/AURA
 *
 * This product includes software developed by the
 * LSST Project (http://www.lsst.org/).
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the LSST License Statement and
 * the GNU General Public License along with this program. If not,
 * see <http://www.lsstcorp.org/LegalNotices/>.
 */

#include <map>

#include "Eigen/LU"  // for determinant() and inverse(); even a 2x2 matrix needs this header, although Eigen doesn't use an actual LU factorization at that size

#include "boost/math/tools/minima.hpp"

#include "lsst/geom/Box.h"
#include "lsst/geom/AffineTransform.h"
#include "lsst/afw/math/LeastSquares.h"
#include "lsst/afw/table/aggregates.h"
#include "lsst/afw/table/Match.h"
#include "lsst/meas/astrom/ScaledPolynomialTransformFitter.h"
#include "lsst/meas/astrom/detail/polynomialUtils.h"

// While developing this code, this macro was used to enable an in-line check of a particularly
// complex calculation that was difficult to factor out into something unit-testable (the larger
// context that code is in *is* unit tested, which should guard against regressions). The test
// code is still in place, guarded by a check against this preprocessor macro; set it to 1 to
// re-enable that test during development, but be sure to set it back to 0 before committing.
#define LSST_ScaledPolynomialTransformFitter_TEST_IN_PLACE 0

namespace lsst {
namespace meas {
namespace astrom {

// A singleton struct that manages the schema and keys for polynomial fitting catalogs.
class ScaledPolynomialTransformFitter::Keys {
public:
    afw::table::Schema schema;
    afw::table::Key<afw::table::RecordId> refId;
    afw::table::Key<afw::table::RecordId> srcId;
    afw::table::Point2DKey output;
    afw::table::Point2DKey input;
    afw::table::Point2DKey initial;
    afw::table::Point2DKey model;
    afw::table::CovarianceMatrixKey<float, 2> outputErr;
    // We use uint16 instead of Flag since this is the only boolean field here, we may want NumPy
    // views, and afw::table doesn't support [u]int8 fields.
    afw::table::Key<std::uint16_t> rejected;

    Keys(Keys const &) = delete;
    Keys(Keys &&) = delete;
    Keys &operator=(Keys const &) = delete;
    Keys &operator=(Keys &&) = delete;

    static Keys const &forMatches() {
        static Keys const it(0);
        return it;
    }

    static Keys const &forGrid() {
        static Keys const it;
        return it;
    }

private:
    Keys(int)
            : schema(),
              refId(schema.addField<afw::table::RecordId>("ref_id", "ID of reference object in this match.")),
              srcId(schema.addField<afw::table::RecordId>("src_id", "ID of source object in this match.")),
              output(afw::table::Point2DKey::addFields(schema, "src",
                                                       "source positions in pixel coordinates.", "pix")),
              input(afw::table::Point2DKey::addFields(schema, "intermediate",
                                                      "reference positions in intermediate world coordinates",
                                                      "deg")),
              initial(afw::table::Point2DKey::addFields(
                      schema, "initial", "reference positions transformed by initial WCS", "pix")),
              model(afw::table::Point2DKey::addFields(
                      schema, "model", "result of applying transform to reference positions", "pix")),
              outputErr(
                      afw::table::CovarianceMatrixKey<float, 2>::addFields(schema, "src", {"x", "y"}, "pix")),
              rejected(schema.addField<std::uint16_t>(
                      "rejected", "True if the match should be rejected from the fit.")) {}

    Keys()
            : schema(),
              output(afw::table::Point2DKey::addFields(
                      schema, "output", "grid output positions in intermediate world coordinates", "deg")),
              input(afw::table::Point2DKey::addFields(schema, "input",
                                                      "grid input positions in pixel coordinates.", "pix")),
              model(afw::table::Point2DKey::addFields(
                      schema, "model", "result of applying transform to input positions", "deg")) {}
};

namespace {

// Return the AffineTransform that maps the given (x, y) coordinates to lie within (-1, 1)x(-1, 1).
geom::AffineTransform computeScaling(afw::table::BaseCatalog const &data,
                                     afw::table::Point2DKey const &key) {
    geom::Box2D bbox;
    for (auto const &record : data) {
        bbox.include(geom::Point2D(record.get(key)));
    }
    return geom::AffineTransform(
                   geom::LinearTransform::makeScaling(0.5 * bbox.getWidth(), 0.5 * bbox.getHeight()))
                   .inverted() *
           geom::AffineTransform(geom::Point2D(0, 0) - bbox.getCenter());
}

}  // namespace

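// Illustrative sketch of the intended call pattern (hedged: the actual driver lives outside this
// file, and the variables used below -- maxOrder, matches, initialWcs, nIter, and the
// OutlierRejectionControl 'ctrl' -- are assumed rather than defined here):
//
//     auto fitter = ScaledPolynomialTransformFitter::fromMatches(maxOrder, matches, initialWcs, 0.0);
//     for (int i = 0; i < nIter; ++i) {
//         fitter.fit();                     // linear least-squares solve for the coefficients
//         fitter.updateModel();             // refresh the 'model' column from the new transform
//         fitter.updateIntrinsicScatter();  // re-estimate the intrinsic scatter term
//         fitter.rejectOutliers(ctrl);      // sigma-clip using the control object
//     }
//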
ScaledPolynomialTransformFitter ScaledPolynomialTransformFitter::fromMatches(
        int maxOrder, afw::table::ReferenceMatchVector const &matches, afw::geom::SkyWcs const &initialWcs,
        double intrinsicScatter) {
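    // Each match becomes one catalog row: the reference position mapped into intermediate world
    // coordinates is the fit input, the measured source centroid (in pixels) is the fit output,
    // and the centroid covariance is padded with the caller-supplied intrinsic scatter.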
    Keys const &keys = Keys::forMatches();
    afw::table::BaseCatalog catalog(keys.schema);
    catalog.reserve(matches.size());
    float var2 = intrinsicScatter * intrinsicScatter;
    auto initialIwcToSky = getIntermediateWorldCoordsToSky(initialWcs);
    for (auto const &match : matches) {
        auto record = catalog.addNew();
        record->set(keys.refId, match.first->getId());
        record->set(keys.srcId, match.second->getId());
        record->set(keys.input, initialIwcToSky->applyInverse(match.first->getCoord()));
        record->set(keys.initial, initialWcs.skyToPixel(match.first->getCoord()));
        record->set(keys.output, match.second->getCentroid());
        record->set(keys.outputErr, match.second->getCentroidErr() + var2 * Eigen::Matrix2f::Identity());
        record->set(keys.rejected, false);
    }
    return ScaledPolynomialTransformFitter(catalog, keys, maxOrder, intrinsicScatter,
                                           computeScaling(catalog, keys.input),
                                           computeScaling(catalog, keys.output));
}

ScaledPolynomialTransformFitter ScaledPolynomialTransformFitter::fromGrid(
        int maxOrder, geom::Box2D const &bbox, int nGridX, int nGridY,
        ScaledPolynomialTransform const &toInvert) {
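    // To fit an inverse of 'toInvert', evaluate it on a regular grid over 'bbox' and swap the
    // roles of the two sides: each forward-transformed point becomes a fit input and the original
    // grid point becomes the corresponding fit output.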
    Keys const &keys = Keys::forGrid();
    afw::table::BaseCatalog catalog(keys.schema);
    catalog.reserve(nGridX * nGridY);
    geom::Extent2D dx(bbox.getWidth() / nGridX, 0.0);
    geom::Extent2D dy(0.0, bbox.getHeight() / nGridY);
    for (int iy = 0; iy < nGridY; ++iy) {
        for (int ix = 0; ix < nGridX; ++ix) {
            geom::Point2D point = bbox.getMin() + dx * ix + dy * iy;
            auto record = catalog.addNew();
            record->set(keys.output, point);
            record->set(keys.input, toInvert(point));
        }
    }
    return ScaledPolynomialTransformFitter(catalog, keys, maxOrder, 0.0, computeScaling(catalog, keys.input),
                                           computeScaling(catalog, keys.output));
}

ScaledPolynomialTransformFitter::ScaledPolynomialTransformFitter(afw::table::BaseCatalog const &data,
                                                                 Keys const &keys, int maxOrder,
                                                                 double intrinsicScatter,
                                                                 geom::AffineTransform const &inputScaling,
                                                                 geom::AffineTransform const &outputScaling)
        : _keys(keys),
          _intrinsicScatter(intrinsicScatter),
          _data(data),
          _outputScaling(outputScaling),
          _transform(PolynomialTransform(maxOrder), inputScaling, outputScaling.inverted()),
          _vandermonde(data.size(), detail::computePackedSize(maxOrder)) {
    // Create a matrix that evaluates the max-order polynomials of all the (scaled) input positions;
    // we'll extract subsets of this later when fitting to a subset of the matches and a lower order.
    for (std::size_t i = 0; i < data.size(); ++i) {
        geom::Point2D input = getInputScaling()(_data[i].get(_keys.input));
        // x[k] == pow(x, k), y[k] == pow(y, k)
        detail::computePowers(_transform._poly._u, input.getX());
        detail::computePowers(_transform._poly._v, input.getY());
        // We pack coefficients in the following order:
        // (0,0), (0,1), (1,0), (0,2), (1,1), (2,0)
        // Note that this lets us use just the first (N+1)(N+2)/2 columns to
        // evaluate an Nth-order polynomial, even if N < maxOrder.
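        // For example, with maxOrder == 2 the columns are
        //   j:      0      1      2      3      4      5
        //   (p,q):  (0,0)  (0,1)  (1,0)  (0,2)  (1,1)  (2,0)
        //   term:   1      v      u      v^2    u*v    u^2
        // so a 1st-order fit needs only columns 0-2.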
        for (int n = 0, j = 0; n <= maxOrder; ++n) {
            for (int p = 0, q = n; p <= n; ++p, --q, ++j) {
                _vandermonde(i, j) = _transform._poly._u[p] * _transform._poly._v[q];
            }
        }
    }
}

void ScaledPolynomialTransformFitter::fit(int order) {
    int maxOrder = _transform.getPoly().getOrder();
    if (order < 0) {
        order = maxOrder;
    }
    if (order > maxOrder) {
        throw LSST_EXCEPT(
                pex::exceptions::LengthError,
                (boost::format("Order (%d) exceeded maximum order for the fitter (%d)") % order % maxOrder)
                        .str());
    }

    int const packedSize = detail::computePackedSize(order);
    std::size_t nGood = 0;
    if (_keys.rejected.isValid()) {
        for (auto const &record : _data) {
            if (!record.get(_keys.rejected)) {
                ++nGood;
            }
        }
    } else {
        nGood = _data.size();
    }
    // One block of the block-diagonal (2x2) unweighted design matrix M;
    // m[i,j] = u_i^{p(j)} v_i^{q(j)}. The two nonzero blocks are the same,
    // because we're using the same polynomial basis for x and y.
    Eigen::MatrixXd m = Eigen::MatrixXd::Zero(nGood, packedSize);
    // vx, vy: (2x1) blocks of the unweighted data vector v
    Eigen::VectorXd vx = Eigen::VectorXd::Zero(nGood);
    Eigen::VectorXd vy = Eigen::VectorXd::Zero(nGood);
    // sxx, syy, sxy: (2x2) blocks of the covariance matrix S, each of which is
    // individually diagonal.
    Eigen::ArrayXd sxx(nGood);
    Eigen::ArrayXd syy(nGood);
    Eigen::ArrayXd sxy(nGood);
    Eigen::Matrix2d outS = _outputScaling.getLinear().getMatrix();
    for (std::size_t i1 = 0, i2 = 0; i1 < _data.size(); ++i1) {
        // Use this record if the 'rejected' field is either absent from the schema or false;
        // i.e. skip it only when it has been explicitly flagged as an outlier.
        if (!_keys.rejected.isValid() || !_data[i1].get(_keys.rejected)) {
            geom::Point2D output = _outputScaling(_data[i1].get(_keys.output));
            vx[i2] = output.getX();
            vy[i2] = output.getY();
            m.row(i2) = _vandermonde.row(i1).head(packedSize);
            if (_keys.outputErr.isValid()) {
                Eigen::Matrix2d modelErr =
                        outS * _data[i1].get(_keys.outputErr).cast<double>() * outS.adjoint();
                sxx[i2] = modelErr(0, 0);
                sxy[i2] = modelErr(0, 1);
                syy[i2] = modelErr(1, 1);
            } else {
                sxx[i2] = 1.0;
                sxy[i2] = 0.0;
                syy[i2] = 1.0;
            }
            ++i2;
        }
    }
    // Do a blockwise inverse of S. Note that the result F is still symmetric.
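    // For each point, S = [[sxx, sxy], [sxy, syy]], and its inverse has elements
    //   fxx = syy/det, fyy = sxx/det, fxy = -sxy/det, with det = sxx*syy - sxy^2;
    // the elementwise expressions below are that inverse rearranged to avoid forming det directly.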
    Eigen::ArrayXd fxx = 1.0 / (sxx - sxy.square() / syy);
    Eigen::ArrayXd fyy = 1.0 / (syy - sxy.square() / sxx);
    Eigen::ArrayXd fxy = -(sxy / sxx) * fyy;
#if LSST_ScaledPolynomialTransformFitter_TEST_IN_PLACE
    assert((sxx * fxx + sxy * fxy).isApproxToConstant(1.0));
    assert((syy * fyy + sxy * fxy).isApproxToConstant(1.0));
    assert((sxx * fxy).isApprox(-sxy * fyy));
    assert((sxy * fxx).isApprox(-syy * fxy));
#endif
    // Now that we've got all the block quantities, we'll form the full normal equations matrix.
    // That's H = M^T F M:
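    // In block form,
    //   H = [[ M^T Fxx M, M^T Fxy M ],
    //        [ M^T Fxy M, M^T Fyy M ]]
    // where Fxx, Fxy, Fyy are the diagonal matrices built from the arrays above.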
    Eigen::MatrixXd h(2 * packedSize, 2 * packedSize);
    h.topLeftCorner(packedSize, packedSize) = m.adjoint() * fxx.matrix().asDiagonal() * m;
    h.topRightCorner(packedSize, packedSize) = m.adjoint() * fxy.matrix().asDiagonal() * m;
    h.bottomLeftCorner(packedSize, packedSize) = h.topRightCorner(packedSize, packedSize).adjoint();
    h.bottomRightCorner(packedSize, packedSize) = m.adjoint() * fyy.matrix().asDiagonal() * m;
    // And here's the corresponding RHS vector, g = M^T F v
    Eigen::VectorXd g(2 * packedSize);
    g.head(packedSize) = m.adjoint() * (fxx.matrix().asDiagonal() * vx + fxy.matrix().asDiagonal() * vy);
    g.tail(packedSize) = m.adjoint() * (fxy.matrix().asDiagonal() * vx + fyy.matrix().asDiagonal() * vy);
    // Solve the normal equations.
    auto lstsq = afw::math::LeastSquares::fromNormalEquations(h, g);
    auto solution = lstsq.getSolution();
    // Unpack the solution vector back into the polynomial coefficient matrices.
    for (int n = 0, j = 0; n <= order; ++n) {
        for (int p = 0, q = n; p <= n; ++p, --q, ++j) {
            _transform._poly._xCoeffs(p, q) = solution[j];
            _transform._poly._yCoeffs(p, q) = solution[j + packedSize];
        }
    }
}

void ScaledPolynomialTransformFitter::updateModel() {
    for (auto &record : _data) {
        record.set(_keys.model, _transform(record.get(_keys.input)));
    }
}

double ScaledPolynomialTransformFitter::updateIntrinsicScatter() {
    if (!_keys.rejected.isValid()) {
        throw LSST_EXCEPT(pex::exceptions::LogicError,
                          "Cannot compute intrinsic scatter on fitter initialized with fromGrid.");
    }
    double newIntrinsicScatter = computeIntrinsicScatter();
    float varDiff = newIntrinsicScatter * newIntrinsicScatter - _intrinsicScatter * _intrinsicScatter;
    for (auto &record : _data) {
        record.set(_keys.outputErr, record.get(_keys.outputErr) + varDiff * Eigen::Matrix2f::Identity());
    }
    _intrinsicScatter = newIntrinsicScatter;
    return _intrinsicScatter;
}

double ScaledPolynomialTransformFitter::computeIntrinsicScatter() const {
    // We model the variance matrix of each match as the sum of the intrinsic variance (which we're
    // trying to fit) and the per-source measurement uncertainty.
    // We start by computing the variance directly, which yields the sum.
    // At the same time, we find the maximum per-source variance. Since the per-source uncertainties
    // are actually 2x2 matrices, we use the square of the major axis of that ellipse.
    double directVariance = 0.0;          // direct estimate of total scatter (includes measurement errors)
    double maxMeasurementVariance = 0.0;  // maximum of the per-match measurement uncertainties
    double oldIntrinsicVariance = _intrinsicScatter * _intrinsicScatter;
    std::size_t nGood = 0;
    for (auto const &record : _data) {
        if (!_keys.rejected.isValid() || !record.get(_keys.rejected)) {
            auto delta = record.get(_keys.output) - record.get(_keys.model);
            directVariance += 0.5 * delta.computeSquaredNorm();
            double cxx = _keys.outputErr.getElement(record, 0, 0) - oldIntrinsicVariance;
            double cyy = _keys.outputErr.getElement(record, 1, 1) - oldIntrinsicVariance;
            double cxy = _keys.outputErr.getElement(record, 0, 1);
            // square of semimajor axis of uncertainty error ellipse
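            // (i.e. the larger eigenvalue of [[cxx, cxy], [cxy, cyy]]:
            //  0.5*(cxx + cyy) + sqrt(0.25*(cxx - cyy)^2 + cxy^2), expanded below)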
            double ca2 = 0.5 * (cxx + cyy + std::sqrt(cxx * cxx + cyy * cyy + 4 * cxy * cxy - 2 * cxx * cyy));
            maxMeasurementVariance = std::max(maxMeasurementVariance, ca2);
            ++nGood;
        }
    }
    directVariance /= nGood;

    // Function that computes (up to an additive constant) twice the negative log likelihood of the
    // current deltas, with the variance modeled as described above.
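    // For each match with residual d = (x, y) and covariance C (the tabulated covariance with the
    // trial intrinsic variance swapped in for the old one), the contribution is
    //   d^T C^-1 d + ln(det(C)).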
    auto logLikelihood = [this](double intrinsicVariance) {
        // Uncertainties in the table right now include the old intrinsic scatter; need to
        // subtract it off as we add the new one in.
        double varDiff = intrinsicVariance - this->_intrinsicScatter * this->_intrinsicScatter;
        double q = 0.0;
        for (auto &record : this->_data) {
            double x = record.get(this->_keys.output.getX()) - record.get(_keys.model.getX());
            double y = record.get(this->_keys.output.getY()) - record.get(_keys.model.getY());
            double cxx = this->_keys.outputErr.getElement(record, 0, 0) + varDiff;
            double cyy = this->_keys.outputErr.getElement(record, 1, 1) + varDiff;
            double cxy = this->_keys.outputErr.getElement(record, 0, 1);
            double det = cxx * cyy - cxy * cxy;
            q += (x * x * cyy - 2 * x * y * cxy + y * y * cxx) / det + std::log(det);
        }
        return q;
    };

    // directVariance brackets the intrinsic variance from above, and this quantity
    // brackets it from below:
    double minIntrinsicVariance = std::max(0.0, directVariance - maxMeasurementVariance);
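    // (directVariance is the intrinsic variance plus the mean measurement variance, so it bounds
    //  the intrinsic variance from above; subtracting the largest per-match measurement variance
    //  instead of the mean then gives a conservative lower bound.)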

    // Minimize the negative log likelihood to find the best-fit intrinsic variance.
    static constexpr int BITS_REQUIRED = 16;  // solution good to ~1E-4
    boost::uintmax_t maxIterations = 20;
    auto result = boost::math::tools::brent_find_minima(logLikelihood, minIntrinsicVariance, directVariance,
                                                        BITS_REQUIRED, maxIterations);
    return std::sqrt(result.first);  // return RMS instead of variance
}

std::pair<double, std::size_t> ScaledPolynomialTransformFitter::rejectOutliers(
        OutlierRejectionControl const &ctrl) {
    // If the 'rejected' field isn't present in the schema (because the fitter
    // was constructed with fromGrid), we can't do outlier rejection.
    if (!_keys.rejected.isValid()) {
        throw LSST_EXCEPT(pex::exceptions::LogicError,
                          "Cannot reject outliers on fitter initialized with fromGrid.");
    }
    if (static_cast<std::size_t>(ctrl.nClipMin) >= _data.size()) {
        throw LSST_EXCEPT(
                pex::exceptions::LengthError,
                (boost::format("Not enough values (%d) to clip %d.") % _data.size() % ctrl.nClipMin).str());
    }
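    // Rank all matches by their squared Mahalanobis residual r^2 = d^T C^-1 d, clip everything
    // beyond nSigma^2, then walk the cutoff iterator to honor the nClipMin/nClipMax constraints.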
    std::map<double, afw::table::BaseRecord *> rankings;
    for (auto &record : _data) {
        Eigen::Matrix2d cov = record.get(_keys.outputErr).cast<double>();
        Eigen::Vector2d d = (record.get(_keys.output) - record.get(_keys.model)).asEigen();
        double r2 = d.dot(cov.inverse() * d);
        rankings.insert(std::make_pair(r2, &record));
    }
    auto cutoff = rankings.upper_bound(ctrl.nSigma * ctrl.nSigma);
    int nClip = 0, nGood = 0;
    for (auto iter = rankings.begin(); iter != cutoff; ++iter) {
        iter->second->set(_keys.rejected, false);
        ++nGood;
    }
    for (auto iter = cutoff; iter != rankings.end(); ++iter) {
        iter->second->set(_keys.rejected, true);
        ++nClip;
    }
    assert(static_cast<std::size_t>(nGood + nClip) == _data.size());
    while (nClip < ctrl.nClipMin) {
        --cutoff;
        cutoff->second->set(_keys.rejected, true);
        ++nClip;
    }
    while (nClip > ctrl.nClipMax && cutoff != rankings.end()) {
        cutoff->second->set(_keys.rejected, false);
        ++cutoff;
        --nClip;
    }
    std::pair<double, std::size_t> result(ctrl.nSigma, nClip);
    if (cutoff != rankings.end()) {
        result.first = std::sqrt(cutoff->first);
    }
    return result;
}

}  // namespace astrom
}  // namespace meas
}  // namespace lsst