#!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008-2015 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#

import numpy

import lsst.utils.tests
import lsst.afw.table
import lsst.afw.image
import lsst.afw.detection
import lsst.afw.geom
import lsst.afw.geom.ellipses
import lsst.afw.coord
import lsst.daf.base
import lsst.pex.exceptions

from .sfm import SingleFrameMeasurementTask
from .forcedMeasurement import ForcedMeasurementTask
from .baseLib import CentroidResultKey

__all__ = ("BlendContext", "TestDataset", "AlgorithmTestCase", "TransformTestCase",
           "SingleFramePluginTransformSetupHelper", "ForcedPluginTransformSetupHelper",
           "FluxTransformTestCase", "CentroidTransformTestCase")

class BlendContext(object):
    """!
    A Python context manager used to add multiple overlapping sources along with a parent source
    that represents all of them together.

    This is used as the return value for TestDataset.addBlend(), and that is the only way it should
    be used.  The only public method is addChild().
    """

    def __init__(self, owner):
        self.owner = owner
        self.parentRecord = self.owner.catalog.addNew()
        self.parentImage = lsst.afw.image.ImageF(self.owner.exposure.getBBox())
        self.children = []

    def __enter__(self):
        # BlendContext is its own context manager, so we just return self.
        return self

    def addChild(self, flux, centroid, shape=None):
        """!
        Add a child source to the blend, and return the truth catalog record that corresponds to it.

        @param[in]  flux      Total flux of the source to be added.
        @param[in]  centroid  Position of the source to be added (lsst.afw.geom.Point2D).
        @param[in]  shape     2nd moments of the source before PSF convolution
                              (lsst.afw.geom.ellipses.Quadrupole).  Note that the truth catalog
                              records post-convolution moments.
        """
        record, image = self.owner.addSource(flux, centroid, shape)
        record.set(self.owner.keys["parent"], self.parentRecord.getId())
        self.parentImage += image
        self.children.append((record, image))
        return record

    def __exit__(self, type_, value, tb):
        # We're not using the context manager for any kind of exception safety or guarantees;
        # we just want the nice "with" statement syntax.
        if type_ is not None:  # exception was raised; just skip all this and let it propagate
            return
        # On exit, we need to compute and set the truth values for the parent object.
        self.parentRecord.set(self.owner.keys["nChild"], len(self.children))
        # Compute flux from sum of component fluxes
        flux = 0.0
        for record, image in self.children:
            flux += record.get(self.owner.keys["flux"])
        self.parentRecord.set(self.owner.keys["flux"], flux)
        # Compute centroid from flux-weighted mean of component centroids
        x = 0.0
        y = 0.0
        for record, image in self.children:
            w = record.get(self.owner.keys["flux"])/flux
            x += record.get(self.owner.keys["centroid"].getX())*w
            y += record.get(self.owner.keys["centroid"].getY())*w
        self.parentRecord.set(self.owner.keys["centroid"], lsst.afw.geom.Point2D(x, y))
        # Compute shape from flux-weighted mean of offset component shapes
        xx = 0.0
        yy = 0.0
        xy = 0.0
        for record, image in self.children:
            w = record.get(self.owner.keys["flux"])/flux
            dx = record.get(self.owner.keys["centroid"].getX()) - x
            dy = record.get(self.owner.keys["centroid"].getY()) - y
            xx += (record.get(self.owner.keys["shape"].getIxx()) + dx**2)*w
            yy += (record.get(self.owner.keys["shape"].getIyy()) + dy**2)*w
            xy += (record.get(self.owner.keys["shape"].getIxy()) + dx*dy)*w
        self.parentRecord.set(self.owner.keys["shape"], lsst.afw.geom.ellipses.Quadrupole(xx, yy, xy))
        # Run detection on the parent image to get the parent Footprint.
        self.owner._installFootprint(self.parentRecord, self.parentImage)
        # Create perfect HeavyFootprints for all children; these will need to be modified later to
        # account for the noise we'll add to the image.
        deblend = lsst.afw.image.MaskedImageF(self.owner.exposure.getMaskedImage(), True)
        for record, image in self.children:
            deblend.getImage().getArray()[:,:] = image.getArray()
            heavyFootprint = lsst.afw.detection.HeavyFootprintF(self.parentRecord.getFootprint(), deblend)
            record.setFootprint(heavyFootprint)

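# Illustrative sketch (not part of the original module): how BlendContext is typically used via
# TestDataset.addBlend(), and what the parent truth record contains afterwards.  The flux and
# centroid values are arbitrary assumptions for a dataset whose bounding box covers them.
def _exampleBlendTruth(dataset):
    with dataset.addBlend() as family:
        child1 = family.addChild(flux=1E5, centroid=lsst.afw.geom.Point2D(40.0, 50.0))
        child2 = family.addChild(flux=3E5, centroid=lsst.afw.geom.Point2D(42.0, 51.0))
    # The parent truth flux is the sum of the child fluxes; the parent centroid and shape are
    # flux-weighted combinations computed in BlendContext.__exit__.
    parent = dataset.catalog.find(child1.getParent())
    assert numpy.isclose(parent.get(dataset.keys["flux"]),
                         child1.get(dataset.keys["flux"]) + child2.get(dataset.keys["flux"]))
    return parent
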
class TestDataset(object):
    """!
    A simulated dataset consisting of a test image and an associated truth catalog.

    TestDataset creates an idealized image made of pure Gaussians (including a Gaussian PSF),
    with simple noise and idealized Footprints/HeavyFootprints that simulate the outputs
    of detection and deblending.  Multiple noise realizations can be created from the same
    underlying sources, allowing uncertainty estimates to be verified via Monte Carlo.

    Typical usage:
    @code
    bbox = lsst.afw.geom.Box2I(lsst.afw.geom.Point2I(0,0), lsst.afw.geom.Point2I(100, 100))
    dataset = TestDataset(bbox)
    dataset.addSource(flux=1E5, centroid=lsst.afw.geom.Point2D(25, 26))
    dataset.addSource(flux=2E5, centroid=lsst.afw.geom.Point2D(75, 24),
                      shape=lsst.afw.geom.ellipses.Quadrupole(8, 7, 2))
    with dataset.addBlend() as family:
        family.addChild(flux=2E5, centroid=lsst.afw.geom.Point2D(50, 72))
        family.addChild(flux=1.5E5, centroid=lsst.afw.geom.Point2D(51, 74))
    exposure, catalog = dataset.realize(noise=100.0, schema=TestDataset.makeMinimalSchema())
    @endcode
    """

    @classmethod
    def makeMinimalSchema(cls):
        """Return the minimal schema needed to hold truth catalog fields.

        When TestDataset.realize() is called, the schema must include at least these fields.
        Usually it will include additional fields for measurement algorithm outputs, allowing
        the same catalog to be used for both truth values (the fields from the minimal schema)
        and the measurements.
        """
        if not hasattr(cls, "_schema"):
            schema = lsst.afw.table.SourceTable.makeMinimalSchema()
            cls.keys = {}
            cls.keys["parent"] = schema.find("parent").key
            cls.keys["nChild"] = schema.addField("deblend_nchild", type=int)
            cls.keys["flux"] = schema.addField("truth_flux", type=float, doc="true flux", units="dn")
            cls.keys["centroid"] = lsst.afw.table.Point2DKey.addFields(
                schema, "truth", "true simulated centroid", "pixels"
            )
            cls.keys["shape"] = lsst.afw.table.QuadrupoleKey.addFields(
                schema, "truth", "true shape after PSF convolution", lsst.afw.table.PIXEL
            )
            cls.keys["isStar"] = schema.addField("truth_isStar", type="Flag",
                                                 doc="set if the object is a star")
            schema.getAliasMap().set("slot_Shape", "truth")
            schema.getAliasMap().set("slot_Centroid", "truth")
            schema.getAliasMap().set("slot_ModelFlux", "truth")
            schema.getCitizen().markPersistent()
            cls._schema = schema
        schema = lsst.afw.table.Schema(cls._schema)
        schema.disconnectAliases()
        return schema

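    # Illustrative sketch (not part of the original module): the schema passed to realize() must
    # start with the minimal truth schema, but is usually extended with fields for the measurements
    # being tested.  The "test_flux" field name here is a hypothetical example.
    @staticmethod
    def _exampleExtendedSchema():
        schema = TestDataset.makeMinimalSchema()
        schema.addField("test_flux", type=float, doc="flux field for a hypothetical measurement")
        # schema.contains(TestDataset.makeMinimalSchema()) is still True, so it is valid for realize().
        return schema
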
    @staticmethod
    def makePerturbedWcs(oldWcs, minScaleFactor=1.2, maxScaleFactor=1.5,
                         minRotation=None, maxRotation=None,
                         minRefShift=None, maxRefShift=None,
                         minPixShift=2.0, maxPixShift=4.0):
        """!
        Create a new undistorted TanWcs that is similar but not identical to another, with random
        scaling, rotation, and offset (in both pixel position and reference position).

        The maximum and minimum arguments are interpreted as absolute values for a split
        range that covers both positive and negative values (as this method is used
        in testing, it is typically most important to avoid perturbations near zero).
        Scale factors are treated somewhat differently: the actual scale factor is chosen between
        minScaleFactor and maxScaleFactor OR (1/maxScaleFactor) and (1/minScaleFactor).

        The default range for rotation is 30-60 degrees, and the default range for reference shift
        is 0.5-1.0 arcseconds (these cannot be safely included directly as default values because
        Angle objects are mutable).
        """
        if minRotation is None: minRotation = 30.0*lsst.afw.geom.degrees
        if maxRotation is None: maxRotation = 60.0*lsst.afw.geom.degrees
        if minRefShift is None: minRefShift = 0.5*lsst.afw.geom.arcseconds
        if maxRefShift is None: maxRefShift = 1.0*lsst.afw.geom.arcseconds

        def splitRandom(min1, max1, min2=None, max2=None):
            if min2 is None: min2 = -max1
            if max2 is None: max2 = -min1
            if numpy.random.uniform() > 0.5:
                return float(numpy.random.uniform(min1, max1))
            else:
                return float(numpy.random.uniform(min2, max2))
        # Generate random perturbations
        scaleFactor = splitRandom(minScaleFactor, maxScaleFactor, 1.0/maxScaleFactor, 1.0/minScaleFactor)
        rotation = splitRandom(minRotation.asRadians(), maxRotation.asRadians())*lsst.afw.geom.radians
        refShiftRa = splitRandom(minRefShift.asRadians(), maxRefShift.asRadians())*lsst.afw.geom.radians
        refShiftDec = splitRandom(minRefShift.asRadians(), maxRefShift.asRadians())*lsst.afw.geom.radians
        pixShiftX = splitRandom(minPixShift, maxPixShift)
        pixShiftY = splitRandom(minPixShift, maxPixShift)
        # Compute new CD matrix
        oldTransform = lsst.afw.geom.LinearTransform(oldWcs.getCDMatrix())
        rTransform = lsst.afw.geom.LinearTransform.makeRotation(rotation)
        sTransform = lsst.afw.geom.LinearTransform.makeScaling(scaleFactor)
        newTransform = oldTransform*rTransform*sTransform
        matrix = newTransform.getMatrix()
        # Compute new coordinate reference pixel (CRVAL)
        oldSkyOrigin = oldWcs.getSkyOrigin().toIcrs()
        newSkyOrigin = lsst.afw.coord.IcrsCoord(oldSkyOrigin.getRa() + refShiftRa,
                                                oldSkyOrigin.getDec() + refShiftDec)
        # Compute new pixel reference pixel (CRPIX)
        oldPixOrigin = oldWcs.getPixelOrigin()
        newPixOrigin = lsst.afw.geom.Point2D(oldPixOrigin.getX() + pixShiftX,
                                             oldPixOrigin.getY() + pixShiftY)
        return lsst.afw.image.makeWcs(newSkyOrigin, newPixOrigin,
                                      matrix[0,0], matrix[0,1], matrix[1,0], matrix[1,1])

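    # Illustrative sketch (not part of the original module): because the perturbations are drawn
    # from split ranges, the returned WCS always differs noticeably from the input.  The rotation
    # bounds below are arbitrary assumptions chosen for the example.
    @staticmethod
    def _examplePerturbedWcs(dataset):
        oldWcs = dataset.exposure.getWcs()
        newWcs = TestDataset.makePerturbedWcs(oldWcs, minRotation=10.0*lsst.afw.geom.degrees,
                                              maxRotation=20.0*lsst.afw.geom.degrees)
        return newWcs
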
    @staticmethod
    def makeEmptyExposure(bbox, wcs=None, crval=None, cdelt=None, psfSigma=2.0, psfDim=17, fluxMag0=1E12):
        """!
        Create an Exposure, with a Calib, Wcs, and Psf, but no pixel values set.

        @param[in]  bbox      Bounding box of the image (image coordinates).
        @param[in]  wcs       New Wcs for the exposure (created from crval and cdelt if None).
        @param[in]  crval     afw.coord.Coord: center of the TAN WCS attached to the image.
        @param[in]  cdelt     afw.geom.Angle: pixel scale of the image.
        @param[in]  psfSigma  Radius (sigma) of the Gaussian PSF attached to the image.
        @param[in]  psfDim    Width and height (in pixels) of the Gaussian PSF attached to the image.
        @param[in]  fluxMag0  Flux at magnitude zero (in e-) used to set the Calib of the exposure.
        """
        if wcs is None:
            if crval is None:
                crval = lsst.afw.coord.IcrsCoord(45.0*lsst.afw.geom.degrees, 45.0*lsst.afw.geom.degrees)
            if cdelt is None:
                cdelt = 0.2*lsst.afw.geom.arcseconds
            crpix = lsst.afw.geom.Box2D(bbox).getCenter()
            wcs = lsst.afw.image.makeWcs(crval, crpix, cdelt.asDegrees(), 0.0, 0.0, cdelt.asDegrees())
        exposure = lsst.afw.image.ExposureF(bbox)
        psf = lsst.afw.detection.GaussianPsf(psfDim, psfDim, psfSigma)
        calib = lsst.afw.image.Calib()
        calib.setFluxMag0(fluxMag0)
        exposure.setWcs(wcs)
        exposure.setPsf(psf)
        exposure.setCalib(calib)
        return exposure

    @staticmethod
    def drawGaussian(bbox, flux, ellipse):
        """!
        Create an image of an elliptical Gaussian.

        @param[in]  bbox     Bounding box of image to create.
        @param[in]  flux     Total flux of the Gaussian (normalized analytically, not using pixel
                             values).
        @param[in]  ellipse  lsst.afw.geom.ellipses.Ellipse holding the centroid and shape.
        """
        x, y = numpy.meshgrid(numpy.arange(bbox.getBeginX(), bbox.getEndX()),
                              numpy.arange(bbox.getBeginY(), bbox.getEndY()))
        t = ellipse.getGridTransform()
        xt = t[t.XX] * x + t[t.XY] * y + t[t.X]
        yt = t[t.YX] * x + t[t.YY] * y + t[t.Y]
        image = lsst.afw.image.ImageF(bbox)
        image.getArray()[:,:] = numpy.exp(-0.5*(xt**2 + yt**2))*flux/(2.0*ellipse.getCore().getArea())
        return image

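    # Illustrative sketch (not part of the original module): because drawGaussian normalizes the
    # profile analytically, a well-sampled Gaussian fully contained in the bounding box should have
    # a pixel sum close to the requested flux.  The box size, flux, and moments are assumptions.
    @staticmethod
    def _exampleDrawGaussian():
        bbox = lsst.afw.geom.Box2I(lsst.afw.geom.Point2I(0, 0), lsst.afw.geom.Point2I(100, 100))
        ellipse = lsst.afw.geom.ellipses.Ellipse(lsst.afw.geom.ellipses.Quadrupole(9.0, 9.0, 0.0),
                                                 lsst.afw.geom.Point2D(50.0, 50.0))
        image = TestDataset.drawGaussian(bbox, 1E5, ellipse)
        assert numpy.isclose(image.getArray().sum(), 1E5, rtol=1E-2)
        return image
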
    def __init__(self, bbox, threshold=10.0, exposure=None, **kwds):
        """!
        Initialize the dataset.

        @param[in]  bbox       Bounding box of the test image.
        @param[in]  threshold  Threshold absolute value used to determine footprints for
                               simulated sources.  This thresholding will be applied before noise is
                               actually added to images (or before the noise level is even known), so
                               this will necessarily produce somewhat artificial footprints.
        @param[in]  exposure   lsst.afw.image.ExposureF test sources should be added to.  Ownership
                               should be considered transferred from the caller to the TestDataset.
                               Must have a Gaussian Psf for truth catalog shapes to be exact.
        @param[in]  **kwds     Keyword arguments forwarded to makeEmptyExposure if exposure is None.
        """
        if exposure is None:
            exposure = self.makeEmptyExposure(bbox, **kwds)
        self.threshold = lsst.afw.detection.Threshold(threshold, lsst.afw.detection.Threshold.VALUE)
        self.exposure = exposure
        self.psfShape = self.exposure.getPsf().computeShape()
        self.schema = self.makeMinimalSchema()
        self.catalog = lsst.afw.table.SourceCatalog(self.schema)

    def _installFootprint(self, record, image):
        """Create a Footprint for a simulated source and add it to its truth catalog record.
        """
        # Run detection on the single-source image
        fpSet = lsst.afw.detection.FootprintSet(image, self.threshold)
        # the call below to the FootprintSet ctor is actually a grow operation
        fpSet = lsst.afw.detection.FootprintSet(fpSet, int(self.psfShape.getDeterminantRadius() + 1.0), True)
        # Update the full exposure's mask plane to indicate the detection
        fpSet.setMask(self.exposure.getMaskedImage().getMask(), "DETECTED")
        # Attach the new footprint to the exposure
        if len(fpSet.getFootprints()) > 1:
            raise RuntimeError("Threshold value results in multiple Footprints for a single object")
        if len(fpSet.getFootprints()) == 0:
            raise RuntimeError("Threshold value results in zero Footprints for object")
        record.setFootprint(fpSet.getFootprints()[0])

    def addSource(self, flux, centroid, shape=None):
        """!
        Add a source to the simulation.

        @param[in]  flux      Total flux of the source to be added.
        @param[in]  centroid  Position of the source to be added (lsst.afw.geom.Point2D).
        @param[in]  shape     2nd moments of the source before PSF convolution
                              (lsst.afw.geom.ellipses.Quadrupole).  Note that the truth catalog
                              records post-convolution moments.  If None, a point source
                              will be added.

        @return a truth catalog record and single-source image corresponding to the new source.
        """
        # Create and set the truth catalog fields
        record = self.catalog.addNew()
        record.set(self.keys["flux"], flux)
        record.set(self.keys["centroid"], centroid)
        if shape is None:
            record.set(self.keys["isStar"], True)
            fullShape = self.psfShape
        else:
            record.set(self.keys["isStar"], False)
            fullShape = shape.convolve(self.psfShape)
        record.set(self.keys["shape"], fullShape)
        # Create an image containing just this source
        image = self.drawGaussian(self.exposure.getBBox(), flux,
                                  lsst.afw.geom.ellipses.Ellipse(fullShape, centroid))
        # Generate a footprint for this source
        self._installFootprint(record, image)
        # Actually add the source to the full exposure
        self.exposure.getMaskedImage().getImage().getArray()[:,:] += image.getArray()
        return record, image

    def addBlend(self):
        """!
        Return a context manager that allows a blend of multiple sources to be added.

        Example:
        @code
        d = TestDataset(...)
        with d.addBlend() as b:
            b.addChild(flux1, centroid1)
            b.addChild(flux2, centroid2, shape2)
        @endcode

        Note that nothing stops you from creating overlapping sources just using the addSource() method,
        but addBlend() is necessary to create a parent object and deblended HeavyFootprints of the type
        produced by the detection and deblending pipelines.
        """
        return BlendContext(self)

    def transform(self, wcs, **kwds):
        """!
        Create a copy of the dataset transformed to a new WCS, with new Psf and Calib.

        @param[in]  wcs     Wcs for the new dataset.
        @param[in]  **kwds  Additional keyword arguments passed on to makeEmptyExposure.  If not
                            specified, these revert to the defaults for makeEmptyExposure, not the
                            values in the current dataset.
        """
        bboxD = lsst.afw.geom.Box2D()
        xyt = lsst.afw.image.XYTransformFromWcsPair(wcs, self.exposure.getWcs())
        for corner in lsst.afw.geom.Box2D(self.exposure.getBBox()).getCorners():
            bboxD.include(xyt.forwardTransform(lsst.afw.geom.Point2D(corner)))
        bboxI = lsst.afw.geom.Box2I(bboxD)
        result = TestDataset(bbox=bboxI, wcs=wcs, **kwds)
        oldCalib = self.exposure.getCalib()
        newCalib = result.exposure.getCalib()
        oldPsfShape = self.exposure.getPsf().computeShape()
        for record in self.catalog:
            if record.get(self.keys["nChild"]):
                raise NotImplementedError("Transforming blended sources in TestDatasets is not supported")
            magnitude = oldCalib.getMagnitude(record.get(self.keys["flux"]))
            newFlux = newCalib.getFlux(magnitude)
            oldCentroid = record.get(self.keys["centroid"])
            newCentroid = xyt.forwardTransform(oldCentroid)
            if record.get(self.keys["isStar"]):
                newDeconvolvedShape = None
            else:
                affine = xyt.linearizeForwardTransform(oldCentroid)
                oldFullShape = record.get(self.keys["shape"])
                oldDeconvolvedShape = lsst.afw.geom.ellipses.Quadrupole(
                    oldFullShape.getIxx() - oldPsfShape.getIxx(),
                    oldFullShape.getIyy() - oldPsfShape.getIyy(),
                    oldFullShape.getIxy() - oldPsfShape.getIxy(),
                    False
                )
                newDeconvolvedShape = oldDeconvolvedShape.transform(affine.getLinear())
            result.addSource(newFlux, newCentroid, newDeconvolvedShape)
        return result

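    # Illustrative sketch (not part of the original module): combining makePerturbedWcs() with
    # transform() yields two datasets containing the same underlying sources in different
    # coordinate systems, which is the usual starting point for testing forced measurement.
    @staticmethod
    def _exampleTransformedPair(refDataset):
        measWcs = TestDataset.makePerturbedWcs(refDataset.exposure.getWcs())
        measDataset = refDataset.transform(measWcs)
        return refDataset, measDataset
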
    def realize(self, noise, schema):
        """!
        Create a simulated image with noise and a simulated post-detection catalog with (Heavy)Footprints.

        @param[in]  noise   Standard deviation of noise to be added to the exposure.  The noise will be
                            Gaussian and constant, appropriate for the sky-limited regime.
        @param[in]  schema  Schema of the new catalog to be created.  Must start with self.schema (i.e.
                            schema.contains(self.schema) must be True), but typically contains fields for
                            already-configured measurement algorithms as well.

        @return a tuple of (exposure, catalog)
        """
        assert schema.contains(self.schema)
        mapper = lsst.afw.table.SchemaMapper(self.schema)
        mapper.addMinimalSchema(self.schema, True)
        exposure = self.exposure.clone()
        exposure.getMaskedImage().getVariance().getArray()[:,:] = noise**2
        exposure.getMaskedImage().getImage().getArray()[:,:] \
            += numpy.random.randn(exposure.getHeight(), exposure.getWidth())*noise
        catalog = lsst.afw.table.SourceCatalog(schema)
        catalog.extend(self.catalog, mapper=mapper)
        # Loop over sources and generate new HeavyFootprints that divide up the noisy pixels, not the
        # ideal no-noise pixels.
        for record in catalog:
            # parent objects have non-Heavy Footprints, which don't need to be updated after adding noise.
            if record.getParent() == 0: continue
            # get flattened arrays that correspond to the no-noise and noisy parent images
            parent = catalog.find(record.getParent())
            footprint = parent.getFootprint()
            parentFluxArrayNoNoise = numpy.zeros(footprint.getArea(), dtype=numpy.float32)
            lsst.afw.detection.flattenArray(
                footprint,
                self.exposure.getMaskedImage().getImage().getArray(),
                parentFluxArrayNoNoise,
                self.exposure.getXY0()
            )
            parentFluxArrayNoisy = numpy.zeros(footprint.getArea(), dtype=numpy.float32)
            lsst.afw.detection.flattenArray(
                footprint,
                exposure.getMaskedImage().getImage().getArray(),
                parentFluxArrayNoisy,
                exposure.getXY0()
            )
            oldHeavy = lsst.afw.detection.HeavyFootprintF.cast(record.getFootprint())
            fraction = (oldHeavy.getImageArray() / parentFluxArrayNoNoise)
            # n.b. this isn't a copy ctor - it's a copy from a vanilla Footprint, so it doesn't copy
            # the arrays we don't want to change, and hence we have to do that ourselves below.
            newHeavy = lsst.afw.detection.HeavyFootprintF(oldHeavy)
            newHeavy.getImageArray()[:] = parentFluxArrayNoisy*fraction
            newHeavy.getMaskArray()[:] = oldHeavy.getMaskArray()
            newHeavy.getVarianceArray()[:] = oldHeavy.getVarianceArray()
            record.setFootprint(newHeavy)
        return exposure, catalog

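# Illustrative sketch (not part of the original module): because realize() adds a fresh noise
# realization on each call, repeated calls on the same dataset support simple Monte Carlo checks
# of measurement uncertainties.  nSamples and the noise level below are assumptions.
def _exampleRealizations(dataset, schema, nSamples=10, noise=100.0):
    results = []
    for _ in range(nSamples):
        exposure, catalog = dataset.realize(noise=noise, schema=schema)
        results.append((exposure, catalog))
    return results
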
class AlgorithmTestCase(lsst.utils.tests.TestCase):
    # Some tests depend on the noise realization in the test data or from the
    # numpy random number generator. In most cases, they are testing that the
    # measured flux lies within 2 sigma of the correct value, which we should
    # expect to fail sometimes. Some -- but sadly not all -- of these cases
    # have been marked with an "rng dependent" comment.
    #
    # We ensure these tests are provided with data which causes them to pass
    # by seeding the numpy RNG with this value. It can be over-ridden as
    # necessary in subclasses.
    randomSeed = 1234

    @classmethod
    def setUpClass(cls):
        numpy.random.seed(cls.randomSeed)

    def makeSingleFrameMeasurementConfig(self, plugin=None, dependencies=()):
        """Convenience function to create a Config instance for SingleFrameMeasurementTask

        The plugin and its dependencies will be the only plugins run, while the Centroid, Shape,
        and ModelFlux slots will be set to the truth fields generated by the TestDataset class.
        """
        config = SingleFrameMeasurementTask.ConfigClass()
        config.slots.centroid = "truth"
        config.slots.shape = "truth"
        config.slots.modelFlux = None
        config.slots.apFlux = None
        config.slots.psfFlux = None
        config.slots.instFlux = None
        config.slots.calibFlux = None
        config.plugins.names = (plugin,) + tuple(dependencies)
        return config

    def makeSingleFrameMeasurementTask(self, plugin=None, dependencies=(), config=None, schema=None,
                                       algMetadata=None):
        """Convenience function to create a SingleFrameMeasurementTask with a simple configuration.
        """
        if config is None:
            if plugin is None:
                raise ValueError("Either plugin or config argument must not be None")
            config = self.makeSingleFrameMeasurementConfig(plugin=plugin, dependencies=dependencies)
        if schema is None:
            schema = TestDataset.makeMinimalSchema()
            # Clear all aliases so only those defined by config are set.
            schema.setAliasMap(None)
        if algMetadata is None:
            algMetadata = lsst.daf.base.PropertyList()
        return SingleFrameMeasurementTask(schema=schema, algMetadata=algMetadata, config=config)

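    # Illustrative sketch (not part of the original module) of the typical single-frame test
    # pattern built from these helpers.  The plugin name "base_GaussianFlux", the task.schema
    # attribute, and the task.run(catalog, exposure) signature are assumptions about the
    # measurement task API rather than something defined in this module.
    def _exampleSingleFrameMeasurement(self, dataset):
        task = self.makeSingleFrameMeasurementTask("base_GaussianFlux")
        exposure, catalog = dataset.realize(noise=100.0, schema=task.schema)
        task.run(catalog, exposure)
        return catalog
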
    def makeForcedMeasurementConfig(self, plugin=None, dependencies=()):
        """Convenience function to create a Config instance for ForcedMeasurementTask

        In addition to the plugins specified in the plugin and dependencies arguments,
        the TransformedCentroid and TransformedShape plugins will be run and used as the
        Centroid and Shape slots; these simply transform the reference catalog centroid
        and shape to the measurement coordinate system.
        """
        config = ForcedMeasurementTask.ConfigClass()
        config.slots.centroid = "base_TransformedCentroid"
        config.slots.shape = "base_TransformedShape"
        config.slots.modelFlux = None
        config.slots.apFlux = None
        config.slots.psfFlux = None
        config.slots.instFlux = None
        config.plugins.names = (plugin,) + tuple(dependencies) + ("base_TransformedCentroid",
                                                                  "base_TransformedShape")
        return config

    def makeForcedMeasurementTask(self, plugin=None, dependencies=(), config=None, refSchema=None,
                                  algMetadata=None):
        """Convenience function to create a ForcedMeasurementTask with a simple configuration.
        """
        if config is None:
            if plugin is None:
                raise ValueError("Either plugin or config argument must not be None")
            config = self.makeForcedMeasurementConfig(plugin=plugin, dependencies=dependencies)
        if refSchema is None:
            refSchema = TestDataset.makeMinimalSchema()
        if algMetadata is None:
            algMetadata = lsst.daf.base.PropertyList()
        return ForcedMeasurementTask(refSchema=refSchema, algMetadata=algMetadata, config=config)

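    # Illustrative sketch (not part of the original module) of the forced-measurement analogue:
    # measure sources on an exposure in a different WCS, driven by the reference (truth) catalog.
    # The plugin name "base_PsfFlux" and the generateMeasCat()/run() signatures are assumptions
    # about the ForcedMeasurementTask API rather than something defined in this module.
    def _exampleForcedMeasurement(self, refDataset):
        refWcs = refDataset.exposure.getWcs()
        refCat = refDataset.catalog
        measDataset = refDataset.transform(TestDataset.makePerturbedWcs(refWcs))
        exposure, _ = measDataset.realize(noise=100.0, schema=measDataset.schema)
        task = self.makeForcedMeasurementTask("base_PsfFlux")
        measCat = task.generateMeasCat(exposure, refCat, refWcs)
        task.run(measCat, exposure, refCat, refWcs)
        return measCat
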
class TransformTestCase(lsst.utils.tests.TestCase):
    """!
    Base class for testing measurement transformations.

    We test both that the transform itself operates successfully (fluxes are
    converted to magnitudes, flags are propagated properly) and that the
    transform is registered as the default for the appropriate measurement
    algorithms.

    In the simple case of one-measurement-per-transformation, the developer
    need not directly write any tests themselves: simply customizing the class
    variables is all that is required.  More complex measurements (e.g.
    multiple aperture fluxes) require extra effort.
    """
    # The name used for the measurement algorithm; determines the names of the
    # fields in the resulting catalog. This default should generally be fine,
    # but subclasses can override if required.
    name = "MeasurementTransformTest"

    # These should be customized by subclassing.
    controlClass = None
    algorithmClass = None
    transformClass = None

    # Flags which may be set by the algorithm being tested. Can be customized
    # in subclasses.
    flagNames = ("flag",)

    # The plugin being tested should be registered under these names for
    # single frame and forced measurement. Should be customized by
    # subclassing.
    singleFramePlugins = ()
    forcedPlugins = ()

    def setUp(self):
        bbox = lsst.afw.geom.Box2I(lsst.afw.geom.Point2I(0, 0), lsst.afw.geom.Point2I(100, 100))
        self.calexp = TestDataset.makeEmptyExposure(bbox)
        self._setupTransform()

    def tearDown(self):
        del self.calexp
        del self.inputCat
        del self.mapper
        del self.transform
        del self.outputCat

    def _populateCatalog(self, baseNames):
        records = []
        for flagValue in (True, False):
            records.append(self.inputCat.addNew())
            for baseName in baseNames:
                for flagName in self.flagNames:
                    if records[-1].schema.join(baseName, flagName) in records[-1].schema:
                        records[-1].set(records[-1].schema.join(baseName, flagName), flagValue)
        self._setFieldsInRecords(records, baseName)

    def _checkOutput(self, baseNames):
        for inSrc, outSrc in zip(self.inputCat, self.outputCat):
            for baseName in baseNames:
                self._compareFieldsInRecords(inSrc, outSrc, baseName)
                for flagName in self.flagNames:
                    keyName = outSrc.schema.join(baseName, flagName)
                    if keyName in inSrc.schema:
                        self.assertEqual(outSrc.get(keyName), inSrc.get(keyName))
                    else:
                        self.assertFalse(keyName in outSrc.schema)

    def _runTransform(self, doExtend=True):
        if doExtend:
            self.outputCat.extend(self.inputCat, mapper=self.mapper)
        self.transform(self.inputCat, self.outputCat, self.calexp.getWcs(), self.calexp.getCalib())

    def testTransform(self, baseNames=None):
        """
        Test the operation of the transformation on a catalog containing random data.

        We check that:

        * An appropriate exception is raised on an attempt to transform between catalogs with different
          numbers of rows;
        * Otherwise, all appropriate conversions are properly applied and flags are propagated.

        The `baseNames` argument requires some explanation. This should be an iterable of the leading
        parts of the field names for each measurement; that is, everything that appears before `_flux`,
        `_flag`, etc. In the simple case of a single measurement per plugin, this is simply equal to
        `self.name` (thus measurements are stored as `self.name + "_flux"`, etc). More generally, the
        developer may specify whatever iterable they require. For example, to handle multiple apertures,
        we could have `(self.name + "_0", self.name + "_1", ...)`.

        @param[in]  baseNames  Iterable of the initial parts of measurement field names.
        """
        baseNames = baseNames or [self.name]
        self._populateCatalog(baseNames)
        self.assertRaises(lsst.pex.exceptions.LengthError, self._runTransform, False)
        self._runTransform()
        self._checkOutput(baseNames)

    def _checkRegisteredTransform(self, registry, name):
        # If this is a Python-based transform, we can compare directly; if
        # it's wrapped C++, we need to compare the wrapped class.
        self.assertEqual(registry[name].PluginClass.getTransformClass(), self.transformClass)

    def testRegistration(self):
        """
        Test that the transformation is appropriately registered with the relevant measurement algorithms.
        """
        for pluginName in self.singleFramePlugins:
            self._checkRegisteredTransform(lsst.meas.base.SingleFramePlugin.registry, pluginName)
        for pluginName in self.forcedPlugins:
            self._checkRegisteredTransform(lsst.meas.base.ForcedPlugin.registry, pluginName)

class SingleFramePluginTransformSetupHelper(object):

    def _setupTransform(self):
        self.control = self.controlClass()
        inputSchema = lsst.afw.table.SourceTable.makeMinimalSchema()
        # Trick algorithms that depend on the slot centroid or alias into thinking they've been defined;
        # it doesn't matter for this test since we won't actually use the plugins for anything besides
        # defining the schema.
        inputSchema.getAliasMap().set("slot_Centroid", "dummy")
        inputSchema.getAliasMap().set("slot_Shape", "dummy")
        self.algorithmClass(self.control, self.name, inputSchema)
        inputSchema.getAliasMap().erase("slot_Centroid")
        inputSchema.getAliasMap().erase("slot_Shape")
        self.inputCat = lsst.afw.table.SourceCatalog(inputSchema)
        self.mapper = lsst.afw.table.SchemaMapper(inputSchema)
        self.transform = self.transformClass(self.control, self.name, self.mapper)
        self.outputCat = lsst.afw.table.BaseCatalog(self.mapper.getOutputSchema())


class ForcedPluginTransformSetupHelper(object):

    def _setupTransform(self):
        self.control = self.controlClass()
        inputMapper = lsst.afw.table.SchemaMapper(lsst.afw.table.SourceTable.makeMinimalSchema(),
                                                  lsst.afw.table.SourceTable.makeMinimalSchema())
        # Trick algorithms that depend on the slot centroid or alias into thinking they've been defined;
        # it doesn't matter for this test since we won't actually use the plugins for anything besides
        # defining the schema.
        inputMapper.editOutputSchema().getAliasMap().set("slot_Centroid", "dummy")
        inputMapper.editOutputSchema().getAliasMap().set("slot_Shape", "dummy")
        self.algorithmClass(self.control, self.name, inputMapper, lsst.daf.base.PropertyList())
        inputMapper.editOutputSchema().getAliasMap().erase("slot_Centroid")
        inputMapper.editOutputSchema().getAliasMap().erase("slot_Shape")
        self.inputCat = lsst.afw.table.SourceCatalog(inputMapper.getOutputSchema())
        self.mapper = lsst.afw.table.SchemaMapper(inputMapper.getOutputSchema())
        self.transform = self.transformClass(self.control, self.name, self.mapper)
        self.outputCat = lsst.afw.table.BaseCatalog(self.mapper.getOutputSchema())


class FluxTransformTestCase(TransformTestCase):

    def _setFieldsInRecords(self, records, name):
        for record in records:
            record[record.schema.join(name, 'flux')] = numpy.random.random()
            record[record.schema.join(name, 'fluxSigma')] = numpy.random.random()

        # Negative fluxes should be converted to NaNs.
        assert len(records) > 1
        records[0][record.schema.join(name, 'flux')] = -1

    def _compareFieldsInRecords(self, inSrc, outSrc, name):
        fluxName, fluxSigmaName = inSrc.schema.join(name, 'flux'), inSrc.schema.join(name, 'fluxSigma')
        if inSrc[fluxName] > 0:
            mag, magErr = self.calexp.getCalib().getMagnitude(inSrc[fluxName], inSrc[fluxSigmaName])
            self.assertEqual(outSrc[outSrc.schema.join(name, 'mag')], mag)
            self.assertEqual(outSrc[outSrc.schema.join(name, 'magErr')], magErr)
        else:
            self.assertTrue(numpy.isnan(outSrc[outSrc.schema.join(name, 'mag')]))
            self.assertTrue(numpy.isnan(outSrc[outSrc.schema.join(name, 'magErr')]))


class CentroidTransformTestCase(TransformTestCase):

    def _setFieldsInRecords(self, records, name):
        for record in records:
            record[record.schema.join(name, 'x')] = numpy.random.random()
            record[record.schema.join(name, 'y')] = numpy.random.random()
            # Some algorithms set no errors; some set only sigma on x & y; some provide
            # a full covariance matrix. Set only those which exist in the schema.
            for fieldSuffix in ('xSigma', 'ySigma', 'x_y_Cov'):
                fieldName = record.schema.join(name, fieldSuffix)
                if fieldName in record.schema:
                    record[fieldName] = numpy.random.random()

    def _compareFieldsInRecords(self, inSrc, outSrc, name):
        centroidResultKey = CentroidResultKey(inSrc.schema[self.name])
        centroidResult = centroidResultKey.get(inSrc)

        coord = lsst.afw.table.CoordKey(outSrc.schema[self.name]).get(outSrc)
        coordTruth = self.calexp.getWcs().pixelToSky(centroidResult.getCentroid())
        self.assertEqual(coordTruth, coord)

        # If the centroid has an associated uncertainty matrix, the coordinate
        # must have one too, and vice versa.
        try:
            coordErr = lsst.afw.table.CovarianceMatrix2fKey(outSrc.schema[self.name],
                                                            ["ra", "dec"]).get(outSrc)
        except lsst.pex.exceptions.NotFoundError:
            self.assertFalse(centroidResultKey.getCentroidErr().isValid())
        else:
            transform = self.calexp.getWcs().linearizePixelToSky(coordTruth, lsst.afw.geom.radians)
            coordErrTruth = numpy.dot(numpy.dot(transform.getLinear().getMatrix(),
                                                centroidResult.getCentroidErr()),
                                      transform.getLinear().getMatrix().transpose())
            numpy.testing.assert_array_almost_equal(numpy.array(coordErrTruth), coordErr)
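

# Illustrative sketch (not part of the original module): a concrete transform test is normally
# composed by mixing one of the *TransformTestCase classes with one of the *TransformSetupHelper
# classes and filling in the class-level hooks.  The control/algorithm/transform classes and the
# plugin name below (PsfFluxControl, PsfFluxAlgorithm, PsfFluxTransform, "base_PsfFlux") are
# assumptions used only to show the shape of such a subclass, so the sketch is left commented out.
#
# class PsfFluxTransformTestCase(FluxTransformTestCase, SingleFramePluginTransformSetupHelper):
#     controlClass = lsst.meas.base.PsfFluxControl
#     algorithmClass = lsst.meas.base.PsfFluxAlgorithm
#     transformClass = lsst.meas.base.PsfFluxTransform
#     flagNames = ("flag", "flag_noGoodPixels")
#     singleFramePlugins = ("base_PsfFlux",)
#     forcedPlugins = ("base_PsfFlux",)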