cpFlatNormTask.py
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from collections import defaultdict

import lsst.afw.math as afwMath
import lsst.daf.base as dafBase
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
from lsst.cp.pipe.cpCombine import VignetteExposure
from lsst.cp.pipe.utils import ddict2dict

from ._lookupStaticCalibration import lookupStaticCalibration

__all__ = ["CpFlatMeasureTask", "CpFlatMeasureTaskConfig",
           "CpFlatNormalizationTask", "CpFlatNormalizationTaskConfig"]


class CpFlatMeasureConnections(pipeBase.PipelineTaskConnections,
                               dimensions=("instrument", "exposure", "detector")):
    inputExp = cT.Input(
        name="postISRCCD",
        doc="Input exposure to measure statistics from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
    )
    outputStats = cT.Output(
        name="flatStats",
        doc="Output statistics to write.",
        storageClass="PropertyList",
        dimensions=("instrument", "exposure", "detector"),
    )


class CpFlatMeasureTaskConfig(pipeBase.PipelineTaskConfig,
                              pipelineConnections=CpFlatMeasureConnections):
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Mask vignetted regions?",
        default=True,
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
        default=3.0,
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
        default=3,
    )


class CpFlatMeasureTask(pipeBase.PipelineTask,
                        pipeBase.CmdLineTask):
    """Apply extra masking and measure image statistics.
    """
    ConfigClass = CpFlatMeasureTaskConfig
    _DefaultName = "cpFlatMeasure"

    def run(self, inputExp):
        """Mask ISR processed FLAT exposures to ensure consistent statistics.

        Parameters
        ----------
        inputExp : `lsst.afw.image.Exposure`
            Post-ISR processed exposure to measure.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Result struct containing:

            ``outputStats``
                Measured statistics (`lsst.daf.base.PropertyList`).
        """
        if self.config.doVignette:
            VignetteExposure(inputExp, doUpdateMask=True, doSetValue=False, log=self.log)
        mask = inputExp.getMask()
        maskVal = mask.getPlaneBitMask(self.config.maskNameList)
        statsControl = afwMath.StatisticsControl(self.config.numSigmaClip,
                                                 self.config.clipMaxIter,
                                                 maskVal)
        statsControl.setAndMask(maskVal)

        outputStats = dafBase.PropertyList()

        # Detector level.  Note that the sigma-clipped mean is stored
        # under the MEDIAN keys read downstream by CpFlatNormalizationTask.
        stats = afwMath.makeStatistics(inputExp.getMaskedImage(),
                                       afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                       statsControl)
        outputStats['DETECTOR_MEDIAN'] = stats.getValue(afwMath.MEANCLIP)
        outputStats['DETECTOR_SIGMA'] = stats.getValue(afwMath.STDEVCLIP)
        outputStats['DETECTOR_N'] = stats.getValue(afwMath.NPOINT)
        self.log.info("Stats: median=%f sigma=%f n=%d",
                      outputStats['DETECTOR_MEDIAN'],
                      outputStats['DETECTOR_SIGMA'],
                      outputStats['DETECTOR_N'])

        # Amplifier level:
        for ampIdx, amp in enumerate(inputExp.getDetector()):
            ampName = amp.getName()
            ampExp = inputExp.Factory(inputExp, amp.getBBox())
            stats = afwMath.makeStatistics(ampExp.getMaskedImage(),
                                           afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                           statsControl)
            outputStats[f'AMP_NAME_{ampIdx}'] = ampName
            outputStats[f'AMP_MEDIAN_{ampIdx}'] = stats.getValue(afwMath.MEANCLIP)
            outputStats[f'AMP_SIGMA_{ampIdx}'] = stats.getValue(afwMath.STDEVCLIP)
            outputStats[f'AMP_N_{ampIdx}'] = stats.getValue(afwMath.NPOINT)

        return pipeBase.Struct(
            outputStats=outputStats
        )


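# For reference (a summary of the code above, not an additional API): the
# PropertyList written by CpFlatMeasureTask carries the per-detector keys
# DETECTOR_MEDIAN, DETECTOR_SIGMA, and DETECTOR_N, plus the per-amplifier keys
# AMP_NAME_{i}, AMP_MEDIAN_{i}, AMP_SIGMA_{i}, and AMP_N_{i}, indexed by
# amplifier position.  CpFlatNormalizationTask below consumes the *_MEDIAN and
# *_N entries to build its background matrix; the *_SIGMA entries are not read
# by the normalization step in this file.
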
class CpFlatNormalizationConnections(pipeBase.PipelineTaskConnections,
                                     dimensions=("instrument", "physical_filter")):
    inputMDs = cT.Input(
        name="cpFlatProc_metadata",
        doc="Input metadata for each exposure/detector in the input set.",
        storageClass="PropertyList",
        dimensions=("instrument", "physical_filter", "detector", "exposure"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Input camera to use for gain lookup.",
        storageClass="Camera",
        dimensions=("instrument",),
        lookupFunction=lookupStaticCalibration,
        isCalibration=True,
    )

    outputScales = cT.Output(
        name="cpFlatNormScales",
        doc="Output combined proposed calibration.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
    )


class CpFlatNormalizationTaskConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=CpFlatNormalizationConnections):
    level = pexConfig.ChoiceField(
        dtype=str,
        doc="Which level to apply normalizations.",
        default='DETECTOR',
        allowed={
            'DETECTOR': "Correct using full detector statistics.",
            'AMP': "Correct using individual amplifiers.",
        },
    )
    scaleMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of iterations to use in scale solver.",
        default=10,
    )

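# Illustrative config override (a sketch, not part of this module's API): a
# pipeline config override file or an interactive session could select
# per-amplifier normalization and allow more solver iterations through the
# fields defined above, e.g.
#
#     config.level = "AMP"
#     config.scaleMaxIter = 20
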
class CpFlatNormalizationTask(pipeBase.PipelineTask,
                              pipeBase.CmdLineTask):
    """Rescale merged flat frames to remove unequal screen illumination.
    """
    ConfigClass = CpFlatNormalizationTaskConfig
    _DefaultName = "cpFlatNorm"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions of the inputs for generating
        # output scales.
        dimensions = [exp.dataId.byName() for exp in inputRefs.inputMDs]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputMDs, inputDims, camera):
        """Normalize FLAT exposures to a consistent level.

        Parameters
        ----------
        inputMDs : `list` [`lsst.daf.base.PropertyList`]
            Amplifier-level metadata used to construct scales.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry, used to iterate over detectors and amplifiers.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Result struct containing:

            ``outputScales``
                Dictionary of scales, indexed by detector (`int`),
                amplifier name (`str`), and exposure (`int`)
                (`dict` [`dict` [`dict` [`float`]]]).

        Raises
        ------
        KeyError
            Raised if the input dimensions do not contain detector and
            exposure, or if the metadata does not contain the expected
            statistic entry.
        """
        expSet = sorted(set([d['exposure'] for d in inputDims]))
        detSet = sorted(set([d['detector'] for d in inputDims]))

        expMap = {exposureId: idx for idx, exposureId in enumerate(expSet)}
        detMap = {detectorId: idx for idx, detectorId in enumerate(detSet)}

        nExp = len(expSet)
        nDet = len(detSet)
        if self.config.level == 'DETECTOR':
            bgMatrix = np.zeros((nDet, nExp))
            bgCounts = np.ones((nDet, nExp))
        elif self.config.level == 'AMP':
            nAmp = len(camera[detSet[0]])
            bgMatrix = np.zeros((nDet * nAmp, nExp))
            bgCounts = np.ones((nDet * nAmp, nExp))

        for inMetadata, inDimensions in zip(inputMDs, inputDims):
            try:
                exposureId = inDimensions['exposure']
                detectorId = inDimensions['detector']
            except Exception as e:
                raise KeyError("Cannot find expected dimensions in %s" % (inDimensions, )) from e

            if self.config.level == 'DETECTOR':
                detIdx = detMap[detectorId]
                expIdx = expMap[exposureId]
                try:
                    value = inMetadata.get('DETECTOR_MEDIAN')
                    count = inMetadata.get('DETECTOR_N')
                except Exception as e:
                    raise KeyError("Cannot read expected metadata string.") from e

                if np.isfinite(value):
                    bgMatrix[detIdx][expIdx] = value
                    bgCounts[detIdx][expIdx] = count
                else:
                    bgMatrix[detIdx][expIdx] = np.nan
                    bgCounts[detIdx][expIdx] = 1

            elif self.config.level == 'AMP':
                detector = camera[detectorId]
                nAmp = len(detector)

                detIdx = detMap[detectorId] * nAmp
                expIdx = expMap[exposureId]

                for ampIdx, amp in enumerate(detector):
                    try:
                        value = inMetadata.get(f'AMP_MEDIAN_{ampIdx}')
                        count = inMetadata.get(f'AMP_N_{ampIdx}')
                    except Exception as e:
                        raise KeyError("Cannot read expected metadata string.") from e

                    detAmpIdx = detIdx + ampIdx
                    if np.isfinite(value):
                        bgMatrix[detAmpIdx][expIdx] = value
                        bgCounts[detAmpIdx][expIdx] = count
                    else:
                        bgMatrix[detAmpIdx][expIdx] = np.nan
                        bgCounts[detAmpIdx][expIdx] = 1

        scaleResult = self.measureScales(bgMatrix, bgCounts, iterations=self.config.scaleMaxIter)
        expScales = scaleResult.expScales
        detScales = scaleResult.detScales

        outputScales = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))))

        # Note that the enumerated "detIdx"/"expIdx" here index the
        # "detScales" and "expScales" arrays.
        if self.config.level == 'DETECTOR':
            for detIdx, det in enumerate(detSet):
                for amp in camera[det]:
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()
                outputScales['detScale'][det] = detScales[detIdx].tolist()
        elif self.config.level == 'AMP':
            for detIdx, det in enumerate(detSet):
                for ampIdx, amp in enumerate(camera[det]):
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()
                    # detScales is flattened detector-major, amplifier-minor,
                    # matching the indexing used when bgMatrix was filled above.
                    detAmpIdx = detIdx * len(camera[det]) + ampIdx
                    outputScales['detScale'][det][amp.getName()] = detScales[detAmpIdx].tolist()

        return pipeBase.Struct(
            outputScales=ddict2dict(outputScales),
        )

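    # Shape of the nested dictionary serialized above (illustrative only; the
    # keys are those written in the loops of ``run``):
    #
    #     outputScales = {
    #         'expScale': {detId: {ampName: {expId: float}}},
    #         'detScale': {detId: float},             # level == 'DETECTOR'
    #         # or
    #         'detScale': {detId: {ampName: float}},  # level == 'AMP'
    #     }
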
    def measureScales(self, bgMatrix, bgCounts=None, iterations=10):
        """Convert backgrounds to exposure and detector components.

        Parameters
        ----------
        bgMatrix : `np.ndarray`, (nDetectors, nExposures)
            Input backgrounds indexed by detector (axis=0) and
            exposure (axis=1).
        bgCounts : `np.ndarray`, (nDetectors, nExposures), optional
            Input pixel counts used in measuring bgMatrix, indexed
            identically.
        iterations : `int`, optional
            Number of iterations to use in the decomposition.

        Returns
        -------
        scaleResult : `lsst.pipe.base.Struct`
            Result struct containing fields:

            ``expScales``
                Output E vector of exposure level scalings
                (`np.array`, (nExposures)).
            ``detScales``
                Output G vector of detector level scalings
                (`np.array`, (nDetectors)).
            ``bgModel``
                Expected model bgMatrix values, calculated from E and G
                (`np.ndarray`, (nDetectors, nExposures)).

        Notes
        -----
        The set of background measurements B[detector, exposure] of
        flat frame data should be well described by the outer product
        of two vectors, E[exposure] and G[detector].  The E vector
        represents the total flux incident on the focal plane.  In a
        perfect camera, this is simply the sum along the columns of B
        (np.sum(B, axis=0)).

        However, this simple model ignores differences in detector
        gains, the vignetting of the detectors, and the illumination
        pattern of the source lamp.  The G vector describes these
        detector-dependent differences, which should be identical over
        different exposures.  For a perfect lamp of unit total
        intensity, this is simply the sum along the rows of B
        (np.sum(B, axis=1)).  This algorithm divides G by the total
        flux level, to provide the relative (not absolute) scales
        between detectors.

        The algorithm here, taken from pipe_drivers/constructCalibs.py
        and originally from Eugene Magnier/PanSTARRS [1]_, iteratively
        solves this decomposition starting from initial "perfect" E
        and G vectors.  The operation is performed in log space to
        reduce the multiplies and divides to linear additions and
        subtractions.  A small synthetic demonstration of the
        decomposition is sketched at the end of this file.

        References
        ----------
        .. [1] https://svn.pan-starrs.ifa.hawaii.edu/trac/ipp/browser/trunk/psModules/src/detrend/pmFlatNormalize.c  # noqa: E501
        """
        numExps = bgMatrix.shape[1]
        numChips = bgMatrix.shape[0]
        if bgCounts is None:
            bgCounts = np.ones_like(bgMatrix)

        logMeas = np.log(bgMatrix)
        logMeas = np.ma.masked_array(logMeas, ~np.isfinite(logMeas))
        logG = np.zeros(numChips)
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp]) for iexp in range(numExps)])

        for iter in range(iterations):
            logG = np.array([np.average(logMeas[ichip, :] - logE,
                                        weights=bgCounts[ichip, :]) for ichip in range(numChips)])

            bad = np.isnan(logG)
            if np.any(bad):
                logG[bad] = logG[~bad].mean()

            logE = np.array([np.average(logMeas[:, iexp] - logG,
                                        weights=bgCounts[:, iexp]) for iexp in range(numExps)])
            fluxLevel = np.average(np.exp(logG), weights=np.sum(bgCounts, axis=1))

            logG -= np.log(fluxLevel)
            self.log.debug(f"ITER {iter}: Flux: {fluxLevel}")
            self.log.debug(f"Exps: {np.exp(logE)}")
            self.log.debug(f"{np.mean(logG)}")

        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp]) for iexp in range(numExps)])

        bgModel = np.exp(logE[np.newaxis, :] - logG[:, np.newaxis])
        return pipeBase.Struct(
            expScales=np.exp(logE),
            detScales=np.exp(logG),
            bgModel=bgModel,
        )
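

# ---------------------------------------------------------------------------
# Minimal demonstration sketch (not part of the pipeline API): it exercises
# ``measureScales`` on a synthetic, noise-free background matrix built as the
# outer product B[det, exp] = G[det] * E[exp] described in the Notes above.
# The sizes and random values are arbitrary assumptions chosen for
# illustration, and the task is assumed to be constructible with its default
# configuration.  Because of the relative import above, run it inside a stack
# environment as a module (the assumed path would be
# ``python -m lsst.cp.pipe.cpFlatNormTask``).
if __name__ == "__main__":
    rng = np.random.default_rng(1234)
    nDet, nExp = 5, 8
    trueG = rng.uniform(0.7, 1.3, size=nDet)       # detector-dependent response
    trueE = rng.uniform(500.0, 2000.0, size=nExp)  # per-exposure flux level

    # Noise-free synthetic measurements: a pure outer product.
    bgMatrix = np.outer(trueG, trueE)

    task = CpFlatNormalizationTask()
    result = task.measureScales(bgMatrix, iterations=10)

    # The recovered detector scales are relative, normalized by the
    # (count-weighted) mean detector response.
    print("recovered detScales:", result.detScales)
    print("expected ratios:    ", trueG / np.average(trueG))
    print("recovered expScales:", result.expScales)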