22 from collections
import defaultdict
28 import lsst.pipe.base.connectionTypes
as cT
32 from ._lookupStaticCalibration
import lookupStaticCalibration
# Public API of this module.
__all__ = [
    "CpFlatMeasureTask",
    "CpFlatMeasureTaskConfig",
    "CpFlatNormalizationTask",
    "CpFlatNormalizationTaskConfig",
]
class CpFlatMeasureConnections(pipeBase.PipelineTaskConnections,
                               dimensions=("instrument", "exposure", "detector")):
    """Connections for CpFlatMeasureTask: per-exposure/per-detector I/O."""

    # NOTE(review): the ``name=`` dataset-type values for both connections
    # were lost in transcription; the values below are placeholders —
    # confirm against the pipeline definition YAML before relying on them.
    inputExp = cT.Input(
        name="cpFlatProc",  # TODO confirm dataset type name
        doc="Input exposure to measure statistics from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
    )
    outputStats = cT.Output(
        name="cpFlatStatistics",  # TODO confirm dataset type name
        doc="Output statistics to write.",
        storageClass="PropertyList",
        dimensions=("instrument", "exposure", "detector"),
    )
class CpFlatMeasureTaskConfig(pipeBase.PipelineTaskConfig,
                              pipelineConnections=CpFlatMeasureConnections):
    """Configuration for flat statistics measurement."""

    # NOTE(review): the ``dtype=`` and ``default=`` entries for the scalar
    # fields below were lost in transcription; values chosen here are
    # conventional — confirm against the project history.
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['DETECTED', 'BAD', 'NO_DATA'],
    )
    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Mask vignetted regions?",
        default=True,  # TODO confirm default
    )
    numSigmaClip = pexConfig.Field(
        dtype=float,
        doc="Rejection threshold (sigma) for statistics clipping.",
        default=3.0,  # TODO confirm default
    )
    clipMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of clipping iterations to apply.",
        default=3,  # TODO confirm default
    )
class CpFlatMeasureTask(pipeBase.PipelineTask,
                        pipeBase.CmdLineTask):
    """Apply extra masking and measure image statistics."""

    ConfigClass = CpFlatMeasureTaskConfig
    _DefaultName = "cpFlatMeasure"

    def run(self, inputExp):
        """Mask ISR processed FLAT exposures to ensure consistent statistics.

        Parameters
        ----------
        inputExp : `lsst.afw.image.Exposure`
            Post-ISR processed exposure to measure.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputStats``
                List containing the statistics
                (`lsst.daf.base.PropertyList`).
        """
        if self.config.doVignette:
            # Flag vignetted pixels in the mask, but leave pixel values alone.
            VignetteExposure(inputExp, doUpdateMask=True, doSetValue=False, log=self.log)

        mask = inputExp.getMask()
        maskVal = mask.getPlaneBitMask(self.config.maskNameList)
        # Sigma-clipped statistics, excluding every pixel flagged in
        # maskNameList.
        statsControl = afwMath.StatisticsControl(self.config.numSigmaClip,
                                                 self.config.clipMaxIter,
                                                 maskVal)
        statsControl.setAndMask(maskVal)

        outputStats = PropertyList()

        # Full-detector statistics.  NOTE: the *_MEDIAN keys store the
        # clipped mean (MEANCLIP), matching the original key naming.
        stats = afwMath.makeStatistics(inputExp.getMaskedImage(),
                                       afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                       statsControl)
        outputStats['DETECTOR_MEDIAN'] = stats.getValue(afwMath.MEANCLIP)
        outputStats['DETECTOR_SIGMA'] = stats.getValue(afwMath.STDEVCLIP)
        outputStats['DETECTOR_N'] = stats.getValue(afwMath.NPOINT)
        self.log.info("Stats: median=%f sigma=%f n=%d",
                      outputStats['DETECTOR_MEDIAN'],
                      outputStats['DETECTOR_SIGMA'],
                      outputStats['DETECTOR_N'])

        # Per-amplifier statistics, keyed by amplifier index.
        for ampIdx, amp in enumerate(inputExp.getDetector()):
            ampName = amp.getName()
            ampExp = inputExp.Factory(inputExp, amp.getBBox())
            stats = afwMath.makeStatistics(ampExp.getMaskedImage(),
                                           afwMath.MEANCLIP | afwMath.STDEVCLIP | afwMath.NPOINT,
                                           statsControl)
            outputStats[f'AMP_NAME_{ampIdx}'] = ampName
            outputStats[f'AMP_MEDIAN_{ampIdx}'] = stats.getValue(afwMath.MEANCLIP)
            outputStats[f'AMP_SIGMA_{ampIdx}'] = stats.getValue(afwMath.STDEVCLIP)
            outputStats[f'AMP_N_{ampIdx}'] = stats.getValue(afwMath.NPOINT)

        return pipeBase.Struct(
            outputStats=outputStats,
        )
class CpFlatNormalizationConnections(pipeBase.PipelineTaskConnections,
                                     dimensions=("instrument", "physical_filter")):
    """Connections for CpFlatNormalizationTask."""

    inputMDs = cT.Input(
        name="cpFlatProc_metadata",
        doc="Input metadata for each visit/detector in input set.",
        storageClass="PropertyList",
        dimensions=("instrument", "physical_filter", "detector", "exposure"),
        # NOTE(review): runQuantum iterates inputRefs.inputMDs, so this
        # connection must gather many refs; the flag below was lost in
        # transcription — confirm.
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",  # TODO confirm dataset name; lost in transcription
        doc="Input camera to use for gain lookup.",
        storageClass="Camera",
        dimensions=("instrument",),
        lookupFunction=lookupStaticCalibration,
        isCalibration=True,  # TODO confirm; lost in transcription
    )
    outputScales = cT.Output(
        name="cpFlatNormScales",
        doc="Output combined proposed calibration.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
    )
class CpFlatNormalizationTaskConfig(pipeBase.PipelineTaskConfig,
                                    pipelineConnections=CpFlatNormalizationConnections):
    """Configuration for flat normalization."""

    level = pexConfig.ChoiceField(
        dtype=str,
        doc="Which level to apply normalizations.",
        default='DETECTOR',  # TODO confirm default; lost in transcription
        allowed={
            'DETECTOR': "Correct using full detector statistics.",
            'AMP': "Correct using individual amplifiers.",
        },
    )
    scaleMaxIter = pexConfig.Field(
        dtype=int,
        doc="Max number of iterations to use in scale solver.",
        # TODO confirm default; measureScales itself defaults iterations=10.
        default=10,
    )
class CpFlatNormalizationTask(pipeBase.PipelineTask,
                              pipeBase.CmdLineTask):
    """Rescale merged flat frames to remove unequal screen illumination."""

    ConfigClass = CpFlatNormalizationTaskConfig
    _DefaultName = "cpFlatNorm"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        # Pass the exposure/detector id values alongside the metadata so
        # run() can index the background matrix.
        dimensions = [exp.dataId.byName() for exp in inputRefs.inputMDs]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputMDs, inputDims, camera):
        """Normalize FLAT exposures to a consistent level.

        Parameters
        ----------
        inputMDs : `list` [`lsst.daf.base.PropertyList`]
            Amplifier-level metadata used to construct scales.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputScales``
                Dictionary of scales, indexed by detector (`int`),
                amplifier (`int`), and exposure (`int`)
                (`dict` [`dict` [`dict` [`float`]]]).

        Raises
        ------
        KeyError
            Raised if the input dimensions do not contain detector and
            exposure, or if the metadata does not contain the expected
            statistic entries.
        """
        expSet = sorted(set(d['exposure'] for d in inputDims))
        detSet = sorted(set(d['detector'] for d in inputDims))

        nExp = len(expSet)
        nDet = len(detSet)

        # Map exposure/detector ids onto contiguous matrix indices.
        expMap = {exposureId: idx for idx, exposureId in enumerate(expSet)}
        detMap = {detectorId: idx for idx, detectorId in enumerate(detSet)}

        if self.config.level == 'DETECTOR':
            bgMatrix = np.zeros((nDet, nExp))
            bgCounts = np.ones((nDet, nExp))
        elif self.config.level == 'AMP':
            # Assumes every detector has the same amplifier count as the
            # first one in detSet.
            nAmp = len(camera[detSet[0]])
            bgMatrix = np.zeros((nDet * nAmp, nExp))
            bgCounts = np.ones((nDet * nAmp, nExp))

        # Fill the background matrix from the per-exposure metadata.
        for inMetadata, inDimensions in zip(inputMDs, inputDims):
            try:
                exposureId = inDimensions['exposure']
                detectorId = inDimensions['detector']
            except Exception as e:
                raise KeyError("Cannot find expected dimensions in %s" % (inDimensions, )) from e

            if self.config.level == 'DETECTOR':
                detIdx = detMap[detectorId]
                expIdx = expMap[exposureId]
                try:
                    value = inMetadata.get('DETECTOR_MEDIAN')
                    count = inMetadata.get('DETECTOR_N')
                except Exception as e:
                    raise KeyError("Cannot read expected metadata string.") from e

                if np.isfinite(value):
                    bgMatrix[detIdx][expIdx] = value
                    bgCounts[detIdx][expIdx] = count
                else:
                    bgMatrix[detIdx][expIdx] = np.nan
                    bgCounts[detIdx][expIdx] = 1
            elif self.config.level == 'AMP':
                detector = camera[detectorId]
                nAmp = len(detector)
                # Row index of this detector's first amplifier.
                detIdx = detMap[detectorId] * nAmp
                expIdx = expMap[exposureId]

                for ampIdx, amp in enumerate(detector):
                    try:
                        value = inMetadata.get(f'AMP_MEDIAN_{ampIdx}')
                        count = inMetadata.get(f'AMP_N_{ampIdx}')
                    except Exception as e:
                        raise KeyError("cannot read expected metadata string.") from e

                    detAmpIdx = detIdx + ampIdx
                    if np.isfinite(value):
                        bgMatrix[detAmpIdx][expIdx] = value
                        bgCounts[detAmpIdx][expIdx] = count
                    else:
                        bgMatrix[detAmpIdx][expIdx] = np.nan
                        # BUG FIX: the original assigned bgMatrix twice here;
                        # the sentinel count belongs in bgCounts, mirroring
                        # the DETECTOR branch above.
                        bgCounts[detAmpIdx][expIdx] = 1

        scaleResult = self.measureScales(bgMatrix, bgCounts, iterations=self.config.scaleMaxIter)
        expScales = scaleResult.expScales
        detScales = scaleResult.detScales

        outputScales = defaultdict(
            lambda: defaultdict(
                lambda: defaultdict(
                    lambda: defaultdict(float))))

        if self.config.level == 'DETECTOR':
            for detIdx, det in enumerate(detSet):
                for amp in camera[det]:
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()
                outputScales['detScale'][det] = detScales[detIdx].tolist()
        elif self.config.level == 'AMP':
            for detIdx, det in enumerate(detSet):
                for ampIdx, amp in enumerate(camera[det]):
                    for expIdx, exp in enumerate(expSet):
                        outputScales['expScale'][det][amp.getName()][exp] = expScales[expIdx].tolist()
                    # BUG FIX: the original read detScales[detIdx + ampIdx],
                    # which does not match the detIdx * nAmp + ampIdx layout
                    # used when filling bgMatrix above.
                    detAmpIdx = detIdx * nAmp + ampIdx
                    outputScales['detScale'][det][amp.getName()] = detScales[detAmpIdx].tolist()

        return pipeBase.Struct(
            outputScales=outputScales,
        )

    def measureScales(self, bgMatrix, bgCounts=None, iterations=10):
        """Convert backgrounds to exposure and detector components.

        Parameters
        ----------
        bgMatrix : `np.ndarray`, (nDetectors, nExposures)
            Input backgrounds indexed by detector (axis=0) and
            exposure (axis=1).
        bgCounts : `np.ndarray`, (nDetectors, nExposures), optional
            Input pixel counts used in measuring bgMatrix, indexed
            identically.
        iterations : `int`, optional
            Number of iterations to use in decomposition.

        Returns
        -------
        scaleResult : `lsst.pipe.base.Struct`
            Result struct containing fields:

            ``expScales``
                Output E vector of exposure level scalings
                (`np.array`, (nExposures)).
            ``detScales``
                Output G vector of detector level scalings
                (`np.array`, (nDetectors)).
            ``bgModel``
                Expected model bgMatrix values, calculated from E and G
                (`np.ndarray`, (nDetectors, nExposures)).

        Notes
        -----
        The set of background measurements B[exposure, detector] of
        flat frame data should be defined by a "Cartesian" product of
        two vectors, E[exposure] and G[detector].  The E vector
        represents the total flux incident on the focal plane.  In a
        perfect camera, this is simply the sum along the columns of B
        (np.sum(B, axis=0)).

        However, this simple model ignores differences in detector
        gains, the vignetting of the detectors, and the illumination
        pattern of the source lamp.  The G vector describes these
        detector dependent differences, which should be identical over
        different exposures.  For a perfect lamp of unit total
        intensity, this is simply the sum along the rows of B
        (np.sum(B, axis=1)).  This algorithm divides G by the total
        flux level, to provide the relative (not absolute) scales
        for each detector.

        The algorithm here, from pipe_drivers/constructCalibs.py and
        from there from Eugene Magnier/PanSTARRS [1]_, attempts to
        iteratively solve this decomposition from initial "perfect" E
        and G vectors.  The operation is performed in log space to
        reduce the multiply and divides to linear additions and
        subtractions.

        References
        ----------
        .. [1] https://svn.pan-starrs.ifa.hawaii.edu/trac/ipp/browser/trunk/psModules/src/detrend/pmFlatNormalize.c  # noqa: E501
        """
        numExps = bgMatrix.shape[1]
        numChips = bgMatrix.shape[0]
        if bgCounts is None:
            bgCounts = np.ones_like(bgMatrix)

        # Work in log space; mask non-finite entries so they do not
        # contribute to the weighted averages.
        logMeas = np.log(bgMatrix)
        logMeas = np.ma.masked_array(logMeas, ~np.isfinite(logMeas))
        logG = np.zeros(numChips)
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])
        # Renamed the loop variable from ``iter`` to avoid shadowing the
        # builtin.
        for iteration in range(iterations):
            logG = np.array([np.average(logMeas[ichip, :] - logE,
                                        weights=bgCounts[ichip, :])
                             for ichip in range(numChips)])

            # Replace undefined detector scales with the mean of the
            # well-measured ones.  (Guard reconstructed — the original
            # lines defining ``bad`` were lost in transcription; confirm.)
            bad = np.isnan(logG)
            if np.any(bad):
                logG[bad] = logG[~bad].mean()

            logE = np.array([np.average(logMeas[:, iexp] - logG,
                                        weights=bgCounts[:, iexp])
                             for iexp in range(numExps)])

            fluxLevel = np.average(np.exp(logG), weights=np.sum(bgCounts, axis=1))
            # Normalize G so it holds relative, not absolute, scales.
            logG -= np.log(fluxLevel)

            # Lazy %-style args instead of eager f-strings, so formatting
            # only happens when DEBUG is enabled.
            self.log.debug("ITER %d: Flux: %s", iteration, fluxLevel)
            self.log.debug("Exps: %s", np.exp(logE))
            self.log.debug("%s", np.mean(logG))

        # Final E solve against the normalized G.
        logE = np.array([np.average(logMeas[:, iexp] - logG,
                                    weights=bgCounts[:, iexp])
                         for iexp in range(numExps)])

        # NOTE(review): the model combines E and G with a subtraction in
        # log space (i.e. E/G), while the fit above solves
        # logMeas ~ logE + logG — confirm the intended sign.
        bgModel = np.exp(logE[np.newaxis, :] - logG[:, np.newaxis])
        return pipeBase.Struct(
            expScales=np.exp(logE),
            detScales=np.exp(logG),
            bgModel=bgModel,
        )
Pass parameters to a Statistics object.
def measureScales(self, bgMatrix, bgCounts=None, iterations=10)
def run(self, inputMDs, inputDims, camera)
def runQuantum(self, butlerQC, inputRefs, outputRefs)
Class for storing ordered metadata with comments.
daf::base::PropertySet * set
Statistics makeStatistics(lsst::afw::image::Image< Pixel > const &img, lsst::afw::image::Mask< image::MaskPixel > const &msk, int const flags, StatisticsControl const &sctrl=StatisticsControl())
Handle a watered-down front-end to the constructor (no variance)
def VignetteExposure(exposure, polygon=None, doUpdateMask=True, maskPlane="NO_DATA", doSetValue=False, vignetteValue=0.0, log=None)