import matplotlib.pyplot as plt
from collections import Counter

from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
from scipy.optimize import least_squares

from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
from .linearity import LinearitySolveTask
from .photodiode import getBOTphotodiodeData

__all__ = ['MeasurePhotonTransferCurveTask', 'MeasurePhotonTransferCurveTaskConfig']
48 """Config class for photon transfer curve measurement task"""
49 ccdKey = pexConfig.Field(
51 doc=
"The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
54 ptcFitType = pexConfig.ChoiceField(
56 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
59 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
60 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
61 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
64 maximumRangeCovariancesAstier = pexConfig.Field(
66 doc=
"Maximum range of covariances as in Astier+19",
69 covAstierRealSpace = pexConfig.Field(
71 doc=
"Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
74 polynomialFitDegree = pexConfig.Field(
76 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
79 linearity = pexConfig.ConfigurableField(
80 target=LinearitySolveTask,
81 doc=
"Task to solve the linearity."
84 doCreateLinearizer = pexConfig.Field(
86 doc=
"Calculate non-linearity and persist linearizer?",
90 binSize = pexConfig.Field(
92 doc=
"Bin the image by this factor in both dimensions.",
minMeanSignal = pexConfig.DictField(
    doc="Minimum values (inclusive) of mean signal (in ADU) above which to consider, per amp."
        " The same cut is applied to all amps if this dictionary is of the form"
        " {'ALL_AMPS': value}",
    default={'ALL_AMPS': 0.0},
)
maxMeanSignal = pexConfig.DictField(
    doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
        " The same cut is applied to all amps if this dictionary is of the form"
        " {'ALL_AMPS': value}",
    default={'ALL_AMPS': 1e6},
)
initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
    doc="Initially exclude data points with a variance that are more than a factor of this from being"
        " linear in the positive direction, from the PTC fit. Note that these points will also be"
        " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
        " to allow an accurate determination of the sigmas for said iterative fit.",
)
initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
    doc="Initially exclude data points with a variance that are more than a factor of this from being"
        " linear in the negative direction, from the PTC fit. Note that these points will also be"
        " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
        " to allow an accurate determination of the sigmas for said iterative fit.",
)
minMeanRatioTest = pexConfig.Field(
    doc="In the initial test to screen out bad points with a ratio test, points with low"
        " flux can get inadvertently screened. This test only screens out points with flux"
        " above this value.",
)
minVarPivotSearch = pexConfig.Field(
    doc="The code looks for a pivot signal point after which the variance starts decreasing at"
        " high flux, in order to exclude them from the PTC model fit. However, sometimes at low"
        " fluxes the variance decreases slightly. Set this variable to the variance value, in"
        " ADU^2, after which the pivot should be sought.",
)
sigmaCutPtcOutliers = pexConfig.Field(
    doc="Sigma cut for outlier rejection in PTC.",
)
maskNameList = pexConfig.ListField(
    doc="Mask list to exclude from statistics calculations.",
    default=['SUSPECT', 'BAD', 'NO_DATA'],
)
nSigmaClipPtc = pexConfig.Field(
    doc="Sigma cut for afwMath.StatisticsControl()",
)
nIterSigmaClipPtc = pexConfig.Field(
    doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
)
minNumberGoodPixelsForFft = pexConfig.Field(
    doc="Minimum number of acceptable good pixels per amp to calculate the covariances via FFT.",
)
maxIterationsPtcOutliers = pexConfig.Field(
    doc="Maximum number of iterations for outlier rejection in PTC.",
)
doFitBootstrap = pexConfig.Field(
    doc="Use bootstrap for the PTC fit parameters and errors?",
)
doPhotodiode = pexConfig.Field(
    doc="Apply a correction based on the photodiode readings if available?",
)
photodiodeDataPath = pexConfig.Field(
    doc="Gen2 only: path to locate the photodiode data files.",
)
instrumentName = pexConfig.Field(
    doc="Instrument name.",
)
199 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
201 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
202 used in astronomical detectors characterization (e.g., Janesick 2001,
203 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
204 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
205 times. The difference image of each pair is formed to eliminate fixed pattern noise,
206 and then the variance of the difference image and the mean of the average image
207 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
208 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
209 arXiv:1905.08677) can be fitted to the PTC curve. These models include
210 parameters such as the gain (e/DN) and readout noise.
212 Linearizers to correct for signal-chain non-linearity are also calculated.
213 The `Linearizer` class, in general, can support per-amp linearizers, but in this
214 task this is not supported.
216 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
217 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
218 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
Parameters
----------
*args : `list`
    Positional arguments passed to the Task constructor. None used at this time.
**kwargs : `dict`
    Keyword arguments passed on to the Task constructor. None used at this time.
"""
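# Illustrative sketch only (not part of the task API): the "EXPAPPROXIMATION" model of
# Eq. 16 in Astier+19, which `funcAstier` is expected to implement, relates the variance
# of the flat-pair difference image to the mean signal mu roughly as
#     var(mu) = 0.5/(a00*gain**2)*(np.exp(2*a00*mu*gain) - 1) + noise/gain**2
# with parameter vector [a00, gain, noise]: a00 the leading brighter-fatter coefficient,
# gain in e-/ADU, and noise the readout-noise term.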
RunnerClass = DataRefListRunner
ConfigClass = MeasurePhotonTransferCurveTaskConfig
_DefaultName = "measurePhotonTransferCurve"

def __init__(self, *args, **kwargs):
    pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
    self.makeSubtask("linearity")
    plt.interactive(False)
246 """Run the Photon Transfer Curve (PTC) measurement task.
248 For a dataRef (which is each detector here),
249 and given a list of exposure pairs (postISR) at different exposure times,
254 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
255 Data references for exposures for detectors to process.
if len(dataRefList) < 2:
    raise RuntimeError("Insufficient inputs to combine.")

dataRef = dataRefList[0]

detNum = dataRef.dataId[self.config.ccdKey]
camera = dataRef.get('camera')
detector = camera[dataRef.dataId[self.config.ccdKey]]

amps = detector.getAmplifiers()
ampNames = [amp.getName() for amp in amps]
expIds = []
for (exp1, exp2) in expPairs.values():
    id1 = exp1.getInfo().getVisitInfo().getExposureId()
    id2 = exp2.getInfo().getVisitInfo().getExposureId()
    expIds.append((id1, id2))
self.log.info(f"Measuring PTC using {expIds} exposures for detector {detector.getId()}")
if self.config.doPhotodiode:
    for (expId1, expId2) in expIds:
        for i, expId in enumerate([expId1, expId2]):
            dataRef.dataId['expId'] = expId//1000
            if self.config.photodiodeDataPath:
                photodiodeData = getBOTphotodiodeData(dataRef, self.config.photodiodeDataPath)
            else:
                photodiodeData = getBOTphotodiodeData(dataRef)
            if photodiodeData is not None:
                charges[i] = photodiodeData.getCharge()
            else:
                charges[i] = np.nan
                self.log.warn(f"No photodiode data found for {expId}")
        for ampName in ampNames:
            datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))
else:
    for ampName in ampNames:
        datasetPtc.photoCharge[ampName] = np.repeat(np.nan, len(expIds))
for ampName in ampNames:
    datasetPtc.inputExpIdPairs[ampName] = expIds
maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
for ampName in ampNames:
    if 'ALL_AMPS' in self.config.maxMeanSignal:
        maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
    elif ampName in self.config.maxMeanSignal:
        maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

    if 'ALL_AMPS' in self.config.minMeanSignal:
        minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
    elif ampName in self.config.minMeanSignal:
        minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
for expTime, (exp1, exp2) in expPairs.items():
    expId1 = exp1.getInfo().getVisitInfo().getExposureId()
    expId2 = exp2.getInfo().getVisitInfo().getExposureId()
    nAmpsNan = 0
    tupleRows = []
    tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
    for ampNumber, amp in enumerate(detector):
        ampName = amp.getName()
        doRealSpace = self.config.covAstierRealSpace
        muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
                                                            covAstierRealSpace=doRealSpace)
        if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
            msg = (f"NaN mean or var, or None cov in amp {ampName} in exposure pair {expId1},"
                   f" {expId2} of detector {detNum}.")
            self.log.warn(msg)
            nAmpsNan += 1
            continue
        if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
            continue

        datasetPtc.rawExpTimes[ampName].append(expTime)
        datasetPtc.rawMeans[ampName].append(muDiff)
        datasetPtc.rawVars[ampName].append(varDiff)

        tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
    if nAmpsNan == len(ampNames):
        msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
        self.log.warn(msg)
        continue
    tupleRecords += tupleRows
covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
for ampName in datasetPtc.ampNames:
    index = np.argsort(datasetPtc.rawMeans[ampName])
    datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
    datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
    datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
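# If the full covariance model (Eq. 20 of Astier+19) was requested, an EXPAPPROXIMATION
# pre-fit is run first: it is only used to derive the per-amp mask of usable exposure
# pairs (expIdMask), which the full-covariance fit then reuses.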
if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
    newDatasetPtc = copy.copy(datasetPtc)
    newDatasetPtc = self.fitPtc(newDatasetPtc, 'EXPAPPROXIMATION')
    for ampName in datasetPtc.ampNames:
        datasetPtc.expIdMask[ampName] = newDatasetPtc.expIdMask[ampName]

    datasetPtc.fitType = "FULLCOVARIANCE"
elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
    datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
detName = detector.getName()
now = datetime.datetime.utcnow()
calibDate = now.strftime("%Y-%m-%d")
butler = dataRef.getButler()

datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)
if self.config.doCreateLinearizer:
    dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
    linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
    linearizer = linearityResults.outputLinearizer

    self.log.info("Writing linearizer:")

    detName = detector.getName()
    now = datetime.datetime.utcnow()
    calibDate = now.strftime("%Y-%m-%d")

    butler.put(linearizer, datasetType='linearizer',
               dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

self.log.info("Writing PTC data.")
butler.put(datasetPtc, datasetType='photonTransferCurveDataset',
           dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

return pipeBase.Struct(exitStatus=0)
421 """Produce a list of flat pairs indexed by exposure time.
425 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
426 Data references for exposures for detectors to process.
430 flatPairs : `dict` [`float`, `lsst.afw.image.exposure.exposure.ExposureF`]
431 Dictionary that groups flat-field exposures that have the same exposure time (seconds).
435 We use the difference of one pair of flat-field images taken at the same exposure time when
436 calculating the PTC to reduce Fixed Pattern Noise. If there are > 2 flat-field images with the
437 same exposure time, the first two are kept and the rest discarded.
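# Hypothetical illustration of the returned structure: {0.5: [flatExpA, flatExpB],
# 1.0: [flatExpC, flatExpD], ...}, keyed by exposure time in seconds, with exactly
# two postISR flats kept per key.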
expDict = {}
flatPairs = {}
for dataRef in dataRefList:
    try:
        tempFlat = dataRef.get("postISRCCD")
    except RuntimeError:
        self.log.warn("postISR exposure could not be retrieved. Ignoring flat.")
        continue
    expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
    expDict.setdefault(expDate, tempFlat)
sortedExps = {k: expDict[k] for k in sorted(expDict)}

for exp in sortedExps:
    tempFlat = sortedExps[exp]
    expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
    listAtExpTime = flatPairs.setdefault(expTime, [])
    if len(listAtExpTime) >= 2:
        self.log.warn(f"Already found 2 exposures at expTime {expTime}. "
                      f"Ignoring exposure {tempFlat.getInfo().getVisitInfo().getExposureId()}")
    else:
        listAtExpTime.append(tempFlat)

keysToDrop = []
for (key, value) in flatPairs.items():
    if len(value) < 2:
        keysToDrop.append(key)

for key in keysToDrop:
    self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure "
                  f"{flatPairs[key][0].getInfo().getVisitInfo().getExposureId()}.")
    flatPairs.pop(key)
sortedFlatPairs = {k: flatPairs[k] for k in sorted(flatPairs)}
return sortedFlatPairs
477 """Fit measured flat covariances to full model in Astier+19.
481 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
482 The dataset containing information such as the means, variances and exposure times.
484 covariancesWithTagsArray : `numpy.recarray`
485 Tuple with at least (mu, cov, var, i, j, npix), where:
486 mu : 0.5*(m1 + m2), where:
487 mu1: mean value of flat1
488 mu2: mean value of flat2
489 cov: covariance value at lag(i, j)
490 var: variance(covariance value at lag(0, 0))
493 npix: number of pixels used for covariance calculation.
497 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
498 This is the same dataset as the input paramter, however, it has been modified
499 to include information such as the fit vectors and the fit parameters. See
500 the class `PhotonTransferCurveDatase`.
covFits, covFitsNoB = fitData(covariancesWithTagsArray,
                              r=self.config.maximumRangeCovariancesAstier,
                              expIdMask=dataset.expIdMask)
510 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
514 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
515 The dataset containing information such as the means, variances and exposure times.
518 Dictionary of CovFit objects, with amp names as keys.
521 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
525 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
526 This is the same dataset as the input paramter, however, it has been modified
527 to include extra information such as the mask 1D array, gains, reoudout noise, measured signal,
528 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
529 See the class `PhotonTransferCurveDatase`.
assert len(covFits) == len(covFitsNoB)

for i, amp in enumerate(dataset.ampNames):
    lenInputTimes = len(dataset.rawExpTimes[amp])
    dataset.ptcFitPars[amp] = np.nan
    dataset.ptcFitParsError[amp] = np.nan
    dataset.ptcFitChiSq[amp] = np.nan
    if (amp in covFits and (covFits[amp].covParams is not None)
            and (covFitsNoB[amp].covParams is not None)):
        fit = covFits[amp]
        fitNoB = covFitsNoB[amp]

        dataset.covariances[amp] = fit.cov
        dataset.covariancesModel[amp] = fit.evalCovModel()
        dataset.covariancesSqrtWeights[amp] = fit.sqrtW
        dataset.aMatrix[amp] = fit.getA()
        dataset.bMatrix[amp] = fit.getB()
        dataset.covariancesNoB[amp] = fitNoB.cov
        dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
        dataset.covariancesSqrtWeightsNoB[amp] = fitNoB.sqrtW
        dataset.aMatrixNoB[amp] = fitNoB.getA()

        (meanVecFinal, varVecFinal, varVecModel,
         wc, varMask) = fit.getFitData(0, 0, divideByMu=False)
        gain = fit.getGain()

        dataset.gain[amp] = gain
        dataset.gainErr[amp] = fit.getGainErr()
        dataset.noise[amp] = np.sqrt(fit.getRon())
        dataset.noiseErr[amp] = fit.getRonErr()
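        # getFitData may return fewer points than the number of input exposure pairs;
        # pad the final vectors back to lenInputTimes with NaNs so that all per-amp
        # arrays in the dataset stay aligned with the original exposure-pair list.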
        padLength = lenInputTimes - len(varVecFinal)
        dataset.finalVars[amp] = np.pad(varVecFinal/(gain**2), (0, padLength),
                                        'constant', constant_values=np.nan)
        dataset.finalModelVars[amp] = np.pad(varVecModel/(gain**2), (0, padLength),
                                             'constant', constant_values=np.nan)
        dataset.finalMeans[amp] = np.pad(meanVecFinal/gain, (0, padLength),
                                         'constant', constant_values=np.nan)
    else:
        matrixSide = self.config.maximumRangeCovariancesAstier
        nanMatrix = np.full((matrixSide, matrixSide), np.nan)
        listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

        dataset.covariances[amp] = listNanMatrix
        dataset.covariancesModel[amp] = listNanMatrix
        dataset.covariancesSqrtWeights[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesNoB[amp] = listNanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

        dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
        dataset.gain[amp] = np.nan
        dataset.gainErr[amp] = np.nan
        dataset.noise[amp] = np.nan
        dataset.noiseErr[amp] = np.nan
        dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
        dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
        dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

return dataset
599 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
601 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
602 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
603 keep one (covariance).
607 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
608 First exposure of flat field pair.
610 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
611 Second exposure of flat field pair.
613 region : `lsst.geom.Box2I`, optional
614 Region of each exposure where to perform the calculations (e.g, an amplifier).
616 covAstierRealSpace : `bool`, optional
617 Should the covariannces in Astier+19 be calculated in real space or via FFT?
618 See Appendix A of Astier+19.
622 mu : `float` or `NaN`
623 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
624 both exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
626 varDiff : `float` or `NaN`
627 Half of the clipped variance of the difference of the regions inthe two input
628 exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
630 covDiffAstier : `list` or `None`
631 List with tuples of the form (dx, dy, var, cov, npix), where:
637 Variance at (dx, dy).
639 Covariance at (dx, dy).
641 Number of pixel pairs used to evaluate var and cov.
642 If either mu1 or m2 are NaN's, the returned value is None.
if region is not None:
    im1Area = exposure1.maskedImage[region]
    im2Area = exposure2.maskedImage[region]
else:
    im1Area = exposure1.maskedImage
    im2Area = exposure2.maskedImage

if self.config.binSize > 1:
    im1Area = afwMath.binImage(im1Area, self.config.binSize)
    im2Area = afwMath.binImage(im2Area, self.config.binSize)

im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                         self.config.nIterSigmaClipPtc,
                                         im1MaskVal)
im1StatsCtrl.setNanSafe(True)
im1StatsCtrl.setAndMask(im1MaskVal)

im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                         self.config.nIterSigmaClipPtc,
                                         im2MaskVal)
im2StatsCtrl.setNanSafe(True)
im2StatsCtrl.setAndMask(im2MaskVal)
if np.isnan(mu1) or np.isnan(mu2):
    self.log.warn(f"Mean of amp in image 1 or 2 is NaN: {mu1}, {mu2}.")
    return np.nan, np.nan, None

temp = im2Area.clone()

diffIm = im1Area.clone()

diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                            self.config.nIterSigmaClipPtc,
                                            diffImMaskVal)
diffImStatsCtrl.setNanSafe(True)
diffImStatsCtrl.setAndMask(diffImMaskVal)

w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
w = w1*w2*wDiff

if np.sum(w) < self.config.minNumberGoodPixelsForFft:
    self.log.warn(f"Number of good points for FFT ({np.sum(w)}) is less than threshold "
                  f"({self.config.minNumberGoodPixelsForFft})")
    return np.nan, np.nan, None

maxRangeCov = self.config.maximumRangeCovariancesAstier
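# Covariances can be measured either directly in real space (slower for large lags) or
# via an FFT-based correlation. For the FFT path below, the array is padded by
# maxRangeCov in each dimension (fftSize presumably rounds up to an FFT-friendly size)
# so that the circular correlation does not wrap measured lags back into the region of
# interest (Astier+19, appendix A).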
if covAstierRealSpace:
    covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
else:
    shapeDiff = diffIm.getImage().getArray().shape
    fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1] + maxRangeCov))
    c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
    covDiffAstier = c.reportCovFft(maxRangeCov)

return mu, varDiff, covDiffAstier
721 """Compute covariances of diffImage in real space.
723 For lags larger than ~25, it is slower than the FFT way.
724 Taken from https://github.com/PierreAstier/bfptc/
728 diffImage : `numpy.array`
729 Image to compute the covariance of.
731 weightImage : `numpy.array`
732 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
735 Last index of the covariance to be computed.
740 List with tuples of the form (dx, dy, var, cov, npix), where:
746 Variance at (dx, dy).
748 Covariance at (dx, dy).
750 Number of pixel pairs used to evaluate var and cov.
outList = []
for dy in range(maxRange + 1):
    for dx in range(0, maxRange + 1):
        cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
        cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
        cov = 0.5*(cov1 + cov2)
        nPix = nPix1 + nPix2
        if (dx == 0 and dy == 0):
            var = cov
        outList.append((dx, dy, var, cov, nPix))
return outList
771 """Compute covariances of diffImage in real space at lag (dx, dy).
773 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
777 diffImage : `numpy.array`
778 Image to compute the covariance of.
780 weightImage : `numpy.array`
781 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
792 Covariance at (dx, dy)
795 Number of pixel pairs used to evaluate var and cov.
(nCols, nRows) = diffImage.shape

if dx < 0:
    (dx, dy) = (-dx, -dy)

if dy >= 0:
    im1 = diffImage[dy:, dx:]
    w1 = weightImage[dy:, dx:]
    im2 = diffImage[:nCols - dy, :nRows - dx]
    w2 = weightImage[:nCols - dy, :nRows - dx]
else:
    im1 = diffImage[:nCols + dy, dx:]
    w1 = weightImage[:nCols + dy, dx:]
    im2 = diffImage[-dy:, :nRows - dx]
    w2 = weightImage[-dy:, :nRows - dx]

wAll = w1*w2
nPix = wAll.sum()
im1TimesW = im1*wAll
s1 = im1TimesW.sum()/nPix
s2 = (im2*wAll).sum()/nPix
p = (im1TimesW*im2).sum()/nPix
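# The covariance at lag (dx, dy) then follows from the weighted moments computed above;
# with the definitions of s1, s2 and p this is expected to reduce to
#     cov = p - s1*s2
# evaluated over the nPix pixel pairs where both weight maps are 1.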
def _initialParsForPolynomial(order):
    pars = np.zeros(order, dtype=float)

def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
    if not len(lowers):
        lowers = [np.NINF for p in initialPars]
    if not len(uppers):
        uppers = [np.inf for p in initialPars]
    return (lowers, uppers)

def _boundsForAstier(initialPars, lowers=[], uppers=[]):
    if not len(lowers):
        lowers = [np.NINF for p in initialPars]
    if not len(uppers):
        uppers = [np.inf for p in initialPars]
    return (lowers, uppers)
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
                          minMeanRatioTest, minVarPivotSearch):
855 """Return a boolean array to mask bad points.
859 means : `numpy.array`
860 Input array with mean signal values.
862 variances : `numpy.array`
863 Input array with variances at each mean value.
865 maxDeviationPositive : `float`
866 Maximum deviation from being constant for the variance/mean
867 ratio, in the positive direction.
869 maxDeviationNegative : `float`
870 Maximum deviation from being constant for the variance/mean
871 ratio, in the negative direction.
873 minMeanRatioTest : `float`
874 Minimum signal value (in ADU) after which to start examining
877 minVarPivotSearch : `float`
878 Minimum variance point (in ADU^2) after which the pivot point
879 wher the variance starts decreasing should be sought.
883 goodPoints : `numpy.array` [`bool`]
884 Boolean array to select good (`True`) and bad (`False`)
889 A linear function has a constant ratio, so find the median
890 value of the ratios, and exclude the points that deviate
891 from that by more than a factor of maxDeviationPositive/negative.
892 Asymmetric deviations are supported as we expect the PTC to turn
893 down as the flux increases, but sometimes it anomalously turns
894 upwards just before turning over, which ruins the fits, so it
895 is wise to be stricter about restricting positive outliers than
898 Too high and points that are so bad that fit will fail will be included
899 Too low and the non-linear points will be excluded, biasing the NL fit.
901 This function also masks points after the variance starts decreasing.
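    # Illustrative sketch with hypothetical numbers: for means = [1000, 2000, 3000, 4000]
    # and variances = [900, 1800, 2700, 1000], the ratios are [0.9, 0.9, 0.9, 0.25] with a
    # median of 0.9, so the last point deviates by 0.25/0.9 - 1 ~ -0.72 and is masked if
    # that falls below -abs(maxDeviationNegative) (assuming all means exceed minMeanRatioTest).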
    assert len(means) == len(variances)
    ratios = [b/a for (a, b) in zip(means, variances)]
    medianRatio = np.nanmedian(ratios)
    ratioDeviations = [0.0 if a < minMeanRatioTest else (r/medianRatio) - 1
                       for (a, r) in zip(means, ratios)]

    maxDeviationPositive = abs(maxDeviationPositive)
    maxDeviationNegative = -1. * abs(maxDeviationNegative)

    goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                           else False for r in ratioDeviations])

    pivot = np.where(np.array(np.diff(variances)) < 0)[0]
    if len(pivot) > 0:
        pivot = [p for p in pivot if variances[p] > minVarPivotSearch]
        if len(pivot) > 0:
            pivot = np.min(pivot)
            goodPoints[pivot + 1:len(goodPoints)] = False

    return goodPoints
def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
    nBad = Counter(array)[0]
    if nBad == 0:
        return array

    if warn:
        msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array == 0)[0]]}"
        self.log.warn(msg)

    array[array == 0] = substituteValue
    return array
943 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
945 Fit the photon transfer curve with either a polynomial of the order
946 specified in the task config, or using the Astier approximation.
948 Sigma clipping is performed iteratively for the fit, as well as an
949 initial clipping of data points that are more than
950 config.initialNonLinearityExclusionThreshold away from lying on a
951 straight line. This other step is necessary because the photon transfer
952 curve turns over catastrophically at very high flux (because saturation
953 drops the variance to ~0) and these far outliers cause the initial fit
954 to fail, meaning the sigma cannot be calculated to perform the
959 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
960 The dataset containing the means, variances and exposure times
963 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
964 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
968 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
969 This is the same dataset as the input paramter, however, it has been modified
970 to include information such as the fit vectors and the fit parameters. See
971 the class `PhotonTransferCurveDatase`.
matrixSide = self.config.maximumRangeCovariancesAstier
nanMatrix = np.empty((matrixSide, matrixSide))
nanMatrix[:] = np.nan

for amp in dataset.ampNames:
    lenInputTimes = len(dataset.rawExpTimes[amp])
    listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
    listNanMatrix[:] = np.nan

    dataset.covariances[amp] = listNanMatrix
    dataset.covariancesModel[amp] = listNanMatrix
    dataset.covariancesSqrtWeights[amp] = listNanMatrix
    dataset.aMatrix[amp] = nanMatrix
    dataset.bMatrix[amp] = nanMatrix
    dataset.covariancesNoB[amp] = listNanMatrix
    dataset.covariancesModelNoB[amp] = listNanMatrix
    dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
    dataset.aMatrixNoB[amp] = nanMatrix

def errFunc(p, x, y):
    return ptcFunc(p, x) - y

sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
for i, ampName in enumerate(dataset.ampNames):
    timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
    meanVecOriginal = np.array(dataset.rawMeans[ampName])
    varVecOriginal = np.array(dataset.rawVars[ampName])

    goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                            self.config.initialNonLinearityExclusionThresholdPositive,
                                            self.config.initialNonLinearityExclusionThresholdNegative,
                                            self.config.minMeanRatioTest,
                                            self.config.minVarPivotSearch)
    if not (goodPoints.any()):
        msg = (f"\nSERIOUS: All points in goodPoints: {goodPoints} are bad."
               f" Setting {ampName} to BAD.")
        self.log.warn(msg)
        self.fillBadAmp(dataset, ptcFitType, ampName)
        continue
    if ptcFitType == 'EXPAPPROXIMATION':
        ptcFunc = funcAstier
        parsIniPtc = [-1e-9, 1.0, 10.]
        bounds = self._boundsForAstier(parsIniPtc,
                                       uppers=[1e-4, 2.5, 2000])
    if ptcFitType == 'POLYNOMIAL':
        ptcFunc = funcPolynomial
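    # Iterative outlier rejection: fit, compute residuals normalized by sqrt(variance)
    # (an approximate per-point sigma for Poisson-dominated data), rebuild the mask from
    # points within sigmaCutPtcOutliers, and refit, up to maxIterationsPtcOutliers times.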
    count = 1
    mask = goodPoints
    while count <= maxIterationsPtcOutliers:
        meanTempVec = meanVecOriginal[mask]
        varTempVec = varVecOriginal[mask]
        res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
        pars = res.x

        sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
        newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
        mask = goodPoints & newMask
        if not (mask.any() and newMask.any()):
            msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                   f"Setting {ampName} to BAD.")
            self.log.warn(msg)
            self.fillBadAmp(dataset, ptcFitType, ampName)
            break
        nDroppedTotal = Counter(mask)[False]
        self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
        count += 1

    assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
    if not (mask.any() and newMask.any()):
        continue
    dataset.expIdMask[ampName] = mask
    meanVecFinal = meanVecOriginal[mask]
    varVecFinal = varVecOriginal[mask]

    if Counter(mask)[False] > 0:
        self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                       f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
    if (len(meanVecFinal) < len(parsIniPtc)):
        msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
               f" parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
        self.log.warn(msg)
        self.fillBadAmp(dataset, ptcFitType, ampName)
        continue
    if self.config.doFitBootstrap:
        parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                            varVecFinal, ptcFunc,
                                                            weightsY=1./np.sqrt(varVecFinal))
    else:
        parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                          varVecFinal, ptcFunc,
                                                          weightsY=1./np.sqrt(varVecFinal))
    dataset.ptcFitPars[ampName] = parsFit
    dataset.ptcFitParsError[ampName] = parsFitErr
    dataset.ptcFitChiSq[ampName] = reducedChiSqPtc

    padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
    dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength),
                                        'constant', constant_values=np.nan)
    dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                             'constant', constant_values=np.nan)
    dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength),
                                         'constant', constant_values=np.nan)
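    # Translate fit parameters into gain and read noise. For EXPAPPROXIMATION the parameter
    # vector is [a00, gain, noise], so the gain is parsFit[1] and the noise term (in e-^2)
    # is converted to electrons with a square root. For POLYNOMIAL, var = c0 + c1*mu + ...,
    # so the gain is 1/c1 (in e-/ADU) and the noise in electrons is sqrt(c0)*gain.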
    if ptcFitType == 'EXPAPPROXIMATION':
        ptcGain = parsFit[1]
        ptcGainErr = parsFitErr[1]
        ptcNoise = np.sqrt(np.fabs(parsFit[2]))
        ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
    if ptcFitType == 'POLYNOMIAL':
        ptcGain = 1./parsFit[1]
        ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
        ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
        ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
    dataset.gain[ampName] = ptcGain
    dataset.gainErr[ampName] = ptcGainErr
    dataset.noise[ampName] = ptcNoise
    dataset.noiseErr[ampName] = ptcNoiseErr

if not len(dataset.ptcFitType) == 0:
    dataset.ptcFitType = ptcFitType
if len(dataset.badAmps) == 0:
    dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

return dataset
1127 """Fill the dataset with NaNs if there are not enough good points.
1131 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
1132 The dataset containing the means, variances and exposure times.
1135 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1136 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
dataset.badAmps.append(ampName)
dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName]))
dataset.gain[ampName] = np.nan
dataset.gainErr[ampName] = np.nan
dataset.noise[ampName] = np.nan
dataset.noiseErr[ampName] = np.nan
dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                               if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                    if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
dataset.ptcFitChiSq[ampName] = np.nan
dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))