# Public API of this module.
__all__ = ['MeasurePhotonTransferCurveTask',
           'MeasurePhotonTransferCurveTaskConfig',
           'PhotonTransferCurveDataset']
28 import matplotlib.pyplot
as plt
30 from matplotlib.backends.backend_pdf
import PdfPages
31 from sqlite3
import OperationalError
32 from collections
import Counter
33 from dataclasses
import dataclass
36 import lsst.pex.config
as pexConfig
39 from .utils
import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
40 checkExpLengthEqual, validateIsrConfig)
41 from scipy.optimize
import leastsq, least_squares
42 import numpy.polynomial.polynomial
as poly
49 """Config class for photon transfer curve measurement task"""
50 isr = pexConfig.ConfigurableField(
52 doc=
"""Task to perform instrumental signature removal.""",
54 isrMandatorySteps = pexConfig.ListField(
56 doc=
"isr operations that must be performed for valid results. Raises if any of these are False.",
57 default=[
'doAssembleCcd']
59 isrForbiddenSteps = pexConfig.ListField(
61 doc=
"isr operations that must NOT be performed for valid results. Raises if any of these are True",
62 default=[
'doFlat',
'doFringe',
'doBrighterFatter',
'doUseOpticsTransmission',
63 'doUseFilterTransmission',
'doUseSensorTransmission',
'doUseAtmosphereTransmission']
65 isrDesirableSteps = pexConfig.ListField(
67 doc=
"isr operations that it is advisable to perform, but are not mission-critical." +
68 " WARNs are logged for any of these found to be False.",
69 default=[
'doBias',
'doDark',
'doCrosstalk',
'doDefect']
71 isrUndesirableSteps = pexConfig.ListField(
73 doc=
"isr operations that it is *not* advisable to perform in the general case, but are not" +
74 " forbidden as some use-cases might warrant them." +
75 " WARNs are logged for any of these found to be True.",
76 default=[
'doLinearize']
78 ccdKey = pexConfig.Field(
80 doc=
"The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
83 makePlots = pexConfig.Field(
85 doc=
"Plot the PTC curves?",
88 ptcFitType = pexConfig.ChoiceField(
90 doc=
"Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
93 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
94 "ASTIERAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16)."
97 polynomialFitDegree = pexConfig.Field(
99 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
102 polynomialFitDegreeNonLinearity = pexConfig.Field(
104 doc=
"Degree of polynomial to fit the meanSignal vs exposureTime curve to produce" +
105 " the table for LinearizeLookupTable.",
108 binSize = pexConfig.Field(
110 doc=
"Bin the image by this factor in both dimensions.",
113 minMeanSignal = pexConfig.Field(
115 doc=
"Minimum value (inclusive) of mean signal (in DN) above which to consider.",
118 maxMeanSignal = pexConfig.Field(
120 doc=
"Maximum value (inclusive) of mean signal (in DN) below which to consider.",
123 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
125 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
126 " linear in the positive direction, from the PTC fit. Note that these points will also be"
127 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
128 " to allow an accurate determination of the sigmas for said iterative fit.",
133 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
135 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
136 " linear in the negative direction, from the PTC fit. Note that these points will also be"
137 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
138 " to allow an accurate determination of the sigmas for said iterative fit.",
143 sigmaCutPtcOutliers = pexConfig.Field(
145 doc=
"Sigma cut for outlier rejection in PTC.",
148 nSigmaClipPtc = pexConfig.Field(
150 doc=
"Sigma cut for afwMath.StatisticsControl()",
153 nIterSigmaClipPtc = pexConfig.Field(
155 doc=
"Number of sigma-clipping iterations for afwMath.StatisticsControl()",
158 maxIterationsPtcOutliers = pexConfig.Field(
160 doc=
"Maximum number of iterations for outlier rejection in PTC.",
163 doFitBootstrap = pexConfig.Field(
165 doc=
"Use bootstrap for the PTC fit parameters and errors?.",
168 linResidualTimeIndex = pexConfig.Field(
170 doc=
"Index position in time array for reference time in linearity residual calculation.",
173 maxAduForLookupTableLinearizer = pexConfig.Field(
175 doc=
"Maximum DN value for the LookupTable linearizer.",
178 instrumentName = pexConfig.Field(
180 doc=
"Instrument name.",
187 """A simple class to hold the output from the
188 `calculateLinearityResidualAndLinearizers` function.
191 polynomialLinearizerCoefficients: list
193 quadraticPolynomialLinearizerCoefficient: float
195 linearizerTableRow: list
197 linearityResidual: list
198 meanSignalVsTimePolyFitPars: list
199 meanSignalVsTimePolyFitParsErr: list
200 fractionalNonLinearityResidual: list
201 meanSignalVsTimePolyFitReducedChiSq: float
205 """A simple class to hold the output data from the PTC task.
207 The dataset is made up of a dictionary for each item, keyed by the
208 amplifiers' names, which much be supplied at construction time.
210 New items cannot be added to the class to save accidentally saving to the
211 wrong property, and the class can be frozen if desired.
213 inputVisitPairs records the visits used to produce the data.
214 When fitPtcAndNonLinearity() is run, a mask is built up, which is by definition
215 always the same length as inputVisitPairs, rawExpTimes, rawMeans
216 and rawVars, and is a list of bools, which are incrementally set to False
217 as points are discarded from the fits.
219 PTC fit parameters for polynomials are stored in a list in ascending order
220 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
221 with the length of the list corresponding to the order of the polynomial
228 self.__dict__[
"ampNames"] = ampNames
229 self.__dict__[
"badAmps"] = []
232 self.__dict__[
"inputVisitPairs"] = {ampName: []
for ampName
in ampNames}
233 self.__dict__[
"visitMask"] = {ampName: []
for ampName
in ampNames}
234 self.__dict__[
"rawExpTimes"] = {ampName: []
for ampName
in ampNames}
235 self.__dict__[
"rawMeans"] = {ampName: []
for ampName
in ampNames}
236 self.__dict__[
"rawVars"] = {ampName: []
for ampName
in ampNames}
239 self.__dict__[
"ptcFitType"] = {ampName:
"" for ampName
in ampNames}
240 self.__dict__[
"ptcFitPars"] = {ampName: []
for ampName
in ampNames}
241 self.__dict__[
"ptcFitParsError"] = {ampName: []
for ampName
in ampNames}
242 self.__dict__[
"ptcFitReducedChiSquared"] = {ampName: []
for ampName
in ampNames}
243 self.__dict__[
"nonLinearity"] = {ampName: []
for ampName
in ampNames}
244 self.__dict__[
"nonLinearityError"] = {ampName: []
for ampName
in ampNames}
245 self.__dict__[
"nonLinearityResiduals"] = {ampName: []
for ampName
in ampNames}
246 self.__dict__[
"fractionalNonLinearityResiduals"] = {ampName: []
for ampName
in ampNames}
247 self.__dict__[
"nonLinearityReducedChiSquared"] = {ampName: []
for ampName
in ampNames}
250 self.__dict__[
"gain"] = {ampName: -1.
for ampName
in ampNames}
251 self.__dict__[
"gainErr"] = {ampName: -1.
for ampName
in ampNames}
252 self.__dict__[
"noise"] = {ampName: -1.
for ampName
in ampNames}
253 self.__dict__[
"noiseErr"] = {ampName: -1.
for ampName
in ampNames}
254 self.__dict__[
"coefficientsLinearizePolynomial"] = {ampName: []
for ampName
in ampNames}
255 self.__dict__[
"coefficientLinearizeSquared"] = {ampName: []
for ampName
in ampNames}
258 """Protect class attributes"""
259 if attribute
not in self.__dict__:
260 raise AttributeError(f
"{attribute} is not already a member of PhotonTransferCurveDataset, which"
261 " does not support setting of new attributes.")
263 self.__dict__[attribute] = value
266 """Get the visits used, i.e. not discarded, for a given amp.
268 If no mask has been created yet, all visits are returned.
270 if self.visitMask[ampName] == []:
271 return self.inputVisitPairs[ampName]
274 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
276 pairs = self.inputVisitPairs[ampName]
277 mask = self.visitMask[ampName]
279 return [(v1, v2)
for ((v1, v2), m)
in zip(pairs, mask)
if bool(m)
is True]
282 return [amp
for amp
in self.ampNames
if amp
not in self.badAmps]
286 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
288 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
289 used in astronomical detectors characterization (e.g., Janesick 2001,
290 Janesick 2007). This task calculates the PTC from a series of pairs of
291 flat-field images; each pair taken at identical exposure times. The
292 difference image of each pair is formed to eliminate fixed pattern noise,
293 and then the variance of the difference image and the mean of the average image
294 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
295 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
296 arXiv:1905.08677) can be fitted to the PTC curve. These models include
297 parameters such as the gain (e/DN) and readout noise.
299 Linearizers to correct for signal-chain non-linearity are also calculated.
300 The `Linearizer` class, in general, can support per-amp linearizers, but in this
301 task this is not supported.
306 Positional arguments passed to the Task constructor. None used at this
309 Keyword arguments passed on to the Task constructor. None used at this
314 RunnerClass = PairedVisitListTaskRunner
315 ConfigClass = MeasurePhotonTransferCurveTaskConfig
316 _DefaultName =
"measurePhotonTransferCurve"
319 pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
320 self.makeSubtask(
"isr")
321 plt.interactive(
False)
323 self.config.isrForbiddenSteps, self.config.isrDesirableSteps, checkTrim=
False)
@classmethod
def _makeArgumentParser(cls):
    """Augment argument parser for the MeasurePhotonTransferCurveTask.

    Adds the --visit-pairs argument (pairs of exposure IDs to difference)
    and the --id data-id argument for selecting detectors.
    """
    # NOTE(review): the parser-construction line is missing from the corrupted
    # source; pipeBase.ArgumentParser(name=cls._DefaultName) is the standard
    # CmdLineTask idiom used elsewhere in this file's framework — confirm
    # against upstream.
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
                        help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
    parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
                           ContainerClass=NonexistentDatasetTaskDataIdContainer,
                           help="The ccds to use, e.g. --id ccd=0..100")
    return parser
340 """Run the Photon Transfer Curve (PTC) measurement task.
342 For a dataRef (which is each detector here),
343 and given a list of visit pairs at different exposure times,
348 dataRef : list of lsst.daf.persistence.ButlerDataRef
349 dataRef for the detector for the visits to be fit.
350 visitPairs : `iterable` of `tuple` of `int`
351 Pairs of visit numbers to be processed together
355 detNum = dataRef.dataId[self.config.ccdKey]
356 detector = dataRef.get(
'camera')[dataRef.dataId[self.config.ccdKey]]
362 for name
in dataRef.getButler().getKeys(
'bias'):
363 if name
not in dataRef.dataId:
365 dataRef.dataId[name] = \
366 dataRef.getButler().queryMetadata(
'raw', [name], detector=detNum)[0]
367 except OperationalError:
370 amps = detector.getAmplifiers()
371 ampNames = [amp.getName()
for amp
in amps]
374 self.log.
info(
'Measuring PTC using %s visits for detector %s' % (visitPairs, detNum))
376 for (v1, v2)
in visitPairs:
378 dataRef.dataId[
'expId'] = v1
380 dataRef.dataId[
'expId'] = v2
382 del dataRef.dataId[
'expId']
385 expTime = exp1.getInfo().getVisitInfo().getExposureTime()
389 ampName = amp.getName()
391 dataset.rawExpTimes[ampName].
append(expTime)
392 dataset.rawMeans[ampName].
append(mu)
393 dataset.rawVars[ampName].
append(varDiff)
394 dataset.inputVisitPairs[ampName].
append((v1, v2))
396 numberAmps = len(detector.getAmplifiers())
397 numberAduValues = self.config.maxAduForLookupTableLinearizer
398 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)
405 tableArray=lookupTableArray)
407 if self.config.makePlots:
408 self.
plot(dataRef, dataset, ptcFitType=self.config.ptcFitType)
411 self.log.
info(f
"Writing PTC and NL data to {dataRef.getUri(write=True)}")
412 dataRef.put(dataset, datasetType=
"photonTransferCurveDataset")
414 butler = dataRef.getButler()
415 self.log.
info(
"Writing linearizers: \n "
416 "lookup table (linear component of polynomial fit), \n "
417 "polynomial (coefficients for a polynomial correction), \n "
418 "and squared linearizer (quadratic coefficient from polynomial)")
420 detName = detector.getName()
421 now = datetime.datetime.utcnow()
422 calibDate = now.strftime(
"%Y-%m-%d")
424 for linType, dataType
in [(
"LOOKUPTABLE",
'linearizeLut'),
425 (
"LINEARIZEPOLYNOMIAL",
'linearizePolynomial'),
426 (
"LINEARIZESQUARED",
'linearizeSquared')]:
428 if linType ==
"LOOKUPTABLE":
429 tableArray = lookupTableArray
434 instruName=self.config.instrumentName,
435 tableArray=tableArray,
437 butler.put(linearizer, datasetType=dataType, dataId={
'detector': detNum,
438 'detectorName': detName,
'calibDate': calibDate})
440 self.log.
info(
'Finished measuring PTC for in detector %s' % detNum)
442 return pipeBase.Struct(exitStatus=0)
445 tableArray=None, log=None):
446 """Build linearizer object to persist.
450 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
451 The dataset containing the means, variances, and exposure times
452 detector : `lsst.afw.cameraGeom.Detector`
454 calibDate : `datetime.datetime`
456 linearizerType : `str`
457 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
458 instruName : `str`, optional
460 tableArray : `np.array`, optional
461 Look-up table array with size rows=nAmps and columns=DN values
462 log : `lsst.log.Log`, optional
463 Logger to handle messages
467 linearizer : `lsst.ip.isr.Linearizer`
470 detName = detector.getName()
471 detNum = detector.getId()
472 if linearizerType ==
"LOOKUPTABLE":
473 if tableArray
is not None:
474 linearizer =
Linearizer(detector=detector, table=tableArray, log=log)
476 raise RuntimeError(
"tableArray must be provided when creating a LookupTable linearizer")
477 elif linearizerType
in (
"LINEARIZESQUARED",
"LINEARIZEPOLYNOMIAL"):
480 raise RuntimeError(
"Invalid linearizerType {linearizerType} to build a Linearizer object. "
481 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
482 for i, amp
in enumerate(detector.getAmplifiers()):
483 ampName = amp.getName()
484 if linearizerType ==
"LOOKUPTABLE":
485 linearizer.linearityCoeffs[ampName] = [i, 0]
486 linearizer.linearityType[ampName] =
"LookupTable"
487 elif linearizerType ==
"LINEARIZESQUARED":
488 linearizer.linearityCoeffs[ampName] = [dataset.coefficientLinearizeSquared[ampName]]
489 linearizer.linearityType[ampName] =
"Squared"
490 elif linearizerType ==
"LINEARIZEPOLYNOMIAL":
491 linearizer.linearityCoeffs[ampName] = dataset.coefficientsLinearizePolynomial[ampName]
492 linearizer.linearityType[ampName] =
"Polynomial"
493 linearizer.linearityBBox[ampName] = amp.getBBox()
495 linearizer.validate()
496 calibId = f
"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
499 raftName = detName.split(
"_")[0]
500 calibId += f
" raftName={raftName}"
503 calibId += f
" raftName={raftname}"
505 serial = detector.getSerial()
506 linearizer.updateMetadata(instrumentName=instruName, detectorId=f
"{detNum}",
507 calibId=calibId, serial=serial, detectorName=f
"{detName}")
512 """Calculate the mean signal of two exposures and the variance of their difference.
516 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
517 First exposure of flat field pair.
519 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
520 Second exposure of flat field pair.
522 region : `lsst.geom.Box2I`
523 Region of each exposure where to perform the calculations (e.g, an amplifier).
529 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
533 Half of the clipped variance of the difference of the regions inthe two input
537 if region
is not None:
538 im1Area = exposure1.maskedImage[region]
539 im2Area = exposure2.maskedImage[region]
541 im1Area = exposure1.maskedImage
542 im2Area = exposure2.maskedImage
548 statsCtrl.setNumSigmaClip(self.config.nSigmaClipPtc)
549 statsCtrl.setNumIter(self.config.nIterSigmaClipPtc)
557 temp = im2Area.clone()
559 diffIm = im1Area.clone()
568 def _fitLeastSq(self, initialParams, dataX, dataY, function):
569 """Do a fit and estimate the parameter errors using using scipy.optimize.leastq.
571 optimize.leastsq returns the fractional covariance matrix. To estimate the
572 standard deviation of the fit parameters, multiply the entries of this matrix
573 by the unweighted reduced chi squared and take the square root of the diagonal elements.
577 initialParams : `list` of `float`
578 initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
579 determines the degree of the polynomial.
581 dataX : `numpy.array` of `float`
582 Data in the abscissa axis.
584 dataY : `numpy.array` of `float`
585 Data in the ordinate axis.
587 function : callable object (function)
588 Function to fit the data with.
592 pFitSingleLeastSquares : `list` of `float`
593 List with fitted parameters.
595 pErrSingleLeastSquares : `list` of `float`
596 List with errors for fitted parameters.
598 reducedChiSqSingleLeastSquares : `float`
599 Unweighted reduced chi squared
602 def errFunc(p, x, y):
605 pFit, pCov, infoDict, errMessage, success = leastsq(errFunc, initialParams,
606 args=(dataX, dataY), full_output=1, epsfcn=0.0001)
608 if (len(dataY) > len(initialParams))
and pCov
is not None:
609 reducedChiSq = (errFunc(pFit, dataX, dataY)**2).sum()/(len(dataY)-len(initialParams))
612 pCov = np.zeros((len(initialParams), len(initialParams)))
614 reducedChiSq = np.inf
617 for i
in range(len(pFit)):
618 errorVec.append(np.fabs(pCov[i][i])**0.5)
620 pFitSingleLeastSquares = pFit
621 pErrSingleLeastSquares = np.array(errorVec)
623 return pFitSingleLeastSquares, pErrSingleLeastSquares, reducedChiSq
625 def _fitBootstrap(self, initialParams, dataX, dataY, function, confidenceSigma=1.):
626 """Do a fit using least squares and bootstrap to estimate parameter errors.
628 The bootstrap error bars are calculated by fitting 100 random data sets.
632 initialParams : `list` of `float`
633 initial values for fit parameters. For ptcFitType=POLYNOMIAL, its length
634 determines the degree of the polynomial.
636 dataX : `numpy.array` of `float`
637 Data in the abscissa axis.
639 dataY : `numpy.array` of `float`
640 Data in the ordinate axis.
642 function : callable object (function)
643 Function to fit the data with.
645 confidenceSigma : `float`
646 Number of sigmas that determine confidence interval for the bootstrap errors.
650 pFitBootstrap : `list` of `float`
651 List with fitted parameters.
653 pErrBootstrap : `list` of `float`
654 List with errors for fitted parameters.
656 reducedChiSqBootstrap : `float`
660 def errFunc(p, x, y):
664 pFit, _ = leastsq(errFunc, initialParams, args=(dataX, dataY), full_output=0)
667 residuals = errFunc(pFit, dataX, dataY)
668 sigmaErrTotal = np.std(residuals)
673 randomDelta = np.random.normal(0., sigmaErrTotal, len(dataY))
674 randomDataY = dataY + randomDelta
675 randomFit, _ = leastsq(errFunc, initialParams,
676 args=(dataX, randomDataY), full_output=0)
677 pars.append(randomFit)
678 pars = np.array(pars)
679 meanPfit = np.mean(pars, 0)
682 nSigma = confidenceSigma
683 errPfit = nSigma*np.std(pars, 0)
684 pFitBootstrap = meanPfit
685 pErrBootstrap = errPfit
687 reducedChiSq = (errFunc(pFitBootstrap, dataX, dataY)**2).sum()/(len(dataY)-len(initialParams))
688 return pFitBootstrap, pErrBootstrap, reducedChiSq
def funcPolynomial(pars, x):
    """Polynomial function definition.

    Evaluates sum_i pars[i] * x**i, i.e. coefficients in ascending order.
    """
    # NOTE(review): the `def` line is absent from the corrupted source; the
    # (pars, x) signature and name are reconstructed from the body and the
    # companion Astier model below — confirm against upstream.
    return poly.polyval(x, [*pars])
def funcAstier(pars, x):
    """Single brighter-fatter parameter model for PTC; Equation 16 of Astier+19.

    pars = (a00, gain, noise); x is the mean signal.
    """
    # NOTE(review): the `def` line is absent from the corrupted source; the
    # (pars, x) signature is reconstructed from the unpacking of `pars` below
    # — confirm against upstream.
    a00, gain, noise = pars
    return 0.5/(a00*gain*gain)*(np.exp(2*a00*x*gain)-1) + noise/(gain*gain)
700 def _initialParsForPolynomial(order):
702 pars = np.zeros(order, dtype=np.float)
709 def _boundsForPolynomial(initialPars):
710 lowers = [np.NINF
for p
in initialPars]
711 uppers = [np.inf
for p
in initialPars]
713 return (lowers, uppers)
716 def _boundsForAstier(initialPars):
717 lowers = [np.NINF
for p
in initialPars]
718 uppers = [np.inf
for p
in initialPars]
719 return (lowers, uppers)
722 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
723 """Return a boolean array to mask bad points.
725 A linear function has a constant ratio, so find the median
726 value of the ratios, and exclude the points that deviate
727 from that by more than a factor of maxDeviationPositive/negative.
728 Asymmetric deviations are supported as we expect the PTC to turn
729 down as the flux increases, but sometimes it anomalously turns
730 upwards just before turning over, which ruins the fits, so it
731 is wise to be stricter about restricting positive outliers than
734 Too high and points that are so bad that fit will fail will be included
735 Too low and the non-linear points will be excluded, biasing the NL fit."""
736 ratios = [b/a
for (a, b)
in zip(means, variances)]
737 medianRatio = np.median(ratios)
738 ratioDeviations = [(r/medianRatio)-1
for r
in ratios]
741 maxDeviationPositive =
abs(maxDeviationPositive)
742 maxDeviationNegative = -1. *
abs(maxDeviationNegative)
744 goodPoints = np.array([
True if (r < maxDeviationPositive
and r > maxDeviationNegative)
745 else False for r
in ratioDeviations])
748 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
750 nBad = Counter(array)[0]
755 msg = f
"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
758 array[array == 0] = substituteValue
762 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
763 to produce corrections (deviation from linear part of polynomial) for a particular amplifier
764 to populate LinearizeLookupTable.
765 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
766 and LinearizeSquared."
771 exposureTimeVector: `list` of `float`
772 List of exposure times for each flat pair
774 meanSignalVector: `list` of `float`
775 List of mean signal from diference image of flat pairs
779 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
780 The dataset containing the fit parameters, the NL correction coefficients, and the
781 LUT row for the amplifier at hand. Explicitly:
783 dataset.polynomialLinearizerCoefficients : `list` of `float`
784 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
786 c_(j-2) = -k_j/(k_1^j) with units (DN^(1-j)). The units of k_j are DN/t^j, and they are fit from
787 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
788 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
789 k_0 and k_1 and degenerate with bias level and gain, and are not used by the non-linearity
790 correction. Therefore, j = 2...n in the above expression (see `LinearizePolynomial` class in
793 dataset.quadraticPolynomialLinearizerCoefficient : `float`
794 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
795 c0 = -k2/(k1^2), where k1 and k2 are fit from
796 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
797 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
799 dataset.linearizerTableRow : `list` of `float`
800 One dimensional array with deviation from linear part of n-order polynomial fit
801 to mean vs time curve. This array will be one row (for the particular amplifier at hand)
802 of the table array for LinearizeLookupTable.
804 dataset.linearityResidual : `list` of `float`
805 Linearity residual from the mean vs time curve, defined as
806 100*(1 - meanSignalReference/expTimeReference/(meanSignal/expTime).
808 dataset.meanSignalVsTimePolyFitPars : `list` of `float`
809 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
811 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
812 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
814 dataset.fractionalNonLinearityResidual : `list` of `float`
815 Fractional residuals from the meanSignal vs exposureTime curve with respect to linear part of
816 polynomial fit: 100*(linearPart - meanSignal)/linearPart, where
817 linearPart = k0 + k1*exposureTimeVector.
819 dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
820 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
825 if self.config.doFitBootstrap:
826 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = self.
_fitBootstrap(parsIniNonLinearity,
831 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = self.
_fitLeastSq(parsIniNonLinearity,
838 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
839 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
840 signalIdeal = parsFit[0] + parsFit[1]*timeRange
842 linearizerTableRow = signalIdeal - signalUncorrected
848 polynomialLinearizerCoefficients = []
849 for i, coefficient
in enumerate(parsFit):
850 c = -coefficient/(k1**i)
851 polynomialLinearizerCoefficients.append(c)
852 if np.fabs(c) > 1e-10:
853 msg = f
"Coefficient {c} in polynomial fit larger than threshold 1e-10."
856 c0 = polynomialLinearizerCoefficients[2]
859 linResidualTimeIndex = self.config.linResidualTimeIndex
860 if exposureTimeVector[linResidualTimeIndex] == 0.0:
861 raise RuntimeError(
"Reference time for linearity residual can't be 0.0")
862 linResidual = 100*(1 - ((meanSignalVector[linResidualTimeIndex] /
863 exposureTimeVector[linResidualTimeIndex]) /
864 (meanSignalVector/exposureTimeVector)))
867 linearPart = parsFit[0] + k1*exposureTimeVector
868 fracNonLinearityResidual = 100*(linearPart - meanSignalVector)/linearPart
871 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
872 dataset.quadraticPolynomialLinearizerCoefficient = c0
873 dataset.linearizerTableRow = linearizerTableRow
874 dataset.linearityResidual = linResidual
875 dataset.meanSignalVsTimePolyFitPars = parsFit
876 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
877 dataset.fractionalNonLinearityResidual = fracNonLinearityResidual
878 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit
883 """Fit the photon transfer curve and calculate linearity and residuals.
885 Fit the photon transfer curve with either a polynomial of the order
886 specified in the task config, or using the Astier approximation.
888 Sigma clipping is performed iteratively for the fit, as well as an
889 initial clipping of data points that are more than
890 config.initialNonLinearityExclusionThreshold away from lying on a
891 straight line. This other step is necessary because the photon transfer
892 curve turns over catastrophically at very high flux (because saturation
893 drops the variance to ~0) and these far outliers cause the initial fit
894 to fail, meaning the sigma cannot be calculated to perform the
899 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
900 The dataset containing the means, variances and exposure times
902 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
903 'ASTIERAPPROXIMATION' to the PTC
904 tableArray : `np.array`
905 Optional. Look-up table array with size rows=nAmps and columns=DN values.
906 It will be modified in-place if supplied.
910 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
911 This is the same dataset as the input paramter, however, it has been modified
912 to include information such as the fit vectors and the fit parameters. See
913 the class `PhotonTransferCurveDatase`.
916 def errFunc(p, x, y):
917 return ptcFunc(p, x) - y
919 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
920 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
922 for i, ampName
in enumerate(dataset.ampNames):
923 timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
924 meanVecOriginal = np.array(dataset.rawMeans[ampName])
925 varVecOriginal = np.array(dataset.rawVars[ampName])
928 mask = ((meanVecOriginal >= self.config.minMeanSignal) &
929 (meanVecOriginal <= self.config.maxMeanSignal))
932 self.config.initialNonLinearityExclusionThresholdPositive,
933 self.config.initialNonLinearityExclusionThresholdNegative)
934 mask = mask & goodPoints
936 if ptcFitType ==
'ASTIERAPPROXIMATION':
938 parsIniPtc = [-1e-9, 1.0, 10.]
940 if ptcFitType ==
'POLYNOMIAL':
947 while count <= maxIterationsPtcOutliers:
951 meanTempVec = meanVecOriginal[mask]
952 varTempVec = varVecOriginal[mask]
953 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
959 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
960 newMask = np.array([
True if np.abs(r) < sigmaCutPtcOutliers
else False for r
in sigResids])
961 mask = mask & newMask
963 nDroppedTotal = Counter(mask)[
False]
964 self.log.
debug(f
"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
967 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
969 dataset.visitMask[ampName] = mask
972 timeVecFinal = timeVecOriginal[mask]
973 meanVecFinal = meanVecOriginal[mask]
974 varVecFinal = varVecOriginal[mask]
976 if Counter(mask)[
False] > 0:
977 self.log.
info((f
"Number of points discarded in PTC of amplifier {ampName}:" +
978 f
" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
980 if (len(meanVecFinal) < len(parsIniPtc)):
981 msg = (f
"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
982 f
"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
986 lenNonLinPars = self.config.polynomialFitDegreeNonLinearity - 1
987 dataset.badAmps.append(ampName)
988 dataset.gain[ampName] = np.nan
989 dataset.gainErr[ampName] = np.nan
990 dataset.noise[ampName] = np.nan
991 dataset.noiseErr[ampName] = np.nan
992 dataset.nonLinearity[ampName] = np.nan
993 dataset.nonLinearityError[ampName] = np.nan
994 dataset.nonLinearityResiduals[ampName] = np.nan
995 dataset.fractionalNonLinearityResiduals[ampName] = np.nan
996 dataset.coefficientLinearizeSquared[ampName] = np.nan
997 dataset.ptcFitPars[ampName] = np.nan
998 dataset.ptcFitParsError[ampName] = np.nan
999 dataset.ptcFitReducedChiSquared[ampName] = np.nan
1000 dataset.coefficientsLinearizePolynomial[ampName] = [np.nan]*lenNonLinPars
1001 tableArray[i, :] = [np.nan]*self.config.maxAduForLookupTableLinearizer
1005 if self.config.doFitBootstrap:
1006 parsFit, parsFitErr, reducedChiSqPtc = self.
_fitBootstrap(parsIniPtc, meanVecFinal,
1007 varVecFinal, ptcFunc)
1009 parsFit, parsFitErr, reducedChiSqPtc = self.
_fitLeastSq(parsIniPtc, meanVecFinal,
1010 varVecFinal, ptcFunc)
1012 dataset.ptcFitPars[ampName] = parsFit
1013 dataset.ptcFitParsError[ampName] = parsFitErr
1014 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
1016 if ptcFitType ==
'ASTIERAPPROXIMATION':
1017 ptcGain = parsFit[1]
1018 ptcGainErr = parsFitErr[1]
1019 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1020 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1021 if ptcFitType ==
'POLYNOMIAL':
1022 ptcGain = 1./parsFit[1]
1023 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1024 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1025 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1027 dataset.gain[ampName] = ptcGain
1028 dataset.gainErr[ampName] = ptcGainErr
1029 dataset.noise[ampName] = ptcNoise
1030 dataset.noiseErr[ampName] = ptcNoiseErr
1031 dataset.ptcFitType[ampName] = ptcFitType
1039 if tableArray
is not None:
1040 tableArray[i, :] = datasetLinRes.linearizerTableRow
1041 dataset.nonLinearity[ampName] = datasetLinRes.meanSignalVsTimePolyFitPars
1042 dataset.nonLinearityError[ampName] = datasetLinRes.meanSignalVsTimePolyFitParsErr
1043 dataset.nonLinearityResiduals[ampName] = datasetLinRes.linearityResidual
1044 dataset.fractionalNonLinearityResiduals[ampName] = datasetLinRes.fractionalNonLinearityResidual
1045 dataset.nonLinearityReducedChiSquared[ampName] = datasetLinRes.meanSignalVsTimePolyFitReducedChiSq
1049 polyLinCoeffs = np.array(datasetLinRes.polynomialLinearizerCoefficients[2:])
1050 dataset.coefficientsLinearizePolynomial[ampName] = polyLinCoeffs
1051 quadPolyLinCoeff = datasetLinRes.quadraticPolynomialLinearizerCoefficient
1052 dataset.coefficientLinearizeSquared[ampName] = quadPolyLinCoeff
def plot(self, dataRef, dataset, ptcFitType):
    """Make the PTC/linearity plots for one detector and save them as a PDF.

    The output directory is resolved through the butler
    (``cpPipePlotRoot`` dataset type) and created if necessary; the
    per-detector PDF is then written by the ``_plotPtc`` helper.

    Parameters
    ----------
    dataRef : butler data reference
        Used to locate the plot output root and to read the detector
        number (via ``self.config.ccdKey``) for the output filename.
    dataset : `PhotonTransferCurveDataset`
        Measured PTC quantities to plot.
    ptcFitType : `str`
        PTC fit type (e.g. 'POLYNOMIAL' or 'ASTIERAPPROXIMATION'),
        forwarded unchanged to ``_plotPtc``.
    """
    dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
    # exist_ok=True avoids the TOCTOU race of the previous
    # "if not exists: makedirs" pattern (e.g. two detectors being
    # plotted concurrently could both pass the check, then one
    # makedirs call would raise FileExistsError).
    os.makedirs(dirname, exist_ok=True)

    detNum = dataRef.dataId[self.config.ccdKey]
    filename = f"PTC_det{detNum}.pdf"
    filenameFull = os.path.join(dirname, filename)
    with PdfPages(filenameFull) as pdfPages:
        self._plotPtc(dataset, ptcFitType, pdfPages)
1067 def _plotPtc(self, dataset, ptcFitType, pdfPages):
1068 """Plot PTC, linearity, and linearity residual per amplifier"""
1070 reducedChiSqPtc = dataset.ptcFitReducedChiSquared
1071 if ptcFitType ==
'ASTIERAPPROXIMATION':
1073 stringTitle = (
r"Var = $\frac{1}{2g^2a_{00}}(\exp (2a_{00} \mu g) - 1) + \frac{n_{00}}{g^2}$ "
1074 r" ($chi^2$/dof = %g)" % (reducedChiSqPtc))
1075 if ptcFitType ==
'POLYNOMIAL':
1077 stringTitle =
r"Polynomial (degree: %g)" % (self.config.polynomialFitDegree)
1082 supTitleFontSize = 18
1086 nAmps = len(dataset.ampNames)
1089 nRows = np.sqrt(nAmps)
1090 mantissa, _ = np.modf(nRows)
1092 nRows = int(nRows) + 1
1098 f, ax = plt.subplots(nrows=nRows, ncols=nCols, sharex=
'col', sharey=
'row', figsize=(13, 10))
1099 f2, ax2 = plt.subplots(nrows=nRows, ncols=nCols, sharex=
'col', sharey=
'row', figsize=(13, 10))
1101 for i, (amp, a, a2)
in enumerate(zip(dataset.ampNames, ax.flatten(), ax2.flatten())):
1102 meanVecOriginal = np.array(dataset.rawMeans[amp])
1103 varVecOriginal = np.array(dataset.rawVars[amp])
1104 mask = dataset.visitMask[amp]
1105 meanVecFinal = meanVecOriginal[mask]
1106 varVecFinal = varVecOriginal[mask]
1107 meanVecOutliers = meanVecOriginal[np.invert(mask)]
1108 varVecOutliers = varVecOriginal[np.invert(mask)]
1109 pars, parsErr = dataset.ptcFitPars[amp], dataset.ptcFitParsError[amp]
1111 if ptcFitType ==
'ASTIERAPPROXIMATION':
1112 if len(meanVecFinal):
1113 ptcA00, ptcA00error = pars[0], parsErr[0]
1114 ptcGain, ptcGainError = pars[1], parsErr[1]
1115 ptcNoise = np.sqrt(np.fabs(pars[2]))
1116 ptcNoiseError = 0.5*(parsErr[2]/np.fabs(pars[2]))*np.sqrt(np.fabs(pars[2]))
1117 stringLegend = (f
"a00: {ptcA00:.2e}+/-{ptcA00error:.2e} 1/e"
1118 f
"\n Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN"
1119 f
"\n Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e \n")
1121 if ptcFitType ==
'POLYNOMIAL':
1122 if len(meanVecFinal):
1123 ptcGain, ptcGainError = 1./pars[1], np.fabs(1./pars[1])*(parsErr[1]/pars[1])
1124 ptcNoise = np.sqrt(np.fabs(pars[0]))*ptcGain
1125 ptcNoiseError = (0.5*(parsErr[0]/np.fabs(pars[0]))*(np.sqrt(np.fabs(pars[0]))))*ptcGain
1126 stringLegend = (f
"Noise: {ptcNoise:.4}+/-{ptcNoiseError:.2e} e \n"
1127 f
"Gain: {ptcGain:.4}+/-{ptcGainError:.2e} e/DN \n")
1129 a.set_xlabel(
r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1130 a.set_ylabel(
r'Variance (DN$^2$)', fontsize=labelFontSize)
1131 a.tick_params(labelsize=11)
1132 a.set_xscale(
'linear', fontsize=labelFontSize)
1133 a.set_yscale(
'linear', fontsize=labelFontSize)
1135 a2.set_xlabel(
r'Mean Signal ($\mu$, DN)', fontsize=labelFontSize)
1136 a2.set_ylabel(
r'Variance (DN$^2$)', fontsize=labelFontSize)
1137 a2.tick_params(labelsize=11)
1138 a2.set_xscale(
'log')
1139 a2.set_yscale(
'log')
1141 if len(meanVecFinal):
1142 minMeanVecFinal = np.min(meanVecFinal)
1143 maxMeanVecFinal = np.max(meanVecFinal)
1144 meanVecFit = np.linspace(minMeanVecFinal, maxMeanVecFinal, 100*len(meanVecFinal))
1145 minMeanVecOriginal = np.min(meanVecOriginal)
1146 maxMeanVecOriginal = np.max(meanVecOriginal)
1147 deltaXlim = maxMeanVecOriginal - minMeanVecOriginal
1149 a.plot(meanVecFit, ptcFunc(pars, meanVecFit), color=
'red')
1150 a.plot(meanVecFinal, pars[0] + pars[1]*meanVecFinal, color=
'green', linestyle=
'--')
1151 a.scatter(meanVecFinal, varVecFinal, c=
'blue', marker=
'o', s=markerSize)
1152 a.scatter(meanVecOutliers, varVecOutliers, c=
'magenta', marker=
's', s=markerSize)
1153 a.text(0.03, 0.8, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1154 a.set_title(amp, fontsize=titleFontSize)
1155 a.set_xlim([minMeanVecOriginal - 0.2*deltaXlim, maxMeanVecOriginal + 0.2*deltaXlim])
1158 a2.plot(meanVecFit, ptcFunc(pars, meanVecFit), color=
'red')
1159 a2.scatter(meanVecFinal, varVecFinal, c=
'blue', marker=
'o', s=markerSize)
1160 a2.scatter(meanVecOutliers, varVecOutliers, c=
'magenta', marker=
's', s=markerSize)
1161 a2.text(0.03, 0.8, stringLegend, transform=a2.transAxes, fontsize=legendFontSize)
1162 a2.set_title(amp, fontsize=titleFontSize)
1163 a2.set_xlim([minMeanVecOriginal, maxMeanVecOriginal])
1165 a.set_title(f
"{amp} (BAD)", fontsize=titleFontSize)
1166 a2.set_title(f
"{amp} (BAD)", fontsize=titleFontSize)
1168 f.suptitle(
"PTC \n Fit: " + stringTitle, fontsize=20)
1170 f2.suptitle(
"PTC (log-log)", fontsize=20)
1171 pdfPages.savefig(f2)
1174 f, ax = plt.subplots(nrows=4, ncols=4, sharex=
'col', sharey=
'row', figsize=(13, 10))
1175 for i, (amp, a)
in enumerate(zip(dataset.ampNames, ax.flatten())):
1176 meanVecFinal = np.array(dataset.rawMeans[amp])[dataset.visitMask[amp]]
1177 timeVecFinal = np.array(dataset.rawExpTimes[amp])[dataset.visitMask[amp]]
1179 a.set_xlabel(
'Time (sec)', fontsize=labelFontSize)
1180 a.set_ylabel(
r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1181 a.tick_params(labelsize=labelFontSize)
1182 a.set_xscale(
'linear', fontsize=labelFontSize)
1183 a.set_yscale(
'linear', fontsize=labelFontSize)
1185 if len(meanVecFinal):
1186 pars, parsErr = dataset.nonLinearity[amp], dataset.nonLinearityError[amp]
1187 k0, k0Error = pars[0], parsErr[0]
1188 k1, k1Error = pars[1], parsErr[1]
1189 k2, k2Error = pars[2], parsErr[2]
1190 stringLegend = (f
"k0: {k0:.4}+/-{k0Error:.2e} DN\n k1: {k1:.4}+/-{k1Error:.2e} DN/t"
1191 f
"\n k2: {k2:.2e}+/-{k2Error:.2e} DN/t^2 \n")
1192 a.scatter(timeVecFinal, meanVecFinal)
1193 a.plot(timeVecFinal, self.
funcPolynomial(pars, timeVecFinal), color=
'red')
1194 a.text(0.03, 0.75, stringLegend, transform=a.transAxes, fontsize=legendFontSize)
1195 a.set_title(f
"{amp}", fontsize=titleFontSize)
1197 a.set_title(f
"{amp} (BAD)", fontsize=titleFontSize)
1199 f.suptitle(
"Linearity \n Fit: Polynomial (degree: %g)"
1200 % (self.config.polynomialFitDegreeNonLinearity),
1201 fontsize=supTitleFontSize)
1205 f, ax = plt.subplots(nrows=4, ncols=4, sharex=
'col', sharey=
'row', figsize=(13, 10))
1206 for i, (amp, a)
in enumerate(zip(dataset.ampNames, ax.flatten())):
1207 meanVecFinal = np.array(dataset.rawMeans[amp])[dataset.visitMask[amp]]
1208 linRes = np.array(dataset.nonLinearityResiduals[amp])
1210 a.set_xlabel(
r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1211 a.set_ylabel(
'LR (%)', fontsize=labelFontSize)
1212 a.set_xscale(
'linear', fontsize=labelFontSize)
1213 a.set_yscale(
'linear', fontsize=labelFontSize)
1214 if len(meanVecFinal):
1215 a.axvline(x=timeVecFinal[self.config.linResidualTimeIndex], color=
'g', linestyle=
'--')
1216 a.scatter(meanVecFinal, linRes)
1217 a.set_title(f
"{amp}", fontsize=titleFontSize)
1218 a.axhline(y=0, color=
'k')
1219 a.tick_params(labelsize=labelFontSize)
1221 a.set_title(f
"{amp} (BAD)", fontsize=titleFontSize)
1223 f.suptitle(
r"Linearity Residual: $100\times(1 - \mu_{\rm{ref}}/t_{\rm{ref}})/(\mu / t))$" +
"\n" +
1224 r"$t_{\rm{ref}}$: " + f
"{timeVecFinal[2]} s", fontsize=supTitleFontSize)
1228 f, ax = plt.subplots(nrows=4, ncols=4, sharex=
'col', sharey=
'row', figsize=(13, 10))
1229 for i, (amp, a)
in enumerate(zip(dataset.ampNames, ax.flatten())):
1230 meanVecFinal = np.array(dataset.rawMeans[amp])[dataset.visitMask[amp]]
1231 fracLinRes = np.array(dataset.fractionalNonLinearityResiduals[amp])
1233 a.axhline(y=0, color=
'k')
1234 a.axvline(x=0, color=
'k', linestyle=
'-')
1235 a.set_xlabel(
r'Mean signal ($\mu$, DN)', fontsize=labelFontSize)
1236 a.set_ylabel(
'Fractional nonlinearity (%)', fontsize=labelFontSize)
1237 a.tick_params(labelsize=labelFontSize)
1238 a.set_xscale(
'linear', fontsize=labelFontSize)
1239 a.set_yscale(
'linear', fontsize=labelFontSize)
1241 if len(meanVecFinal):
1242 a.scatter(meanVecFinal, fracLinRes, c=
'g')
1243 a.set_title(amp, fontsize=titleFontSize)
1245 a.set_title(f
"{amp} (BAD)", fontsize=titleFontSize)
1247 f.suptitle(
r"Fractional NL residual" +
"\n" +
1248 r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
1249 fontsize=supTitleFontSize)