23 from collections
import Counter
29 from scipy.optimize
import least_squares
31 import lsst.pipe.base.connectionTypes
as cT
33 from .astierCovPtcUtils
import fitDataFullCovariance
# Public API of this module: the config class and its task.
42 __all__ = [
'PhotonTransferCurveSolveConfig',
'PhotonTransferCurveSolveTask']
# NOTE(review): mangled extraction fragment.  The leading integers on each
# line are the original file's own line numbers; gaps between them mean lines
# are missing here (e.g. the ``class ...Connections(...)`` statement itself,
# the ``name=`` argument of ``camera``, and the closing parenthesis of each
# connection).  Code is left byte-identical; comments only.
46 dimensions=(
"instrument",
"detector")):
# Input: per-exposure partial PTC datasets with measured flat covariances.
47 inputCovariances = cT.Input(
48 name=
"ptcCovariances",
49 doc=
"Tuple with measured covariances from flats.",
50 storageClass=
"PhotonTransferCurveDataset",
51 dimensions=(
"instrument",
"exposure",
"detector"),
# Prerequisite input: camera geometry, resolved via a static-calibration
# lookup function (its ``name=`` line is missing from this extraction).
54 camera = cT.PrerequisiteInput(
56 doc=
"Camera the input data comes from.",
57 storageClass=
"Camera",
58 dimensions=(
"instrument",),
60 lookupFunction=lookupStaticCalibration,
# Output: the assembled, fitted PTC dataset proposal.
62 outputPtcDataset = cT.Output(
63 name=
# NOTE(review): "ptcDatsetProposal" (sic) is a runtime dataset name, not a
# comment -- do not "fix" the spelling without checking every consumer.
"ptcDatsetProposal",
64 doc=
"Output proposed ptc dataset.",
65 storageClass=
"PhotonTransferCurveDataset",
66 dimensions=(
"instrument",
"detector"),
# NOTE(review): mangled extraction fragment -- leading integers are original
# line numbers; gaps mean missing lines (the ``class ...Config(`` statement,
# every field's ``dtype=``/``default=`` arguments, and the ``allowed={``
# opener of the ChoiceField).  Code kept byte-identical; comments only.
# NOTE(review): typos inside the ``doc=`` strings ("inadvertantly",
# "then form") are runtime strings, not comments, so they are left unchanged
# in this doc-only edit.
73 pipelineConnections=PhotonTransferCurveSolveConnections):
74 """Configuration for fitting measured covariances.
# Model selector: the three allowed values map to the models of Astier+19
# (polynomial, Eq. 16 approximation, Eq. 20 full-covariance).
76 ptcFitType = pexConfig.ChoiceField(
78 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
81 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
82 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
83 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
# Lag range (matrix side) of the covariance matrices used in the fits.
86 maximumRangeCovariancesAstier = pexConfig.Field(
88 doc=
"Maximum range of covariances as in Astier+19",
91 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
93 doc=
"sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
96 maxIterFullFitCovariancesAstier = pexConfig.Field(
98 doc=
"Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
101 polynomialFitDegree = pexConfig.Field(
103 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
# Iterative sigma-clipping controls for the PTC outlier rejection loop.
106 sigmaCutPtcOutliers = pexConfig.Field(
108 doc=
"Sigma cut for outlier rejection in PTC.",
111 maxIterationsPtcOutliers = pexConfig.Field(
113 doc=
"Maximum number of iterations for outlier rejection in PTC.",
# Pre-fit screening thresholds (asymmetric, see _getInitialGoodPoints).
116 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
118 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
119 " linear in the positive direction, from the PTC fit. Note that these points will also be"
120 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
121 " to allow an accurate determination of the sigmas for said iterative fit.",
126 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
128 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
129 " linear in the negative direction, from the PTC fit. Note that these points will also be"
130 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
131 " to allow an accurate determination of the sigmas for said iterative fit.",
136 minMeanRatioTest = pexConfig.Field(
138 doc=
"In the initial test to screen out bad points with a ratio test, points with low"
139 " flux can get inadvertantly screened. This test only screens out points with flux"
140 " above this value.",
143 minVarPivotSearch = pexConfig.Field(
145 doc=
"The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
146 " to exclude then form the PTC model fit. However, sometimes at low fluxes, the variance"
147 " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
148 " should be sought.",
151 doFitBootstrap = pexConfig.Field(
153 doc=
"Use bootstrap for the PTC fit parameters and errors?.",
# NOTE(review): fragment -- the ``class PhotonTransferCurveSolveTask(`` line
# (and presumably another base class before ``pipeBase.CmdLineTask``) is
# missing from this extraction; only the trailing base and docstring remain.
159 pipeBase.CmdLineTask):
160 """Task to fit the PTC from flat covariances.
161 This task assembles the list of individual PTC datasets produced
162 by `PhotonTransferCurveSolveTask` into one single final PTC dataset.
163 The task fits the measured (co)variances to a polynomial model or to
164 the models described in equations 16 and 20 of Astier+19
165 (referred to as `POLYNOMIAL`, `EXPAPPROXIMATION`, and `FULLCOVARIANCE`
166 in the configuration options of the task, respectively). Parameters
167 of interest such as the gain and noise are derived from the fits.
169 Astier+19: "The Shape of the Photon Transfer Curve
170 of CCD sensors", arXiv:1905.08677
# Standard Task plumbing: configuration class and default task name.
172 ConfigClass = PhotonTransferCurveSolveConfig
173 _DefaultName =
'cpPhotonTransferCurveSolve'
# NOTE(review): fragment -- the ``def runQuantum(self, butlerQC, inputRefs,
# outputRefs):`` line and the docstring section headers are missing from this
# extraction; ``runrun`` below is a garbling of ``self.run(...)``.  Code kept
# byte-identical.
176 """Ensure that the input and output dimensions are passed along.
180 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
181 Butler to operate on.
182 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
183 Input data refs to load.
184 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
185 Output data refs to persist.
# Fetch the inputs for this quantum, run the fit, persist the result.
187 inputs = butlerQC.get(inputRefs)
188 outputs = self.
runrun(inputCovariances=inputs[
'inputCovariances'], camera=inputs[
'camera'])
189 butlerQC.put(outputs, outputRefs)
# NOTE(review): mangled extraction fragment -- the leading integers are the
# original file's line numbers; gaps mean missing lines (the docstring section
# headers, the ``datasetPtc = PhotonTransferCurveDataset(...)`` constructor
# around original line 215, the ``continue`` after the DUMMY check, and the
# ``else:`` lines of each ``if type(...) is list`` pair).  ``fitPtcfitPtc`` is
# a garbling of ``self.fitPtc(...)``.  Code kept byte-identical.
191 def run(self, inputCovariances, camera=None, inputExpList=None):
192 """Fit measure covariances to different models.
196 inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
197 List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
199 camera : `lsst.afw.cameraGeom.Camera`, optional
202 inputExpList : `list` [`~lsst.afw.image.exposure.exposure.ExposureF`], optional
207 results : `lsst.pipe.base.Struct`
208 The results struct containing:
209 ``outputPtcDataset`` : `lsst.ip.isr.PhotonTransferCurveDataset`
210 Final PTC dataset, containing information such as the means, variances,
# Accumulate the per-exposure partial datasets into one combined dataset,
# skipping DUMMY placeholders.
214 ampNames = np.unique(inputCovariances[0].ampNames)
216 self.config.maximumRangeCovariancesAstier)
217 for partialPtcDataset
in inputCovariances:
218 if partialPtcDataset.ptcFitType ==
'DUMMY':
220 for ampName
in ampNames:
221 datasetPtc.inputExpIdPairs[ampName].
append(partialPtcDataset.inputExpIdPairs[ampName])
222 if type(partialPtcDataset.rawExpTimes[ampName])
is list:
223 datasetPtc.rawExpTimes[ampName].
append(partialPtcDataset.rawExpTimes[ampName][0])
225 datasetPtc.rawExpTimes[ampName].
append(partialPtcDataset.rawExpTimes[ampName])
226 if type(partialPtcDataset.rawMeans[ampName])
is list:
227 datasetPtc.rawMeans[ampName].
append(partialPtcDataset.rawMeans[ampName][0])
229 datasetPtc.rawMeans[ampName].
append(partialPtcDataset.rawMeans[ampName])
230 if type(partialPtcDataset.rawVars[ampName])
is list:
231 datasetPtc.rawVars[ampName].
append(partialPtcDataset.rawVars[ampName][0])
233 datasetPtc.rawVars[ampName].
append(partialPtcDataset.rawVars[ampName])
234 if type(partialPtcDataset.expIdMask[ampName])
is list:
235 datasetPtc.expIdMask[ampName].
append(partialPtcDataset.expIdMask[ampName][0])
237 datasetPtc.expIdMask[ampName].
append(partialPtcDataset.expIdMask[ampName])
238 datasetPtc.covariances[ampName].
append(np.array(partialPtcDataset.covariances[ampName][0]))
239 datasetPtc.covariancesSqrtWeights[ampName].
append(
240 np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
# Sort every per-amplifier vector by increasing mean signal.
242 for ampName
in ampNames:
243 index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
244 datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
245 datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
246 datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
247 datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
248 datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index]
249 datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
250 datasetPtc.covariancesSqrtWeights[ampName] = np.array(
251 datasetPtc.covariancesSqrtWeights[ampName])[index]
# FULLCOVARIANCE: first run an EXPAPPROXIMATION fit on a copy to obtain the
# per-amp outlier masks; the full-covariance fit call itself is among the
# lines missing from this extraction (original 263-267).
252 if self.config.ptcFitType ==
"FULLCOVARIANCE":
257 tempDatasetPtc = copy.copy(datasetPtc)
258 tempDatasetPtc.ptcFitType =
"EXPAPPROXIMATION"
259 tempDatasetPtc = self.
fitPtcfitPtc(tempDatasetPtc)
260 for ampName
in datasetPtc.ampNames:
261 datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
262 datasetPtc.fitType =
"FULLCOVARIANCE"
268 datasetPtc = self.
fitPtcfitPtc(datasetPtc)
# Attach provenance metadata (detector only available via inputExpList).
269 if inputExpList
is not None:
271 detector = inputExpList[0].getDetector()
274 datasetPtc.updateMetadata(setDate=
True, camera=camera, detector=detector)
276 return pipeBase.Struct(
277 outputPtcDataset=datasetPtc,
# NOTE(review): only docstring fragments of ``fitCovariancesAstier(self,
# dataset)`` survive in this extraction; the ``def`` line and the entire body
# (which presumably calls ``fitDataFullCovariance`` -- see the import at the
# top of the file) are missing.  Nothing can safely be changed here.
281 """Fit measured flat covariances to full model in Astier+19.
285 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
286 The dataset containing information such as the means, (co)variances,
291 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
292 This is the same dataset as the input parameter, however, it has been modified
293 to include information such as the fit vectors and the fit parameters. See
294 the class `PhotonTransferCurveDataset`.
# NOTE(review): mangled extraction fragment -- the ``def
# getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):`` line and
# several interior lines are missing (the per-amp success/failure branching
# around original lines 330-334 and 354-357, and the gain extraction at 345-
# 346).  Code kept byte-identical; comments only.
303 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
307 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
308 The dataset containing information such as the means, variances and exposure times.
310 Dictionary of CovFit objects, with amp names as keys.
312 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
316 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
317 This is the same dataset as the input parameter, however, it has been modified
318 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
319 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
320 See the class `PhotonTransferCurveDataset`.
# Both fit dictionaries must cover the same amplifiers.
322 assert(len(covFits) == len(covFitsNoB))
324 for i, amp
in enumerate(dataset.ampNames):
325 lenInputTimes = len(dataset.rawExpTimes[amp])
# The Astier model has no scalar PTC-polynomial parameters; fill with NaN.
327 dataset.ptcFitPars[amp] = [np.nan]
328 dataset.ptcFitParsError[amp] = [np.nan]
329 dataset.ptcFitChiSq[amp] = np.nan
# Successful-fit branch: copy model products from the CovFit pair.
332 fitNoB = covFitsNoB[amp]
335 dataset.covariances[amp] = fit.cov
336 dataset.covariancesModel[amp] = fit.evalCovModel()
337 dataset.covariancesSqrtWeights[amp] = fit.sqrtW
338 dataset.aMatrix[amp] = fit.getA()
339 dataset.bMatrix[amp] = fit.getB()
340 dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
341 dataset.aMatrixNoB[amp] = fitNoB.getA()
# The (0, 0) covariance lag is the ordinary PTC variance-vs-mean data.
343 (meanVecFinal, varVecFinal, varVecModel,
344 wc, varMask) = fit.getFitData(0, 0, divideByMu=
False)
347 dataset.gain[amp] = gain
348 dataset.gainErr[amp] = fit.getGainErr()
349 dataset.noise[amp] = np.sqrt(fit.getRon())
350 dataset.noiseErr[amp] = fit.getRonErr()
351 dataset.finalVars[amp] = varVecFinal
352 dataset.finalModelVars[amp] = varVecModel
353 dataset.finalMeans[amp] = meanVecFinal
# Failure branch (its ``except``/``else`` opener is missing here): fill all
# per-amp products with NaN placeholders of the right shapes.
358 matrixSide = self.config.maximumRangeCovariancesAstier
359 nanMatrix = np.full((matrixSide, matrixSide), np.nan)
360 listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
362 dataset.covariances[amp] = listNanMatrix
363 dataset.covariancesModel[amp] = listNanMatrix
364 dataset.covariancesSqrtWeights[amp] = listNanMatrix
365 dataset.aMatrix[amp] = nanMatrix
366 dataset.bMatrix[amp] = nanMatrix
367 dataset.covariancesModelNoB[amp] = listNanMatrix
368 dataset.aMatrixNoB[amp] = nanMatrix
370 dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
371 dataset.gain[amp] = np.nan
372 dataset.gainErr[amp] = np.nan
373 dataset.noise[amp] = np.nan
374 dataset.noiseErr[amp] = np.nan
375 dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
376 dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
377 dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)
382 def _initialParsForPolynomial(order):
384 pars = np.zeros(order, dtype=float)
391 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
393 lowers = [np.NINF
for p
in initialPars]
395 uppers = [np.inf
for p
in initialPars]
397 return (lowers, uppers)
400 def _boundsForAstier(initialPars, lowers=[], uppers=[]):
402 lowers = [np.NINF
for p
in initialPars]
404 uppers = [np.inf
for p
in initialPars]
405 return (lowers, uppers)
408 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
409 minMeanRatioTest, minVarPivotSearch):
410 """Return a boolean array to mask bad points.
414 means : `numpy.array`
415 Input array with mean signal values.
416 variances : `numpy.array`
417 Input array with variances at each mean value.
418 maxDeviationPositive : `float`
419 Maximum deviation from being constant for the variance/mean
420 ratio, in the positive direction.
421 maxDeviationNegative : `float`
422 Maximum deviation from being constant for the variance/mean
423 ratio, in the negative direction.
424 minMeanRatioTest : `float`
425 Minimum signal value (in ADU) after which to start examining
427 minVarPivotSearch : `float`
428 Minimum variance point (in ADU^2) after which the pivot point
429 wher the variance starts decreasing should be sought.
433 goodPoints : `numpy.array` [`bool`]
434 Boolean array to select good (`True`) and bad (`False`)
439 A linear function has a constant ratio, so find the median
440 value of the ratios, and exclude the points that deviate
441 from that by more than a factor of maxDeviationPositive/negative.
442 Asymmetric deviations are supported as we expect the PTC to turn
443 down as the flux increases, but sometimes it anomalously turns
444 upwards just before turning over, which ruins the fits, so it
445 is wise to be stricter about restricting positive outliers than
447 Too high and points that are so bad that fit will fail will be included
448 Too low and the non-linear points will be excluded, biasing the NL fit.
449 This function also masks points after the variance starts decreasing.
452 assert(len(means) == len(variances))
453 ratios = [b/a
for (a, b)
in zip(means, variances)]
454 medianRatio = np.nanmedian(ratios)
455 ratioDeviations = [0.0
if a < minMeanRatioTest
else (r/medianRatio)-1
456 for (a, r)
in zip(means, ratios)]
459 maxDeviationPositive =
abs(maxDeviationPositive)
460 maxDeviationNegative = -1. *
abs(maxDeviationNegative)
462 goodPoints = np.array([
True if (r < maxDeviationPositive
and r > maxDeviationNegative)
463 else False for r
in ratioDeviations])
466 pivot = np.where(np.array(np.diff(variances)) < 0)[0]
470 pivot = [p
for p
in pivot
if variances[p] > minVarPivotSearch]
472 pivot = np.min(pivot)
473 goodPoints[pivot+1:len(goodPoints)] =
False
477 def _makeZeroSafe(self, array, substituteValue=1e-9):
479 array = np.array(array)
480 nBad = Counter(np.ravel(array))[0]
484 index, = np.where(array == 0)
486 msg = f
"Found {nBad} zeros in array at elements {index}"
489 array[index] = substituteValue
# NOTE(review): mangled extraction fragment -- the ``def fitPtc(self,
# dataset):`` line and many interior lines are missing (docstring section
# headers, ``else:`` lines, ``continue``/``break`` statements, the POLYNOMIAL
# initial-parameter setup around original lines 580-584, and the loop-counter
# updates of the while loop).  ``_makeZeroSafe_makeZeroSafe``,
# ``fillBadAmpfillBadAmp`` and ``_boundsForAstier_boundsForAstier`` are
# garblings of single ``self.<name>(...)`` calls.  Code kept byte-identical.
494 """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.
496 Fit the photon transfer curve with either a polynomial of the order
497 specified in the task config, or using the exponential approximation
498 in Astier+19 (Eq. 16).
500 Sigma clipping is performed iteratively for the fit, as well as an
501 initial clipping of data points that are more than
502 config.initialNonLinearityExclusionThreshold away from lying on a
503 straight line. This other step is necessary because the photon transfer
504 curve turns over catastrophically at very high flux (because saturation
505 drops the variance to ~0) and these far outliers cause the initial fit
506 to fail, meaning the sigma cannot be calculated to perform the
511 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
512 The dataset containing the means, variances and exposure times.
516 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
517 This is the same dataset as the input parameter, however, it has been modified
518 to include information such as the fit vectors and the fit parameters. See
519 the class `PhotonTransferCurveDataset`.
524 Raises if dataset.ptcFitType is None or empty.
# Fail fast when the dataset does not specify a fit type.
# NOTE(review): "None of empty" (sic) below is a runtime error message, not a
# comment, so it is left unchanged in this doc-only edit.
526 if dataset.ptcFitType:
527 ptcFitType = dataset.ptcFitType
529 raise RuntimeError(
"ptcFitType is None of empty in PTC dataset.")
# Pre-fill the covariance-model outputs with NaN placeholders; they are only
# really computed by the FULLCOVARIANCE path.
530 matrixSide = self.config.maximumRangeCovariancesAstier
531 nanMatrix = np.empty((matrixSide, matrixSide))
532 nanMatrix[:] = np.nan
534 for amp
in dataset.ampNames:
535 lenInputTimes = len(dataset.rawExpTimes[amp])
536 listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
537 listNanMatrix[:] = np.nan
539 dataset.covariancesModel[amp] = listNanMatrix
540 dataset.aMatrix[amp] = nanMatrix
541 dataset.bMatrix[amp] = nanMatrix
542 dataset.covariancesModelNoB[amp] = listNanMatrix
543 dataset.aMatrixNoB[amp] = nanMatrix
# Residual function for scipy.optimize.least_squares (ptcFunc is selected
# per fit type further below).
545 def errFunc(p, x, y):
546 return ptcFunc(p, x) - y
548 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
549 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
# Per-amplifier fit with iterative sigma-clipped outlier rejection.
551 for i, ampName
in enumerate(dataset.ampNames):
552 timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
553 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
554 varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
555 varVecOriginal = self.
_makeZeroSafe_makeZeroSafe(varVecOriginal)
# Initial screening via the variance/mean ratio test (the call opener on
# the original line 556-557 is missing here).
558 self.config.initialNonLinearityExclusionThresholdPositive,
559 self.config.initialNonLinearityExclusionThresholdNegative,
560 self.config.minMeanRatioTest,
561 self.config.minVarPivotSearch)
562 if not (goodPoints.any()):
563 msg = (f
"SERIOUS: All points in goodPoints: {goodPoints} are bad."
564 f
"Setting {ampName} to BAD.")
567 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
# Select model and starting parameters per fit type.
572 if ptcFitType ==
'EXPAPPROXIMATION':
574 parsIniPtc = [-1e-9, 1.0, 10.]
576 bounds = self.
_boundsForAstier_boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
577 uppers=[1e-4, 2.5, 2000])
578 if ptcFitType ==
'POLYNOMIAL':
579 ptcFunc = funcPolynomial
# Iterative rejection loop: fit on the masked points, clip residuals
# against sigmaCutPtcOutliers, repeat until stable or max iterations.
585 while count <= maxIterationsPtcOutliers:
589 meanTempVec = meanVecOriginal[mask]
590 varTempVec = varVecOriginal[mask]
591 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
597 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
598 newMask = np.array([
True if np.abs(r) < sigmaCutPtcOutliers
else False for r
in sigResids])
599 mask = mask & newMask
600 if not (mask.any()
and newMask.any()):
601 msg = (f
"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
602 f
"Setting {ampName} to BAD.")
605 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
607 nDroppedTotal = Counter(mask)[
False]
608 self.log.
debug(f
"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
611 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
612 if not (mask.any()
and newMask.any()):
# Merge the final mask into the dataset's existing exposure-ID mask.
614 dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
616 if len(dataset.expIdMask[ampName]):
617 dataset.expIdMask[ampName] &= mask
619 dataset.expIdMask[ampName] = mask
621 meanVecFinal = meanVecOriginal[mask]
622 varVecFinal = varVecOriginal[mask]
624 if Counter(mask)[
False] > 0:
625 self.log.
info((f
"Number of points discarded in PTC of amplifier {ampName}:"
626 f
" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
628 if (len(meanVecFinal) < len(parsIniPtc)):
629 msg = (f
"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
630 f
"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
633 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
# Final fit: bootstrapped or plain weighted least squares.
636 if self.config.doFitBootstrap:
637 parsFit, parsFitErr, reducedChiSqPtc =
fitBootstrap(parsIniPtc, meanVecFinal,
638 varVecFinal, ptcFunc,
639 weightsY=1./np.sqrt(varVecFinal))
641 parsFit, parsFitErr, reducedChiSqPtc =
fitLeastSq(parsIniPtc, meanVecFinal,
642 varVecFinal, ptcFunc,
643 weightsY=1./np.sqrt(varVecFinal))
644 dataset.ptcFitPars[ampName] = parsFit
645 dataset.ptcFitParsError[ampName] = parsFitErr
646 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
# Pad the per-point outputs back to the original length with NaNs so all
# amps keep vectors of equal length.
649 padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
650 dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength),
'constant',
651 constant_values=np.nan)
652 dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
653 'constant', constant_values=np.nan)
654 dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength),
'constant',
655 constant_values=np.nan)
# Derive gain and noise from the fitted parameters (per fit type).
656 if ptcFitType ==
'EXPAPPROXIMATION':
658 ptcGainErr = parsFitErr[1]
659 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
660 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
661 if ptcFitType ==
'POLYNOMIAL':
662 ptcGain = 1./parsFit[1]
663 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
664 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
665 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
666 dataset.gain[ampName] = ptcGain
667 dataset.gainErr[ampName] = ptcGainErr
668 dataset.noise[ampName] = ptcNoise
669 dataset.noiseErr[ampName] = ptcNoiseErr
# Record the fit type and mark the dataset clean if no amp went bad.
671 if not len(dataset.ptcFitType) == 0:
672 dataset.ptcFitType = ptcFitType
673 if len(dataset.badAmps) == 0:
674 dataset.badAmps = np.repeat(np.nan, len(
list(dataset.rawExpTimes.values())[0]))
def fillBadAmp(self, dataset, ptcFitType, ampName):
    """Fill the dataset with NaNs if there are not enough good points.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and exposure times.
    ptcFitType : `str`
        Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
        'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
    ampName : `str`
        Amplifier name.
    """
    dataset.badAmps.append(ampName)
    # Hoisted: the original recomputed len(dataset.rawExpTimes[ampName])
    # four times.
    nPoints = len(dataset.rawExpTimes[ampName])
    dataset.expIdMask[ampName] = np.repeat(False, nPoints)
    dataset.gain[ampName] = np.nan
    dataset.gainErr[ampName] = np.nan
    dataset.noise[ampName] = np.nan
    dataset.noiseErr[ampName] = np.nan
    # One NaN vector of the model's parameter count, used for both the fit
    # parameters and their errors (the original duplicated this conditional
    # expression verbatim).
    nPars = (self.config.polynomialFitDegree + 1
             if ptcFitType in ["POLYNOMIAL", ] else 3)
    dataset.ptcFitPars[ampName] = np.repeat(np.nan, nPars)
    dataset.ptcFitParsError[ampName] = np.repeat(np.nan, nPars)
    dataset.ptcFitChiSq[ampName] = np.nan
    dataset.finalVars[ampName] = np.repeat(np.nan, nPoints)
    dataset.finalModelVars[ampName] = np.repeat(np.nan, nPoints)
    dataset.finalMeans[ampName] = np.repeat(np.nan, nPoints)
# ---------------------------------------------------------------------------
# NOTE(review): everything below is extraction residue -- an auto-generated
# cross-reference index (Python method signatures mixed with unrelated C++/
# Doxygen entries such as `daf::base::PropertyList` and `FrameSet`).  It is
# not part of the module's source code.  Preserved verbatim:
# ---------------------------------------------------------------------------
def _boundsForPolynomial(initialPars, lowers=[], uppers=[])
def runQuantum(self, butlerQC, inputRefs, outputRefs)
def fitCovariancesAstier(self, dataset)
def run(self, inputCovariances, camera=None, inputExpList=None)
def _makeZeroSafe(self, array, substituteValue=1e-9)
def _boundsForAstier(initialPars, lowers=[], uppers=[])
def fitPtc(self, dataset)
def _initialParsForPolynomial(order)
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative, minMeanRatioTest, minVarPivotSearch)
def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB)
def fillBadAmp(self, dataset, ptcFitType, ampName)
daf::base::PropertyList * list
std::shared_ptr< FrameSet > append(FrameSet const &first, FrameSet const &second)
Construct a FrameSet that performs two transformations in series.
def fitDataFullCovariance(dataset)
def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.)
def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None)
Angle abs(Angle const &a)