23 from collections
import Counter
29 from scipy.optimize
import least_squares
33 from .astierCovPtcUtils
import fitDataFullCovariance
42 __all__ = [
'PhotonTransferCurveSolveConfig',
'PhotonTransferCurveSolveTask']
46 dimensions=(
"instrument",
"detector")):
47 inputCovariances = cT.Input(
48 name=
"ptcCovariances",
49 doc=
"Tuple with measured covariances from flats.",
50 storageClass=
"PhotonTransferCurveDataset",
51 dimensions=(
"instrument",
"exposure",
"detector"),
54 camera = cT.PrerequisiteInput(
56 doc=
"Camera the input data comes from.",
57 storageClass=
"Camera",
58 dimensions=(
"instrument",),
60 lookupFunction=lookupStaticCalibration,
62 outputPtcDataset = cT.Output(
63 name=
"ptcDatsetProposal",
64 doc=
"Output proposed ptc dataset.",
65 storageClass=
"PhotonTransferCurveDataset",
66 dimensions=(
"instrument",
"detector"),
73 pipelineConnections=PhotonTransferCurveSolveConnections):
74 """Configuration for fitting measured covariances.
76 ptcFitType = pexConfig.ChoiceField(
78 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
81 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
82 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
83 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
86 maximumRangeCovariancesAstier = pexConfig.Field(
88 doc=
"Maximum range of covariances as in Astier+19",
91 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
93 doc=
"sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
96 maxIterFullFitCovariancesAstier = pexConfig.Field(
98 doc=
"Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
101 polynomialFitDegree = pexConfig.Field(
103 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
106 sigmaCutPtcOutliers = pexConfig.Field(
108 doc=
"Sigma cut for outlier rejection in PTC.",
111 maxIterationsPtcOutliers = pexConfig.Field(
113 doc=
"Maximum number of iterations for outlier rejection in PTC.",
116 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
118 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
119 " linear in the positive direction, from the PTC fit. Note that these points will also be"
120 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
121 " to allow an accurate determination of the sigmas for said iterative fit.",
126 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
128 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
129 " linear in the negative direction, from the PTC fit. Note that these points will also be"
130 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
131 " to allow an accurate determination of the sigmas for said iterative fit.",
136 minMeanRatioTest = pexConfig.Field(
138 doc=
"In the initial test to screen out bad points with a ratio test, points with low"
139 " flux can get inadvertantly screened. This test only screens out points with flux"
140 " above this value.",
143 minVarPivotSearch = pexConfig.Field(
145 doc=
"The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
146 " to exclude then form the PTC model fit. However, sometimes at low fluxes, the variance"
147 " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
148 " should be sought.",
151 doFitBootstrap = pexConfig.Field(
153 doc=
"Use bootstrap for the PTC fit parameters and errors?.",
159 pipeBase.CmdLineTask):
160 """Task to fit the PTC from flat covariances.
161 This task assembles the list of individual PTC datasets produced
162 by `PhotonTransferCurveSolveTask` into one single final PTC dataset.
163 The task fits the measured (co)variances to a polynomial model or to
164 the models described in equations 16 and 20 of Astier+19
165 (referred to as `POLYNOMIAL`, `EXPAPPROXIMATION`, and `FULLCOVARIANCE`
166 in the configuration options of the task, respectively). Parameters
167 of interest such as the gain and noise are derived from the fits.
169 Astier+19: "The Shape of the Photon Transfer Curve
170 of CCD sensors", arXiv:1905.08677
172 ConfigClass = PhotonTransferCurveSolveConfig
173 _DefaultName =
'cpPhotonTransferCurveSolve'
176 """Ensure that the input and output dimensions are passed along.
180 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
181 Butler to operate on.
182 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
183 Input data refs to load.
184 ouptutRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
185 Output data refs to persist.
187 inputs = butlerQC.get(inputRefs)
188 outputs = self.
runrun(inputCovariances=inputs[
'inputCovariances'], camera=inputs[
'camera'])
189 butlerQC.put(outputs, outputRefs)
191 def run(self, inputCovariances, camera=None, inputExpList=None):
192 """Fit measure covariances to different models.
196 inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
197 List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
199 camera : `lsst.afw.cameraGeom.Camera`, optional
202 inputExpList : `list` [`~lsst.afw.image.exposure.exposure.ExposureF`], optional
207 results : `lsst.pipe.base.Struct`
208 The results struct containing:
209 ``outputPtcDatset`` : `lsst.ip.isr.PhotonTransferCurveDataset`
210 Final PTC dataset, containing information such as the means, variances,
214 ampNames = np.unique(inputCovariances[0].ampNames)
216 self.config.maximumRangeCovariancesAstier)
217 for partialPtcDataset
in inputCovariances:
218 if partialPtcDataset.ptcFitType ==
'DUMMY':
220 for ampName
in ampNames:
221 datasetPtc.inputExpIdPairs[ampName].
append(partialPtcDataset.inputExpIdPairs[ampName])
222 if type(partialPtcDataset.rawExpTimes[ampName])
is list:
223 datasetPtc.rawExpTimes[ampName].
append(partialPtcDataset.rawExpTimes[ampName][0])
225 datasetPtc.rawExpTimes[ampName].
append(partialPtcDataset.rawExpTimes[ampName])
226 if type(partialPtcDataset.rawMeans[ampName])
is list:
227 datasetPtc.rawMeans[ampName].
append(partialPtcDataset.rawMeans[ampName][0])
229 datasetPtc.rawMeans[ampName].
append(partialPtcDataset.rawMeans[ampName])
230 if type(partialPtcDataset.rawVars[ampName])
is list:
231 datasetPtc.rawVars[ampName].
append(partialPtcDataset.rawVars[ampName][0])
233 datasetPtc.rawVars[ampName].
append(partialPtcDataset.rawVars[ampName])
234 datasetPtc.covariances[ampName].
append(np.array(partialPtcDataset.covariances[ampName][0]))
235 datasetPtc.covariancesSqrtWeights[ampName].
append(
236 np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
238 for ampName
in ampNames:
239 index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
240 datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
241 datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
242 datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
243 datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
244 datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
245 datasetPtc.covariancesSqrtWeights[ampName] = np.array(
246 datasetPtc.covariancesSqrtWeights[ampName])[index]
248 if self.config.ptcFitType ==
"FULLCOVARIANCE":
253 tempDatasetPtc = copy.copy(datasetPtc)
254 tempDatasetPtc.ptcFitType =
"EXPAPPROXIMATION"
255 tempDatasetPtc = self.
fitPtcfitPtc(tempDatasetPtc)
256 for ampName
in datasetPtc.ampNames:
257 datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
258 datasetPtc.fitType =
"FULLCOVARIANCE"
264 datasetPtc = self.
fitPtcfitPtc(datasetPtc)
265 if inputExpList
is not None:
267 detector = inputExpList[0].getDetector()
270 datasetPtc.updateMetadata(setDate=
True, camera=camera, detector=detector)
272 return pipeBase.Struct(
273 outputPtcDataset=datasetPtc,
277 """Fit measured flat covariances to full model in Astier+19.
281 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
282 The dataset containing information such as the means, (co)variances,
287 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
288 This is the same dataset as the input parameter, however, it has been modified
289 to include information such as the fit vectors and the fit parameters. See
290 the class `PhotonTransferCurveDatase`.
299 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
303 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
304 The dataset containing information such as the means, variances and exposure times.
306 Dictionary of CovFit objects, with amp names as keys.
308 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
312 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
313 This is the same dataset as the input parameter, however, it has been modified
314 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
315 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
316 See the class `PhotonTransferCurveDatase`.
318 assert(len(covFits) == len(covFitsNoB))
320 for i, amp
in enumerate(dataset.ampNames):
321 lenInputTimes = len(dataset.rawExpTimes[amp])
323 dataset.ptcFitPars[amp] = [np.nan]
324 dataset.ptcFitParsError[amp] = [np.nan]
325 dataset.ptcFitChiSq[amp] = np.nan
328 fitNoB = covFitsNoB[amp]
331 dataset.covariances[amp] = fit.cov
332 dataset.covariancesModel[amp] = fit.evalCovModel()
333 dataset.covariancesSqrtWeights[amp] = fit.sqrtW
334 dataset.aMatrix[amp] = fit.getA()
335 dataset.bMatrix[amp] = fit.getB()
336 dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
337 dataset.aMatrixNoB[amp] = fitNoB.getA()
339 (meanVecFinal, varVecFinal, varVecModel,
340 wc, varMask) = fit.getFitData(0, 0, divideByMu=
False)
343 dataset.gain[amp] = gain
344 dataset.gainErr[amp] = fit.getGainErr()
345 dataset.noise[amp] = np.sqrt(fit.getRon())
346 dataset.noiseErr[amp] = fit.getRonErr()
347 dataset.finalVars[amp] = varVecFinal
348 dataset.finalModelVars[amp] = varVecModel
349 dataset.finalMeans[amp] = meanVecFinal
354 matrixSide = self.config.maximumRangeCovariancesAstier
355 nanMatrix = np.full((matrixSide, matrixSide), np.nan)
356 listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
358 dataset.covariances[amp] = listNanMatrix
359 dataset.covariancesModel[amp] = listNanMatrix
360 dataset.covariancesSqrtWeights[amp] = listNanMatrix
361 dataset.aMatrix[amp] = nanMatrix
362 dataset.bMatrix[amp] = nanMatrix
363 dataset.covariancesModelNoB[amp] = listNanMatrix
364 dataset.aMatrixNoB[amp] = nanMatrix
366 dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
367 dataset.gain[amp] = np.nan
368 dataset.gainErr[amp] = np.nan
369 dataset.noise[amp] = np.nan
370 dataset.noiseErr[amp] = np.nan
371 dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
372 dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
373 dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)
378 def _initialParsForPolynomial(order):
380 pars = np.zeros(order, dtype=np.float)
387 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
389 lowers = [np.NINF
for p
in initialPars]
391 uppers = [np.inf
for p
in initialPars]
393 return (lowers, uppers)
396 def _boundsForAstier(initialPars, lowers=[], uppers=[]):
398 lowers = [np.NINF
for p
in initialPars]
400 uppers = [np.inf
for p
in initialPars]
401 return (lowers, uppers)
404 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
405 minMeanRatioTest, minVarPivotSearch):
406 """Return a boolean array to mask bad points.
410 means : `numpy.array`
411 Input array with mean signal values.
412 variances : `numpy.array`
413 Input array with variances at each mean value.
414 maxDeviationPositive : `float`
415 Maximum deviation from being constant for the variance/mean
416 ratio, in the positive direction.
417 maxDeviationNegative : `float`
418 Maximum deviation from being constant for the variance/mean
419 ratio, in the negative direction.
420 minMeanRatioTest : `float`
421 Minimum signal value (in ADU) after which to start examining
423 minVarPivotSearch : `float`
424 Minimum variance point (in ADU^2) after which the pivot point
425 wher the variance starts decreasing should be sought.
429 goodPoints : `numpy.array` [`bool`]
430 Boolean array to select good (`True`) and bad (`False`)
435 A linear function has a constant ratio, so find the median
436 value of the ratios, and exclude the points that deviate
437 from that by more than a factor of maxDeviationPositive/negative.
438 Asymmetric deviations are supported as we expect the PTC to turn
439 down as the flux increases, but sometimes it anomalously turns
440 upwards just before turning over, which ruins the fits, so it
441 is wise to be stricter about restricting positive outliers than
443 Too high and points that are so bad that fit will fail will be included
444 Too low and the non-linear points will be excluded, biasing the NL fit.
445 This function also masks points after the variance starts decreasing.
448 assert(len(means) == len(variances))
449 ratios = [b/a
for (a, b)
in zip(means, variances)]
450 medianRatio = np.nanmedian(ratios)
451 ratioDeviations = [0.0
if a < minMeanRatioTest
else (r/medianRatio)-1
452 for (a, r)
in zip(means, ratios)]
455 maxDeviationPositive =
abs(maxDeviationPositive)
456 maxDeviationNegative = -1. *
abs(maxDeviationNegative)
458 goodPoints = np.array([
True if (r < maxDeviationPositive
and r > maxDeviationNegative)
459 else False for r
in ratioDeviations])
462 pivot = np.where(np.array(np.diff(variances)) < 0)[0]
466 pivot = [p
for p
in pivot
if variances[p] > minVarPivotSearch]
468 pivot = np.min(pivot)
469 goodPoints[pivot+1:len(goodPoints)] =
False
473 def _makeZeroSafe(self, array, substituteValue=1e-9):
475 array = np.array(array)
476 nBad = Counter(np.ravel(array))[0]
480 index, = np.where(array == 0)
482 msg = f
"Found {nBad} zeros in array at elements {index}"
485 array[index] = substituteValue
490 """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.
492 Fit the photon transfer curve with either a polynomial of the order
493 specified in the task config, or using the exponential approximation
494 in Astier+19 (Eq. 16).
496 Sigma clipping is performed iteratively for the fit, as well as an
497 initial clipping of data points that are more than
498 config.initialNonLinearityExclusionThreshold away from lying on a
499 straight line. This other step is necessary because the photon transfer
500 curve turns over catastrophically at very high flux (because saturation
501 drops the variance to ~0) and these far outliers cause the initial fit
502 to fail, meaning the sigma cannot be calculated to perform the
507 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
508 The dataset containing the means, variances and exposure times.
512 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
513 This is the same dataset as the input parameter, however, it has been modified
514 to include information such as the fit vectors and the fit parameters. See
515 the class `PhotonTransferCurveDatase`.
520 Raises if dataset.ptcFitType is None or empty.
522 if dataset.ptcFitType:
523 ptcFitType = dataset.ptcFitType
525 raise RuntimeError(
"ptcFitType is None of empty in PTC dataset.")
526 matrixSide = self.config.maximumRangeCovariancesAstier
527 nanMatrix = np.empty((matrixSide, matrixSide))
528 nanMatrix[:] = np.nan
530 for amp
in dataset.ampNames:
531 lenInputTimes = len(dataset.rawExpTimes[amp])
532 listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
533 listNanMatrix[:] = np.nan
535 dataset.covariancesModel[amp] = listNanMatrix
536 dataset.aMatrix[amp] = nanMatrix
537 dataset.bMatrix[amp] = nanMatrix
538 dataset.covariancesModelNoB[amp] = listNanMatrix
539 dataset.aMatrixNoB[amp] = nanMatrix
541 def errFunc(p, x, y):
542 return ptcFunc(p, x) - y
544 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
545 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
547 for i, ampName
in enumerate(dataset.ampNames):
548 timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
549 meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
550 varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
551 varVecOriginal = self.
_makeZeroSafe_makeZeroSafe(varVecOriginal)
554 self.config.initialNonLinearityExclusionThresholdPositive,
555 self.config.initialNonLinearityExclusionThresholdNegative,
556 self.config.minMeanRatioTest,
557 self.config.minVarPivotSearch)
558 if not (goodPoints.any()):
559 msg = (f
"SERIOUS: All points in goodPoints: {goodPoints} are bad."
560 f
"Setting {ampName} to BAD.")
563 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
568 if ptcFitType ==
'EXPAPPROXIMATION':
570 parsIniPtc = [-1e-9, 1.0, 10.]
572 bounds = self.
_boundsForAstier_boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
573 uppers=[1e-4, 2.5, 2000])
574 if ptcFitType ==
'POLYNOMIAL':
575 ptcFunc = funcPolynomial
581 while count <= maxIterationsPtcOutliers:
585 meanTempVec = meanVecOriginal[mask]
586 varTempVec = varVecOriginal[mask]
587 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
593 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
594 newMask = np.array([
True if np.abs(r) < sigmaCutPtcOutliers
else False for r
in sigResids])
595 mask = mask & newMask
596 if not (mask.any()
and newMask.any()):
597 msg = (f
"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
598 f
"Setting {ampName} to BAD.")
601 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
603 nDroppedTotal = Counter(mask)[
False]
604 self.log.
debug(f
"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
607 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
608 if not (mask.any()
and newMask.any()):
610 dataset.expIdMask[ampName] = mask
612 meanVecFinal = meanVecOriginal[mask]
613 varVecFinal = varVecOriginal[mask]
615 if Counter(mask)[
False] > 0:
616 self.log.
info((f
"Number of points discarded in PTC of amplifier {ampName}:"
617 f
" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
619 if (len(meanVecFinal) < len(parsIniPtc)):
620 msg = (f
"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
621 f
"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
624 self.
fillBadAmpfillBadAmp(dataset, ptcFitType, ampName)
627 if self.config.doFitBootstrap:
628 parsFit, parsFitErr, reducedChiSqPtc =
fitBootstrap(parsIniPtc, meanVecFinal,
629 varVecFinal, ptcFunc,
630 weightsY=1./np.sqrt(varVecFinal))
632 parsFit, parsFitErr, reducedChiSqPtc =
fitLeastSq(parsIniPtc, meanVecFinal,
633 varVecFinal, ptcFunc,
634 weightsY=1./np.sqrt(varVecFinal))
635 dataset.ptcFitPars[ampName] = parsFit
636 dataset.ptcFitParsError[ampName] = parsFitErr
637 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
640 padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
641 dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength),
'constant',
642 constant_values=np.nan)
643 dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
644 'constant', constant_values=np.nan)
645 dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength),
'constant',
646 constant_values=np.nan)
647 if ptcFitType ==
'EXPAPPROXIMATION':
649 ptcGainErr = parsFitErr[1]
650 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
651 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
652 if ptcFitType ==
'POLYNOMIAL':
653 ptcGain = 1./parsFit[1]
654 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
655 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
656 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
657 dataset.gain[ampName] = ptcGain
658 dataset.gainErr[ampName] = ptcGainErr
659 dataset.noise[ampName] = ptcNoise
660 dataset.noiseErr[ampName] = ptcNoiseErr
662 if not len(dataset.ptcFitType) == 0:
663 dataset.ptcFitType = ptcFitType
664 if len(dataset.badAmps) == 0:
665 dataset.badAmps = np.repeat(np.nan, len(
list(dataset.rawExpTimes.values())[0]))
670 """Fill the dataset with NaNs if there are not enough good points.
674 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
675 The dataset containing the means, variances and exposure times.
677 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
678 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
682 dataset.badAmps.append(ampName)
683 dataset.expIdMask[ampName] = np.repeat(
False, len(dataset.rawExpTimes[ampName]))
684 dataset.gain[ampName] = np.nan
685 dataset.gainErr[ampName] = np.nan
686 dataset.noise[ampName] = np.nan
687 dataset.noiseErr[ampName] = np.nan
688 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
if
689 ptcFitType
in [
"POLYNOMIAL", ]
else np.repeat(np.nan, 3))
690 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
if
691 ptcFitType
in [
"POLYNOMIAL", ]
else np.repeat(np.nan, 3))
692 dataset.ptcFitChiSq[ampName] = np.nan
693 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
694 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
695 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
def _boundsForPolynomial(initialPars, lowers=[], uppers=[])
def runQuantum(self, butlerQC, inputRefs, outputRefs)
def fitCovariancesAstier(self, dataset)
def run(self, inputCovariances, camera=None, inputExpList=None)
def _makeZeroSafe(self, array, substituteValue=1e-9)
def _boundsForAstier(initialPars, lowers=[], uppers=[])
def fitPtc(self, dataset)
def _initialParsForPolynomial(order)
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative, minMeanRatioTest, minVarPivotSearch)
def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB)
def fillBadAmp(self, dataset, ptcFitType, ampName)
daf::base::PropertyList * list
std::shared_ptr< FrameSet > append(FrameSet const &first, FrameSet const &second)
Construct a FrameSet that performs two transformations in series.
def fitDataFullCovariance(dataset)
def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.)
def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None)
Angle abs(Angle const &a)