def fitPtc(self, dataset):
    """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.

    Fit the photon transfer curve with either a polynomial of the order
    specified in the task config, or using the exponential approximation
    in Astier+19 (Eq. 16).

    Sigma clipping is performed iteratively for the fit, as well as an
    initial clipping of data points that are more than
    config.initialNonLinearityExclusionThreshold away from lying on a
    straight line. This other step is necessary because the photon transfer
    curve turns over catastrophically at very high flux (because saturation
    drops the variance to ~0) and these far outliers cause the initial fit
    to fail, meaning the sigma cannot be calculated to perform the
    sigma-clipping.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and exposure times.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however, it has been modified
        to include information such as the fit vectors and the fit parameters. See
        the class `PhotonTransferCurveDatase`.

    Raises
    ------
    RuntimeError
        Raised if dataset.ptcFitType is None or empty.
    """
    if dataset.ptcFitType:
        ptcFitType = dataset.ptcFitType
    else:
        # Fixed typo in the original message ("None of empty").
        raise RuntimeError("ptcFitType is None or empty in PTC dataset.")
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.empty((matrixSide, matrixSide))
    nanMatrix[:] = np.nan

    # Pre-fill the covariance/matrix entries with NaNs; this fit type does
    # not compute them, so they stay NaN unless filled elsewhere.
    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
        listNanMatrix[:] = np.nan

        dataset.covariancesModel[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

    def errFunc(p, x, y):
        # Residual for least_squares; ptcFunc is bound per-amp below.
        return ptcFunc(p, x) - y

    sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
    maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

    for i, ampName in enumerate(dataset.ampNames):
        timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
        meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
        varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
        # Guard against zero variances, which would break the 1/sqrt(var)
        # weights and the sigma-residual computation below.
        varVecOriginal = self._makeZeroSafe(varVecOriginal)

        # Initial cut of points that are far from a straight line (the PTC
        # turns over at saturation and those outliers break the first fit).
        goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                self.config.initialNonLinearityExclusionThresholdPositive,
                                                self.config.initialNonLinearityExclusionThresholdNegative,
                                                self.config.minMeanRatioTest,
                                                self.config.minVarPivotSearch)
        if not (goodPoints.any()):
            msg = (f"SERIOUS: All points in goodPoints: {goodPoints} are bad."
                   f"Setting {ampName} to BAD.")
            self.log.warn(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        mask = goodPoints

        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise^2
            bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                           uppers=[1e-4, 2.5, 2000])
        if ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
            bounds = self._boundsForPolynomial(parsIniPtc)

        # Iteratively sigma-clip outliers before the final fit.
        count = 1
        while count <= maxIterationsPtcOutliers:
            # Applying the mask shrinks the arrays, so always keep `mask`
            # at full length and re-apply it to the original vectors.
            meanTempVec = meanVecOriginal[mask]
            varTempVec = varVecOriginal[mask]
            res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
            pars = res.x

            # Residuals are computed against the *original* (full-length)
            # vectors so that masks stay the same length for AND-ing.
            sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
            # Vectorized replacement for the original element-wise
            # [True if ... else False] list comprehension.
            newMask = np.abs(sigResids) < sigmaCutPtcOutliers
            mask = mask & newMask
            if not (mask.any() and newMask.any()):
                msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                       f"Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                break
            nDroppedTotal = Counter(mask)[False]
            self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
            count += 1
            # Masks must never shrink relative to the data vectors.
            assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
        if not (mask.any() and newMask.any()):
            # Amp was already marked BAD inside the loop; skip it.
            continue
        dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
        # Store the final mask, AND-ing with any pre-existing mask.
        if len(dataset.expIdMask[ampName]):
            dataset.expIdMask[ampName] &= mask
        else:
            dataset.expIdMask[ampName] = mask
        # Seed the final fit with the last clipped-fit parameters.
        parsIniPtc = pars
        meanVecFinal = meanVecOriginal[mask]
        varVecFinal = varVecOriginal[mask]

        if Counter(mask)[False] > 0:
            self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                           f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

        if (len(meanVecFinal) < len(parsIniPtc)):
            msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                   f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
            self.log.warn(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue
        # Final fit, weighted by 1/sigma of the measured variances.
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                varVecFinal, ptcFunc,
                                                                weightsY=1./np.sqrt(varVecFinal))
        else:
            parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                              varVecFinal, ptcFunc,
                                                              weightsY=1./np.sqrt(varVecFinal))
        dataset.ptcFitPars[ampName] = parsFit
        dataset.ptcFitParsError[ampName] = parsFitErr
        dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
        # Pad the masked vectors with NaNs back to the unmasked length so
        # all per-amp arrays stay the same size.
        padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
        dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                            constant_values=np.nan)
        dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                 'constant', constant_values=np.nan)
        dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                             constant_values=np.nan)
        if ptcFitType == 'EXPAPPROXIMATION':
            # Astier+19 parameterization: pars = (a00, gain, noise^2).
            ptcGain = parsFit[1]
            ptcGainErr = parsFitErr[1]
            ptcNoise = np.sqrt(np.fabs(parsFit[2]))
            ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
        if ptcFitType == 'POLYNOMIAL':
            # Polynomial: gain = 1/c1, noise from the c0 term (scaled to electrons).
            ptcGain = 1./parsFit[1]
            ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
            ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
            ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
        dataset.gain[ampName] = ptcGain
        dataset.gainErr[ampName] = ptcGainErr
        dataset.noise[ampName] = ptcNoise
        dataset.noiseErr[ampName] = ptcNoiseErr
    if not len(dataset.ptcFitType) == 0:
        dataset.ptcFitType = ptcFitType
    if len(dataset.badAmps) == 0:
        dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

    return dataset
daf::base::PropertyList * list
def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.)
def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None)