def fitPtc(self, dataset, ptcFitType):
    """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.

    Fit the photon transfer curve with either a polynomial of the order
    specified in the task config, or using the Astier approximation.

    Sigma clipping is performed iteratively for the fit, as well as an
    initial clipping of data points that are more than
    config.initialNonLinearityExclusionThreshold away from lying on a
    straight line. This other step is necessary because the photon transfer
    curve turns over catastrophically at very high flux (because saturation
    drops the variance to ~0) and these far outliers cause the initial fit
    to fail, meaning the sigma cannot be calculated to perform the
    sigma-clipping.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and exposure times.
    ptcFitType : `str`
        Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
        'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input paramter, however, it has been modified
        to include information such as the fit vectors and the fit parameters. See
        the class `PhotonTransferCurveDatase`.
    """
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.empty((matrixSide, matrixSide))
    nanMatrix[:] = np.nan

    # This fit path does not compute the full covariance products, but the
    # dataset schema carries them, so pre-fill every amp with NaN
    # placeholders of the expected shapes.
    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
        listNanMatrix[:] = np.nan

        dataset.covariances[amp] = listNanMatrix
        dataset.covariancesModel[amp] = listNanMatrix
        dataset.covariancesSqrtWeights[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesNoB[amp] = listNanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

    def errFunc(p, x, y):
        # Residual for least_squares. `ptcFunc` is bound per-amp below,
        # before this closure is ever called (late binding).
        return ptcFunc(p, x) - y

    sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
    maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

    for i, ampName in enumerate(dataset.ampNames):
        timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
        meanVecOriginal = np.array(dataset.rawMeans[ampName])
        varVecOriginal = np.array(dataset.rawVars[ampName])
        # Guard against zero variances, which would break the
        # 1/sqrt(var) weighting below.
        varVecOriginal = self._makeZeroSafe(varVecOriginal)

        # Initial down-selection: drop points far off a straight line
        # (e.g. past the saturation turnover).
        goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                self.config.initialNonLinearityExclusionThresholdPositive,
                                                self.config.initialNonLinearityExclusionThresholdNegative,
                                                self.config.minMeanRatioTest,
                                                self.config.minVarPivotSearch)
        if not (goodPoints.any()):
            msg = (f"\nSERIOUS: All points in goodPoints: {goodPoints} are bad."
                   f"Setting {ampName} to BAD.")
            self.log.warning(msg)
            # Fill this amp with BAD placeholders and move on.
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        mask = goodPoints

        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            # Initial guess: presumably (a00, gain, noise) for Astier Eq. 16
            # -- TODO confirm against funcAstier's parameter order.
            parsIniPtc = [-1e-9, 1.0, 10.]
            bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                           uppers=[1e-4, 2.5, 2000])
        if ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
            bounds = self._boundsForPolynomial(parsIniPtc)

        # Iterative sigma-clipping loop: fit on the currently unmasked
        # points, then rebuild the mask from the residuals of the full
        # (original-length) vectors so it can be ANDed with goodPoints.
        count = 1
        while count <= maxIterationsPtcOutliers:
            # Applying a boolean mask shrinks the array, so always index
            # the *original* vectors rather than the previous temporaries.
            meanTempVec = meanVecOriginal[mask]
            varTempVec = varVecOriginal[mask]
            res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
            pars = res.x

            # Residuals in units of the naive per-point sigma (sqrt(var)).
            sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
            newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
            mask = goodPoints & newMask
            if not (mask.any() and newMask.any()):
                msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                       f"Setting {ampName} to BAD.")
                self.log.warning(msg)
                self.fillBadAmp(dataset, ptcFitType, ampName)
                break
            nDroppedTotal = Counter(mask)[False]
            self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
            count += 1
            # The masks and data vectors must never shrink.
            assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

        if not (mask.any() and newMask.any()):
            # Amp was already filled as BAD inside the loop; skip it.
            continue
        dataset.expIdMask[ampName] = mask
        # Warm-start the final fit from the last clipping iteration.
        parsIniPtc = pars
        meanVecFinal = meanVecOriginal[mask]
        varVecFinal = varVecOriginal[mask]

        if Counter(mask)[False] > 0:
            self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
                           f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

        if (len(meanVecFinal) < len(parsIniPtc)):
            msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
                   f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
            self.log.warning(msg)
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        # Final fit, weighted by 1/sigma of each variance point.
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                varVecFinal, ptcFunc,
                                                                weightsY=1./np.sqrt(varVecFinal))
        else:
            parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                              varVecFinal, ptcFunc,
                                                              weightsY=1./np.sqrt(varVecFinal))
        dataset.ptcFitPars[ampName] = parsFit
        dataset.ptcFitParsError[ampName] = parsFitErr
        dataset.ptcFitChiSq[ampName] = reducedChiSqPtc

        # Pad the clipped vectors back to the raw length with NaNs so every
        # per-amp array in the dataset keeps a common shape.
        padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
        dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                            constant_values=np.nan)
        dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                 'constant', constant_values=np.nan)
        dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                             constant_values=np.nan)

        # Derive gain and read noise from the fitted parameters; the
        # parameter meaning depends on the model that was fit.
        if ptcFitType == 'EXPAPPROXIMATION':
            ptcGain = parsFit[1]
            ptcGainErr = parsFitErr[1]
            ptcNoise = np.sqrt(np.fabs(parsFit[2]))
            ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
        if ptcFitType == 'POLYNOMIAL':
            ptcGain = 1./parsFit[1]
            ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
            ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
            ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
        dataset.gain[ampName] = ptcGain
        dataset.gainErr[ampName] = ptcGainErr
        dataset.noise[ampName] = ptcNoise
        dataset.noiseErr[ampName] = ptcNoiseErr

    # NOTE(review): the fit type is recorded only when dataset.ptcFitType is
    # already non-empty -- this looks inverted; confirm intent upstream.
    if not len(dataset.ptcFitType) == 0:
        dataset.ptcFitType = ptcFitType
    if len(dataset.badAmps) == 0:
        dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

    return dataset