22 """Calculation of brighter-fatter effect correlations and kernels."""
__all__ = ['BrighterFatterKernelSolveTask',
           'BrighterFatterKernelSolveConfig']

import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import BrighterFatterKernel
from .utils import (funcPolynomial, irlsFit)
from ._lookupStaticCalibration import lookupStaticCalibration
class BrighterFatterKernelSolveConnections(pipeBase.PipelineTaskConnections,
                                           dimensions=("instrument", "exposure", "detector")):
    """Connections for the brighter-fatter kernel solver.

    NOTE(review): the ``name=`` values for ``dummy``, ``camera`` and
    ``inputPtc``, and the ``multiple``/``deferLoad``/``isCalibration``
    flags, were lost in extraction and are reconstructed here — confirm
    against the upstream pipeline definitions before relying on them.
    """
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera associated with this data.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
        # Static calibrations need an explicit lookup function.
        lookupFunction=lookupStaticCalibration,
    )
    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Photon transfer curve dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )
    outputBFK = cT.Output(
        name="brighterFatterKernel",
        doc="Output measured brighter-fatter kernel.",
        storageClass="BrighterFatterKernel",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )
class BrighterFatterKernelSolveConfig(pipeBase.PipelineTaskConfig,
                                      pipelineConnections=BrighterFatterKernelSolveConnections):
    """Configuration for the brighter-fatter kernel solver.

    NOTE(review): the field ``dtype``/``default`` values were lost in
    extraction and are reconstructed here — confirm against upstream.
    """
    level = pexConfig.ChoiceField(
        dtype=str,
        doc="The level at which to calculate the brighter-fatter kernels",
        default="AMP",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    ignoreAmpsForAveraging = pexConfig.ListField(
        dtype=str,
        doc="List of amp names to ignore when averaging the amplifier kernels into the detector"
            " kernel. Only relevant for level = DETECTOR",
        default=[]
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Rejection level for the sum of the input cross-correlations. Arrays which "
            "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    nSigmaClip = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    forceZeroSum = pexConfig.Field(
        dtype=bool,
        doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value?",
        default=False
    )
    useAmatrix = pexConfig.Field(
        dtype=bool,
        doc="Use the PTC 'a' matrix (Astier et al. 2019 equation 20) "
            "instead of the average of measured covariances?",
        default=False
    )
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    correlationQuadraticFit = pexConfig.Field(
        dtype=bool,
        doc="Use a quadratic fit to find the correlations instead of simple averaging?",
        default=False
    )
    correlationModelRadius = pexConfig.Field(
        dtype=int,
        doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
        default=100
    )
    correlationModelSlope = pexConfig.Field(
        dtype=float,
        doc="Slope of the correlation model for radii larger than correlationModelRadius",
        default=-1.35
    )
143 """Measure appropriate Brighter-Fatter Kernel from the PTC dataset.
146 ConfigClass = BrighterFatterKernelSolveConfig
147 _DefaultName =
'cpBfkMeasure'
150 """Ensure that the input and output dimensions are passed along.
154 butlerQC : `lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
155 Butler to operate on.
156 inputRefs : `lsst.pipe.base.connections.InputQuantizedConnection`
157 Input data refs to load.
158 ouptutRefs : `lsst.pipe.base.connections.OutputQuantizedConnection`
159 Output data refs to persist.
161 inputs = butlerQC.get(inputRefs)
164 inputs[
'inputDims'] = inputRefs.inputPtc.dataId.byName()
166 outputs = self.
runrun(**inputs)
167 butlerQC.put(outputs, outputRefs)
169 def run(self, inputPtc, dummy, camera, inputDims):
170 """Combine covariance information from PTC into brighter-fatter kernels.
174 inputPtc : `lsst.ip.isr.PhotonTransferCurveDataset`
175 PTC data containing per-amplifier covariance measurements.
176 dummy : `lsst.afw.image.Exposure`
177 The exposure used to select the appropriate PTC dataset.
178 In almost all circumstances, one of the input exposures
179 used to generate the PTC dataset is the best option.
180 camera : `lsst.afw.cameraGeom.Camera`
181 Camera to use for camera geometry information.
182 inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
183 DataIds to use to populate the output calibration.
187 results : `lsst.pipe.base.Struct`
188 The resulst struct containing:
190 ``outputBfk`` : `lsst.ip.isr.BrighterFatterKernel`
191 Resulting Brighter-Fatter Kernel.
194 self.log.
warn(
"No dummy exposure found.")
196 detector = camera[inputDims[
'detector']]
197 detName = detector.getName()
199 if self.config.level ==
'DETECTOR':
200 detectorCorrList =
list()
203 bfk.means = inputPtc.finalMeans
204 bfk.variances = inputPtc.finalVars
208 bfk.rawXcorrs = inputPtc.covariances
209 bfk.badAmps = inputPtc.badAmps
210 bfk.shape = (inputPtc.covMatrixSide*2 + 1, inputPtc.covMatrixSide*2 + 1)
211 bfk.gain = inputPtc.gain
212 bfk.noise = inputPtc.noise
213 bfk.meanXcorrs = dict()
217 ampName = amp.getName()
218 mask = np.array(inputPtc.expIdMask[ampName], dtype=bool)
220 gain = bfk.gain[ampName]
221 fluxes = np.array(bfk.means[ampName])[mask]
222 variances = np.array(bfk.variances[ampName])[mask]
223 xCorrList = [np.array(xcorr)
for xcorr
in bfk.rawXcorrs[ampName]]
224 xCorrList = np.array(xCorrList)[mask]
228 self.log.
warn(
"Impossible gain recieved from PTC for %s: %f. Skipping amplifier.",
230 bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
231 bfk.ampKernels[ampName] = np.zeros(bfk.shape)
232 bfk.valid[ampName] =
False
235 fluxes = np.array([flux*gain
for flux
in fluxes])
236 variances = np.array([variance*gain*gain
for variance
in variances])
239 scaledCorrList =
list()
240 for xcorrNum, (xcorr, flux, var)
in enumerate(zip(xCorrList, fluxes, variances), 1):
241 q = np.array(xcorr) * gain * gain
243 self.log.
info(
"Amp: %s %d/%d Flux: %f Var: %f Q(0,0): %g Q(1,0): %g Q(0,1): %g",
244 ampName, xcorrNum, len(xCorrList), flux, var, q[0][0], q[1][0], q[0][1])
249 q[0][0] -= 2.0*(flux)
252 self.log.
warn(
"Amp: %s %d skipped due to value of (variance-mean)=%f",
253 ampName, xcorrNum, q[0][0])
260 xcorrCheck = np.abs(np.sum(scaled))/np.sum(np.abs(scaled))
261 if (xcorrCheck > self.config.xcorrCheckRejectLevel)
or not (np.isfinite(xcorrCheck)):
262 self.log.
warn(
"Amp: %s %d skipped due to value of triangle-inequality sum %f",
263 ampName, xcorrNum, xcorrCheck)
266 scaledCorrList.append(scaled)
267 self.log.
info(
"Amp: %s %d/%d Final: %g XcorrCheck: %f",
268 ampName, xcorrNum, len(xCorrList), q[0][0], xcorrCheck)
270 if len(scaledCorrList) == 0:
271 self.log.
warn(
"Amp: %s All inputs rejected for amp!", ampName)
272 bfk.meanXcorrs[ampName] = np.zeros(bfk.shape)
273 bfk.ampKernels[ampName] = np.zeros(bfk.shape)
274 bfk.valid[ampName] =
False
277 if self.config.useAmatrix:
279 preKernel = np.pad(self.
_tileArray_tileArray(np.array(inputPtc.aMatrix[ampName])), ((1, 1)))
280 elif self.config.correlationQuadraticFit:
282 preKernel = self.
quadraticCorrelationsquadraticCorrelations(scaledCorrList, fluxes, f
"Amp: {ampName}")
287 center = int((bfk.shape[0] - 1) / 2)
289 if self.config.forceZeroSum:
290 totalSum = np.sum(preKernel)
292 if self.config.correlationModelRadius < (preKernel.shape[0] - 1) / 2:
294 preFactor = np.sqrt(preKernel[center, center + 1] * preKernel[center + 1, center])
295 slopeFactor = 2.0 * np.abs(self.config.correlationModelSlope)
296 totalSum += 2.0*np.pi*(preFactor / (slopeFactor*(center + 0.5))**slopeFactor)
298 preKernel[center, center] -= totalSum
299 self.log.
info(
"%s Zero-Sum Scale: %g", ampName, totalSum)
301 finalSum = np.sum(preKernel)
302 bfk.meanXcorrs[ampName] = preKernel
305 bfk.ampKernels[ampName] = postKernel
306 if self.config.level ==
'DETECTOR':
307 detectorCorrList.extend(scaledCorrList)
308 bfk.valid[ampName] =
True
309 self.log.
info(
"Amp: %s Sum: %g Center Info Pre: %g Post: %g",
310 ampName, finalSum, preKernel[center, center], postKernel[center, center])
313 if self.config.level ==
'DETECTOR':
314 preKernel = self.
averageCorrelationsaverageCorrelations(detectorCorrList, f
"Det: {detName}")
315 finalSum = np.sum(preKernel)
316 center = int((bfk.shape[0] - 1) / 2)
319 bfk.detKernels[detName] = postKernel
320 self.log.
info(
"Det: %s Sum: %g Center Info Pre: %g Post: %g",
321 detName, finalSum, preKernel[center, center], postKernel[center, center])
323 return pipeBase.Struct(
328 """Average input correlations.
332 xCorrList : `list` [`numpy.array`]
333 List of cross-correlations.
335 Name for log messages.
339 meanXcorr : `numpy.array`
340 The averaged cross-correlation.
342 meanXcorr = np.zeros_like(xCorrList[0])
343 xCorrList = np.transpose(xCorrList)
345 sctrl.setNumSigmaClip(self.config.nSigmaClip)
346 for i
in range(np.shape(meanXcorr)[0]):
347 for j
in range(np.shape(meanXcorr)[1]):
349 afwMath.MEANCLIP, sctrl).getValue()
352 meanXcorr = np.pad(meanXcorr, ((1, 1)))
357 """Measure a quadratic correlation model.
361 xCorrList : `list` [`numpy.array`]
362 List of cross-correlations.
363 fluxList : `numpy.array`
364 Associated list of fluxes.
366 Name for log messages.
370 meanXcorr : `numpy.array`
371 The averaged cross-correlation.
373 meanXcorr = np.zeros_like(xCorrList[0])
374 fluxList = np.square(fluxList)
375 xCorrList = np.array(xCorrList)
377 for i
in range(np.shape(meanXcorr)[0]):
378 for j
in range(np.shape(meanXcorr)[1]):
382 linearFit, linearFitErr, chiSq, weights =
irlsFit([0.0, 1e-4], fluxList,
383 xCorrList[:, j, i], funcPolynomial)
384 meanXcorr[i, j] = linearFit[1]
385 self.log.
debug(
"Quad fit meanXcorr[%d,%d] = %g", i, j, linearFit[1])
388 meanXcorr = np.pad(meanXcorr, ((1, 1)))
393 def _tileArray(in_array):
394 """Given an input quarter-image, tile/mirror it and return full image.
396 Given a square input of side-length n, of the form
398 input = array([[1, 2, 3],
402 return an array of size 2n-1 as
404 output = array([[ 9, 8, 7, 8, 9],
413 The square input quarter-array
418 The full, tiled array
420 assert(in_array.shape[0] == in_array.shape[1])
421 length = in_array.shape[0] - 1
422 output = np.zeros((2*length + 1, 2*length + 1))
424 for i
in range(length + 1):
425 for j
in range(length + 1):
426 output[i + length, j + length] = in_array[i, j]
427 output[-i + length, j + length] = in_array[i, j]
428 output[i + length, -j + length] = in_array[i, j]
429 output[-i + length, -j + length] = in_array[i, j]
433 """An implementation of the successive over relaxation (SOR) method.
435 A numerical method for solving a system of linear equations
436 with faster convergence than the Gauss-Seidel method.
440 source : `numpy.ndarray`
442 maxIter : `int`, optional
443 Maximum number of iterations to attempt before aborting.
444 eLevel : `float`, optional
445 The target error level at which we deem convergence to have
450 output : `numpy.ndarray`
454 maxIter = self.config.maxIterSuccessiveOverRelaxation
456 eLevel = self.config.eLevelSuccessiveOverRelaxation
458 assert source.shape[0] == source.shape[1],
"Input array must be square"
460 func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
461 resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
462 rhoSpe = np.cos(np.pi/source.shape[0])
465 for i
in range(1, func.shape[0] - 1):
466 for j
in range(1, func.shape[1] - 1):
467 resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
468 + func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
469 inError = np.sum(np.abs(resid))
477 while nIter < maxIter*2:
480 for i
in range(1, func.shape[0] - 1, 2):
481 for j
in range(1, func.shape[1] - 1, 2):
482 resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j]
483 + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
484 func[i, j] += omega*resid[i, j]*.25
485 for i
in range(2, func.shape[0] - 1, 2):
486 for j
in range(2, func.shape[1] - 1, 2):
487 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
488 + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
489 func[i, j] += omega*resid[i, j]*.25
491 for i
in range(1, func.shape[0] - 1, 2):
492 for j
in range(2, func.shape[1] - 1, 2):
493 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
494 + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
495 func[i, j] += omega*resid[i, j]*.25
496 for i
in range(2, func.shape[0] - 1, 2):
497 for j
in range(1, func.shape[1] - 1, 2):
498 resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
499 + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
500 func[i, j] += omega*resid[i, j]*.25
501 outError = np.sum(np.abs(resid))
502 if outError < inError*eLevel:
505 omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
507 omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
510 if nIter >= maxIter*2:
511 self.log.
warn(
"Failure: SuccessiveOverRelaxation did not converge in %s iterations."
512 "\noutError: %s, inError: %s," % (nIter//2, outError, inError*eLevel))
514 self.log.
info(
"Success: SuccessiveOverRelaxation converged in %s iterations."
515 "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
516 return func[1: -1, 1: -1]
# --- Documentation-extractor residue (not part of the module source) ---
# The lines below were cross-reference stubs injected by a documentation
# tool; they are preserved here as comments so the file remains parseable:
#   Pass parameters to a Statistics object.
#   def runQuantum(self, butlerQC, inputRefs, outputRefs)
#   def averageCorrelations(self, xCorrList, name)
#   def run(self, inputPtc, dummy, camera, inputDims)
#   def quadraticCorrelations(self, xCorrList, fluxList, name)
#   def successiveOverRelax(self, source, maxIter=None, eLevel=None)
#   daf::base::PropertyList * list
#   Statistics makeStatistics(lsst::afw::image::Image<Pixel> const &img,
#       lsst::afw::image::Mask<image::MaskPixel> const &msk, int const flags,
#       StatisticsControl const &sctrl=StatisticsControl())
#   Handle a watered-down front-end to the constructor (no variance)
#   def irlsFit(initialParams, dataX, dataY, function, weightsY=None,
#       weightType='Cauchy')