LSSTApplications  20.0.0
LSSTDataManagementBasePackage
makeBrighterFatterKernel.py
Go to the documentation of this file.
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 """Calculation of brighter-fatter effect correlations and kernels."""
23 
24 __all__ = ['MakeBrighterFatterKernelTaskConfig',
25  'MakeBrighterFatterKernelTask',
26  'calcBiasCorr']
27 
28 import os
29 import copy
30 from scipy import stats
31 import numpy as np
32 import matplotlib.pyplot as plt
33 # the following import is required for 3d projection
34 from mpl_toolkits.mplot3d import axes3d # noqa: F401
35 from matplotlib.backends.backend_pdf import PdfPages
36 from dataclasses import dataclass
37 
38 import lsstDebug
39 import lsst.afw.image as afwImage
40 import lsst.afw.math as afwMath
41 import lsst.afw.display as afwDisp
42 from lsst.ip.isr import IsrTask
43 import lsst.log as lsstLog
44 import lsst.pex.config as pexConfig
45 import lsst.pipe.base as pipeBase
46 from .utils import PairedVisitListTaskRunner, checkExpLengthEqual
47 import lsst.daf.persistence.butlerExceptions as butlerExceptions
48 from lsst.cp.pipe.ptc import (MeasurePhotonTransferCurveTaskConfig, MeasurePhotonTransferCurveTask,
49  PhotonTransferCurveDataset)
50 
51 
52 class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
53  """Config class for bright-fatter effect coefficient calculation."""
54 
55  isr = pexConfig.ConfigurableField(
56  target=IsrTask,
57  doc="""Task to perform instrumental signature removal""",
58  )
59  isrMandatorySteps = pexConfig.ListField(
60  dtype=str,
61  doc="isr operations that must be performed for valid results. Raises if any of these are False",
62  default=['doAssembleCcd']
63  )
64  isrForbiddenSteps = pexConfig.ListField(
65  dtype=str,
66  doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
67  default=['doFlat', 'doFringe', 'doBrighterFatter', 'doUseOpticsTransmission',
68  'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
69  )
70  isrDesirableSteps = pexConfig.ListField(
71  dtype=str,
72  doc="isr operations that it is advisable to perform, but are not mission-critical." +
73  " WARNs are logged for any of these found to be False.",
74  default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
75  )
76  doCalcGains = pexConfig.Field(
77  dtype=bool,
78  doc="Measure the per-amplifier gains (using the photon transfer curve method)?",
79  default=True,
80  )
81  doPlotPtcs = pexConfig.Field(
82  dtype=bool,
83  doc="Plot the PTCs and butler.put() them as defined by the plotBrighterFatterPtc template",
84  default=False,
85  )
86  forceZeroSum = pexConfig.Field(
87  dtype=bool,
88  doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value?",
89  default=False,
90  )
91  correlationQuadraticFit = pexConfig.Field(
92  dtype=bool,
93  doc="Use a quadratic fit to find the correlations instead of simple averaging?",
94  default=False,
95  )
96  correlationModelRadius = pexConfig.Field(
97  dtype=int,
98  doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
99  default=100,
100  )
101  correlationModelSlope = pexConfig.Field(
102  dtype=float,
103  doc="Slope of the correlation model for radii larger than correlationModelRadius",
104  default=-1.35,
105  )
106  ccdKey = pexConfig.Field(
107  dtype=str,
108  doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'",
109  default='ccd',
110  )
111  minMeanSignal = pexConfig.Field(
112  dtype=float,
113  doc="Minimum value of mean signal (in ADU) to consider.",
114  default=0,
115  )
116  maxMeanSignal = pexConfig.Field(
117  dtype=float,
118  doc="Maximum value to of mean signal (in ADU) to consider.",
119  default=9e6,
120  )
121  maxIterRegression = pexConfig.Field(
122  dtype=int,
123  doc="Maximum number of iterations for the regression fitter",
124  default=2
125  )
126  nSigmaClipGainCalc = pexConfig.Field(
127  dtype=int,
128  doc="Number of sigma to clip the pixel value distribution to during gain calculation",
129  default=5
130  )
131  nSigmaClipRegression = pexConfig.Field(
132  dtype=int,
133  doc="Number of sigma to clip outliers from the line of best fit to during iterative regression",
134  default=4
135  )
136  xcorrCheckRejectLevel = pexConfig.Field(
137  dtype=float,
138  doc="Sanity check level for the sum of the input cross-correlations. Arrays which " +
139  "sum to greater than this are discarded before the clipped mean is calculated.",
140  default=2.0
141  )
142  maxIterSuccessiveOverRelaxation = pexConfig.Field(
143  dtype=int,
144  doc="The maximum number of iterations allowed for the successive over-relaxation method",
145  default=10000
146  )
147  eLevelSuccessiveOverRelaxation = pexConfig.Field(
148  dtype=float,
149  doc="The target residual error for the successive over-relaxation method",
150  default=5.0e-14
151  )
152  nSigmaClipKernelGen = pexConfig.Field(
153  dtype=float,
154  doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See " +
155  "the generateKernel docstring for more info.",
156  default=4
157  )
158  nSigmaClipXCorr = pexConfig.Field(
159  dtype=float,
160  doc="Number of sigma to clip when calculating means for the cross-correlation",
161  default=5
162  )
163  maxLag = pexConfig.Field(
164  dtype=int,
165  doc="The maximum lag (in pixels) to use when calculating the cross-correlation/kernel",
166  default=8
167  )
168  nPixBorderGainCalc = pexConfig.Field(
169  dtype=int,
170  doc="The number of border pixels to exclude when calculating the gain",
171  default=10
172  )
173  nPixBorderXCorr = pexConfig.Field(
174  dtype=int,
175  doc="The number of border pixels to exclude when calculating the cross-correlation and kernel",
176  default=10
177  )
178  biasCorr = pexConfig.Field(
179  dtype=float,
180  doc="An empirically determined correction factor, used to correct for the sigma-clipping of" +
181  " a non-Gaussian distribution. Post DM-15277, code will exist here to calculate appropriate values",
182  default=0.9241
183  )
184  backgroundBinSize = pexConfig.Field(
185  dtype=int,
186  doc="Size of the background bins",
187  default=128
188  )
189  fixPtcThroughOrigin = pexConfig.Field(
190  dtype=bool,
191  doc="Constrain the fit of the photon transfer curve to go through the origin when measuring" +
192  "the gain?",
193  default=True
194  )
195  level = pexConfig.ChoiceField(
196  doc="The level at which to calculate the brighter-fatter kernels",
197  dtype=str,
198  default="DETECTOR",
199  allowed={
200  "AMP": "Every amplifier treated separately",
201  "DETECTOR": "One kernel per detector",
202  }
203  )
204  ignoreAmpsForAveraging = pexConfig.ListField(
205  dtype=str,
206  doc="List of amp names to ignore when averaging the amplifier kernels into the detector" +
207  " kernel. Only relevant for level = AMP",
208  default=[]
209  )
210  backgroundWarnLevel = pexConfig.Field(
211  dtype=float,
212  doc="Log warnings if the mean of the fitted background is found to be above this level after " +
213  "differencing image pair.",
214  default=0.1
215  )
216 
217 
218 class BrighterFatterKernelTaskDataIdContainer(pipeBase.DataIdContainer):
219  """A DataIdContainer for the MakeBrighterFatterKernelTask."""
220 
221  def makeDataRefList(self, namespace):
222  """Compute refList based on idList.
223 
224  This method must be defined as the dataset does not exist before this
225  task is run.
226 
227  Parameters
228  ----------
229  namespace
230  Results of parsing the command-line.
231 
232  Notes
233  -----
234  Not called if ``add_id_argument`` called
235  with ``doMakeDataRefList=False``.
236  Note that this is almost a copy-and-paste of the vanilla implementation,
237  but without checking if the datasets already exist,
238  as this task exists to make them.
239  """
240  if self.datasetType is None:
241  raise RuntimeError("Must call setDatasetType first")
242  butler = namespace.butler
243  for dataId in self.idList:
244  refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
245  # exclude nonexistent data
246  # this is a recursive test, e.g. for the sake of "raw" data
247  if not refList:
248  namespace.log.warn("No data found for dataId=%s", dataId)
249  continue
250  self.refList += refList
251 
252 
254  """A simple class to hold the kernel(s) generated and the intermediate
255  data products.
256 
257  kernel.ampwiseKernels are the kernels for each amplifier in the detector,
258  as generated by having LEVEL == 'AMP'
259 
260  kernel.detectorKernel is the kernel generated for the detector as a whole,
261  as generated by having LEVEL == 'DETECTOR'
262 
263  kernel.detectorKernelFromAmpKernels is the kernel for the detector,
264  generated by averaging together the amps in the detector
265 
266  The originalLevel is the level for which the kernel(s) were generated,
267  i.e. the level at which the task was originally run.
268  """
269 
270  def __init__(self, originalLevel, **kwargs):
271  self.__dict__["originalLevel"] = originalLevel
272  self.__dict__["ampwiseKernels"] = {}
273  self.__dict__["detectorKernel"] = {}
274  self.__dict__["detectorKernelFromAmpKernels"] = {}
275  self.__dict__["means"] = []
276  self.__dict__["rawMeans"] = []
277  self.__dict__["rawXcorrs"] = []
278  self.__dict__["xCorrs"] = []
279  self.__dict__["meanXcorrs"] = []
280  self.__dict__["gain"] = None # will be a dict keyed by amp if set
281  self.__dict__["gainErr"] = None # will be a dict keyed by amp if set
282  self.__dict__["noise"] = None # will be a dict keyed by amp if set
283  self.__dict__["noiseErr"] = None # will be a dict keyed by amp if set
284 
285  for key, value in kwargs.items():
286  if hasattr(self, key):
287  setattr(self, key, value)
288 
289  def __setattr__(self, attribute, value):
290  """Protect class attributes"""
291  if attribute not in self.__dict__:
292  print(f"Cannot set {attribute}")
293  else:
294  self.__dict__[attribute] = value
295 
296  def replaceDetectorKernelWithAmpKernel(self, ampName, detectorName):
297  self.detectorKernel[detectorName] = self.ampwiseKernels[ampName]
298 
299  def makeDetectorKernelFromAmpwiseKernels(self, detectorName, ampsToExclude=[], overwrite=False):
300  if detectorName not in self.detectorKernelFromAmpKernels.keys():
301  self.detectorKernelFromAmpKernels[detectorName] = {}
302 
303  if self.detectorKernelFromAmpKernels[detectorName] != {} and overwrite is False:
304  raise RuntimeError('Was told to replace existing detector kernel with overwrite==False')
305 
306  ampNames = self.ampwiseKernels.keys()
307  ampsToAverage = [amp for amp in ampNames if amp not in ampsToExclude]
308  avgKernel = np.zeros_like(self.ampwiseKernels[ampsToAverage[0]])
309  for ampName in ampsToAverage:
310  avgKernel += self.ampwiseKernels[ampName]
311  avgKernel /= len(ampsToAverage)
312 
313  self.detectorKernelFromAmpKernels[detectorName] = avgKernel
314 
315 
316 @dataclass
318  """The gains and the results of the PTC fits."""
319  gains: dict
320  ptcResults: dict
321 
322 
323 class MakeBrighterFatterKernelTask(pipeBase.CmdLineTask):
324  """Brighter-fatter effect correction-kernel calculation task.
325 
326  A command line task for calculating the brighter-fatter correction
327  kernel from pairs of flat-field images (with the same exposure length).
328 
329  The following operations are performed:
330 
331  - The configurable isr task is called, which unpersists and assembles the
332  raw images, and performs the selected instrument signature removal tasks.
333  For the purpose of brighter-fatter coefficient calculation is it
334  essential that certain components of isr are *not* performed, and
335  recommended that certain others are. The task checks the selected isr
336  configuration before it is run, and if forbidden components have been
337  selected task will raise, and if recommended ones have not been selected,
338  warnings are logged.
339 
340  - The gain of the each amplifier in the detector is calculated using
341  the photon transfer curve (PTC) method and used to correct the images
342  so that all calculations are done in units of electrons, and so that the
343  level across amplifier boundaries is continuous.
344  Outliers in the PTC are iteratively rejected
345  before fitting, with the nSigma rejection level set by
346  config.nSigmaClipRegression. Individual pixels are ignored in the input
347  images the image based on config.nSigmaClipGainCalc.
348 
349  - Each image is then cross-correlated with the one it's paired with
350  (with the pairing defined by the --visit-pairs command line argument),
351  which is done either the whole-image to whole-image,
352  or amplifier-by-amplifier, depending on config.level.
353 
354  - Once the cross-correlations have been calculated for each visit pair,
355  these are used to generate the correction kernel.
356  The maximum lag used, in pixels, and hence the size of the half-size
357  of the kernel generated, is given by config.maxLag,
358  i.e. a value of 10 will result in a kernel of size 2n-1 = 19x19 pixels.
359  Outlier values in these cross-correlations are rejected by using a
360  pixel-wise sigma-clipped thresholding to each cross-correlation in
361  the visit-pairs-length stack of cross-correlations.
362  The number of sigma clipped to is set by config.nSigmaClipKernelGen.
363 
364  - Once DM-15277 has been completed, a method will exist to calculate the
365  empirical correction factor, config.biasCorr.
366  TODO: DM-15277 update this part of the docstring once the ticket is done.
367  """
368 
369  RunnerClass = PairedVisitListTaskRunner
370  ConfigClass = MakeBrighterFatterKernelTaskConfig
371  _DefaultName = "makeBrighterFatterKernel"
372 
373  def __init__(self, *args, **kwargs):
374  pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
375  self.makeSubtask("isr")
376 
377  self.debug = lsstDebug.Info(__name__)
378  if self.debug.enabled:
379  self.log.info("Running with debug enabled...")
380  # If we're displaying, test it works and save displays for later.
381  # It's worth testing here as displays are flaky and sometimes
382  # can't be contacted, and given processing takes a while,
383  # it's a shame to fail late due to display issues.
384  if self.debug.display:
385  try:
386  afwDisp.setDefaultBackend(self.debug.displayBackend)
387  afwDisp.Display.delAllDisplays()
388  self.disp1 = afwDisp.Display(0, open=True)
389  self.disp2 = afwDisp.Display(1, open=True)
390 
391  im = afwImage.ImageF(1, 1)
392  im.array[:] = [[1]]
393  self.disp1.mtv(im)
394  self.disp1.erase()
395  except NameError:
396  self.debug.display = False
397  self.log.warn('Failed to setup/connect to display! Debug display has been disabled')
398 
399  plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
400  self.validateIsrConfig()
401  self.config.validate()
402  self.config.freeze()
403 
404  @classmethod
405  def _makeArgumentParser(cls):
406  """Augment argument parser for the MakeBrighterFatterKernelTask."""
407  parser = pipeBase.ArgumentParser(name=cls._DefaultName)
408  parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
409  help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
410  parser.add_id_argument("--id", datasetType="brighterFatterKernel",
411  ContainerClass=BrighterFatterKernelTaskDataIdContainer,
412  help="The ccds to use, e.g. --id ccd=0..100")
413  return parser
414 
415  def validateIsrConfig(self):
416  """Check that appropriate ISR settings are being used
417  for brighter-fatter kernel calculation."""
418 
419  # How should we handle saturation/bad regions?
420  # 'doSaturationInterpolation': True
421  # 'doNanInterpAfterFlat': False
422  # 'doSaturation': True
423  # 'doSuspect': True
424  # 'doWidenSaturationTrails': True
425  # 'doSetBadRegions': True
426 
427  configDict = self.config.isr.toDict()
428 
429  for configParam in self.config.isrMandatorySteps:
430  if configDict[configParam] is False:
431  raise RuntimeError('Must set config.isr.%s to True '
432  'for brighter-fatter kernel calculation' % configParam)
433 
434  for configParam in self.config.isrForbiddenSteps:
435  if configDict[configParam] is True:
436  raise RuntimeError('Must set config.isr.%s to False '
437  'for brighter-fatter kernel calculation' % configParam)
438 
439  for configParam in self.config.isrDesirableSteps:
440  if configParam not in configDict:
441  self.log.info('Failed to find key %s in the isr config dict. You probably want ' +
442  'to set the equivalent for your obs_package to True.' % configParam)
443  continue
444  if configDict[configParam] is False:
445  self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calculation. '
446  'It is probably desirable to have this set to True' % configParam)
447 
448  # subtask settings
449  if not self.config.isr.assembleCcd.doTrim:
450  raise RuntimeError('Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True')
451 
452  @pipeBase.timeMethod
453  def runDataRef(self, dataRef, visitPairs):
454  """Run the brighter-fatter measurement task.
455 
456  For a dataRef (which is each detector here),
457  and given a list of visit pairs, calculate the
458  brighter-fatter kernel for the detector.
459 
460  Parameters
461  ----------
462  dataRef : `list` of `lsst.daf.persistence.ButlerDataRef`
463  dataRef for the detector for the visits to be fit.
464  visitPairs : `iterable` of `tuple` of `int`
465  Pairs of visit numbers to be processed together
466  """
467  np.random.seed(0) # used in the PTC fit bootstrap
468 
469  # setup necessary objects
470  # NB: don't use dataRef.get('raw_detector')
471  # this currently doesn't work for composites because of the way
472  # composite objects (i.e. LSST images) are handled/constructed
473  # these need to be retrieved from the camera and dereferenced
474  # rather than accessed directly
475  detNum = dataRef.dataId[self.config.ccdKey]
476  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
477  amps = detector.getAmplifiers()
478  ampNames = [amp.getName() for amp in amps]
479 
480  if self.config.level == 'DETECTOR':
481  kernels = {detNum: []}
482  means = {detNum: []}
483  xcorrs = {detNum: []}
484  meanXcorrs = {detNum: []}
485  elif self.config.level == 'AMP':
486  kernels = {key: [] for key in ampNames}
487  means = {key: [] for key in ampNames}
488  xcorrs = {key: [] for key in ampNames}
489  meanXcorrs = {key: [] for key in ampNames}
490  else:
491  raise RuntimeError("Unsupported level: {}".format(self.config.level))
492 
493  # we must be able to get the gains one way or the other, so check early
494  if not self.config.doCalcGains:
495  deleteMe = None
496  try:
497  deleteMe = dataRef.get('photonTransferCurveDataset')
498  except butlerExceptions.NoResults:
499  try:
500  deleteMe = dataRef.get('brighterFatterGain')
501  except butlerExceptions.NoResults:
502  pass
503  if not deleteMe:
504  raise RuntimeError("doCalcGains == False and gains could not be got from butler") from None
505  else:
506  del deleteMe
507 
508  # if the level is DETECTOR we need to have the gains first so that each
509  # amp can be gain corrected in order to treat the detector as a single
510  # imaging area. However, if the level is AMP we can wait, calculate
511  # the correlations and correct for the gains afterwards
512  if self.config.level == 'DETECTOR':
513  if self.config.doCalcGains:
514  self.log.info('Computing gains for detector %s' % detNum)
515  gains, nomGains = self.estimateGains(dataRef, visitPairs)
516  dataRef.put(gains, datasetType='brighterFatterGain')
517  self.log.debug('Finished gain estimation for detector %s' % detNum)
518  else:
519  gains = dataRef.get('brighterFatterGain')
520  if not gains:
521  raise RuntimeError('Failed to retrieved gains for detector %s' % detNum)
522  self.log.info('Retrieved stored gain for detector %s' % detNum)
523  self.log.debug('Detector %s has gains %s' % (detNum, gains))
524  else: # we fake the gains as 1 for now, and correct later
525  gains = BrighterFatterGain({}, {})
526  for ampName in ampNames:
527  gains.gains[ampName] = 1.0
528  # We'll use the ptc.py code to calculate the gains, so we set this up
530  ptcConfig.isrForbiddenSteps = []
531  ptcConfig.doFitBootstrap = True
532  ptcConfig.ptcFitType = 'POLYNOMIAL' # default Astier doesn't work for gain correction
533  ptcConfig.polynomialFitDegree = 3
534  ptcConfig.minMeanSignal = self.config.minMeanSignal
535  ptcConfig.maxMeanSignal = self.config.maxMeanSignal
536  ptcTask = MeasurePhotonTransferCurveTask(config=ptcConfig)
537  ptcDataset = PhotonTransferCurveDataset(ampNames)
538 
539  # Loop over pairs of visits
540  # calculating the cross-correlations at the required level
541  for (v1, v2) in visitPairs:
542  dataRef.dataId['expId'] = v1
543  exp1 = self.isr.runDataRef(dataRef).exposure
544  dataRef.dataId['expId'] = v2
545  exp2 = self.isr.runDataRef(dataRef).exposure
546  del dataRef.dataId['expId']
547  checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
548 
549  self.log.info('Preparing images for cross-correlation calculation for detector %s' % detNum)
550  # note the shape of these returns depends on level
551  _scaledMaskedIms1, _means1 = self._makeCroppedExposures(exp1, gains, self.config.level)
552  _scaledMaskedIms2, _means2 = self._makeCroppedExposures(exp2, gains, self.config.level)
553 
554  # Compute the cross-correlation and means
555  # at the appropriate config.level:
556  # - "DETECTOR": one key, so compare the two visits to each other
557  # - "AMP": n_amp keys, comparing each amplifier of one visit
558  # to the same amplifier in the visit its paired with
559  for det_object in _scaledMaskedIms1.keys(): # det_object is ampName or detName depending on level
560  self.log.debug("Calculating correlations for %s" % det_object)
561  _xcorr, _mean = self._crossCorrelate(_scaledMaskedIms1[det_object],
562  _scaledMaskedIms2[det_object])
563  xcorrs[det_object].append(_xcorr)
564  means[det_object].append([_means1[det_object], _means2[det_object]])
565  if self.config.level != 'DETECTOR':
566  # Populate the ptcDataset for running fitting in the PTC task
567  expTime = exp1.getInfo().getVisitInfo().getExposureTime()
568  ptcDataset.rawExpTimes[det_object].append(expTime)
569  ptcDataset.rawMeans[det_object].append((_means1[det_object] + _means2[det_object]) / 2.0)
570  ptcDataset.rawVars[det_object].append(_xcorr[0, 0] / 2.0)
571 
572  # TODO: DM-15305 improve debug functionality here.
573  # This is position 1 for the removed code.
574 
575  # Save the raw means and xcorrs so we can look at them before any modifications
576  rawMeans = copy.deepcopy(means)
577  rawXcorrs = copy.deepcopy(xcorrs)
578 
579  # gains are always and only pre-applied for DETECTOR
580  # so for all other levels we now calculate them from the correlations
581  # and apply them
582  if self.config.level != 'DETECTOR':
583  if self.config.doCalcGains: # Run the PTC task for calculating the gains, put results
584  self.log.info('Calculating gains for detector %s using PTC task' % detNum)
585  ptcDataset = ptcTask.fitPtcAndNonLinearity(ptcDataset, ptcConfig.ptcFitType)
586  dataRef.put(ptcDataset, datasetType='photonTransferCurveDataset')
587  self.log.debug('Finished gain estimation for detector %s' % detNum)
588  else: # load results - confirmed to work much earlier on, so can be relied upon here
589  ptcDataset = dataRef.get('photonTransferCurveDataset')
590 
591  self._applyGains(means, xcorrs, ptcDataset)
592 
593  if self.config.doPlotPtcs:
594  dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
595  if not os.path.exists(dirname):
596  os.makedirs(dirname)
597  detNum = dataRef.dataId[self.config.ccdKey]
598  filename = f"PTC_det{detNum}.pdf"
599  filenameFull = os.path.join(dirname, filename)
600  with PdfPages(filenameFull) as pdfPages:
601  ptcTask._plotPtc(ptcDataset, ptcConfig.ptcFitType, pdfPages)
602 
603  # having calculated and applied the gains for all code-paths we can now
604  # generate the kernel(s)
605  self.log.info('Generating kernel(s) for %s' % detNum)
606  for det_object in xcorrs.keys(): # looping over either detectors or amps
607  if self.config.level == 'DETECTOR':
608  objId = 'detector %s' % det_object
609  elif self.config.level == 'AMP':
610  objId = 'detector %s AMP %s' % (detNum, det_object)
611 
612  try:
613  meanXcorr, kernel = self.generateKernel(xcorrs[det_object], means[det_object], objId)
614  kernels[det_object] = kernel
615  meanXcorrs[det_object] = meanXcorr
616  except RuntimeError:
617  # bad amps will cause failures here which we want to ignore
618  self.log.warn('RuntimeError during kernel generation for %s' % objId)
619  continue
620 
621  bfKernel = BrighterFatterKernel(self.config.level)
622  bfKernel.means = means
623  bfKernel.rawMeans = rawMeans
624  bfKernel.rawXcorrs = rawXcorrs
625  bfKernel.xCorrs = xcorrs
626  bfKernel.meanXcorrs = meanXcorrs
627  bfKernel.originalLevel = self.config.level
628  try:
629  bfKernel.gain = ptcDataset.gain
630  bfKernel.gainErr = ptcDataset.gainErr
631  bfKernel.noise = ptcDataset.noise
632  bfKernel.noiseErr = ptcDataset.noiseErr
633  except NameError: # we don't have a ptcDataset to store results from
634  pass
635 
636  if self.config.level == 'AMP':
637  bfKernel.ampwiseKernels = kernels
638  ex = self.config.ignoreAmpsForAveraging
639  bfKernel.detectorKernel = bfKernel.makeDetectorKernelFromAmpwiseKernels(detNum, ampsToExclude=ex)
640 
641  elif self.config.level == 'DETECTOR':
642  bfKernel.detectorKernel = kernels
643  else:
644  raise RuntimeError('Invalid level for kernel calculation; this should not be possible.')
645 
646  dataRef.put(bfKernel)
647 
648  self.log.info('Finished generating kernel(s) for %s' % detNum)
649  return pipeBase.Struct(exitStatus=0)
650 
651  def _applyGains(self, means, xcorrs, ptcData):
652  """Apply the gains calculated by the PtcTask.
653 
654  It also removes datapoints that were thrown out in the PTC algorithm.
655 
656  Parameters
657  ----------
658  means : `dict` [`str`, `list` of `tuple`]
659  Dictionary, keyed by ampName, containing a list of the means for
660  each visit pair.
661 
662  xcorrs : `dict` [`str`, `list` of `np.array`]
663  Dictionary, keyed by ampName, containing a list of the
664  cross-correlations for each visit pair.
665 
666  ptcDataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
667  The results of running the ptcTask.
668  """
669  ampNames = means.keys()
670  assert set(xcorrs.keys()) == set(ampNames) == set(ptcData.ampNames)
671 
672  for ampName in ampNames:
673  mask = ptcData.visitMask[ampName]
674  gain = ptcData.gain[ampName]
675 
676  fitType = ptcData.ptcFitType[ampName]
677  if fitType != 'POLYNOMIAL':
678  raise RuntimeError(f"Only polynomial fit types supported currently, found {fitType}")
679  ptcFitPars = ptcData.ptcFitPars[ampName]
680  # polynomial params go in ascending order, so this is safe w.r.t.
681  # the polynomial order, as the constant term is always first,
682  # the linear term second etc
683 
684  # Adjust xcorrs[0,0] to remove the linear gain part, leaving just the second order part
685  for i in range(len(means[ampName])):
686  ampMean = np.mean(means[ampName][i])
687  xcorrs[ampName][i][0, 0] -= 2.0 * (ampMean * ptcFitPars[1] + ptcFitPars[0])
688 
689  # Now adjust the means and xcorrs for the calculated gain and remove the bad indices
690  means[ampName] = [[value*gain for value in pair] for pair in np.array(means[ampName])[mask]]
691  xcorrs[ampName] = [arr*gain*gain for arr in np.array(xcorrs[ampName])[mask]]
692  return
693 
694  def _makeCroppedExposures(self, exp, gains, level):
695  """Prepare exposure for cross-correlation calculation.
696 
697  For each amp, crop by the border amount, specified by
698  config.nPixBorderXCorr, then rescale by the gain
699  and subtract the sigma-clipped mean.
700  If the level is 'DETECTOR' then this is done
701  to the whole image so that it can be cross-correlated, with a copy
702  being returned.
703  If the level is 'AMP' then this is done per-amplifier,
704  and a copy of each prepared amp-image returned.
705 
706  Parameters:
707  -----------
708  exp : `lsst.afw.image.exposure.ExposureF`
709  The exposure to prepare
710  gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
711  The object holding the amplifier gains, essentially a
712  dictionary of the amplifier gain values, keyed by amplifier name
713  level : `str`
714  Either `AMP` or `DETECTOR`
715 
716  Returns:
717  --------
718  scaledMaskedIms : `dict` [`str`, `lsst.afw.image.maskedImage.MaskedImageF`]
719  Depending on level, this is either one item, or n_amp items,
720  keyed by detectorId or ampName
721 
722  Notes:
723  ------
724  This function is controlled by the following config parameters:
725  nPixBorderXCorr : `int`
726  The number of border pixels to exclude
727  nSigmaClipXCorr : `float`
728  The number of sigma to be clipped to
729  """
730  assert(isinstance(exp, afwImage.ExposureF))
731 
732  local_exp = exp.clone() # we don't want to modify the image passed in
733  del exp # ensure we don't make mistakes!
734 
735  border = self.config.nPixBorderXCorr
736  sigma = self.config.nSigmaClipXCorr
737 
738  sctrl = afwMath.StatisticsControl()
739  sctrl.setNumSigmaClip(sigma)
740 
741  means = {}
742  returnAreas = {}
743 
744  detector = local_exp.getDetector()
745  amps = detector.getAmplifiers()
746 
747  mi = local_exp.getMaskedImage() # makeStatistics does not seem to take exposures
748  temp = mi.clone()
749 
750  # Rescale each amp by the appropriate gain and subtract the mean.
751  # NB these are views modifying the image in-place
752  for amp in amps:
753  ampName = amp.getName()
754  rescaleIm = mi[amp.getBBox()] # the soon-to-be scaled, mean subtracted, amp image
755  rescaleTemp = temp[amp.getBBox()]
756  mean = afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP, sctrl).getValue()
757  gain = gains.gains[ampName]
758  rescaleIm *= gain
759  rescaleTemp *= gain
760  self.log.debug("mean*gain = %s, clipped mean = %s" %
761  (mean*gain, afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP,
762  sctrl).getValue()))
763  rescaleIm -= mean*gain
764 
765  if level == 'AMP': # build the dicts if doing amp-wise
766  means[ampName] = afwMath.makeStatistics(rescaleTemp[border: -border, border: -border,
767  afwImage.LOCAL], afwMath.MEANCLIP, sctrl).getValue()
768  returnAreas[ampName] = rescaleIm
769 
770  if level == 'DETECTOR': # else just average the whole detector
771  detName = local_exp.getDetector().getId()
772  means[detName] = afwMath.makeStatistics(temp[border: -border, border: -border, afwImage.LOCAL],
773  afwMath.MEANCLIP, sctrl).getValue()
774  returnAreas[detName] = rescaleIm
775 
776  return returnAreas, means
777 
778  def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None):
779  """Calculate the cross-correlation of an area.
780 
781  If the area in question contains multiple amplifiers then they must
782  have been gain corrected.
783 
784  Parameters:
785  -----------
786  maskedIm0 : `lsst.afw.image.MaskedImageF`
787  The first image area
788  maskedIm1 : `lsst.afw.image.MaskedImageF`
789  The first image area
790  frameId : `str`, optional
791  The frame identifier for use in the filename
792  if writing debug outputs.
793  detId : `str`, optional
794  The detector identifier (detector, or detector+amp,
795  depending on config.level) for use in the filename
796  if writing debug outputs.
797  runningBiasCorrSim : `bool`
798  Set to true when using this function to calculate the amount of bias
799  introduced by the sigma clipping. If False, the biasCorr parameter
800  is divided by to remove the bias, but this is, of course, not
801  appropriate when this is the parameter being measured.
802 
803  Returns:
804  --------
805  xcorr : `np.ndarray`
806  The quarter-image cross-correlation
807  mean : `float`
808  The sum of the means of the input images,
809  sigma-clipped, and with borders applied.
810  This is used when using this function with simulations to calculate
811  the biasCorr parameter.
812 
813  Notes:
814  ------
815  This function is controlled by the following config parameters:
816  maxLag : `int`
817  The maximum lag to use in the cross-correlation calculation
818  nPixBorderXCorr : `int`
819  The number of border pixels to exclude
820  nSigmaClipXCorr : `float`
821  The number of sigma to be clipped to
822  biasCorr : `float`
823  Parameter used to correct from the bias introduced
824  by the sigma cuts.
825  """
826  maxLag = self.config.maxLag
827  border = self.config.nPixBorderXCorr
828  sigma = self.config.nSigmaClipXCorr
829  biasCorr = self.config.biasCorr
830 
831  sctrl = afwMath.StatisticsControl()
832  sctrl.setNumSigmaClip(sigma)
833 
834  mean = afwMath.makeStatistics(maskedIm0.getImage()[border: -border, border: -border, afwImage.LOCAL],
835  afwMath.MEANCLIP, sctrl).getValue()
836  mean += afwMath.makeStatistics(maskedIm1.getImage()[border: -border, border: -border, afwImage.LOCAL],
837  afwMath.MEANCLIP, sctrl).getValue()
838 
839  # Diff the images, and apply border
840  diff = maskedIm0.clone()
841  diff -= maskedIm1.getImage()
842  diff = diff[border: -border, border: -border, afwImage.LOCAL]
843 
844  if self.debug.writeDiffImages:
845  filename = '_'.join(['diff', 'detector', detId, frameId, '.fits'])
846  diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
847 
848  # Subtract background. It should be a constant, but it isn't always
849  binsize = self.config.backgroundBinSize
850  nx = diff.getWidth()//binsize
851  ny = diff.getHeight()//binsize
852  bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
853  bkgd = afwMath.makeBackground(diff, bctrl)
854  bgImg = bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE, afwMath.REDUCE_INTERP_ORDER)
855  bgMean = np.mean(bgImg.getArray())
856  if abs(bgMean) >= self.config.backgroundWarnLevel:
857  self.log.warn('Mean of background = %s > config.maxBackground' % bgMean)
858 
859  diff -= bgImg
860 
861  if self.debug.writeDiffImages:
862  filename = '_'.join(['bgSub', 'diff', 'detector', detId, frameId, '.fits'])
863  diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
864  if self.debug.display:
865  self.disp1.mtv(diff, title=frameId)
866 
867  self.log.debug("Median and variance of diff:")
868  self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.MEDIAN, sctrl).getValue())
869  self.log.debug("%s, %s" % (afwMath.makeStatistics(diff, afwMath.VARIANCECLIP, sctrl).getValue(),
870  np.var(diff.getImage().getArray())))
871 
872  # Measure the correlations
873  dim0 = diff[0: -maxLag, : -maxLag, afwImage.LOCAL]
874  dim0 -= afwMath.makeStatistics(dim0, afwMath.MEANCLIP, sctrl).getValue()
875  width, height = dim0.getDimensions()
876  xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)
877 
878  for xlag in range(maxLag + 1):
879  for ylag in range(maxLag + 1):
880  dim_xy = diff[xlag:xlag + width, ylag: ylag + height, afwImage.LOCAL].clone()
881  dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
882  dim_xy *= dim0
883  xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
884  if not runningBiasCorrSim:
885  xcorr[xlag, ylag] /= biasCorr
886 
887  # TODO: DM-15305 improve debug functionality here.
888  # This is position 2 for the removed code.
889 
890  return xcorr, mean
891 
892  def estimateGains(self, dataRef, visitPairs):
893  """Estimate the amplifier gains using the specified visits.
894 
895  Given a dataRef and list of flats of varying intensity,
896  calculate the gain for each amplifier in the detector
897  using the photon transfer curve (PTC) method.
898 
899  The config.fixPtcThroughOrigin option determines whether the iterative
900  fitting is forced to go through the origin or not.
901  This defaults to True, fitting var=1/gain * mean.
902  If set to False then var=1/g * mean + const is fitted.
903 
904  This is really a photo transfer curve (PTC) gain measurement task.
905  See DM-14063 for results from of a comparison between
906  this task's numbers and the gain values in the HSC camera model,
907  and those measured by the PTC task in eotest.
908 
909  Parameters
910  ----------
911  dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
912  dataRef for the detector for the flats to be used
913  visitPairs : `list` of `tuple`
914  List of visit-pairs to use, as [(v1,v2), (v3,v4)...]
915 
916  Returns
917  -------
918  gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
919  Object holding the per-amplifier gains, essentially a
920  dict of the as-calculated amplifier gain values, keyed by amp name
921  nominalGains : `dict` [`str`, `float`]
922  Dict of the amplifier gains, as reported by the `detector` object,
923  keyed by amplifier name
924  """
925  # NB: don't use dataRef.get('raw_detector') due to composites
926  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
927  amps = detector.getAmplifiers()
928  ampNames = [amp.getName() for amp in amps]
929 
930  ampMeans = {key: [] for key in ampNames} # these get turned into np.arrays later
931  ampCoVariances = {key: [] for key in ampNames}
932  ampVariances = {key: [] for key in ampNames}
933 
934  # Loop over the amps in the detector,
935  # calculating a PTC for each amplifier.
936  # The amplifier iteration is performed in _calcMeansAndVars()
937  # NB: no gain correction is applied
938  for visPairNum, visPair in enumerate(visitPairs):
939  _means, _vars, _covars = self._calcMeansAndVars(dataRef, visPair[0], visPair[1])
940 
941  # Do sanity checks; if these are failed more investigation is needed
942  breaker = 0
943  for amp in detector:
944  ampName = amp.getName()
945  if _means[ampName]*10 < _vars[ampName] or _means[ampName]*10 < _covars[ampName]:
946  msg = 'Sanity check failed; check visit pair %s amp %s' % (visPair, ampName)
947  self.log.warn(msg)
948  breaker += 1
949  if breaker:
950  continue
951 
952  # having made sanity checks
953  # pull the values out into the respective dicts
954  for k in _means.keys(): # keys are necessarily the same
955  if _vars[k]*1.3 < _covars[k] or _vars[k]*0.7 > _covars[k]:
956  self.log.warn('Dropped a value')
957  continue
958  ampMeans[k].append(_means[k])
959  ampVariances[k].append(_vars[k])
960  ampCoVariances[k].append(_covars[k])
961 
962  gains = {}
963  nomGains = {}
964  ptcResults = {}
965  for amp in detector:
966  ampName = amp.getName()
967  if ampMeans[ampName] == []: # all the data was dropped, amp is presumed bad
968  gains[ampName] = 1.0
969  ptcResults[ampName] = (0, 0, 1, 0)
970  continue
971 
972  nomGains[ampName] = amp.getGain()
973  slopeRaw, interceptRaw, rVal, pVal, stdErr = \
974  stats.linregress(np.asarray(ampMeans[ampName]), np.asarray(ampCoVariances[ampName]))
975  slopeFix, _ = self._iterativeRegression(np.asarray(ampMeans[ampName]),
976  np.asarray(ampCoVariances[ampName]),
977  fixThroughOrigin=True)
978  slopeUnfix, intercept = self._iterativeRegression(np.asarray(ampMeans[ampName]),
979  np.asarray(ampCoVariances[ampName]),
980  fixThroughOrigin=False)
981  self.log.info("Slope of raw fit: %s, intercept: %s p value: %s" % (slopeRaw,
982  interceptRaw, pVal))
983  self.log.info("slope of fixed fit: %s, difference vs raw:%s" % (slopeFix,
984  slopeFix - slopeRaw))
985  self.log.info("slope of unfixed fit: %s, difference vs fix:%s" % (slopeUnfix,
986  slopeFix - slopeUnfix))
987  if self.config.fixPtcThroughOrigin:
988  slopeToUse = slopeFix
989  else:
990  slopeToUse = slopeUnfix
991 
992  if self.debug.enabled:
993  fig = plt.figure()
994  ax = fig.add_subplot(111)
995  ax.plot(np.asarray(ampMeans[ampName]),
996  np.asarray(ampCoVariances[ampName]), linestyle='None', marker='x', label='data')
997  if self.config.fixPtcThroughOrigin:
998  ax.plot(np.asarray(ampMeans[ampName]),
999  np.asarray(ampMeans[ampName])*slopeToUse, label='Fit through origin')
1000  else:
1001  ax.plot(np.asarray(ampMeans[ampName]),
1002  np.asarray(ampMeans[ampName])*slopeToUse + intercept,
1003  label='Fit (intercept unconstrained')
1004 
1005  dataRef.put(fig, "plotBrighterFatterPtc", amp=ampName)
1006  self.log.info('Saved PTC for detector %s amp %s' % (detector.getId(), ampName))
1007  gains[ampName] = 1.0/slopeToUse
1008  # change the fit to use a cubic and match parameters with Lage method
1009  # or better, use the PTC task here too
1010  ptcResults[ampName] = (0, 0, 1, 0)
1011 
1012  return BrighterFatterGain(gains, ptcResults), nomGains
1013 
1014  def _calcMeansAndVars(self, dataRef, v1, v2):
1015  """Calculate the means, vars, covars, and retrieve the nominal gains,
1016  for each amp in each detector.
1017 
1018  This code runs using two visit numbers, and for the detector specified.
1019  It calculates the correlations in the individual amps without
1020  rescaling any gains. This allows a photon transfer curve
1021  to be generated and the gains measured.
1022 
1023  Images are assembled with use the isrTask, and basic isr is performed.
1024 
1025  Parameters:
1026  -----------
1027  dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
1028  dataRef for the detector for the repo containing the flats to be used
1029  v1 : `int`
1030  First visit of the visit pair
1031  v2 : `int`
1032  Second visit of the visit pair
1033 
1034  Returns
1035  -------
1036  means, vars, covars : `tuple` of `dict`
1037  Three dicts, keyed by ampName,
1038  containing the sum of the image-means,
1039  the variance, and the quarter-image of the xcorr.
1040  """
1041  sigma = self.config.nSigmaClipGainCalc
1042  maxLag = self.config.maxLag
1043  border = self.config.nPixBorderGainCalc
1044  biasCorr = self.config.biasCorr
1045 
1046  # NB: don't use dataRef.get('raw_detector') due to composites
1047  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
1048 
1049  ampMeans = {}
1050 
1051  # manipulate the dataId to get a postISR exposure for each visit
1052  # from the detector obj, restoring its original state afterwards
1053  originalDataId = dataRef.dataId.copy()
1054  dataRef.dataId['expId'] = v1
1055  exp1 = self.isr.runDataRef(dataRef).exposure
1056  dataRef.dataId['expId'] = v2
1057  exp2 = self.isr.runDataRef(dataRef).exposure
1058  dataRef.dataId = originalDataId
1059  exps = [exp1, exp2]
1060  checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
1061 
1062  detector = exps[0].getDetector()
1063  ims = [self._convertImagelikeToFloatImage(exp) for exp in exps]
1064 
1065  if self.debug.display:
1066  self.disp1.mtv(ims[0], title=str(v1))
1067  self.disp2.mtv(ims[1], title=str(v2))
1068 
1069  sctrl = afwMath.StatisticsControl()
1070  sctrl.setNumSigmaClip(sigma)
1071  for imNum, im in enumerate(ims):
1072 
1073  # calculate the sigma-clipped mean, excluding the borders
1074  # safest to apply borders to all amps regardless of edges
1075  # easier, camera-agnostic, and mitigates potentially dodgy
1076  # overscan-biases around edges as well
1077  for amp in detector:
1078  ampName = amp.getName()
1079  ampIm = im[amp.getBBox()]
1080  mean = afwMath.makeStatistics(ampIm[border: -border, border: -border, afwImage.LOCAL],
1081  afwMath.MEANCLIP, sctrl).getValue()
1082  if ampName not in ampMeans.keys():
1083  ampMeans[ampName] = []
1084  ampMeans[ampName].append(mean)
1085  ampIm -= mean
1086 
1087  diff = ims[0].clone()
1088  diff -= ims[1]
1089 
1090  temp = diff[border: -border, border: -border, afwImage.LOCAL]
1091 
1092  # Subtract background. It should be a constant,
1093  # but it isn't always (e.g. some SuprimeCam flats)
1094  # TODO: Check how this looks, and if this is the "right" way to do this
1095  binsize = self.config.backgroundBinSize
1096  nx = temp.getWidth()//binsize
1097  ny = temp.getHeight()//binsize
1098  bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
1099  bkgd = afwMath.makeBackground(temp, bctrl)
1100 
1101  box = diff.getBBox()
1102  box.grow(-border)
1103  diff[box, afwImage.LOCAL] -= bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE,
1104  afwMath.REDUCE_INTERP_ORDER)
1105 
1106  variances = {}
1107  coVars = {}
1108  for amp in detector:
1109  ampName = amp.getName()
1110  diffAmpIm = diff[amp.getBBox()].clone()
1111  diffAmpImCrop = diffAmpIm[border: -border - maxLag, border: -border - maxLag, afwImage.LOCAL]
1112  diffAmpImCrop -= afwMath.makeStatistics(diffAmpImCrop, afwMath.MEANCLIP, sctrl).getValue()
1113  w, h = diffAmpImCrop.getDimensions()
1114  xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)
1115 
1116  # calculate the cross-correlation
1117  for xlag in range(maxLag + 1):
1118  for ylag in range(maxLag + 1):
1119  dim_xy = diffAmpIm[border + xlag: border + xlag + w,
1120  border + ylag: border + ylag + h,
1121  afwImage.LOCAL].clone()
1122  dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
1123  dim_xy *= diffAmpImCrop
1124  xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy,
1125  afwMath.MEANCLIP, sctrl).getValue()/(biasCorr)
1126 
1127  variances[ampName] = xcorr[0, 0]
1128  xcorr_full = self._tileArray(xcorr)
1129  coVars[ampName] = np.sum(xcorr_full)
1130 
1131  msg = "M1: " + str(ampMeans[ampName][0])
1132  msg += " M2 " + str(ampMeans[ampName][1])
1133  msg += " M_sum: " + str((ampMeans[ampName][0]) + ampMeans[ampName][1])
1134  msg += " Var " + str(variances[ampName])
1135  msg += " coVar: " + str(coVars[ampName])
1136  self.log.debug(msg)
1137 
1138  means = {}
1139  for amp in detector:
1140  ampName = amp.getName()
1141  means[ampName] = ampMeans[ampName][0] + ampMeans[ampName][1]
1142 
1143  return means, variances, coVars
1144 
1145  def _plotXcorr(self, xcorr, mean, zmax=0.05, title=None, fig=None, saveToFileName=None):
1146  """Plot the correlation functions."""
1147  try:
1148  xcorr = xcorr.getArray()
1149  except Exception:
1150  pass
1151 
1152  xcorr /= float(mean)
1153  # xcorr.getArray()[0,0]=abs(xcorr.getArray()[0,0]-1)
1154 
1155  if fig is None:
1156  fig = plt.figure()
1157  else:
1158  fig.clf()
1159 
1160  ax = fig.add_subplot(111, projection='3d')
1161  ax.azim = 30
1162  ax.elev = 20
1163 
1164  nx, ny = np.shape(xcorr)
1165 
1166  xpos, ypos = np.meshgrid(np.arange(nx), np.arange(ny))
1167  xpos = xpos.flatten()
1168  ypos = ypos.flatten()
1169  zpos = np.zeros(nx*ny)
1170  dz = xcorr.flatten()
1171  dz[dz > zmax] = zmax
1172 
1173  ax.bar3d(xpos, ypos, zpos, 1, 1, dz, color='b', zsort='max', sort_zpos=100)
1174  if xcorr[0, 0] > zmax:
1175  ax.bar3d([0], [0], [zmax], 1, 1, 1e-4, color='c')
1176 
1177  ax.set_xlabel("row")
1178  ax.set_ylabel("column")
1179  ax.set_zlabel(r"$\langle{(F_i - \bar{F})(F_i - \bar{F})}\rangle/\bar{F}$")
1180 
1181  if title:
1182  fig.suptitle(title)
1183  if saveToFileName:
1184  fig.savefig(saveToFileName)
1185 
1186  def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None):
1187  """Use linear regression to fit a line, iteratively removing outliers.
1188 
1189  Useful when you have a sufficiently large numbers of points on your PTC.
1190  This function iterates until either there are no outliers of
1191  config.nSigmaClip magnitude, or until the specified maximum number
1192  of iterations has been performed.
1193 
1194  Parameters:
1195  -----------
1196  x : `numpy.array`
1197  The independent variable. Must be a numpy array, not a list.
1198  y : `numpy.array`
1199  The dependent variable. Must be a numpy array, not a list.
1200  fixThroughOrigin : `bool`, optional
1201  Whether to fix the PTC through the origin or allow an y-intercept.
1202  nSigmaClip : `float`, optional
1203  The number of sigma to clip to.
1204  Taken from the task config if not specified.
1205  maxIter : `int`, optional
1206  The maximum number of iterations allowed.
1207  Taken from the task config if not specified.
1208 
1209  Returns:
1210  --------
1211  slope : `float`
1212  The slope of the line of best fit
1213  intercept : `float`
1214  The y-intercept of the line of best fit
1215  """
1216  if not maxIter:
1217  maxIter = self.config.maxIterRegression
1218  if not nSigmaClip:
1219  nSigmaClip = self.config.nSigmaClipRegression
1220 
1221  nIter = 0
1222  sctrl = afwMath.StatisticsControl()
1223  sctrl.setNumSigmaClip(nSigmaClip)
1224 
1225  if fixThroughOrigin:
1226  while nIter < maxIter:
1227  nIter += 1
1228  self.log.debug("Origin fixed, iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
1229  TEST = x[:, np.newaxis]
1230  slope, _, _, _ = np.linalg.lstsq(TEST, y)
1231  slope = slope[0]
1232  res = (y - slope * x) / x
1233  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
1234  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
1235  index = np.where((res > (resMean + nSigmaClip*resStd)) |
1236  (res < (resMean - nSigmaClip*resStd)))
1237  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
1238  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points or iters
1239  break
1240  x = np.delete(x, index)
1241  y = np.delete(y, index)
1242 
1243  return slope, 0
1244 
1245  while nIter < maxIter:
1246  nIter += 1
1247  self.log.debug("Iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
1248  xx = np.vstack([x, np.ones(len(x))]).T
1249  ret, _, _, _ = np.linalg.lstsq(xx, y)
1250  slope, intercept = ret
1251  res = y - slope*x - intercept
1252  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
1253  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
1254  index = np.where((res > (resMean + nSigmaClip * resStd)) | (res < resMean - nSigmaClip * resStd))
1255  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
1256  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points, or iterations
1257  break
1258  x = np.delete(x, index)
1259  y = np.delete(y, index)
1260 
1261  return slope, intercept
1262 
1263  def generateKernel(self, corrs, means, objId, rejectLevel=None):
1264  """Generate the full kernel from a list of cross-correlations and means.
1265 
1266  Taking a list of quarter-image, gain-corrected cross-correlations,
1267  do a pixel-wise sigma-clipped mean of each,
1268  and tile into the full-sized kernel image.
1269 
1270  Each corr in corrs is one quarter of the full cross-correlation,
1271  and has been gain-corrected. Each mean in means is a tuple of the means
1272  of the two individual images, corresponding to that corr.
1273 
1274  Parameters:
1275  -----------
1276  corrs : `list` of `numpy.ndarray`, (Ny, Nx)
1277  A list of the quarter-image cross-correlations
1278  means : `list` of `tuples` of `floats`
1279  The means of the input images for each corr in corrs
1280  rejectLevel : `float`, optional
1281  This is essentially is a sanity check parameter.
1282  If this condition is violated there is something unexpected
1283  going on in the image, and it is discarded from the stack before
1284  the clipped-mean is calculated.
1285  If not provided then config.xcorrCheckRejectLevel is used
1286 
1287  Returns:
1288  --------
1289  kernel : `numpy.ndarray`, (Ny, Nx)
1290  The output kernel
1291  """
1292  self.log.info('Calculating kernel for %s'%objId)
1293 
1294  if not rejectLevel:
1295  rejectLevel = self.config.xcorrCheckRejectLevel
1296 
1297  if self.config.correlationQuadraticFit:
1298  xcorrList = []
1299  fluxList = []
1300 
1301  for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
1302  msg = 'For item %s, initial corr[0,0] = %g, corr[1,0] = %g'%(corrNum, corr[0, 0], corr[1, 0])
1303  self.log.info(msg)
1304  if self.config.level == 'DETECTOR':
1305  # This is now done in _applyGains() but only if level is not DETECTOR
1306  corr[0, 0] -= (mean1 + mean2)
1307  fullCorr = self._tileArray(corr)
1308 
1309  # Craig Lage says he doesn't understand the negative sign, but it needs to be there
1310  xcorrList.append(-fullCorr / 2.0)
1311  flux = (mean1 + mean2) / 2.0
1312  fluxList.append(flux * flux)
1313  # We're using the linear fit algorithm to find a quadratic fit,
1314  # so we square the x-axis.
1315  # The step below does not need to be done, but is included
1316  # so that correlations can be compared
1317  # directly to existing code. We might want to take it out.
1318  corr /= -1.0*(mean1**2 + mean2**2)
1319 
1320  if not xcorrList:
1321  raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
1322  "Either the data is bad, or config.xcorrCheckRejectLevel is too low")
1323 
1324  # This method fits a quadratic vs flux and keeps only the quadratic term.
1325  meanXcorr = np.zeros_like(fullCorr)
1326  xcorrList = np.asarray(xcorrList)
1327 
1328  for i in range(np.shape(meanXcorr)[0]):
1329  for j in range(np.shape(meanXcorr)[1]):
1330  # Note the i,j inversion. This serves the same function as the transpose step in
1331  # the base code. I don't understand why it is there, but I put it in to be consistent.
1332  slopeRaw, interceptRaw, rVal, pVal, stdErr = stats.linregress(np.asarray(fluxList),
1333  xcorrList[:, j, i])
1334  try:
1335  slope, intercept = self._iterativeRegression(np.asarray(fluxList),
1336  xcorrList[:, j, i],
1337  fixThroughOrigin=True)
1338  msg = "(%s,%s):Slope of raw fit: %s, intercept: %s p value: %s" % (i, j, slopeRaw,
1339  interceptRaw, pVal)
1340  self.log.debug(msg)
1341  self.log.debug("(%s,%s):Slope of fixed fit: %s" % (i, j, slope))
1342 
1343  meanXcorr[i, j] = slope
1344  except ValueError:
1345  meanXcorr[i, j] = slopeRaw
1346 
1347  msg = f"i={i}, j={j}, slope = {slope:.6g}, slopeRaw = {slopeRaw:.6g}"
1348  self.log.debug(msg)
1349  self.log.info('Quad Fit meanXcorr[0,0] = %g, meanXcorr[1,0] = %g'%(meanXcorr[8, 8],
1350  meanXcorr[9, 8]))
1351 
1352  else:
1353  # Try to average over a set of possible inputs.
1354  # This generates a simple function of the kernel that
1355  # should be constant across the images, and averages that.
1356  xcorrList = []
1357  sctrl = afwMath.StatisticsControl()
1358  sctrl.setNumSigmaClip(self.config.nSigmaClipKernelGen)
1359 
1360  for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
1361  corr[0, 0] -= (mean1 + mean2)
1362  if corr[0, 0] > 0:
1363  self.log.warn('Skipped item %s due to unexpected value of (variance-mean)' % corrNum)
1364  continue
1365  corr /= -1.0*(mean1**2 + mean2**2)
1366 
1367  fullCorr = self._tileArray(corr)
1368 
1369  xcorrCheck = np.abs(np.sum(fullCorr))/np.sum(np.abs(fullCorr))
1370  if xcorrCheck > rejectLevel:
1371  self.log.warn("Sum of the xcorr is unexpectedly high. Investigate item num %s for %s. \n"
1372  "value = %s" % (corrNum, objId, xcorrCheck))
1373  continue
1374  xcorrList.append(fullCorr)
1375 
1376  if not xcorrList:
1377  raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
1378  "Either the data is bad, or config.xcorrCheckRejectLevel is too low")
1379 
1380  # stack the individual xcorrs and apply a per-pixel clipped-mean
1381  meanXcorr = np.zeros_like(fullCorr)
1382  xcorrList = np.transpose(xcorrList)
1383  for i in range(np.shape(meanXcorr)[0]):
1384  for j in range(np.shape(meanXcorr)[1]):
1385  meanXcorr[i, j] = afwMath.makeStatistics(xcorrList[i, j],
1386  afwMath.MEANCLIP, sctrl).getValue()
1387 
1388  if self.config.correlationModelRadius < (meanXcorr.shape[0] - 1) / 2:
1389  sumToInfinity = self._buildCorrelationModel(meanXcorr, self.config.correlationModelRadius,
1390  self.config.correlationModelSlope)
1391  self.log.info("SumToInfinity = %s" % sumToInfinity)
1392  else:
1393  sumToInfinity = 0.0
1394  if self.config.forceZeroSum:
1395  self.log.info("Forcing sum of correlation matrix to zero")
1396  meanXcorr = self._forceZeroSum(meanXcorr, sumToInfinity)
1397 
1398  return meanXcorr, self.successiveOverRelax(meanXcorr)
1399 
1400  def successiveOverRelax(self, source, maxIter=None, eLevel=None):
1401  """An implementation of the successive over relaxation (SOR) method.
1402 
1403  A numerical method for solving a system of linear equations
1404  with faster convergence than the Gauss-Seidel method.
1405 
1406  Parameters:
1407  -----------
1408  source : `numpy.ndarray`
1409  The input array.
1410  maxIter : `int`, optional
1411  Maximum number of iterations to attempt before aborting.
1412  eLevel : `float`, optional
1413  The target error level at which we deem convergence to have
1414  occurred.
1415 
1416  Returns:
1417  --------
1418  output : `numpy.ndarray`
1419  The solution.
1420  """
1421  if not maxIter:
1422  maxIter = self.config.maxIterSuccessiveOverRelaxation
1423  if not eLevel:
1424  eLevel = self.config.eLevelSuccessiveOverRelaxation
1425 
1426  assert source.shape[0] == source.shape[1], "Input array must be square"
1427  # initialize, and set boundary conditions
1428  func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1429  resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1430  rhoSpe = np.cos(np.pi/source.shape[0]) # Here a square grid is assumed
1431 
1432  # Calculate the initial error
1433  for i in range(1, func.shape[0] - 1):
1434  for j in range(1, func.shape[1] - 1):
1435  resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1436  func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
1437  inError = np.sum(np.abs(resid))
1438 
1439  # Iterate until convergence
1440  # We perform two sweeps per cycle,
1441  # updating 'odd' and 'even' points separately
1442  nIter = 0
1443  omega = 1.0
1444  dx = 1.0
1445  while nIter < maxIter*2:
1446  outError = 0
1447  if nIter%2 == 0:
1448  for i in range(1, func.shape[0] - 1, 2):
1449  for j in range(1, func.shape[1] - 1, 2):
1450  resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j] +
1451  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1452  func[i, j] += omega*resid[i, j]*.25
1453  for i in range(2, func.shape[0] - 1, 2):
1454  for j in range(2, func.shape[1] - 1, 2):
1455  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1456  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1457  func[i, j] += omega*resid[i, j]*.25
1458  else:
1459  for i in range(1, func.shape[0] - 1, 2):
1460  for j in range(2, func.shape[1] - 1, 2):
1461  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1462  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1463  func[i, j] += omega*resid[i, j]*.25
1464  for i in range(2, func.shape[0] - 1, 2):
1465  for j in range(1, func.shape[1] - 1, 2):
1466  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1467  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1468  func[i, j] += omega*resid[i, j]*.25
1469  outError = np.sum(np.abs(resid))
1470  if outError < inError*eLevel:
1471  break
1472  if nIter == 0:
1473  omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
1474  else:
1475  omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
1476  nIter += 1
1477 
1478  if nIter >= maxIter*2:
1479  self.log.warn("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
1480  "\noutError: %s, inError: %s," % (nIter//2, outError, inError*eLevel))
1481  else:
1482  self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
1483  "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
1484  return func[1: -1, 1: -1]
1485 
1486  @staticmethod
1487  def _tileArray(in_array):
1488  """Given an input quarter-image, tile/mirror it and return full image.
1489 
1490  Given a square input of side-length n, of the form
1491 
1492  input = array([[1, 2, 3],
1493  [4, 5, 6],
1494  [7, 8, 9]])
1495 
1496  return an array of size 2n-1 as
1497 
1498  output = array([[ 9, 8, 7, 8, 9],
1499  [ 6, 5, 4, 5, 6],
1500  [ 3, 2, 1, 2, 3],
1501  [ 6, 5, 4, 5, 6],
1502  [ 9, 8, 7, 8, 9]])
1503 
1504  Parameters:
1505  -----------
1506  input : `np.array`
1507  The square input quarter-array
1508 
1509  Returns:
1510  --------
1511  output : `np.array`
1512  The full, tiled array
1513  """
1514  assert(in_array.shape[0] == in_array.shape[1])
1515  length = in_array.shape[0] - 1
1516  output = np.zeros((2*length + 1, 2*length + 1))
1517 
1518  for i in range(length + 1):
1519  for j in range(length + 1):
1520  output[i + length, j + length] = in_array[i, j]
1521  output[-i + length, j + length] = in_array[i, j]
1522  output[i + length, -j + length] = in_array[i, j]
1523  output[-i + length, -j + length] = in_array[i, j]
1524  return output
1525 
1526  @staticmethod
1527  def _forceZeroSum(inputArray, sumToInfinity):
1528  """Given an array of correlations, adjust the
1529  central value to force the sum of the array to be zero.
1530 
1531  Parameters:
1532  -----------
1533  input : `np.array`
1534  The square input array, assumed square and with
1535  shape (2n+1) x (2n+1)
1536 
1537  Returns:
1538  --------
1539  output : `np.array`
1540  The same array, with the value of the central value
1541  inputArray[n,n] adjusted to force the array sum to be zero.
1542  """
1543  assert(inputArray.shape[0] == inputArray.shape[1])
1544  assert(inputArray.shape[0] % 2 == 1)
1545  center = int((inputArray.shape[0] - 1) / 2)
1546  outputArray = np.copy(inputArray)
1547  outputArray[center, center] -= inputArray.sum() - sumToInfinity
1548  return outputArray
1549 
1550  @staticmethod
1551  def _buildCorrelationModel(array, replacementRadius, slope):
1552  """Given an array of correlations, build a model
1553  for correlations beyond replacementRadius pixels from the center
1554  and replace the measured values with the model.
1555 
1556  Parameters:
1557  -----------
1558  input : `np.array`
1559  The square input array, assumed square and with
1560  shape (2n+1) x (2n+1)
1561 
1562  Returns:
1563  --------
1564  output : `np.array`
1565  The same array, with the outer values
1566  replaced with a smoothed model.
1567  """
1568  assert(array.shape[0] == array.shape[1])
1569  assert(array.shape[0] % 2 == 1)
1570  assert(replacementRadius > 1)
1571  center = int((array.shape[0] - 1) / 2)
1572  # First we check if either the [0,1] or [1,0] correlation is positive.
1573  # If so, the data is seriously messed up. This has happened in some bad amplifiers.
1574  # In this case, we just return the input array unchanged.
1575  if (array[center, center + 1] >= 0.0) or (array[center + 1, center] >= 0.0):
1576  return 0.0
1577 
1578  intercept = (np.log10(-array[center, center + 1]) + np.log10(-array[center + 1, center])) / 2.0
1579  preFactor = 10**intercept
1580  slopeFactor = 2.0*abs(slope) - 2.0
1581  sumToInfinity = 2.0*np.pi*preFactor / (slopeFactor*(float(center)+0.5)**slopeFactor)
1582  # sum_to_ininity is the integral of the model beyond what is measured.
1583  # It is used to adjust C00 in the case of forcing zero sum
1584 
1585  # Now replace the pixels beyond replacementRadius with the model values
1586  for i in range(array.shape[0]):
1587  for j in range(array.shape[1]):
1588  r2 = float((i-center)*(i-center) + (j-center)*(j-center))
1589  if abs(i-center) < replacementRadius and abs(j-center) < replacementRadius:
1590  continue
1591  else:
1592  newCvalue = -preFactor * r2**slope
1593  array[i, j] = newCvalue
1594  return sumToInfinity
1595 
1596  @staticmethod
1597  def _convertImagelikeToFloatImage(imagelikeObject):
1598  """Turn an exposure or masked image of any type into an ImageF."""
1599  for attr in ("getMaskedImage", "getImage"):
1600  if hasattr(imagelikeObject, attr):
1601  imagelikeObject = getattr(imagelikeObject, attr)()
1602  try:
1603  floatImage = imagelikeObject.convertF()
1604  except AttributeError:
1605  raise RuntimeError("Failed to convert image to float")
1606  return floatImage
1607 
1608 
1609 def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False,
1610  correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10, logger=None):
1611  """Calculate the bias induced when sigma-clipping non-Gaussian distributions.
1612 
1613  Fill image-pairs of the specified size with Poisson-distributed values,
1614  adding correlations as necessary. Then calculate the cross correlation,
1615  and calculate the bias induced using the cross-correlation image
1616  and the image means.
1617 
1618  Parameters:
1619  -----------
1620  fluxLevels : `list` of `int`
1621  The mean flux levels at which to simulate.
1622  Nominal values might be something like [70000, 90000, 110000]
1623  imageShape : `tuple` of `int`
1624  The shape of the image array to simulate, nx by ny pixels.
1625  repeats : `int`, optional
1626  Number of repeats to perform so that results
1627  can be averaged to improve SNR.
1628  seed : `int`, optional
1629  The random seed to use for the Poisson points.
1630  addCorrelations : `bool`, optional
1631  Whether to add brighter-fatter-like correlations to the simulated images
1632  If true, a correlation between x_{i,j} and x_{i+1,j+1} is introduced
1633  by adding a*x_{i,j} to x_{i+1,j+1}
1634  correlationStrength : `float`, optional
1635  The strength of the correlations.
1636  This is the value of the coefficient `a` in the above definition.
1637  maxLag : `int`, optional
1638  The maximum lag to work to in pixels
1639  nSigmaClip : `float`, optional
1640  Number of sigma to clip to when calculating the sigma-clipped mean.
1641  border : `int`, optional
1642  Number of border pixels to mask
1643  logger : `lsst.log.Log`, optional
1644  Logger to use. Instantiated anew if not provided.
1645 
1646  Returns:
1647  --------
1648  biases : `dict` [`float`, `list` of `float`]
1649  A dictionary, keyed by flux level, containing a list of the biases
1650  for each repeat at that flux level
1651  means : `dict` [`float`, `list` of `float`]
1652  A dictionary, keyed by flux level, containing a list of the average
1653  mean fluxes (average of the mean of the two images)
1654  for the image pairs at that flux level
1655  xcorrs : `dict` [`float`, `list` of `np.ndarray`]
1656  A dictionary, keyed by flux level, containing a list of the xcorr
1657  images for the image pairs at that flux level
1658  """
1659  if logger is None:
1660  logger = lsstLog.Log.getDefaultLogger()
1661 
1662  means = {f: [] for f in fluxLevels}
1663  xcorrs = {f: [] for f in fluxLevels}
1664  biases = {f: [] for f in fluxLevels}
1665 
1667  config.isrMandatorySteps = [] # no isr but the validation routine is still run
1668  config.isrForbiddenSteps = []
1669  config.nSigmaClipXCorr = nSigmaClip
1670  config.nPixBorderXCorr = border
1671  config.maxLag = maxLag
1672  task = MakeBrighterFatterKernelTask(config=config)
1673 
1674  im0 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1675  im1 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1676 
1677  random = np.random.RandomState(seed)
1678 
1679  for rep in range(repeats):
1680  for flux in fluxLevels:
1681  data0 = random.poisson(flux, (imageShape)).astype(float)
1682  data1 = random.poisson(flux, (imageShape)).astype(float)
1683  if addCorrelations:
1684  data0[1:, 1:] += correlationStrength*data0[: -1, : -1]
1685  data1[1:, 1:] += correlationStrength*data1[: -1, : -1]
1686  im0.image.array[:, :] = data0
1687  im1.image.array[:, :] = data1
1688 
1689  _xcorr, _means = task._crossCorrelate(im0, im1, runningBiasCorrSim=True)
1690 
1691  means[flux].append(_means)
1692  xcorrs[flux].append(_xcorr)
1693  if addCorrelations:
1694  bias = xcorrs[flux][-1][1, 1]/means[flux][-1]*(1 + correlationStrength)/correlationStrength
1695  msg = f"Simulated/expected avg. flux: {flux:.1f}, {(means[flux][-1]/2):.1f}"
1696  logger.info(msg)
1697  logger.info(f"Bias: {bias:.6f}")
1698  else:
1699  bias = xcorrs[flux][-1][0, 0]/means[flux][-1]
1700  msg = f"Simulated/expected avg. flux: {flux:.1f}, {(means[flux][-1]/2):.1f}"
1701  logger.info(msg)
1702  logger.info(f"Bias: {bias:.6f}")
1703  biases[flux].append(bias)
1704 
1705  return biases, means, xcorrs
lsst::afw::image
Backwards-compatibility support for depersisting the old Calib (FluxMag0/FluxMag0Err) objects.
Definition: imageAlgorithm.dox:1
lsst::log.log.logContinued.warn
def warn(fmt, *args)
Definition: logContinued.py:202
lsst::log.log.logContinued.info
def info(fmt, *args)
Definition: logContinued.py:198
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.disp1
disp1
Definition: makeBrighterFatterKernel.py:388
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._tileArray
def _tileArray(in_array)
Definition: makeBrighterFatterKernel.py:1487
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.generateKernel
def generateKernel(self, corrs, means, objId, rejectLevel=None)
Definition: makeBrighterFatterKernel.py:1263
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernelTaskDataIdContainer
Definition: makeBrighterFatterKernel.py:218
lsst.cp.pipe.ptc.PhotonTransferCurveDataset
Definition: ptc.py:204
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._crossCorrelate
def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None)
Definition: makeBrighterFatterKernel.py:778
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTaskConfig
Definition: makeBrighterFatterKernel.py:52
lsst::sphgeom::abs
Angle abs(Angle const &a)
Definition: Angle.h:106
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.disp2
disp2
Definition: makeBrighterFatterKernel.py:389
lsst::afw::math::makeBackground
std::shared_ptr< Background > makeBackground(ImageT const &img, BackgroundControl const &bgCtrl)
A convenience function that uses function overloading to make the correct type of Background.
Definition: Background.h:552
pex.config.history.format
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
Definition: history.py:174
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._buildCorrelationModel
def _buildCorrelationModel(array, replacementRadius, slope)
Definition: makeBrighterFatterKernel.py:1551
ast::append
std::shared_ptr< FrameSet > append(FrameSet const &first, FrameSet const &second)
Construct a FrameSet that performs two transformations in series.
Definition: functional.cc:33
lsst::afw.display
Definition: __init__.py:1
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain
Definition: makeBrighterFatterKernel.py:317
lsst::afw::math::makeStatistics
Statistics makeStatistics(lsst::afw::math::MaskedVector< EntryT > const &mv, std::vector< WeightPixel > const &vweights, int const flags, StatisticsControl const &sctrl=StatisticsControl())
The makeStatistics() overload to handle lsst::afw::math::MaskedVector<>
Definition: Statistics.h:520
astshim.keyMap.keyMapContinued.keys
def keys(self)
Definition: keyMapContinued.py:6
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernel.__init__
def __init__(self, originalLevel, **kwargs)
Definition: makeBrighterFatterKernel.py:270
lsst::afw.display.ds9.mtv
def mtv(data, frame=None, title="", wcs=None, *args, **kwargs)
Definition: ds9.py:93
lsst.cp.pipe.ptc.MeasurePhotonTransferCurveTaskConfig
Definition: ptc.py:48
lsst::daf::persistence.butlerExceptions
Definition: butlerExceptions.py:1
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.__init__
def __init__(self, *args, **kwargs)
Definition: makeBrighterFatterKernel.py:373
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._applyGains
def _applyGains(self, means, xcorrs, ptcData)
Definition: makeBrighterFatterKernel.py:651
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.validateIsrConfig
def validateIsrConfig(self)
Definition: makeBrighterFatterKernel.py:415
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.debug
debug
Definition: makeBrighterFatterKernel.py:377
lsst.cp.pipe.ptc
Definition: ptc.py:1
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._DefaultName
string _DefaultName
Definition: makeBrighterFatterKernel.py:371
lsstDebug.Info
Definition: lsstDebug.py:28
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernel.__setattr__
def __setattr__(self, attribute, value)
Definition: makeBrighterFatterKernel.py:289
lsst::log
Definition: Log.h:706
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernel.replaceDetectorKernelWithAmpKernel
def replaceDetectorKernelWithAmpKernel(self, ampName, detectorName)
Definition: makeBrighterFatterKernel.py:296
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.runDataRef
def runDataRef(self, dataRef, visitPairs)
Definition: makeBrighterFatterKernel.py:453
lsst.cp.pipe.makeBrighterFatterKernel.calcBiasCorr
def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False, correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10, logger=None)
Definition: makeBrighterFatterKernel.py:1609
lsst::afw::math::BackgroundControl
Pass parameters to a Background object.
Definition: Background.h:56
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._calcMeansAndVars
def _calcMeansAndVars(self, dataRef, v1, v2)
Definition: makeBrighterFatterKernel.py:1014
lsst.cp.pipe.utils.checkExpLengthEqual
def checkExpLengthEqual(exp1, exp2, v1=None, v2=None, raiseWithMessage=False)
Definition: utils.py:162
lsst.cp.pipe.ptc.MeasurePhotonTransferCurveTask
Definition: ptc.py:285
lsst::afw::math::StatisticsControl
Pass parameters to a Statistics object.
Definition: Statistics.h:93
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.successiveOverRelax
def successiveOverRelax(self, source, maxIter=None, eLevel=None)
Definition: makeBrighterFatterKernel.py:1400
list
daf::base::PropertyList * list
Definition: fits.cc:913
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._iterativeRegression
def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None)
Definition: makeBrighterFatterKernel.py:1186
lsst::ip::isr
Definition: applyLookupTable.h:34
lsst::afw::math
Definition: statistics.dox:6
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask.estimateGains
def estimateGains(self, dataRef, visitPairs)
Definition: makeBrighterFatterKernel.py:892
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._convertImagelikeToFloatImage
def _convertImagelikeToFloatImage(imagelikeObject)
Definition: makeBrighterFatterKernel.py:1597
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernel.makeDetectorKernelFromAmpwiseKernels
def makeDetectorKernelFromAmpwiseKernels(self, detectorName, ampsToExclude=[], overwrite=False)
Definition: makeBrighterFatterKernel.py:299
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._makeCroppedExposures
def _makeCroppedExposures(self, exp, gains, level)
Definition: makeBrighterFatterKernel.py:694
lsst.pipe.base
Definition: __init__.py:1
lsst::afw.display.ds9.erase
def erase(frame=None)
Definition: ds9.py:97
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask
Definition: makeBrighterFatterKernel.py:323
lsst::afw::image.slicing.clone
clone
Definition: slicing.py:257
set
daf::base::PropertySet * set
Definition: fits.cc:912
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernelTaskDataIdContainer.makeDataRefList
def makeDataRefList(self, namespace)
Definition: makeBrighterFatterKernel.py:221
lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterKernel
Definition: makeBrighterFatterKernel.py:253
pex.config.wrap.validate
validate
Definition: wrap.py:295
lsst.cp.pipe.makeBrighterFatterKernel.MakeBrighterFatterKernelTask._forceZeroSum
def _forceZeroSum(inputArray, sumToInfinity)
Definition: makeBrighterFatterKernel.py:1527