# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Calculation of brighter-fatter effect correlations and kernels."""

__all__ = ['MakeBrighterFatterKernelTaskConfig',
           'MakeBrighterFatterKernelTask',
           'calcBiasCorr']

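# Example invocation of this Gen2 command-line task (the repo path, rerun
# name, and visit numbers are hypothetical placeholders):
#
#     makeBrighterFatterKernel.py /path/to/repo --rerun bfk \
#         --id ccd=0..9 --visit-pairs 123,124 125,126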
import os
import copy
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
# the following import is required for 3d projection
from mpl_toolkits.mplot3d import axes3d  # noqa: F401
from matplotlib.backends.backend_pdf import PdfPages
from dataclasses import dataclass

import lsstDebug
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.afw.display as afwDisp
from lsst.ip.isr import IsrTask
import lsst.log as lsstLog
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .utils import PairedVisitListTaskRunner, checkExpLengthEqual
import lsst.daf.persistence.butlerExceptions as butlerExceptions
from lsst.cp.pipe.ptc.measurePtcGen2Task import (MeasurePhotonTransferCurveTaskConfig,
                                                 MeasurePhotonTransferCurveTask)
from lsst.ip.isr import PhotonTransferCurveDataset

class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
    """Config class for brighter-fatter effect coefficient calculation."""

    isr = pexConfig.ConfigurableField(
        target=IsrTask,
        doc="""Task to perform instrumental signature removal""",
    )
    isrMandatorySteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must be performed for valid results. Raises if any of these are False",
        default=['doAssembleCcd']
    )
    isrForbiddenSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
        default=['doFlat', 'doFringe', 'doBrighterFatter', 'doUseOpticsTransmission',
                 'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
    )
    isrDesirableSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that it is advisable to perform, but are not mission-critical."
            " WARNs are logged for any of these found to be False.",
        default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
    )
    doCalcGains = pexConfig.Field(
        dtype=bool,
        doc="Measure the per-amplifier gains (using the photon transfer curve method)?",
        default=True,
    )
    doPlotPtcs = pexConfig.Field(
        dtype=bool,
        doc="Plot the PTCs and butler.put() them as defined by the plotBrighterFatterPtc template",
        default=False,
    )
    forceZeroSum = pexConfig.Field(
        dtype=bool,
        doc="Force the correlation matrix to have zero sum by adjusting the (0,0) value?",
        default=False,
    )
    correlationQuadraticFit = pexConfig.Field(
        dtype=bool,
        doc="Use a quadratic fit to find the correlations instead of simple averaging?",
        default=False,
    )
    correlationModelRadius = pexConfig.Field(
        dtype=int,
        doc="Build a model of the correlation coefficients for radii larger than this value in pixels?",
        default=100,
    )
    correlationModelSlope = pexConfig.Field(
        dtype=float,
        doc="Slope of the correlation model for radii larger than correlationModelRadius",
        default=-1.35,
    )
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'",
        default='ccd',
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) above which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maxIterRegression = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for the regression fitter",
        default=2
    )
    nSigmaClipGainCalc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip the pixel value distribution to during gain calculation",
        default=5
    )
    nSigmaClipRegression = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip outliers from the line of best fit to during iterative regression",
        default=4
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Sanity check level for the sum of the input cross-correlations. Arrays which "
            "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    nSigmaClipKernelGen = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See "
            "the generateKernel docstring for more info.",
        default=4
    )
    nSigmaClipXCorr = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    maxLag = pexConfig.Field(
        dtype=int,
        doc="The maximum lag (in pixels) to use when calculating the cross-correlation/kernel",
        default=8
    )
    nPixBorderGainCalc = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the gain",
        default=10
    )
    nPixBorderXCorr = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the cross-correlation and kernel",
        default=10
    )
    biasCorr = pexConfig.Field(
        dtype=float,
        doc="An empirically determined correction factor, used to correct for the sigma-clipping of"
            " a non-Gaussian distribution. Post DM-15277, code will exist here to calculate appropriate values",
        default=0.9241
    )
    backgroundBinSize = pexConfig.Field(
        dtype=int,
        doc="Size of the background bins",
        default=128
    )
    fixPtcThroughOrigin = pexConfig.Field(
        dtype=bool,
        doc="Constrain the fit of the photon transfer curve to go through the origin when measuring"
            " the gain?",
        default=True
    )
    level = pexConfig.ChoiceField(
        doc="The level at which to calculate the brighter-fatter kernels",
        dtype=str,
        default="DETECTOR",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    ignoreAmpsForAveraging = pexConfig.ListField(
        dtype=str,
        doc="List of amp names to ignore when averaging the amplifier kernels into the detector"
            " kernel. Only relevant for level = AMP",
        default=[]
    )
    backgroundWarnLevel = pexConfig.Field(
        dtype=float,
        doc="Log warnings if the mean of the fitted background is found to be above this level after "
            "differencing image pair.",
        default=0.1
    )

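# A hedged configuration sketch: the values below are illustrative overrides
# (not the defaults above), showing how these fields are typically set via a
# command-line --config override or a config file.
#
#     config.level = 'AMP'
#     config.maxLag = 8              # 9x9 quarter-correlation -> 17x17 kernel
#     config.forceZeroSum = True
#     config.isr.doFlat = False      # forbidden isr steps must stay disabled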

class BrighterFatterKernelTaskDataIdContainer(pipeBase.DataIdContainer):
    """A DataIdContainer for the MakeBrighterFatterKernelTask."""

    def makeDataRefList(self, namespace):
        """Compute refList based on idList.

        This method must be defined as the dataset does not exist before this
        task is run.

        Parameters
        ----------
        namespace
            Results of parsing the command-line.

        Notes
        -----
        Not called if ``add_id_argument`` was called
        with ``doMakeDataRefList=False``.
        Note that this is almost a copy-and-paste of the vanilla
        implementation, but without checking if the datasets already exist,
        as this task exists to make them.
        """
        if self.datasetType is None:
            raise RuntimeError("Must call setDatasetType first")
        butler = namespace.butler
        for dataId in self.idList:
            refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
            # exclude nonexistent data
            # this is a recursive test, e.g. for the sake of "raw" data
            if not refList:
                namespace.log.warn("No data found for dataId=%s", dataId)
                continue
            self.refList += refList


class BrighterFatterKernel:
    """A simple class to hold the kernel(s) generated and the intermediate
    data products.

    kernel.ampwiseKernels are the kernels for each amplifier in the detector,
    as generated by having LEVEL == 'AMP'

    kernel.detectorKernel is the kernel generated for the detector as a whole,
    as generated by having LEVEL == 'DETECTOR'

    kernel.detectorKernelFromAmpKernels is the kernel for the detector,
    generated by averaging together the amps in the detector

    The originalLevel is the level for which the kernel(s) were generated,
    i.e. the level at which the task was originally run.
    """

    def __init__(self, originalLevel, **kwargs):
        self.__dict__["originalLevel"] = originalLevel
        self.__dict__["ampwiseKernels"] = {}
        self.__dict__["detectorKernel"] = {}
        self.__dict__["detectorKernelFromAmpKernels"] = {}
        self.__dict__["means"] = []
        self.__dict__["rawMeans"] = []
        self.__dict__["rawXcorrs"] = []
        self.__dict__["xCorrs"] = []
        self.__dict__["meanXcorrs"] = []
        self.__dict__["gain"] = None  # will be a dict keyed by amp if set
        self.__dict__["gainErr"] = None  # will be a dict keyed by amp if set
        self.__dict__["noise"] = None  # will be a dict keyed by amp if set
        self.__dict__["noiseErr"] = None  # will be a dict keyed by amp if set

        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)

    def __setattr__(self, attribute, value):
        """Protect class attributes"""
        if attribute not in self.__dict__:
            print(f"Cannot set {attribute}")
        else:
            self.__dict__[attribute] = value

    def replaceDetectorKernelWithAmpKernel(self, ampName, detectorName):
        self.detectorKernel[detectorName] = self.ampwiseKernels[ampName]

    def makeDetectorKernelFromAmpwiseKernels(self, detectorName, ampsToExclude=[], overwrite=False):
        if detectorName not in self.detectorKernelFromAmpKernels.keys():
            self.detectorKernelFromAmpKernels[detectorName] = {}

        if self.detectorKernelFromAmpKernels[detectorName] != {} and overwrite is False:
            raise RuntimeError('Was told to replace existing detector kernel with overwrite==False')

        ampNames = self.ampwiseKernels.keys()
        ampsToAverage = [amp for amp in ampNames if amp not in ampsToExclude]
        avgKernel = np.zeros_like(self.ampwiseKernels[ampsToAverage[0]])
        for ampName in ampsToAverage:
            avgKernel += self.ampwiseKernels[ampName]
        avgKernel /= len(ampsToAverage)

        self.detectorKernelFromAmpKernels[detectorName] = avgKernel


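# A hedged usage sketch of the container above; the amp/detector names and
# the kernel arrays (k00, k01) are hypothetical placeholders.
#
#     bfk = BrighterFatterKernel('AMP')
#     bfk.ampwiseKernels = {'C00': k00, 'C01': k01}  # np.ndarray kernels
#     bfk.makeDetectorKernelFromAmpwiseKernels('det0', ampsToExclude=['C01'])
#     kernel = bfk.detectorKernelFromAmpKernels['det0']  # the averaged kernel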
@dataclass
class BrighterFatterGain:
    """The gains and the results of the PTC fits."""
    gains: dict
    ptcResults: dict

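# For example (illustrative values only):
#     BrighterFatterGain(gains={'C00': 1.5}, ptcResults={'C00': (0, 0, 1, 0)})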

class MakeBrighterFatterKernelTask(pipeBase.CmdLineTask):
    """Brighter-fatter effect correction-kernel calculation task.

    A command line task for calculating the brighter-fatter correction
    kernel from pairs of flat-field images (with the same exposure length).

    The following operations are performed:

    - The configurable isr task is called, which unpersists and assembles the
      raw images, and performs the selected instrument signature removal tasks.
      For the purpose of brighter-fatter coefficient calculation it is
      essential that certain components of isr are *not* performed, and
      recommended that certain others are. The task checks the selected isr
      configuration before it is run: if forbidden components have been
      selected the task raises, and if recommended ones have not been
      selected, warnings are logged.

    - The gain of each amplifier in the detector is calculated using
      the photon transfer curve (PTC) method and used to correct the images
      so that all calculations are done in units of electrons, and so that the
      level across amplifier boundaries is continuous.
      Outliers in the PTC are iteratively rejected
      before fitting, with the nSigma rejection level set by
      config.nSigmaClipRegression. Individual pixels in the input images
      are ignored based on config.nSigmaClipGainCalc.

    - Each image is then cross-correlated with the one it's paired with
      (with the pairing defined by the --visit-pairs command line argument),
      which is done either whole-image to whole-image,
      or amplifier-by-amplifier, depending on config.level.

    - Once the cross-correlations have been calculated for each visit pair,
      these are used to generate the correction kernel.
      The maximum lag used, in pixels, and hence the half-size
      of the kernel generated, is given by config.maxLag,
      i.e. a value of n = 10 will result in a kernel of size 2n-1 = 19x19 pixels.
      Outlier values in these cross-correlations are rejected by applying a
      pixel-wise sigma-clipped threshold to each cross-correlation in
      the visit-pairs-length stack of cross-correlations.
      The number of sigma clipped to is set by config.nSigmaClipKernelGen.

    - Once DM-15277 has been completed, a method will exist to calculate the
      empirical correction factor, config.biasCorr.
      TODO: DM-15277 update this part of the docstring once the ticket is done.
    """

    RunnerClass = PairedVisitListTaskRunner
    ConfigClass = MakeBrighterFatterKernelTaskConfig
    _DefaultName = "makeBrighterFatterKernel"

    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("isr")

        self.debug = lsstDebug.Info(__name__)
        if self.debug.enabled:
            self.log.info("Running with debug enabled...")
            # If we're displaying, test it works and save displays for later.
            # It's worth testing here as displays are flaky and sometimes
            # can't be contacted, and given processing takes a while,
            # it's a shame to fail late due to display issues.
            if self.debug.display:
                try:
                    afwDisp.setDefaultBackend(self.debug.displayBackend)
                    afwDisp.Display.delAllDisplays()
                    self.disp1 = afwDisp.Display(0, open=True)
                    self.disp2 = afwDisp.Display(1, open=True)

                    im = afwImage.ImageF(1, 1)
                    im.array[:] = [[1]]
                    self.disp1.mtv(im)
                    self.disp1.erase()
                except NameError:
                    self.debug.display = False
                    self.log.warn('Failed to setup/connect to display! Debug display has been disabled')

        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        self.validateIsrConfig()
        self.config.validate()
        self.config.freeze()

    @classmethod
    def _makeArgumentParser(cls):
        """Augment argument parser for the MakeBrighterFatterKernelTask."""
        parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
                            help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
        parser.add_id_argument("--id", datasetType="brighterFatterKernel",
                               ContainerClass=BrighterFatterKernelTaskDataIdContainer,
                               help="The ccds to use, e.g. --id ccd=0..100")
        return parser

    def validateIsrConfig(self):
        """Check that appropriate ISR settings are being used
        for brighter-fatter kernel calculation."""

        # How should we handle saturation/bad regions?
        # 'doSaturationInterpolation': True
        # 'doNanInterpAfterFlat': False
        # 'doSaturation': True
        # 'doSuspect': True
        # 'doWidenSaturationTrails': True
        # 'doSetBadRegions': True

        configDict = self.config.isr.toDict()

        for configParam in self.config.isrMandatorySteps:
            if configDict[configParam] is False:
                raise RuntimeError(f'Must set config.isr.{configParam} to True '
                                   'for brighter-fatter kernel calculation')

        for configParam in self.config.isrForbiddenSteps:
            if configDict[configParam] is True:
                raise RuntimeError(f'Must set config.isr.{configParam} to False '
                                   'for brighter-fatter kernel calculation')

        for configParam in self.config.isrDesirableSteps:
            if configParam not in configDict:
                self.log.info('Failed to find key %s in the isr config dict. You probably want '
                              'to set the equivalent for your obs_package to True.', configParam)
                continue
            if configDict[configParam] is False:
                self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calculation. '
                              'It is probably desirable to have this set to True', configParam)

        # subtask settings
        if not self.config.isr.assembleCcd.doTrim:
            raise RuntimeError('Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True')

    @pipeBase.timeMethod
    def runDataRef(self, dataRef, visitPairs):
        """Run the brighter-fatter measurement task.

        For a dataRef (which is each detector here),
        and given a list of visit pairs, calculate the
        brighter-fatter kernel for the detector.

        Parameters
        ----------
        dataRef : `list` of `lsst.daf.persistence.ButlerDataRef`
            dataRef for the detector for the visits to be fit.
        visitPairs : `iterable` of `tuple` of `int`
            Pairs of visit numbers to be processed together
        """
        np.random.seed(0)  # used in the PTC fit bootstrap

        # setup necessary objects
        # NB: don't use dataRef.get('raw_detector')
        # this currently doesn't work for composites because of the way
        # composite objects (i.e. LSST images) are handled/constructed
        # these need to be retrieved from the camera and dereferenced
        # rather than accessed directly
        detNum = dataRef.dataId[self.config.ccdKey]
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        if self.config.level == 'DETECTOR':
            kernels = {detNum: []}
            means = {detNum: []}
            xcorrs = {detNum: []}
            meanXcorrs = {detNum: []}
        elif self.config.level == 'AMP':
            kernels = {key: [] for key in ampNames}
            means = {key: [] for key in ampNames}
            xcorrs = {key: [] for key in ampNames}
            meanXcorrs = {key: [] for key in ampNames}
        else:
            raise RuntimeError("Unsupported level: {}".format(self.config.level))

        # we must be able to get the gains one way or the other, so check early
        if not self.config.doCalcGains:
            deleteMe = None
            try:
                deleteMe = dataRef.get('photonTransferCurveDataset')
            except butlerExceptions.NoResults:
                try:
                    deleteMe = dataRef.get('brighterFatterGain')
                except butlerExceptions.NoResults:
                    pass
            if not deleteMe:
                raise RuntimeError("doCalcGains == False and gains could not be got from butler") from None
            else:
                del deleteMe

        # if the level is DETECTOR we need to have the gains first so that each
        # amp can be gain corrected in order to treat the detector as a single
        # imaging area. However, if the level is AMP we can wait, calculate
        # the correlations and correct for the gains afterwards
        if self.config.level == 'DETECTOR':
            if self.config.doCalcGains:
                self.log.info('Computing gains for detector %s' % detNum)
                gains, nomGains = self.estimateGains(dataRef, visitPairs)
                dataRef.put(gains, datasetType='brighterFatterGain')
                self.log.debug('Finished gain estimation for detector %s' % detNum)
            else:
                gains = dataRef.get('brighterFatterGain')
                if not gains:
                    raise RuntimeError('Failed to retrieve gains for detector %s' % detNum)
                self.log.info('Retrieved stored gain for detector %s' % detNum)
            self.log.debug('Detector %s has gains %s' % (detNum, gains))
        else:  # we fake the gains as 1 for now, and correct later
            gains = BrighterFatterGain({}, {})
            for ampName in ampNames:
                gains.gains[ampName] = 1.0
            # We'll use the ptc.py code to calculate the gains, so we set this up
            ptcConfig = MeasurePhotonTransferCurveTaskConfig()
            ptcConfig.isrForbiddenSteps = []
            ptcConfig.doFitBootstrap = True
            ptcConfig.ptcFitType = 'POLYNOMIAL'  # default Astier doesn't work for gain correction
            ptcConfig.polynomialFitDegree = 3
            ptcConfig.minMeanSignal = self.config.minMeanSignal
            ptcConfig.maxMeanSignal = self.config.maxMeanSignal
            ptcTask = MeasurePhotonTransferCurveTask(config=ptcConfig)
            ptcDataset = PhotonTransferCurveDataset(ampNames)

        # Loop over pairs of visits
        # calculating the cross-correlations at the required level
        for (v1, v2) in visitPairs:
            dataRef.dataId['expId'] = v1
            exp1 = self.isr.runDataRef(dataRef).exposure
            dataRef.dataId['expId'] = v2
            exp2 = self.isr.runDataRef(dataRef).exposure
            del dataRef.dataId['expId']
            checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)

            self.log.info('Preparing images for cross-correlation calculation for detector %s' % detNum)
            # note the shape of these returns depends on level
            _scaledMaskedIms1, _means1 = self._makeCroppedExposures(exp1, gains, self.config.level)
            _scaledMaskedIms2, _means2 = self._makeCroppedExposures(exp2, gains, self.config.level)

            # Compute the cross-correlation and means
            # at the appropriate config.level:
            # - "DETECTOR": one key, so compare the two visits to each other
            # - "AMP": n_amp keys, comparing each amplifier of one visit
            #   to the same amplifier in the visit it's paired with
            for det_object in _scaledMaskedIms1.keys():  # det_object is ampName or detName depending on level
                self.log.debug("Calculating correlations for %s" % det_object)
                _xcorr, _mean = self._crossCorrelate(_scaledMaskedIms1[det_object],
                                                     _scaledMaskedIms2[det_object])
                xcorrs[det_object].append(_xcorr)
                means[det_object].append([_means1[det_object], _means2[det_object]])
                if self.config.level != 'DETECTOR':
                    # Populate the ptcDataset for running fitting in the PTC task
                    expTime = exp1.getInfo().getVisitInfo().getExposureTime()
                    ptcDataset.rawExpTimes[det_object].append(expTime)
                    ptcDataset.rawMeans[det_object].append((_means1[det_object] + _means2[det_object]) / 2.0)
                    ptcDataset.rawVars[det_object].append(_xcorr[0, 0] / 2.0)

            # TODO: DM-15305 improve debug functionality here.
            # This is position 1 for the removed code.

        # Save the raw means and xcorrs so we can look at them before any modifications
        rawMeans = copy.deepcopy(means)
        rawXcorrs = copy.deepcopy(xcorrs)

        # gains are always and only pre-applied for DETECTOR
        # so for all other levels we now calculate them from the correlations
        # and apply them
        if self.config.level != 'DETECTOR':
            if self.config.doCalcGains:  # Run the PTC task for calculating the gains, put results
                self.log.info('Calculating gains for detector %s using PTC task' % detNum)
                ptcDataset = ptcTask.fitPtc(ptcDataset, ptcConfig.ptcFitType)
                dataRef.put(ptcDataset, datasetType='photonTransferCurveDataset')
                self.log.debug('Finished gain estimation for detector %s' % detNum)
            else:  # load results - confirmed to work much earlier on, so can be relied upon here
                ptcDataset = dataRef.get('photonTransferCurveDataset')

            self._applyGains(means, xcorrs, ptcDataset)

            if self.config.doPlotPtcs:
                dirname = dataRef.getUri(datasetType='cpPipePlotRoot', write=True)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                detNum = dataRef.dataId[self.config.ccdKey]
                filename = f"PTC_det{detNum}.pdf"
                filenameFull = os.path.join(dirname, filename)
                with PdfPages(filenameFull) as pdfPages:
                    ptcTask._plotPtc(ptcDataset, ptcConfig.ptcFitType, pdfPages)

        # having calculated and applied the gains for all code-paths we can now
        # generate the kernel(s)
        self.log.info('Generating kernel(s) for %s' % detNum)
        for det_object in xcorrs.keys():  # looping over either detectors or amps
            if self.config.level == 'DETECTOR':
                objId = 'detector %s' % det_object
            elif self.config.level == 'AMP':
                objId = 'detector %s AMP %s' % (detNum, det_object)

            try:
                meanXcorr, kernel = self.generateKernel(xcorrs[det_object], means[det_object], objId)
                kernels[det_object] = kernel
                meanXcorrs[det_object] = meanXcorr
            except RuntimeError:
                # bad amps will cause failures here which we want to ignore
                self.log.warn('RuntimeError during kernel generation for %s' % objId)
                continue

        bfKernel = BrighterFatterKernel(self.config.level)
        bfKernel.means = means
        bfKernel.rawMeans = rawMeans
        bfKernel.rawXcorrs = rawXcorrs
        bfKernel.xCorrs = xcorrs
        bfKernel.meanXcorrs = meanXcorrs
        bfKernel.originalLevel = self.config.level
        try:
            bfKernel.gain = ptcDataset.gain
            bfKernel.gainErr = ptcDataset.gainErr
            bfKernel.noise = ptcDataset.noise
            bfKernel.noiseErr = ptcDataset.noiseErr
        except NameError:  # we don't have a ptcDataset to store results from
            pass

        if self.config.level == 'AMP':
            bfKernel.ampwiseKernels = kernels
            ex = self.config.ignoreAmpsForAveraging
            # this stores the result in bfKernel.detectorKernelFromAmpKernels
            bfKernel.makeDetectorKernelFromAmpwiseKernels(detNum, ampsToExclude=ex)
        elif self.config.level == 'DETECTOR':
            bfKernel.detectorKernel = kernels
        else:
            raise RuntimeError('Invalid level for kernel calculation; this should not be possible.')

        dataRef.put(bfKernel)

        self.log.info('Finished generating kernel(s) for %s' % detNum)
        return pipeBase.Struct(exitStatus=0)

    def _applyGains(self, means, xcorrs, ptcData):
        """Apply the gains calculated by the PtcTask.

        It also removes datapoints that were thrown out in the PTC algorithm.

        Parameters
        ----------
        means : `dict` [`str`, `list` of `tuple`]
            Dictionary, keyed by ampName, containing a list of the means for
            each visit pair.
        xcorrs : `dict` [`str`, `list` of `np.array`]
            Dictionary, keyed by ampName, containing a list of the
            cross-correlations for each visit pair.
        ptcData : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
            The results of running the ptcTask.
        """
        ampNames = means.keys()
        assert set(xcorrs.keys()) == set(ampNames) == set(ptcData.ampNames)

        for ampName in ampNames:
            mask = ptcData.expIdMask[ampName]
            gain = ptcData.gain[ampName]

            fitType = ptcData.ptcFitType[ampName]
            if fitType != 'POLYNOMIAL':
                raise RuntimeError(f"Only polynomial fit types supported currently, found {fitType}")
            ptcFitPars = ptcData.ptcFitPars[ampName]
            # polynomial params go in ascending order, so this is safe w.r.t.
            # the polynomial order, as the constant term is always first,
            # the linear term second etc

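            # For a flat pair, the PTC polynomial gives
            # var(single image) ~= p0 + p1*mu + p2*mu^2 + ..., and
            # xcorrs[0, 0] is the variance of the *difference* image,
            # i.e. roughly twice that, so subtracting 2*(p0 + p1*mu)
            # strips the noise and linear shot-noise parts, leaving the
            # quadratic-and-higher part attributed to the BF effect.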
            # Adjust xcorrs[0,0] to remove the linear gain part, leaving just the second order part
            for i in range(len(means[ampName])):
                ampMean = np.mean(means[ampName][i])
                xcorrs[ampName][i][0, 0] -= 2.0 * (ampMean * ptcFitPars[1] + ptcFitPars[0])

            # Now adjust the means and xcorrs for the calculated gain and remove the bad indices
            means[ampName] = [[value*gain for value in pair] for pair in np.array(means[ampName])[mask]]
            xcorrs[ampName] = [arr*gain*gain for arr in np.array(xcorrs[ampName])[mask]]
        return

    def _makeCroppedExposures(self, exp, gains, level):
        """Prepare exposure for cross-correlation calculation.

        For each amp, crop by the border amount, specified by
        config.nPixBorderXCorr, then rescale by the gain
        and subtract the sigma-clipped mean.
        If the level is 'DETECTOR' then this is done
        to the whole image so that it can be cross-correlated, with a copy
        being returned.
        If the level is 'AMP' then this is done per-amplifier,
        and a copy of each prepared amp-image returned.

        Parameters:
        -----------
        exp : `lsst.afw.image.exposure.ExposureF`
            The exposure to prepare
        gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
            The object holding the amplifier gains, essentially a
            dictionary of the amplifier gain values, keyed by amplifier name
        level : `str`
            Either `AMP` or `DETECTOR`

        Returns:
        --------
        scaledMaskedIms : `dict` [`str`, `lsst.afw.image.maskedImage.MaskedImageF`]
            Depending on level, this is either one item, or n_amp items,
            keyed by detectorId or ampName
        means : `dict` [`str`, `float`]
            The sigma-clipped mean of each prepared area, keyed the same
            way as ``scaledMaskedIms``

        Notes:
        ------
        This function is controlled by the following config parameters:
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        """
        assert isinstance(exp, afwImage.ExposureF)

        local_exp = exp.clone()  # we don't want to modify the image passed in
        del exp  # ensure we don't make mistakes!

        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        means = {}
        returnAreas = {}

        detector = local_exp.getDetector()
        amps = detector.getAmplifiers()

        mi = local_exp.getMaskedImage()  # makeStatistics does not seem to take exposures
        temp = mi.clone()

        # Rescale each amp by the appropriate gain and subtract the mean.
        # NB these are views modifying the image in-place
        for amp in amps:
            ampName = amp.getName()
            rescaleIm = mi[amp.getBBox()]  # the soon-to-be scaled, mean subtracted, amp image
            rescaleTemp = temp[amp.getBBox()]
            mean = afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP, sctrl).getValue()
            gain = gains.gains[ampName]
            rescaleIm *= gain
            rescaleTemp *= gain
            self.log.debug("mean*gain = %s, clipped mean = %s" %
                           (mean*gain, afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP,
                                                              sctrl).getValue()))
            rescaleIm -= mean*gain

            if level == 'AMP':  # build the dicts if doing amp-wise
                means[ampName] = afwMath.makeStatistics(rescaleTemp[border: -border, border: -border,
                                                                    afwImage.LOCAL],
                                                        afwMath.MEANCLIP, sctrl).getValue()
                returnAreas[ampName] = rescaleIm

        if level == 'DETECTOR':  # else just average the whole detector
            detName = local_exp.getDetector().getId()
            means[detName] = afwMath.makeStatistics(temp[border: -border, border: -border, afwImage.LOCAL],
                                                    afwMath.MEANCLIP, sctrl).getValue()
            returnAreas[detName] = rescaleIm

        return returnAreas, means

    def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None):
        """Calculate the cross-correlation of an area.

        If the area in question contains multiple amplifiers then they must
        have been gain corrected.

        Parameters:
        -----------
        maskedIm0 : `lsst.afw.image.MaskedImageF`
            The first image area
        maskedIm1 : `lsst.afw.image.MaskedImageF`
            The second image area
        frameId : `str`, optional
            The frame identifier for use in the filename
            if writing debug outputs.
        detId : `str`, optional
            The detector identifier (detector, or detector+amp,
            depending on config.level) for use in the filename
            if writing debug outputs.
        runningBiasCorrSim : `bool`
            Set to true when using this function to calculate the amount of bias
            introduced by the sigma clipping. If False, the biasCorr parameter
            is divided by to remove the bias, but this is, of course, not
            appropriate when this is the parameter being measured.

        Returns:
        --------
        xcorr : `np.ndarray`
            The quarter-image cross-correlation
        mean : `float`
            The sum of the means of the input images,
            sigma-clipped, and with borders applied.
            This is used when using this function with simulations to calculate
            the biasCorr parameter.

        Notes:
        ------
        This function is controlled by the following config parameters:
        maxLag : `int`
            The maximum lag to use in the cross-correlation calculation
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        biasCorr : `float`
            Parameter used to correct for the bias introduced
            by the sigma cuts.
        """
        maxLag = self.config.maxLag
        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr
        biasCorr = self.config.biasCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        mean = afwMath.makeStatistics(maskedIm0.getImage()[border: -border, border: -border, afwImage.LOCAL],
                                      afwMath.MEANCLIP, sctrl).getValue()
        mean += afwMath.makeStatistics(maskedIm1.getImage()[border: -border, border: -border,
                                                            afwImage.LOCAL],
                                       afwMath.MEANCLIP, sctrl).getValue()

        # Diff the images, and apply border
        diff = maskedIm0.clone()
        diff -= maskedIm1.getImage()
        diff = diff[border: -border, border: -border, afwImage.LOCAL]

        if self.debug.writeDiffImages:
            filename = '_'.join(['diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))

        # Subtract background. It should be a constant, but it isn't always
        binsize = self.config.backgroundBinSize
        nx = diff.getWidth()//binsize
        ny = diff.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(diff, bctrl)
        bgImg = bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE, afwMath.REDUCE_INTERP_ORDER)
        bgMean = np.mean(bgImg.getArray())
        if abs(bgMean) >= self.config.backgroundWarnLevel:
            self.log.warn('Mean of background = %s > config.backgroundWarnLevel' % bgMean)

        diff -= bgImg

        if self.debug.writeDiffImages:
            filename = '_'.join(['bgSub', 'diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
        if self.debug.display:
            self.disp1.mtv(diff, title=frameId)

        self.log.debug("Median and variance of diff:")
        self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.MEDIAN, sctrl).getValue())
        self.log.debug("%s, %s" % (afwMath.makeStatistics(diff, afwMath.VARIANCECLIP, sctrl).getValue(),
                                   np.var(diff.getImage().getArray())))

        # Measure the correlations
        dim0 = diff[0: -maxLag, : -maxLag, afwImage.LOCAL]
        dim0 -= afwMath.makeStatistics(dim0, afwMath.MEANCLIP, sctrl).getValue()
        width, height = dim0.getDimensions()
        xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

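        # xcorr[xlag, ylag] is the clipped mean of
        # diff(x, y)*diff(x + xlag, y + ylag) over the overlap region,
        # i.e. one quadrant of the symmetric autocorrelation of the
        # difference image; dividing by biasCorr compensates for the
        # bias introduced by the sigma clipping.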
        for xlag in range(maxLag + 1):
            for ylag in range(maxLag + 1):
                dim_xy = diff[xlag:xlag + width, ylag: ylag + height, afwImage.LOCAL].clone()
                dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                dim_xy *= dim0
                xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                if not runningBiasCorrSim:
                    xcorr[xlag, ylag] /= biasCorr

        # TODO: DM-15305 improve debug functionality here.
        # This is position 2 for the removed code.

        return xcorr, mean

    def estimateGains(self, dataRef, visitPairs):
        """Estimate the amplifier gains using the specified visits.

        Given a dataRef and list of flats of varying intensity,
        calculate the gain for each amplifier in the detector
        using the photon transfer curve (PTC) method.

        The config.fixPtcThroughOrigin option determines whether the iterative
        fitting is forced to go through the origin or not.
        This defaults to True, fitting var=1/gain * mean.
        If set to False then var=1/g * mean + const is fitted.

        This is really a photon transfer curve (PTC) gain measurement task.
        See DM-14063 for results of a comparison between
        this task's numbers and the gain values in the HSC camera model,
        and those measured by the PTC task in eotest.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
            dataRef for the detector for the flats to be used
        visitPairs : `list` of `tuple`
            List of visit-pairs to use, as [(v1,v2), (v3,v4)...]

        Returns
        -------
        gains : `lsst.cp.pipe.makeBrighterFatterKernel.BrighterFatterGain`
            Object holding the per-amplifier gains, essentially a
            dict of the as-calculated amplifier gain values, keyed by amp name
        nominalGains : `dict` [`str`, `float`]
            Dict of the amplifier gains, as reported by the `detector` object,
            keyed by amplifier name
        """
        # NB: don't use dataRef.get('raw_detector') due to composites
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        ampMeans = {key: [] for key in ampNames}  # these get turned into np.arrays later
        ampCoVariances = {key: [] for key in ampNames}
        ampVariances = {key: [] for key in ampNames}

        # Loop over the amps in the detector,
        # calculating a PTC for each amplifier.
        # The amplifier iteration is performed in _calcMeansAndVars()
        # NB: no gain correction is applied
        for visPairNum, visPair in enumerate(visitPairs):
            _means, _vars, _covars = self._calcMeansAndVars(dataRef, visPair[0], visPair[1])

            # Do sanity checks; if these are failed more investigation is needed
            breaker = 0
            for amp in detector:
                ampName = amp.getName()
                if _means[ampName]*10 < _vars[ampName] or _means[ampName]*10 < _covars[ampName]:
                    msg = 'Sanity check failed; check visit pair %s amp %s' % (visPair, ampName)
                    self.log.warn(msg)
                    breaker += 1
            if breaker:
                continue

            # having made sanity checks
            # pull the values out into the respective dicts
            for k in _means.keys():  # keys are necessarily the same
                if _vars[k]*1.3 < _covars[k] or _vars[k]*0.7 > _covars[k]:
                    self.log.warn('Dropped a value')
                    continue
                ampMeans[k].append(_means[k])
                ampVariances[k].append(_vars[k])
                ampCoVariances[k].append(_covars[k])

        gains = {}
        nomGains = {}
        ptcResults = {}
        for amp in detector:
            ampName = amp.getName()
            if ampMeans[ampName] == []:  # all the data was dropped, amp is presumed bad
                gains[ampName] = 1.0
                ptcResults[ampName] = (0, 0, 1, 0)
                continue

            nomGains[ampName] = amp.getGain()
            slopeRaw, interceptRaw, rVal, pVal, stdErr = \
                stats.linregress(np.asarray(ampMeans[ampName]), np.asarray(ampCoVariances[ampName]))
            slopeFix, _ = self._iterativeRegression(np.asarray(ampMeans[ampName]),
                                                    np.asarray(ampCoVariances[ampName]),
                                                    fixThroughOrigin=True)
            slopeUnfix, intercept = self._iterativeRegression(np.asarray(ampMeans[ampName]),
                                                              np.asarray(ampCoVariances[ampName]),
                                                              fixThroughOrigin=False)
            self.log.info("Slope of raw fit: %s, intercept: %s p value: %s" % (slopeRaw,
                                                                               interceptRaw, pVal))
            self.log.info("slope of fixed fit: %s, difference vs raw:%s" % (slopeFix,
                                                                            slopeFix - slopeRaw))
            self.log.info("slope of unfixed fit: %s, difference vs fix:%s" % (slopeUnfix,
                                                                              slopeFix - slopeUnfix))
            if self.config.fixPtcThroughOrigin:
                slopeToUse = slopeFix
            else:
                slopeToUse = slopeUnfix

            if self.debug.enabled:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax.plot(np.asarray(ampMeans[ampName]),
                        np.asarray(ampCoVariances[ampName]), linestyle='None', marker='x', label='data')
                if self.config.fixPtcThroughOrigin:
                    ax.plot(np.asarray(ampMeans[ampName]),
                            np.asarray(ampMeans[ampName])*slopeToUse, label='Fit through origin')
                else:
                    ax.plot(np.asarray(ampMeans[ampName]),
                            np.asarray(ampMeans[ampName])*slopeToUse + intercept,
                            label='Fit (intercept unconstrained)')

                dataRef.put(fig, "plotBrighterFatterPtc", amp=ampName)
                self.log.info('Saved PTC for detector %s amp %s' % (detector.getId(), ampName))
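            # The PTC slope is d(var)/d(mean) = 1/gain (var = mean/gain in
            # the fixed-through-origin parameterisation above), so the gain
            # is the reciprocal of the fitted slope.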
            gains[ampName] = 1.0/slopeToUse
            # change the fit to use a cubic and match parameters with Lage method
            # or better, use the PTC task here too
            ptcResults[ampName] = (0, 0, 1, 0)

        return BrighterFatterGain(gains, ptcResults), nomGains

    def _calcMeansAndVars(self, dataRef, v1, v2):
        """Calculate the means, vars, covars, and retrieve the nominal gains,
        for each amp in each detector.

        This code runs using two visit numbers, and for the detector specified.
        It calculates the correlations in the individual amps without
        rescaling any gains. This allows a photon transfer curve
        to be generated and the gains measured.

        Images are assembled using the isrTask, and basic isr is performed.

        Parameters:
        -----------
        dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
            dataRef for the detector for the repo containing the flats to be used
        v1 : `int`
            First visit of the visit pair
        v2 : `int`
            Second visit of the visit pair

        Returns
        -------
        means, vars, covars : `tuple` of `dict`
            Three dicts, keyed by ampName,
            containing the sum of the image-means,
            the variance, and the quarter-image of the xcorr.
        """
        sigma = self.config.nSigmaClipGainCalc
        maxLag = self.config.maxLag
        border = self.config.nPixBorderGainCalc
        biasCorr = self.config.biasCorr

        # NB: don't use dataRef.get('raw_detector') due to composites
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]

        ampMeans = {}

        # manipulate the dataId to get a postISR exposure for each visit
        # from the detector obj, restoring its original state afterwards
        originalDataId = dataRef.dataId.copy()
        dataRef.dataId['expId'] = v1
        exp1 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId['expId'] = v2
        exp2 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId = originalDataId
        exps = [exp1, exp2]
        checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)

        detector = exps[0].getDetector()
        ims = [self._convertImagelikeToFloatImage(exp) for exp in exps]

        if self.debug.display:
            self.disp1.mtv(ims[0], title=str(v1))
            self.disp2.mtv(ims[1], title=str(v2))

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)
        for imNum, im in enumerate(ims):

            # calculate the sigma-clipped mean, excluding the borders
            # safest to apply borders to all amps regardless of edges
            # easier, camera-agnostic, and mitigates potentially dodgy
            # overscan-biases around edges as well
            for amp in detector:
                ampName = amp.getName()
                ampIm = im[amp.getBBox()]
                mean = afwMath.makeStatistics(ampIm[border: -border, border: -border, afwImage.LOCAL],
                                              afwMath.MEANCLIP, sctrl).getValue()
                if ampName not in ampMeans.keys():
                    ampMeans[ampName] = []
                ampMeans[ampName].append(mean)
                ampIm -= mean

        diff = ims[0].clone()
        diff -= ims[1]

        temp = diff[border: -border, border: -border, afwImage.LOCAL]

        # Subtract background. It should be a constant,
        # but it isn't always (e.g. some SuprimeCam flats)
        # TODO: Check how this looks, and if this is the "right" way to do this
        binsize = self.config.backgroundBinSize
        nx = temp.getWidth()//binsize
        ny = temp.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(temp, bctrl)

        box = diff.getBBox()
        box.grow(-border)
        diff[box, afwImage.LOCAL] -= bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE,
                                                    afwMath.REDUCE_INTERP_ORDER)

        variances = {}
        coVars = {}
        for amp in detector:
            ampName = amp.getName()
            diffAmpIm = diff[amp.getBBox()].clone()
            diffAmpImCrop = diffAmpIm[border: -border - maxLag, border: -border - maxLag, afwImage.LOCAL]
            diffAmpImCrop -= afwMath.makeStatistics(diffAmpImCrop, afwMath.MEANCLIP, sctrl).getValue()
            w, h = diffAmpImCrop.getDimensions()
            xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

            # calculate the cross-correlation
            for xlag in range(maxLag + 1):
                for ylag in range(maxLag + 1):
                    dim_xy = diffAmpIm[border + xlag: border + xlag + w,
                                       border + ylag: border + ylag + h,
                                       afwImage.LOCAL].clone()
                    dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                    dim_xy *= diffAmpImCrop
                    xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy,
                                                               afwMath.MEANCLIP, sctrl).getValue()/(biasCorr)

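            # xcorr[0, 0] is the clipped variance of the difference image for
            # this amp; tiling the quarter correlation gives the full
            # symmetric array, whose sum serves as the covariance estimate.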
            variances[ampName] = xcorr[0, 0]
            xcorr_full = self._tileArray(xcorr)
            coVars[ampName] = np.sum(xcorr_full)

            msg = "M1: " + str(ampMeans[ampName][0])
            msg += " M2 " + str(ampMeans[ampName][1])
            msg += " M_sum: " + str((ampMeans[ampName][0]) + ampMeans[ampName][1])
            msg += " Var " + str(variances[ampName])
            msg += " coVar: " + str(coVars[ampName])
            self.log.debug(msg)

        means = {}
        for amp in detector:
            ampName = amp.getName()
            means[ampName] = ampMeans[ampName][0] + ampMeans[ampName][1]

        return means, variances, coVars

    def _plotXcorr(self, xcorr, mean, zmax=0.05, title=None, fig=None, saveToFileName=None):
        """Plot the correlation functions."""
        try:
            xcorr = xcorr.getArray()
        except Exception:
            pass

        xcorr /= float(mean)
        # xcorr.getArray()[0,0]=abs(xcorr.getArray()[0,0]-1)

        if fig is None:
            fig = plt.figure()
        else:
            fig.clf()

        ax = fig.add_subplot(111, projection='3d')
        ax.azim = 30
        ax.elev = 20

        nx, ny = np.shape(xcorr)

        xpos, ypos = np.meshgrid(np.arange(nx), np.arange(ny))
        xpos = xpos.flatten()
        ypos = ypos.flatten()
        zpos = np.zeros(nx*ny)
        dz = xcorr.flatten()
        dz[dz > zmax] = zmax

        ax.bar3d(xpos, ypos, zpos, 1, 1, dz, color='b', zsort='max', sort_zpos=100)
        if xcorr[0, 0] > zmax:
            ax.bar3d([0], [0], [zmax], 1, 1, 1e-4, color='c')

        ax.set_xlabel("row")
        ax.set_ylabel("column")
        ax.set_zlabel(r"$\langle{(F_i - \bar{F})(F_i - \bar{F})}\rangle/\bar{F}$")

        if title:
            fig.suptitle(title)
        if saveToFileName:
            fig.savefig(saveToFileName)

    def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None):
        """Use linear regression to fit a line, iteratively removing outliers.

        Useful when you have a sufficiently large number of points on your PTC.
        This function iterates until either there are no outliers of
        config.nSigmaClip magnitude, or until the specified maximum number
        of iterations has been performed.

        Parameters:
        -----------
        x : `numpy.array`
            The independent variable. Must be a numpy array, not a list.
        y : `numpy.array`
            The dependent variable. Must be a numpy array, not a list.
        fixThroughOrigin : `bool`, optional
            Whether to fix the PTC through the origin or allow a y-intercept.
        nSigmaClip : `float`, optional
            The number of sigma to clip to.
            Taken from the task config if not specified.
        maxIter : `int`, optional
            The maximum number of iterations allowed.
            Taken from the task config if not specified.

        Returns:
        --------
        slope : `float`
            The slope of the line of best fit
        intercept : `float`
            The y-intercept of the line of best fit
        """
        if not maxIter:
            maxIter = self.config.maxIterRegression
        if not nSigmaClip:
            nSigmaClip = self.config.nSigmaClipRegression

        nIter = 0
        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(nSigmaClip)

        if fixThroughOrigin:
            while nIter < maxIter:
                nIter += 1
                self.log.debug("Origin fixed, iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
                TEST = x[:, np.newaxis]
                slope, _, _, _ = np.linalg.lstsq(TEST, y)
                slope = slope[0]
                res = (y - slope * x) / x
                resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
                resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
                index = np.where((res > (resMean + nSigmaClip*resStd))
                                 | (res < (resMean - nSigmaClip*resStd)))
                self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
                if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter):  # run out of points or iters
                    break
                x = np.delete(x, index)
                y = np.delete(y, index)

            return slope, 0

        while nIter < maxIter:
            nIter += 1
            self.log.debug("Iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
            xx = np.vstack([x, np.ones(len(x))]).T
            ret, _, _, _ = np.linalg.lstsq(xx, y)
            slope, intercept = ret
            res = y - slope*x - intercept
            resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
            resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
            index = np.where((res > (resMean + nSigmaClip * resStd)) | (res < resMean - nSigmaClip * resStd))
            self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
            if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter):  # run out of points, or iterations
                break
            x = np.delete(x, index)
            y = np.delete(y, index)

        return slope, intercept

1270  def generateKernel(self, corrs, means, objId, rejectLevel=None):
1271  """Generate the full kernel from a list of cross-correlations and means.
1272 
1273  Taking a list of quarter-image, gain-corrected cross-correlations,
1274  do a pixel-wise sigma-clipped mean of each,
1275  and tile into the full-sized kernel image.
1276 
1277  Each corr in corrs is one quarter of the full cross-correlation,
1278  and has been gain-corrected. Each mean in means is a tuple of the means
1279  of the two individual images, corresponding to that corr.
1280 
1281  Parameters:
1282  -----------
1283  corrs : `list` of `numpy.ndarray`, (Ny, Nx)
1284  A list of the quarter-image cross-correlations
1285  means : `list` of `tuples` of `floats`
1286  The means of the input images for each corr in corrs
1287  rejectLevel : `float`, optional
1288  This is essentially is a sanity check parameter.
1289  If this condition is violated there is something unexpected
1290  going on in the image, and it is discarded from the stack before
1291  the clipped-mean is calculated.
1292  If not provided then config.xcorrCheckRejectLevel is used
1293 
1294  Returns:
1295  --------
1296  kernel : `numpy.ndarray`, (Ny, Nx)
1297  The output kernel
1298  """
1299  self.log.info('Calculating kernel for %s'%objId)
1300 
1301  if not rejectLevel:
1302  rejectLevel = self.config.xcorrCheckRejectLevel
1303 
1304  if self.config.correlationQuadraticFit:
1305  xcorrList = []
1306  fluxList = []
1307 
1308  for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
1309  msg = 'For item %s, initial corr[0,0] = %g, corr[1,0] = %g'%(corrNum, corr[0, 0], corr[1, 0])
1310  self.log.info(msg)
1311  if self.config.level == 'DETECTOR':
1312  # This is now done in _applyGains() but only if level is not DETECTOR
1313  corr[0, 0] -= (mean1 + mean2)
1314  fullCorr = self._tileArray_tileArray(corr)
1315 
1316  # Craig Lage says he doesn't understand the negative sign, but it needs to be there
1317  xcorrList.append(-fullCorr / 2.0)
1318  flux = (mean1 + mean2) / 2.0
1319  fluxList.append(flux * flux)
1320  # We're using the linear fit algorithm to find a quadratic fit,
1321  # so we square the x-axis.
1322  # The step below does not need to be done, but is included
1323  # so that correlations can be compared
1324  # directly to existing code. We might want to take it out.
1325  corr /= -1.0*(mean1**2 + mean2**2)
1326 
1327  if not xcorrList:
1328  raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
1329  "Either the data is bad, or config.xcorrCheckRejectLevel is too low")
1330 
1331  # This method fits a quadratic vs flux and keeps only the quadratic term.
1332  meanXcorr = np.zeros_like(fullCorr)
1333  xcorrList = np.asarray(xcorrList)
1334 
1335  for i in range(np.shape(meanXcorr)[0]):
1336  for j in range(np.shape(meanXcorr)[1]):
1337  # Note the i,j inversion. This serves the same function as the transpose step in
1338  # the base code. I don't understand why it is there, but I put it in to be consistent.
1339  slopeRaw, interceptRaw, rVal, pVal, stdErr = stats.linregress(np.asarray(fluxList),
1340  xcorrList[:, j, i])
1341  try:
1342  slope, intercept = self._iterativeRegression_iterativeRegression(np.asarray(fluxList),
1343  xcorrList[:, j, i],
1344  fixThroughOrigin=True)
1345  msg = "(%s,%s):Slope of raw fit: %s, intercept: %s p value: %s" % (i, j, slopeRaw,
1346  interceptRaw, pVal)
1347  self.log.debug(msg)
1348  self.log.debug("(%s,%s):Slope of fixed fit: %s" % (i, j, slope))
1349 
1350  meanXcorr[i, j] = slope
1351  except ValueError:
1352  meanXcorr[i, j] = slopeRaw
1353 
1354  msg = f"i={i}, j={j}, slope = {slope:.6g}, slopeRaw = {slopeRaw:.6g}"
1355  self.log.debug(msg)
1356  self.log.info('Quad Fit meanXcorr[0,0] = %g, meanXcorr[1,0] = %g'%(meanXcorr[8, 8],
1357  meanXcorr[9, 8]))
1358 
1359  else:
1360  # Try to average over a set of possible inputs.
1361  # This generates a simple function of the kernel that
1362  # should be constant across the images, and averages that.
1363  xcorrList = []
1364  sctrl = afwMath.StatisticsControl()
1365  sctrl.setNumSigmaClip(self.config.nSigmaClipKernelGen)
1366 
1367  for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
1368  corr[0, 0] -= (mean1 + mean2)
1369  if corr[0, 0] > 0:
1370  self.log.warn('Skipped item %s due to unexpected value of (variance-mean)' % corrNum)
1371  continue
1372  corr /= -1.0*(mean1**2 + mean2**2)
1373 
1374  fullCorr = self._tileArray(corr)
1375 
1376  xcorrCheck = np.abs(np.sum(fullCorr))/np.sum(np.abs(fullCorr))
1377  if xcorrCheck > rejectLevel:
1378  self.log.warn("Sum of the xcorr is unexpectedly high. Investigate item num %s for %s. \n"
1379  "value = %s" % (corrNum, objId, xcorrCheck))
1380  continue
1381  xcorrList.append(fullCorr)
1382 
1383  if not xcorrList:
1384  raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
1385  "Either the data is bad, or config.xcorrCheckRejectLevel is too low")
1386 
1387  # stack the individual xcorrs and apply a per-pixel clipped-mean
1388  meanXcorr = np.zeros_like(fullCorr)
1389  xcorrList = np.transpose(xcorrList)
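  # np.transpose reverses the axes, turning the (N, Ny, Nx) stack into
  # (Nx, Ny, N), so that xcorrList[i, j] is the vector of values at one
  # pixel across all N retained inputs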
1390  for i in range(np.shape(meanXcorr)[0]):
1391  for j in range(np.shape(meanXcorr)[1]):
1392  meanXcorr[i, j] = afwMath.makeStatistics(xcorrList[i, j],
1393  afwMath.MEANCLIP, sctrl).getValue()
1394 
1395  if self.config.correlationModelRadius < (meanXcorr.shape[0] - 1) / 2:
1396  sumToInfinity = self._buildCorrelationModel(meanXcorr, self.config.correlationModelRadius,
1397  self.config.correlationModelSlope)
1398  self.log.info("SumToInfinity = %s" % sumToInfinity)
1399  else:
1400  sumToInfinity = 0.0
1401  if self.config.forceZeroSum:
1402  self.log.info("Forcing sum of correlation matrix to zero")
1403  meanXcorr = self._forceZeroSum(meanXcorr, sumToInfinity)
1404 
1405  return meanXcorr, self.successiveOverRelax(meanXcorr)
1406 
1407  def successiveOverRelax(self, source, maxIter=None, eLevel=None):
1408  """An implementation of the successive over relaxation (SOR) method.
1409 
1410  SOR is a numerical method for solving a system of linear equations
1411  with faster convergence than the Gauss-Seidel method.
1412 
1413  Parameters:
1414  -----------
1415  source : `numpy.ndarray`
1416  The source term of the discrete Poisson equation to solve; must be square.
1417  maxIter : `int`, optional
1418  Maximum number of iterations to attempt before aborting.
1419  eLevel : `float`, optional
1420  The target error level at which we deem convergence to have
1421  occurred.
1422 
1423  Returns:
1424  --------
1425  output : `numpy.ndarray`
1426  The solution.
1427  """
1428  if not maxIter:
1429  maxIter = self.config.maxIterSuccessiveOverRelaxation
1430  if not eLevel:
1431  eLevel = self.config.eLevelSuccessiveOverRelaxation
1432 
1433  assert source.shape[0] == source.shape[1], "Input array must be square"
1434  # initialize, and set boundary conditions
1435  func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1436  resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1437  rhoSpe = np.cos(np.pi/source.shape[0]) # Here a square grid is assumed
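  # rhoSpe is the spectral radius of the Jacobi iteration for this grid;
  # it drives the Chebyshev acceleration of omega in the loop below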
1438 
1439  # Calculate the initial error
1440  for i in range(1, func.shape[0] - 1):
1441  for j in range(1, func.shape[1] - 1):
1442  resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
1443  + func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
1444  inError = np.sum(np.abs(resid))
1445 
1446  # Iterate until convergence
1447  # We perform two sweeps per cycle,
1448  # updating 'odd' and 'even' points separately
1449  nIter = 0
1450  omega = 1.0
1451  dx = 1.0
1452  while nIter < maxIter*2:
1453  outError = 0
1454  if nIter%2 == 0:
1455  for i in range(1, func.shape[0] - 1, 2):
1456  for j in range(1, func.shape[1] - 1, 2):
1457  resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j]
1458  + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1459  func[i, j] += omega*resid[i, j]*.25
1460  for i in range(2, func.shape[0] - 1, 2):
1461  for j in range(2, func.shape[1] - 1, 2):
1462  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
1463  + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1464  func[i, j] += omega*resid[i, j]*.25
1465  else:
1466  for i in range(1, func.shape[0] - 1, 2):
1467  for j in range(2, func.shape[1] - 1, 2):
1468  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
1469  + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1470  func[i, j] += omega*resid[i, j]*.25
1471  for i in range(2, func.shape[0] - 1, 2):
1472  for j in range(1, func.shape[1] - 1, 2):
1473  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
1474  + func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1475  func[i, j] += omega*resid[i, j]*.25
1476  outError = np.sum(np.abs(resid))
1477  if outError < inError*eLevel:
1478  break
1479  if nIter == 0:
1480  omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
1481  else:
1482  omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
1483  nIter += 1
1484 
1485  if nIter >= maxIter*2:
1486  self.log.warn("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
1487  "\noutError: %s, inError: %s," % (nIter//2, outError, inError*eLevel))
1488  else:
1489  self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
1490  "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
1491  return func[1: -1, 1: -1]
1492 
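  # Illustrative aside (not part of the original source): at convergence the
  # returned array phi satisfies the discrete Poisson equation lap(phi) =
  # source with zero boundary values, which can be spot-checked (assuming
  # `task` is an already-constructed instance of this class):
  #     source = np.zeros((17, 17)); source[8, 8] = 1.0
  #     phi = task.successiveOverRelax(source)
  #     pad = np.pad(phi, 1)                    # reimpose zero boundaries
  #     lap = (pad[:-2, 1:-1] + pad[2:, 1:-1] + pad[1:-1, :-2]
  #            + pad[1:-1, 2:] - 4.0*pad[1:-1, 1:-1])
  #     # lap should agree with source to within the convergence level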
1493  @staticmethod
1494  def _tileArray(in_array):
1495  """Given an input quarter-image, tile/mirror it and return full image.
1496 
1497  Given a square input of side-length n, of the form
1498 
1499  input = array([[1, 2, 3],
1500  [4, 5, 6],
1501  [7, 8, 9]])
1502 
1503  return an array of size 2n-1 as
1504 
1505  output = array([[ 9, 8, 7, 8, 9],
1506  [ 6, 5, 4, 5, 6],
1507  [ 3, 2, 1, 2, 3],
1508  [ 6, 5, 4, 5, 6],
1509  [ 9, 8, 7, 8, 9]])
1510 
1511  Parameters:
1512  -----------
1513  in_array : `np.array`
1514  The square input quarter-array
1515 
1516  Returns:
1517  --------
1518  output : `np.array`
1519  The full, tiled array
1520  """
1521  assert in_array.shape[0] == in_array.shape[1], "Input array must be square"
1522  length = in_array.shape[0] - 1
1523  output = np.zeros((2*length + 1, 2*length + 1))
1524 
1525  for i in range(length + 1):
1526  for j in range(length + 1):
1527  output[i + length, j + length] = in_array[i, j]
1528  output[-i + length, j + length] = in_array[i, j]
1529  output[i + length, -j + length] = in_array[i, j]
1530  output[-i + length, -j + length] = in_array[i, j]
1531  return output
1532 
1533  @staticmethod
1534  def _forceZeroSum(inputArray, sumToInfinity):
1535  """Given an array of correlations, adjust the
1536  central value to force the sum of the array to be zero.
1537 
1538  Parameters:
1539  -----------
1540  inputArray : `np.array`
1541  The input array, assumed square, of shape (2n+1) x (2n+1)
1542  sumToInfinity : `float`
1543  The modeled correlation sum beyond the measured array
1544 
1545  Returns:
1546  --------
1547  outputArray : `np.array`
1548  A copy of the input, with the central value inputArray[n,n] adjusted so that the total sum (array plus modeled tail) is zero.
1549  """
1550  assert inputArray.shape[0] == inputArray.shape[1]
1551  assert inputArray.shape[0] % 2 == 1
1552  center = int((inputArray.shape[0] - 1) / 2)
1553  outputArray = np.copy(inputArray)
1554  outputArray[center, center] -= inputArray.sum() - sumToInfinity
1555  return outputArray
1556 
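  # Illustrative aside (not part of the original source): for a 3x3 array of
  # ones and sumToInfinity = 0, the center becomes 1 - 9 = -8 and the result
  # sums to zero:
  #     out = MakeBrighterFatterKernelTask._forceZeroSum(np.ones((3, 3)), 0.0)
  #     assert abs(out.sum()) < 1e-12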
1557  @staticmethod
1558  def _buildCorrelationModel(array, replacementRadius, slope):
1559  """Given an array of correlations, build a model
1560  for correlations beyond replacementRadius pixels from the center
1561  and replace the measured values with the model.
1562 
1563  Parameters:
1564  -----------
1565  array : `np.array`
1566  The square input array of shape (2n+1) x (2n+1); values beyond
1567  replacementRadius are replaced in place by the model
1568  replacementRadius : `int`
1569  Radius beyond which measured values are replaced
1570  slope : `float`, the power-law index of the correlation model
1571  Returns:
1572  --------
1573  sumToInfinity : `float`, the modeled correlation sum beyond the
1573  measured array (0.0 if the nearest-neighbor correlations are non-negative)
1574  """
1575  assert array.shape[0] == array.shape[1]
1576  assert array.shape[0] % 2 == 1
1577  assert replacementRadius > 1
1578  center = int((array.shape[0] - 1) / 2)
1579  # First we check if either the [0,1] or [1,0] correlation is positive.
1580  # If so, the data is seriously messed up. This has happened in some bad amplifiers.
1581  # In this case, leave the array unchanged and return 0.0 for the tail sum.
1582  if (array[center, center + 1] >= 0.0) or (array[center + 1, center] >= 0.0):
1583  return 0.0
1584 
1585  intercept = (np.log10(-array[center, center + 1]) + np.log10(-array[center + 1, center])) / 2.0
1586  preFactor = 10**intercept
1587  slopeFactor = 2.0*abs(slope) - 2.0
1588  sumToInfinity = 2.0*np.pi*preFactor / (slopeFactor*(float(center)+0.5)**slopeFactor)
1589  # sumToInfinity is the integral of the model beyond what is measured.
1590  # It is used to adjust C00 in the case of forcing zero sum.
1591 
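  # Derivation sketch for the expression above: with the model
  # C(r) = -preFactor * r**(2*slope) and slope < -1, the integral of the
  # model magnitude beyond radius R = center + 0.5 is
  #     integral_R^inf 2*pi*r * preFactor * r**(2*slope) dr
  #         = 2*pi*preFactor / (slopeFactor * R**slopeFactor),
  # where slopeFactor = -(2*slope + 2) = 2*abs(slope) - 2.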
1592  # Now replace the pixels beyond replacementRadius with the model values
1593  for i in range(array.shape[0]):
1594  for j in range(array.shape[1]):
1595  r2 = float((i-center)*(i-center) + (j-center)*(j-center))
1596  if abs(i-center) < replacementRadius and abs(j-center) < replacementRadius:
1597  continue
1598  else:
1599  newCvalue = -preFactor * r2**slope
1600  array[i, j] = newCvalue
1601  return sumToInfinity
1602 
1603  @staticmethod
1604  def _convertImagelikeToFloatImage(imagelikeObject):
1605  """Turn an exposure or masked image of any type into an ImageF."""
1606  for attr in ("getMaskedImage", "getImage"):
1607  if hasattr(imagelikeObject, attr):
1608  imagelikeObject = getattr(imagelikeObject, attr)()
1609  try:
1610  floatImage = imagelikeObject.convertF()
1611  except AttributeError:
1612  raise RuntimeError("Failed to convert image to float")
1613  return floatImage
1614 
1615 
1616 def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False,
1617  correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10, logger=None):
1618  """Calculate the bias induced when sigma-clipping non-Gaussian distributions.
1619 
1620  Fill image-pairs of the specified size with Poisson-distributed values,
1621  adding correlations as necessary. Then calculate the cross correlation,
1622  and calculate the bias induced using the cross-correlation image
1623  and the image means.
1624 
1625  Parameters:
1626  -----------
1627  fluxLevels : `list` of `int`
1628  The mean flux levels at which to simulate.
1629  Nominal values might be something like [70000, 90000, 110000]
1630  imageShape : `tuple` of `int`
1631  The shape of the image array to simulate, nx by ny pixels.
1632  repeats : `int`, optional
1633  Number of repeats to perform so that results
1634  can be averaged to improve SNR.
1635  seed : `int`, optional
1636  The random seed to use for the Poisson points.
1637  addCorrelations : `bool`, optional
1638  Whether to add brighter-fatter-like correlations to the simulated
1639  images. If True, a correlation between x_{i,j} and x_{i+1,j+1} is
1640  introduced by adding a*x_{i,j} to x_{i+1,j+1}.
1641  correlationStrength : `float`, optional
1642  The strength of the correlations.
1643  This is the value of the coefficient `a` in the above definition.
1644  maxLag : `int`, optional
1645  The maximum lag to work to in pixels
1646  nSigmaClip : `float`, optional
1647  Number of sigma to clip to when calculating the sigma-clipped mean.
1648  border : `int`, optional
1649  Number of border pixels to mask
1650  logger : `lsst.log.Log`, optional
1651  Logger to use. Instantiated anew if not provided.
1652 
1653  Returns:
1654  --------
1655  biases : `dict` [`float`, `list` of `float`]
1656  A dictionary, keyed by flux level, containing a list of the biases
1657  for each repeat at that flux level
1658  means : `dict` [`float`, `list` of `float`]
1659  A dictionary, keyed by flux level, containing a list of the average
1660  mean fluxes (average of the mean of the two images)
1661  for the image pairs at that flux level
1662  xcorrs : `dict` [`float`, `list` of `np.ndarray`]
1663  A dictionary, keyed by flux level, containing a list of the xcorr
1664  images for the image pairs at that flux level
1665  """
1666  if logger is None:
1667  logger = lsstLog.Log.getDefaultLogger()
1668 
1669  means = {f: [] for f in fluxLevels}
1670  xcorrs = {f: [] for f in fluxLevels}
1671  biases = {f: [] for f in fluxLevels}
1672 
1673  config = MakeBrighterFatterKernelTaskConfig()
1674  config.isrMandatorySteps = []  # no isr but the validation routine is still run
1675  config.isrForbiddenSteps = []
1676  config.nSigmaClipXCorr = nSigmaClip
1677  config.nPixBorderXCorr = border
1678  config.maxLag = maxLag
1679  task = MakeBrighterFatterKernelTask(config=config)
1680 
1681  im0 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1682  im1 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
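 # afw images are constructed as (width, height), while numpy shapes are
 # (ny, nx), hence the reversed indices above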
1683 
1684  random = np.random.RandomState(seed)
1685 
1686  for rep in range(repeats):
1687  for flux in fluxLevels:
1688  data0 = random.poisson(flux, (imageShape)).astype(float)
1689  data1 = random.poisson(flux, (imageShape)).astype(float)
1690  if addCorrelations:
1691  data0[1:, 1:] += correlationStrength*data0[: -1, : -1]
1692  data1[1:, 1:] += correlationStrength*data1[: -1, : -1]
1693  im0.image.array[:, :] = data0
1694  im1.image.array[:, :] = data1
1695 
1696  _xcorr, _means = task._crossCorrelate(im0, im1, runningBiasCorrSim=True)
1697 
1698  means[flux].append(_means)
1699  xcorrs[flux].append(_xcorr)
1700  if addCorrelations:
1701  bias = xcorrs[flux][-1][1, 1]/means[flux][-1]*(1 + correlationStrength)/correlationStrength
1702  msg = f"Simulated/expected avg. flux: {flux:.1f}, {(means[flux][-1]/2):.1f}"
1703  logger.info(msg)
1704  logger.info(f"Bias: {bias:.6f}")
1705  else:
1706  bias = xcorrs[flux][-1][0, 0]/means[flux][-1]
1707  msg = f"Simulated/expected avg. flux: {flux:.1f}, {(means[flux][-1]/2):.1f}"
1708  logger.info(msg)
1709  logger.info(f"Bias: {bias:.6f}")
1710  biases[flux].append(bias)
1711 
1712  return biases, means, xcorrs
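# A minimal usage sketch (illustrative only; the flux levels and image shape
# below are example values, not recommendations):
#     biases, means, xcorrs = calcBiasCorr([70000, 90000, 110000], (100, 100),
#                                          repeats=2, seed=0)
#     for flux in biases:
#         print(flux, np.mean(biases[flux]))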