makeBrighterFatterKernel.py
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 #
22 """Calculation of brighter-fatter effect correlations and kernels."""
23 
24 __all__ = ['MakeBrighterFatterKernelTaskConfig',
25  'MakeBrighterFatterKernelTask',
26  'calcBiasCorr']
27 
28 import os
29 from scipy import stats
30 import numpy as np
31 import matplotlib.pyplot as plt
32 # the following import is required for 3d projection
33 from mpl_toolkits.mplot3d import axes3d # noqa: F401
34 
35 import lsstDebug
36 import lsst.afw.image as afwImage
37 import lsst.afw.math as afwMath
38 import lsst.afw.display as afwDisp
39 from lsst.ip.isr import IsrTask
40 import lsst.pex.config as pexConfig
41 import lsst.pipe.base as pipeBase
42 from .utils import PairedVisitListTaskRunner, checkExpLengthEqual
43 
44 
45 class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
46  """Config class for bright-fatter effect coefficient calculation."""
47 
48  isr = pexConfig.ConfigurableField(
49  target=IsrTask,
50  doc="""Task to perform instrumental signature removal""",
51  )
52  isrMandatorySteps = pexConfig.ListField(
53  dtype=str,
54  doc="isr operations that must be performed for valid results. Raises if any of these are False",
55  default=['doAssembleCcd']
56  )
57  isrForbiddenSteps = pexConfig.ListField(
58  dtype=str,
59  doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
60  default=['doFlat', 'doFringe', 'doAddDistortionModel', 'doBrighterFatter', 'doUseOpticsTransmission',
61  'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
62  )
63  isrDesirableSteps = pexConfig.ListField(
64  dtype=str,
65  doc="isr operations that it is advisable to perform, but are not mission-critical." +
66  " WARNs are logged for any of these found to be False.",
67  default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
68  )
69  doCalcGains = pexConfig.Field(
70  dtype=bool,
71  doc="Measure the per-amplifier gains (using the photon transfer curve method)?",
72  default=True,
73  )
74  ccdKey = pexConfig.Field(
75  dtype=str,
76  doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'",
77  default='ccd',
78  )
79  maxIterRegression = pexConfig.Field(
80  dtype=int,
81  doc="Maximum number of iterations for the regression fitter",
82  default=10
83  )
84  nSigmaClipGainCalc = pexConfig.Field(
85  dtype=int,
86  doc="Number of sigma to clip the pixel value distribution to during gain calculation",
87  default=5
88  )
89  nSigmaClipRegression = pexConfig.Field(
90  dtype=int,
91  doc="Number of sigma to clip outliers from the line of best fit to during iterative regression",
92  default=3
93  )
94  xcorrCheckRejectLevel = pexConfig.Field(
95  dtype=float,
96  doc="Sanity check level for the sum of the input cross-correlations. Arrays which " +
97  "sum to greater than this are discarded before the clipped mean is calculated.",
98  default=2.0
99  )
100  maxIterSuccessiveOverRelaxation = pexConfig.Field(
101  dtype=int,
102  doc="The maximum number of iterations allowed for the successive over-relaxation method",
103  default=10000
104  )
105  eLevelSuccessiveOverRelaxation = pexConfig.Field(
106  dtype=float,
107  doc="The target residual error for the successive over-relaxation method",
108  default=5.0e-14
109  )
110  nSigmaClipKernelGen = pexConfig.Field(
111  dtype=float,
112  doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See " +
113  "the generateKernel docstring for more info.",
114  default=4
115  )
116  nSigmaClipXCorr = pexConfig.Field(
117  dtype=float,
118  doc="Number of sigma to clip when calculating means for the cross-correlation",
119  default=5
120  )
121  maxLag = pexConfig.Field(
122  dtype=int,
123  doc="The maximum lag (in pixels) to use when calculating the cross-correlation/kernel",
124  default=8
125  )
126  nPixBorderGainCalc = pexConfig.Field(
127  dtype=int,
128  doc="The number of border pixels to exclude when calculating the gain",
129  default=10
130  )
131  nPixBorderXCorr = pexConfig.Field(
132  dtype=int,
133  doc="The number of border pixels to exclude when calculating the cross-correlation and kernel",
134  default=10
135  )
136  biasCorr = pexConfig.Field(
137  dtype=float,
138  doc="An empirically determined correction factor, used to correct for the sigma-clipping of" +
139  " a non-Gaussian distribution. Post DM-15277, code will exist here to calulate appropriate values",
140  default=0.9241
141  )
142  backgroundBinSize = pexConfig.Field(
143  dtype=int,
144  doc="Size of the background bins",
145  default=128
146  )
147  fixPtcThroughOrigin = pexConfig.Field(
148  dtype=bool,
149  doc="Constrain the fit of the photon transfer curve to go through the origin when measuring" +
150  "the gain?",
151  default=True
152  )
153  level = pexConfig.ChoiceField(
154  doc="The level at which to calculate the brighter-fatter kernels",
155  dtype=str, default="DETECTOR",
156  allowed={
157  "AMP": "Every amplifier treated separately",
158  "DETECTOR": "One kernel per detector",
159  }
160  )
161  backgroundWarnLevel = pexConfig.Field(
162  dtype=float,
163  doc="Log warnings if the mean of the fitted background is found to be above this level after " +
164  "differencing image pair.",
165  default=0.1
166  )
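 # As a hedged illustration (the values below are hypothetical, not
 # recommended defaults), a config override file for this task might contain:
 #
 #   config.level = 'AMP'
 #   config.maxLag = 10
 #   config.doCalcGains = False  # reuse previously persisted gains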
167 
168 
169 class BrighterFatterKernelTaskDataIdContainer(pipeBase.DataIdContainer):
170  """A DataIdContainer for the MakeBrighterFatterKernelTask."""
171 
172  def makeDataRefList(self, namespace):
173  """Compute refList based on idList.
174 
175  This method must be defined as the dataset does not exist before this
176  task is run.
177 
178  Parameters
179  ----------
180  namespace
181  Results of parsing the command-line.
182 
183  Notes
184  -----
185  Not called if ``add_id_argument`` was called
186  with ``doMakeDataRefList=False``.
187  Note that this is almost a copy-and-paste of the vanilla implementation,
188  but without checking if the datasets already exist,
189  as this task exists to make them.
190  """
191  if self.datasetType is None:
192  raise RuntimeError("Must call setDatasetType first")
193  butler = namespace.butler
194  for dataId in self.idList:
195  refList = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
196  # exclude nonexistent data
197  # this is a recursive test, e.g. for the sake of "raw" data
198  if not refList:
199  namespace.log.warn("No data found for dataId=%s", dataId)
200  continue
201  self.refList += refList
202 
203 
204 class BrighterFatterKernel:
205  """A (very) simple class to hold the kernel(s) generated.
206 
207  The kernel.kernel is a dictionary holding the kernels themselves.
208  One kernel if the level is 'DETECTOR' or,
209  nAmps in length, if level is 'AMP'.
210  The dict is keyed by either the detector ID or the amplifier IDs.
211 
212  The level is the level for which the kernel(s) were generated so that one
213  can know how to access the kernels without having to query the shape of
214  the dictionary holding the kernel(s).
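
 As an illustrative sketch (the names here are hypothetical), a
 DETECTOR-level instance would be built and queried as::

 bfk = BrighterFatterKernel('DETECTOR', {detId: kernelArray})
 kernelArray = bfk.kernel[detId]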
215  """
216 
217  def __init__(self, level, kernelDict):
218  assert type(level) == str
219  assert type(kernelDict) == dict
220  if level == 'DETECTOR':
221  assert len(kernelDict.keys()) == 1
222  if level == 'AMP':
223  assert len(kernelDict.keys()) > 1
224 
225  self.level = level
226  self.kernel = kernelDict
227 
228 
229 class MakeBrighterFatterKernelTask(pipeBase.CmdLineTask):
230  """Brighter-fatter effect correction-kernel calculation task.
231 
232  A command line task for calculating the brighter-fatter correction
233  kernel from pairs of flat-field images (with the same exposure length).
234 
235  The following operations are performed:
236 
237  - The configurable isr task is called, which unpersists and assembles the
238  raw images, and performs the selected instrument signature removal tasks.
239  For the purpose of brighter-fatter coefficient calculation it is
240  essential that certain components of isr are *not* performed, and
241  recommended that certain others are. The task checks the selected isr
242  configuration before it is run, and if forbidden components have been
243  selected the task will raise an error, and if recommended ones have not
244  been selected, warnings are logged.
245 
246  - The gain of each amplifier in the detector is calculated using
247  the photon transfer curve (PTC) method and used to correct the images
248  so that all calculations are done in units of electrons, and so that the
249  level across amplifier boundaries is continuous.
250  Outliers in the PTC are iteratively rejected
251  before fitting, with the nSigma rejection level set by
252  config.nSigmaClipRegression. Individual pixels in the input images
253  are sigma-clipped based on config.nSigmaClipGainCalc.
254 
255  - Each image is then cross-correlated with the one it's paired with
256  (with the pairing defined by the --visit-pairs command line argument),
257  which is done either whole-image to whole-image,
258  or amplifier-by-amplifier, depending on config.level.
259 
260  - Once the cross-correlations have been calculated for each visit pair,
261  these are used to generate the correction kernel.
262  The maximum lag used, in pixels, and hence the half-size
263  of the kernel generated, is given by config.maxLag,
264  i.e. a value of 10 will result in a kernel of size 2n-1 = 19x19 pixels.
265  Outlier values in these cross-correlations are rejected by applying a
266  pixel-wise sigma-clipped threshold to each cross-correlation in
267  the visit-pairs-length stack of cross-correlations.
268  The number of sigma clipped to is set by config.nSigmaClipKernelGen.
269 
270  - Once DM-15277 has been completed, a method will exist to calculate the
271  empirical correction factor, config.biasCorr.
272  TODO: DM-15277 update this part of the docstring once the ticket is done.
273  """
274 
275  RunnerClass = PairedVisitListTaskRunner
276  ConfigClass = MakeBrighterFatterKernelTaskConfig
277  _DefaultName = "makeBrighterFatterKernel"
278 
279  def __init__(self, *args, **kwargs):
280  pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
281  self.makeSubtask("isr")
282 
283  self.debug = lsstDebug.Info(__name__)
284  if self.debug.enabled:
285  self.log.info("Running with debug enabled...")
286  # If we're displaying, test it works and save displays for later.
287  # It's worth testing here as displays are flaky and sometimes
288  # can't be contacted, and given processing takes a while,
289  # it's a shame to fail late due to display issues.
290  if self.debug.display:
291  try:
292  afwDisp.setDefaultBackend(self.debug.displayBackend)
293  afwDisp.Display.delAllDisplays()
294  self.disp1 = afwDisp.Display(0, open=True)
295  self.disp2 = afwDisp.Display(1, open=True)
296 
297  im = afwImage.ImageF(1, 1)
298  im.array[:] = [[1]]
299  self.disp1.mtv(im)
300  self.disp1.erase()
301  except NameError:
302  self.debug.display = False
303  self.log.warn('Failed to setup/connect to display! Debug display has been disabled')
304 
305  plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
306  self.validateIsrConfig()
307  self.config.validate()
308  self.config.freeze()
309 
310  @classmethod
311  def _makeArgumentParser(cls):
312  """Augment argument parser for the MakeBrighterFatterKernelTask."""
313  parser = pipeBase.ArgumentParser(name=cls._DefaultName)
314  parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
315  help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
316  parser.add_id_argument("--id", datasetType="brighterFatterKernel",
317  ContainerClass=BrighterFatterKernelTaskDataIdContainer,
318  help="The ccds to use, e.g. --id ccd=0..100")
319  return parser
320 
321  def validateIsrConfig(self):
322  """Check that appropriate ISR settings are being used
323  for brighter-fatter kernel calculation."""
324 
325  # How should we handle saturation/bad regions?
326  # 'doSaturationInterpolation': True
327  # 'doNanInterpAfterFlat': False
328  # 'doSaturation': True
329  # 'doSuspect': True
330  # 'doWidenSaturationTrails': True
331  # 'doSetBadRegions': True
332 
333  configDict = self.config.isr.toDict()
334 
335  for configParam in self.config.isrMandatorySteps:
336  if configDict[configParam] is False:
337  raise RuntimeError('Must set config.isr.%s to True '
338  'for brighter-fatter kernel calculation' % configParam)
339 
340  for configParam in self.config.isrForbiddenSteps:
341  if configDict[configParam] is True:
342  raise RuntimeError('Must set config.isr.%s to False '
343  'for brighter-fatter kernel calculation' % configParam)
344 
345  for configParam in self.config.isrDesirableSteps:
346  if configParam not in configDict:
347  self.log.info('Failed to find key %s in the isr config dict. You probably want '
348  'to set the equivalent for your obs_package to True.' % configParam)
349  continue
350  if configDict[configParam] is False:
351  self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calculation. '
352  'It is probably desirable to have this set to True' % configParam)
353 
354  # subtask settings
355  if not self.config.isr.assembleCcd.doTrim:
356  raise RuntimeError('Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True')
357 
358  @pipeBase.timeMethod
359  def runDataRef(self, dataRef, visitPairs):
360  """Run the brighter-fatter measurement task.
361 
362  For a dataRef (which is each detector here),
363  and given a list of visit pairs, calculate the
364  brighter-fatter kernel for the detector.
365 
366  Parameters
367  ----------
368  dataRef : `lsst.daf.persistence.ButlerDataRef`
369  dataRef for the detector for the visits to be fit.
370  visitPairs : `iterable` of `tuple` of `int`
371  Pairs of visit numbers to be processed together
372  """
373  xcorrs = {} # dict of lists keyed by either amp or detector depending on config.level
374  means = {}
375  kernels = {}
376 
377  # setup necessary objects
378  detNum = dataRef.dataId[self.config.ccdKey]
379  if self.config.level == 'DETECTOR':
380  xcorrs = {detNum: []}
381  means = {detNum: []}
382  elif self.config.level == 'AMP':
383  # NB: don't use dataRef.get('raw_detector')
384  # this currently doesn't work for composites because of the way
385  # composite objects (i.e. LSST images) are handled/constructed
386  # these need to be retrieved from the camera and dereferenced
387  # rather than accessed directly
388  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
389  amps = detector.getAmplifiers()
390  ampNames = [amp.getName() for amp in amps]
391  xcorrs = {key: [] for key in ampNames}
392  means = {key: [] for key in ampNames}
393  else:
394  raise RuntimeError("Unsupported level: {}".format(self.config.level))
395 
396  # calculate or retrieve the gains
397  if self.config.doCalcGains:
398  self.log.info('Compute gains for detector %s' % detNum)
399  gains, nomGains = self.estimateGains(dataRef, visitPairs)
400  dataRef.put(gains, datasetType='brighterFatterGain')
401  self.log.debug('Finished gain estimation for detector %s' % detNum)
402  else:
403  gains = dataRef.get('brighterFatterGain')
404  if not gains:
405  raise RuntimeError('Failed to retrieve gains for detector %s' % detNum)
406  self.log.info('Retrieved stored gain for detector %s' % detNum)
407  self.log.debug('Detector %s has gains %s' % (detNum, gains))
408 
409  # Loop over pairs of visits
410  # calculating the cross-correlations at the required level
411  for (v1, v2) in visitPairs:
412 
413  dataRef.dataId['visit'] = v1
414  exp1 = self.isr.runDataRef(dataRef).exposure
415  dataRef.dataId['visit'] = v2
416  exp2 = self.isr.runDataRef(dataRef).exposure
417  del dataRef.dataId['visit']
418  checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
419 
420  self.log.info('Preparing images for cross-correlation calculation for detector %s' % detNum)
421  # note the shape of these returns depends on level
422  _scaledMaskedIms1, _means1 = self._makeCroppedExposures(exp1, gains, self.config.level)
423  _scaledMaskedIms2, _means2 = self._makeCroppedExposures(exp2, gains, self.config.level)
424 
425  # Compute the cross-correlation and means
426  # at the appropriate config.level:
427  # - "DETECTOR": one key, so compare the two visits to each other
428  # - "AMP": n_amp keys, comparing each amplifier of one visit
429  # to the same amplifier in the visit it's paired with
430  for det_object in _scaledMaskedIms1.keys():
431  _xcorr, _ = self._crossCorrelate(_scaledMaskedIms1[det_object],
432  _scaledMaskedIms2[det_object])
433  xcorrs[det_object].append(_xcorr)
434  means[det_object].append([_means1[det_object], _means2[det_object]])
435 
436  # TODO: DM-15305 improve debug functionality here.
437  # This is position 1 for the removed code.
438 
439  # generate the kernel(s)
440  self.log.info('Generating kernel(s) for %s' % detNum)
441  for det_object in xcorrs.keys(): # looping over either detectors or amps
442  if self.config.level == 'DETECTOR':
443  objId = 'detector %s' % det_object
444  elif self.config.level == 'AMP':
445  objId = 'detector %s AMP %s' % (detNum, det_object)
446  kernels[det_object] = self.generateKernel(xcorrs[det_object], means[det_object], objId)
447  dataRef.put(BrighterFatterKernel(self.config.level, kernels))
448 
449  self.log.info('Finished generating kernel(s) for %s' % detNum)
450  return pipeBase.Struct(exitStatus=0)
451 
452  def _makeCroppedExposures(self, exp, gains, level):
453  """Prepare exposure for cross-correlation calculation.
454 
455  For each amp, crop by the border amount, specified by
456  config.nPixBorderXCorr, then rescale by the gain
457  and subtract the sigma-clipped mean.
458  If the level is 'DETECTOR' then this is done
459  to the whole image so that it can be cross-correlated, with a copy
460  being returned.
461  If the level is 'AMP' then this is done per-amplifier,
462  and a copy of each prepared amp-image returned.
463 
464  Parameters:
465  -----------
466  exp : `lsst.afw.image.exposure.ExposureF`
467  The exposure to prepare
468  gains : `dict` of `float`
469  Dictionary of the amplifier gain values, keyed by amplifier name
470  level : `str`
471  Either `AMP` or `DETECTOR`
472 
473  Returns:
474  --------
475  scaledMaskedIms : `dict` of `lsst.afw.image.maskedImage.MaskedImageF`
476  Depending on level, this is either one item, or n_amp items,
477  keyed by detectorId or ampName
 means : `dict` of `float`
 The sigma-clipped mean of each returned area, keyed the same way
478 
479  Notes:
480  ------
481  This function is controlled by the following config parameters:
482  nPixBorderXCorr : `int`
483  The number of border pixels to exclude
484  nSigmaClipXCorr : `float`
485  The number of sigma to be clipped to
486  """
487  assert(isinstance(exp, afwImage.ExposureF))
488 
489  local_exp = exp.clone() # we don't want to modify the image passed in
490  del exp # ensure we don't make mistakes!
491 
492  border = self.config.nPixBorderXCorr
493  sigma = self.config.nSigmaClipXCorr
494 
495  sctrl = afwMath.StatisticsControl()
496  sctrl.setNumSigmaClip(sigma)
497 
498  means = {}
499  returnAreas = {}
500 
501  detector = local_exp.getDetector()
502  amps = detector.getAmplifiers()
503 
504  mi = local_exp.getMaskedImage() # makeStatistics does not seem to take exposures
505  temp = mi.clone()
506 
507  # Rescale each amp by the appropriate gain and subtract the mean.
508  # NB these are views modifying the image in-place
509  for amp in amps:
510  ampName = amp.getName()
511  rescaleIm = mi[amp.getBBox()] # the soon-to-be scaled, mean-subtracted, amp image
512  rescaleTemp = temp[amp.getBBox()]
513  mean = afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP, sctrl).getValue()
514  gain = gains[ampName]
515  rescaleIm *= gain
516  rescaleTemp *= gain
517  self.log.debug("mean*gain = %s, clipped mean = %s" %
518  (mean*gain, afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP,
519  sctrl).getValue()))
520  rescaleIm -= mean*gain
521 
522  if level == 'AMP': # build the dicts if doing amp-wise
523  means[ampName] = afwMath.makeStatistics(rescaleTemp[border: -border, border: -border,
524  afwImage.LOCAL], afwMath.MEANCLIP, sctrl).getValue()
525  returnAreas[ampName] = rescaleIm
526 
527  if level == 'DETECTOR': # else just average the whole detector
528  detName = local_exp.getDetector().getId()
529  means[detName] = afwMath.makeStatistics(temp[border: -border, border: -border, afwImage.LOCAL],
530  afwMath.MEANCLIP, sctrl).getValue()
531  returnAreas[detName] = mi # the full image; the per-amp views above modified it in-place
532 
533  return returnAreas, means
534 
535  def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None):
536  """Calculate the cross-correlation of an area.
537 
538  If the area in question contains multiple amplifiers then they must
539  have been gain corrected.
540 
541  Parameters:
542  -----------
543  maskedIm0 : `lsst.afw.image.MaskedImageF`
544  The first image area
545  maskedIm1 : `lsst.afw.image.MaskedImageF`
546  The second image area
547  frameId : `str`, optional
548  The frame identifier for use in the filename
549  if writing debug outputs.
550  detId : `str`, optional
551  The detector identifier (detector, or detector+amp,
552  depending on config.level) for use in the filename
553  if writing debug outputs.
554  runningBiasCorrSim : `bool`
555  Set to True when using this function to calculate the amount of bias
556  introduced by the sigma clipping. If False, the result is divided by
557  the biasCorr parameter to remove the bias, but this is, of course, not
558  appropriate when this is the parameter being measured.
559 
560  Returns:
561  --------
562  xcorr : `np.ndarray`
563  The quarter-image cross-correlation
564  mean : `float`
565  The sum of the means of the input images,
566  sigma-clipped, and with borders applied.
567  This is used when using this function with simulations to calculate
568  the biasCorr parameter.
569 
570  Notes:
571  ------
572  This function is controlled by the following config parameters:
573  maxLag : `int`
574  The maximum lag to use in the cross-correlation calculation
575  nPixBorderXCorr : `int`
576  The number of border pixels to exclude
577  nSigmaClipXCorr : `float`
578  The number of sigma to be clipped to
579  biasCorr : `float`
580  Parameter used to correct from the bias introduced
581  by the sigma cuts.
582  """
583  maxLag = self.config.maxLag
584  border = self.config.nPixBorderXCorr
585  sigma = self.config.nSigmaClipXCorr
586  biasCorr = self.config.biasCorr
587 
588  sctrl = afwMath.StatisticsControl()
589  sctrl.setNumSigmaClip(sigma)
590 
591  mean = afwMath.makeStatistics(maskedIm0.getImage()[border: -border, border: -border, afwImage.LOCAL],
592  afwMath.MEANCLIP, sctrl).getValue()
593  mean += afwMath.makeStatistics(maskedIm1.getImage()[border: -border, border: -border, afwImage.LOCAL],
594  afwMath.MEANCLIP, sctrl).getValue()
595 
596  # Diff the images, and apply border
597  diff = maskedIm0.clone()
598  diff -= maskedIm1.getImage()
599  diff = diff[border: -border, border: -border, afwImage.LOCAL]
600 
601  if self.debug.writeDiffImages:
602  filename = '_'.join(['diff', 'detector', detId, frameId, '.fits'])
603  diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
604 
605  # Subtract background. It should be a constant, but it isn't always
606  binsize = self.config.backgroundBinSize
607  nx = diff.getWidth()//binsize
608  ny = diff.getHeight()//binsize
609  bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
610  bkgd = afwMath.makeBackground(diff, bctrl)
611  bgImg = bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE, afwMath.REDUCE_INTERP_ORDER)
612  bgMean = np.mean(bgImg.getArray())
613  if abs(bgMean) >= self.config.backgroundWarnLevel:
614  self.log.warn('Mean of background = %s > config.backgroundWarnLevel' % bgMean)
615 
616  diff -= bgImg
617 
618  if self.debug.writeDiffImages:
619  filename = '_'.join(['bgSub', 'diff', 'detector', detId, frameId, '.fits'])
620  diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
621  if self.debug.display:
622  self.disp1.mtv(diff, title=frameId)
623 
624  self.log.debug("Median and variance of diff:")
625  self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.MEDIAN, sctrl).getValue())
626  self.log.debug("%s vs %s" % (afwMath.makeStatistics(diff, afwMath.VARIANCECLIP,
627  sctrl).getValue(), np.var(diff.getImage().getArray())))
628 
629  # Measure the correlations
630  dim0 = diff[0: -maxLag, : -maxLag, afwImage.LOCAL]
631  dim0 -= afwMath.makeStatistics(dim0, afwMath.MEANCLIP, sctrl).getValue()
632  width, height = dim0.getDimensions()
633  xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)
634 
635  for xlag in range(maxLag + 1):
636  for ylag in range(maxLag + 1):
637  dim_xy = diff[xlag:xlag + width, ylag: ylag + height, afwImage.LOCAL].clone()
638  dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
639  dim_xy *= dim0
640  xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
641  if not runningBiasCorrSim:
642  xcorr[xlag, ylag] /= biasCorr
643 
644  # TODO: DM-15305 improve debug functionality here.
645  # This is position 2 for the removed code.
646 
647  return xcorr, mean
648 
649  def estimateGains(self, dataRef, visitPairs):
650  """Estimate the amplifier gains using the specified visits.
651 
652  Given a dataRef and list of flats of varying intensity,
653  calculate the gain for each amplifier in the detector
654  using the photon transfer curve (PTC) method.
655 
656  The config.fixPtcThroughOrigin option determines whether the iterative
657  fitting is forced to go through the origin or not.
658  This defaults to True, fitting var=1/gain * mean.
659  If set to False then var=1/g * mean + const is fitted.
660 
661  This is really a photon transfer curve (PTC) gain measurement task.
662  See DM-14063 for results of a comparison between
663  this task's numbers and the gain values in the HSC camera model,
664  and those measured by the PTC task in eotest.
665 
666  Parameters
667  ----------
668  dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
669  dataRef for the detector for the flats to be used
670  visitPairs : `list` of `tuple`
671  List of visit-pairs to use, as [(v1,v2), (v3,v4)...]
672 
673  Returns
674  -------
675  gains : `dict` of `float`
676  Dict of the as-calculated amplifier gain values,
677  keyed by amplifier name
678  nominalGains : `dict` of `float`
679  Dict of the amplifier gains, as reported by the `detector` object,
680  keyed by amplifier name
681  """
682  # NB: don't use dataRef.get('raw_detector') due to composites
683  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
684  amps = detector.getAmplifiers()
685  ampNames = [amp.getName() for amp in amps]
686 
687  ampMeans = {key: [] for key in ampNames} # these get turned into np.arrays later
688  ampCoVariances = {key: [] for key in ampNames}
689  ampVariances = {key: [] for key in ampNames}
690 
691  # Loop over the amps in the detector,
692  # calculating a PTC for each amplifier.
693  # The amplifier iteration is performed in _calcMeansAndVars()
694  # NB: no gain correction is applied
695  for visPairNum, visPair in enumerate(visitPairs):
696  _means, _vars, _covars = self._calcMeansAndVars(dataRef, visPair[0], visPair[1])
697 
698  # Do sanity checks; if these are failed more investigation is needed
699  breaker = 0
700  for amp in detector:
701  ampName = amp.getName()
702  if _means[ampName]*10 < _vars[ampName] or _means[ampName]*10 < _covars[ampName]:
703  msg = 'Sanity check failed; check visit pair %s amp %s' % (visPair, ampName)
704  self.log.warn(msg)
705  breaker += 1
706  if breaker:
707  continue
708 
709  # having made sanity checks
710  # pull the values out into the respective dicts
711  for k in _means.keys(): # keys are necessarily the same
712  if _vars[k]*1.3 < _covars[k] or _vars[k]*0.7 > _covars[k]:
713  self.log.warn('Dropped a value')
714  continue
715  ampMeans[k].append(_means[k])
716  ampVariances[k].append(_vars[k])
717  ampCoVariances[k].append(_covars[k])
718 
719  gains = {}
720  nomGains = {}
721  for amp in detector:
722  ampName = amp.getName()
723  nomGains[ampName] = amp.getGain()
724  slopeRaw, interceptRaw, rVal, pVal, stdErr = \
725  stats.linregress(np.asarray(ampMeans[ampName]), np.asarray(ampCoVariances[ampName]))
726  slopeFix, _ = self._iterativeRegression(np.asarray(ampMeans[ampName]),
727  np.asarray(ampCoVariances[ampName]),
728  fixThroughOrigin=True)
729  slopeUnfix, intercept = self._iterativeRegression(np.asarray(ampMeans[ampName]),
730  np.asarray(ampCoVariances[ampName]),
731  fixThroughOrigin=False)
732  self.log.info("Slope of raw fit: %s, intercept: %s p value: %s" % (slopeRaw,
733  interceptRaw, pVal))
734  self.log.info("slope of fixed fit: %s, difference vs raw:%s" % (slopeFix,
735  slopeFix - slopeRaw))
736  self.log.info("slope of unfixed fit: %s, difference vs fix:%s" % (slopeUnfix,
737  slopeFix - slopeUnfix))
738  if self.config.fixPtcThroughOrigin:
739  slopeToUse = slopeFix
740  else:
741  slopeToUse = slopeUnfix
742 
743  if self.debug.enabled:
744  fig = plt.figure()
745  ax = fig.add_subplot(111)
746  ax.plot(np.asarray(ampMeans[ampName]),
747  np.asarray(ampCoVariances[ampName]), linestyle='None', marker='x', label='data')
748  if self.config.fixPtcThroughOrigin:
749  ax.plot(np.asarray(ampMeans[ampName]),
750  np.asarray(ampMeans[ampName])*slopeToUse, label='Fit through origin')
751  else:
752  ax.plot(np.asarray(ampMeans[ampName]),
753  np.asarray(ampMeans[ampName])*slopeToUse + intercept,
754  label='Fit (intercept unconstrained)')
755 
756  dataRef.put(fig, "plotBrighterFatterPtc", amp=ampName)
757  self.log.info('Saved PTC for detector %s amp %s' % (detector.getId(), ampName))
758  gains[ampName] = 1.0/slopeToUse
759  return gains, nomGains
760 
761  def _calcMeansAndVars(self, dataRef, v1, v2):
762  """Calculate the means, vars, covars, and retrieve the nominal gains,
763  for each amp in each detector.
764 
765  This code runs using two visit numbers, and for the detector specified.
766  It calculates the correlations in the individual amps without
767  rescaling any gains. This allows a photon transfer curve
768  to be generated and the gains measured.
769 
770  Images are assembled using the isrTask, and basic isr is performed.
771 
772  Parameters:
773  -----------
774  dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
775  dataRef for the detector for the repo containing the flats to be used
776  v1 : `int`
777  First visit of the visit pair
778  v2 : `int`
779  Second visit of the visit pair
780 
781  Returns
782  -------
783  means, vars, covars : `tuple` of `dicts`
784  Three dicts, keyed by ampName,
785  containing the sum of the image-means,
786  the variance, and the sum of the tiled full xcorr.
787  """
788  sigma = self.config.nSigmaClipGainCalc
789  maxLag = self.config.maxLag
790  border = self.config.nPixBorderGainCalc
791  biasCorr = self.config.biasCorr
792 
793  # NB: don't use dataRef.get('raw_detector') due to composites
794  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
795 
796  ampMeans = {}
797 
798  # manipulate the dataId to get a postISR exposure for each visit
799  # from the detector obj, restoring its original state afterwards
800  originalDataId = dataRef.dataId.copy()
801  dataRef.dataId['visit'] = v1
802  exp1 = self.isr.runDataRef(dataRef).exposure
803  dataRef.dataId['visit'] = v2
804  exp2 = self.isr.runDataRef(dataRef).exposure
805  dataRef.dataId = originalDataId
806  exps = [exp1, exp2]
807  checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
808 
809  detector = exps[0].getDetector()
810  ims = [self._convertImagelikeToFloatImage(exp) for exp in exps]
811 
812  if self.debug.display:
813  self.disp1.mtv(ims[0], title=str(v1))
814  self.disp2.mtv(ims[1], title=str(v2))
815 
816  sctrl = afwMath.StatisticsControl()
817  sctrl.setNumSigmaClip(sigma)
818  for imNum, im in enumerate(ims):
819 
820  # calculate the sigma-clipped mean, excluding the borders
821  # safest to apply borders to all amps regardless of edges
822  # easier, camera-agnostic, and mitigates potentially dodgy
823  # overscan-biases around edges as well
824  for amp in detector:
825  ampName = amp.getName()
826  ampIm = im[amp.getBBox()]
827  mean = afwMath.makeStatistics(ampIm[border: -border, border: -border, afwImage.LOCAL],
828  afwMath.MEANCLIP, sctrl).getValue()
829  if ampName not in ampMeans.keys():
830  ampMeans[ampName] = []
831  ampMeans[ampName].append(mean)
832  ampIm -= mean
833 
834  diff = ims[0].clone()
835  diff -= ims[1]
836 
837  temp = diff[border: -border, border: -border, afwImage.LOCAL]
838 
839  # Subtract background. It should be a constant,
840  # but it isn't always (e.g. some SuprimeCam flats)
841  # TODO: Check how this looks, and if this is the "right" way to do this
842  binsize = self.config.backgroundBinSize
843  nx = temp.getWidth()//binsize
844  ny = temp.getHeight()//binsize
845  bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
846  bkgd = afwMath.makeBackground(temp, bctrl)
847 
848  box = diff.getBBox()
849  box.grow(-border)
850  diff[box, afwImage.LOCAL] -= bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE,
851  afwMath.REDUCE_INTERP_ORDER)
852 
853  variances = {}
854  coVars = {}
855  for amp in detector:
856  ampName = amp.getName()
857 
858  diffAmpIm = diff[amp.getBBox()].clone()
859  diffAmpImCrop = diffAmpIm[border: -border - maxLag, border: -border - maxLag, afwImage.LOCAL]
860  diffAmpImCrop -= afwMath.makeStatistics(diffAmpImCrop, afwMath.MEANCLIP, sctrl).getValue()
861  w, h = diffAmpImCrop.getDimensions()
862  xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)
863 
864  # calculate the cross-correlation
865  for xlag in range(maxLag + 1):
866  for ylag in range(maxLag + 1):
867  dim_xy = diffAmpIm[border + xlag: border + xlag + w,
868  border + ylag: border + ylag + h,
869  afwImage.LOCAL].clone()
870  dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
871  dim_xy *= diffAmpImCrop
872  xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy,
873  afwMath.MEANCLIP, sctrl).getValue()/(biasCorr)
874 
875  variances[ampName] = xcorr[0, 0]
876  xcorr_full = self._tileArray(xcorr)
877  coVars[ampName] = np.sum(xcorr_full)
878 
879  msg = "M1: " + str(ampMeans[ampName][0])
880  msg += " M2 " + str(ampMeans[ampName][1])
881  msg += " M_sum: " + str((ampMeans[ampName][0]) + ampMeans[ampName][1])
882  msg += " Var " + str(variances[ampName])
883  msg += " coVar: " + str(coVars[ampName])
884  self.log.debug(msg)
885 
886  means = {}
887  for amp in detector:
888  ampName = amp.getName()
889  means[ampName] = ampMeans[ampName][0] + ampMeans[ampName][1]
890 
891  return means, variances, coVars
892 
893  def _plotXcorr(self, xcorr, mean, zmax=0.05, title=None, fig=None, saveToFileName=None):
894  """Plot the correlation functions."""
895  try:
896  xcorr = xcorr.getArray()
897  except Exception:
898  pass
899 
900  xcorr /= float(mean)
901  # xcorr.getArray()[0,0]=abs(xcorr.getArray()[0,0]-1)
902 
903  if fig is None:
904  fig = plt.figure()
905  else:
906  fig.clf()
907 
908  ax = fig.add_subplot(111, projection='3d')
909  ax.azim = 30
910  ax.elev = 20
911 
912  nx, ny = np.shape(xcorr)
913 
914  xpos, ypos = np.meshgrid(np.arange(nx), np.arange(ny))
915  xpos = xpos.flatten()
916  ypos = ypos.flatten()
917  zpos = np.zeros(nx*ny)
918  dz = xcorr.flatten()
919  dz[dz > zmax] = zmax
920 
921  ax.bar3d(xpos, ypos, zpos, 1, 1, dz, color='b', zsort='max', sort_zpos=100)
922  if xcorr[0, 0] > zmax:
923  ax.bar3d([0], [0], [zmax], 1, 1, 1e-4, color='c')
924 
925  ax.set_xlabel("row")
926  ax.set_ylabel("column")
927  ax.set_zlabel(r"$\langle{(F_i - \bar{F})(F_j - \bar{F})}\rangle/\bar{F}$")
928 
929  if title:
930  fig.suptitle(title)
931  if saveToFileName:
932  fig.savefig(saveToFileName)
933 
934  def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None):
935  """Use linear regression to fit a line, iteratively removing outliers.
936 
937  Useful when you have a sufficiently large number of points on your PTC.
938  This function iterates until either there are no outliers of
939  config.nSigmaClip magnitude, or until the specified maximum number
940  of iterations has been performed.
941 
942  Parameters:
943  -----------
944  x : `numpy.array`
945  The independent variable. Must be a numpy array, not a list.
946  y : `numpy.array`
947  The dependent variable. Must be a numpy array, not a list.
948  fixThroughOrigin : `bool`, optional
949  Whether to fix the PTC through the origin or allow a y-intercept.
950  nSigmaClip : `float`, optional
951  The number of sigma to clip to.
952  Taken from the task config if not specified.
953  maxIter : `int`, optional
954  The maximum number of iterations allowed.
955  Taken from the task config if not specified.
956 
957  Returns:
958  --------
959  slope : `float`
960  The slope of the line of best fit
961  intercept : `float`
962  The y-intercept of the line of best fit
963  """
964  if not maxIter:
965  maxIter = self.config.maxIterRegression
966  if not nSigmaClip:
967  nSigmaClip = self.config.nSigmaClipRegression
968 
969  nIter = 0
970  sctrl = afwMath.StatisticsControl()
971  sctrl.setNumSigmaClip(nSigmaClip)
972 
973  if fixThroughOrigin:
974  while nIter < maxIter:
975  nIter += 1
976  self.log.debug("Origin fixed, iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
977  design = x[:, np.newaxis]
978  slope, _, _, _ = np.linalg.lstsq(design, y)
979  slope = slope[0]
980  res = y - slope * x
981  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
982  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
983  index = np.where((res > (resMean + nSigmaClip*resStd)) |
984  (res < (resMean - nSigmaClip*resStd)))
985  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
986  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points or iters
987  break
988  x = np.delete(x, index)
989  y = np.delete(y, index)
990 
991  return slope, 0
992 
993  while nIter < maxIter:
994  nIter += 1
995  self.log.debug("Iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
996  xx = np.vstack([x, np.ones(len(x))]).T
997  ret, _, _, _ = np.linalg.lstsq(xx, y)
998  slope, intercept = ret
999  res = y - slope*x - intercept
1000  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
1001  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
1002  index = np.where((res > (resMean + nSigmaClip * resStd)) | (res < resMean - nSigmaClip * resStd))
1003  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
1004  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points, or iterations
1005  break
1006  x = np.delete(x, index)
1007  y = np.delete(y, index)
1008 
1009  return slope, intercept
1010 
1011  def generateKernel(self, corrs, means, objId, rejectLevel=None):
1012  """Generate the full kernel from a list of cross-correlations and means.
1013 
1014  Taking a list of quarter-image, gain-corrected cross-correlations,
1015  do a pixel-wise sigma-clipped mean of each,
1016  and tile into the full-sized kernel image.
1017 
1018  Each corr in corrs is one quarter of the full cross-correlation,
1019  and has been gain-corrected. Each mean in means is a tuple of the means
1020  of the two individual images, corresponding to that corr.
1021 
1022  Parameters:
1023  -----------
1024  corrs : `list` of `numpy.ndarray`, (Ny, Nx)
1025  A list of the quarter-image cross-correlations
1026  means : `list` of `tuple` of `float`
1027  The means of the input images for each corr in corrs
1028  rejectLevel : `float`, optional
1029  This is essentially a sanity-check parameter.
1030  If this condition is violated there is something unexpected
1031  going on in the image, and it is discarded from the stack before
1032  the clipped-mean is calculated.
1033  If not provided then config.xcorrCheckRejectLevel is used
1034 
1035  Returns:
1036  --------
1037  kernel : `numpy.ndarray`, (Ny, Nx)
1038  The output kernel
1039  """
1040  if not rejectLevel:
1041  rejectLevel = self.config.xcorrCheckRejectLevel
1042 
1043  # Try to average over a set of possible inputs.
1044  # This generates a simple function of the kernel that
1045  # should be constant across the images, and averages that.
1046  xcorrList = []
1047  sctrl = afwMath.StatisticsControl()
1048  sctrl.setNumSigmaClip(self.config.nSigmaClipKernelGen)
1049 
1050  for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
1051  corr[0, 0] -= (mean1 + mean2)
1052  if corr[0, 0] > 0:
1053  self.log.warn('Skipped item %s due to unexpected value of (variance-mean)' % corrNum)
1054  continue
1055  corr /= -1.0*(mean1**2 + mean2**2)
1056 
1057  fullCorr = self._tileArray(corr)
1058 
1059  xcorrCheck = np.abs(np.sum(fullCorr))/np.sum(np.abs(fullCorr))
1060  if xcorrCheck > rejectLevel:
1061  self.log.warn("Sum of the xcorr is unexpectedly high. Investigate item num %s for %s. \n"
1062  "value = %s" % (corrNum, objId, xcorrCheck))
1063  continue
1064  xcorrList.append(fullCorr)
1065 
1066  if not xcorrList:
1067  raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
1068  "Either the data is bad, or config.xcorrCheckRejectLevel is too low")
1069 
1070  # stack the individual xcorrs and apply a per-pixel clipped-mean
1071  meanXcorr = np.zeros_like(fullCorr)
1072  xcorrList = np.transpose(xcorrList)
1073  for i in range(np.shape(meanXcorr)[0]):
1074  for j in range(np.shape(meanXcorr)[1]):
1075  meanXcorr[i, j] = afwMath.makeStatistics(xcorrList[i, j], afwMath.MEANCLIP, sctrl).getValue()
1076 
1077  return self.successiveOverRelax(meanXcorr)
1078 
1079  def successiveOverRelax(self, source, maxIter=None, eLevel=None):
1080  """An implementation of the successive over relaxation (SOR) method.
1081 
1082  A numerical method for solving a system of linear equations
1083  with faster convergence than the Gauss-Seidel method.
1084 
1085  Parameters:
1086  -----------
1087  source : `numpy.ndarray`
1088  The input array
1089  maxIter : `int`, optional
1090  Maximum number of iterations to attempt before aborting
1091  eLevel : `float`, optional
1092  The target error level at which we deem convergence to have occurred
1093 
1094  Returns:
1095  --------
1096  output : `numpy.ndarray`
1097  The solution
1098  """
1099  if not maxIter:
1100  maxIter = self.config.maxIterSuccessiveOverRelaxation
1101  if not eLevel:
1102  eLevel = self.config.eLevelSuccessiveOverRelaxation
1103 
1104  assert source.shape[0] == source.shape[1], "Input array must be square"
1105  # initialise, and set boundary conditions
1106  func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1107  resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1108  rhoSpe = np.cos(np.pi/source.shape[0]) # Here a square grid is assumed
1109 
1110  # Calculate the initial error
1111  for i in range(1, func.shape[0] - 1):
1112  for j in range(1, func.shape[1] - 1):
1113  resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1114  func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
1115  inError = np.sum(np.abs(resid))
1116 
1117  # Iterate until convergence
1118  # We perform two sweeps per cycle,
1119  # updating 'odd' and 'even' points separately
1120  nIter = 0
1121  omega = 1.0
1122  dx = 1.0
1123  while nIter < maxIter*2:
1124  outError = 0
1125  if nIter%2 == 0:
1126  for i in range(1, func.shape[0] - 1, 2):
1127  for j in range(1, func.shape[1] - 1, 2):
1128  resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j] +
1129  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1130  func[i, j] += omega*resid[i, j]*.25
1131  for i in range(2, func.shape[0] - 1, 2):
1132  for j in range(2, func.shape[1] - 1, 2):
1133  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1134  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1135  func[i, j] += omega*resid[i, j]*.25
1136  else:
1137  for i in range(1, func.shape[0] - 1, 2):
1138  for j in range(2, func.shape[1] - 1, 2):
1139  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1140  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1141  func[i, j] += omega*resid[i, j]*.25
1142  for i in range(2, func.shape[0] - 1, 2):
1143  for j in range(1, func.shape[1] - 1, 2):
1144  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1145  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1146  func[i, j] += omega*resid[i, j]*.25
1147  outError = np.sum(np.abs(resid))
1148  if outError < inError*eLevel:
1149  break
1150  if nIter == 0:
1151  omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
1152  else:
1153  omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
1154  nIter += 1
1155 
1156  if nIter >= maxIter*2:
1157  self.log.warn("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
1158  "\noutError: %s, inError: %s," % (nIter//2, outError, inError*eLevel))
1159  else:
1160  self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
1161  "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
1162  return func[1: -1, 1: -1]
1163 
1164  @staticmethod
1165  def _tileArray(in_array):
1166  """Given an input quarter-image, tile/mirror it and return full image.
1167 
1168  Given a square input of side-length n, of the form
1169 
1170  input = array([[1, 2, 3],
1171  [4, 5, 6],
1172  [7, 8, 9]])
1173 
1174  return an array of size 2n-1 as
1175 
1176  output = array([[ 9, 8, 7, 8, 9],
1177  [ 6, 5, 4, 5, 6],
1178  [ 3, 2, 1, 2, 3],
1179  [ 6, 5, 4, 5, 6],
1180  [ 9, 8, 7, 8, 9]])
1181 
1182  Parameters:
1183  -----------
1184  input : `np.array`
1185  The square input quarter-array
1186 
1187  Returns:
1188  --------
1189  output : `np.array`
1190  The full, tiled array
1191  """
1192  assert(in_array.shape[0] == in_array.shape[1])
1193  length = in_array.shape[0] - 1
1194  output = np.zeros((2*length + 1, 2*length + 1))
1195 
1196  for i in range(length + 1):
1197  for j in range(length + 1):
1198  output[i + length, j + length] = in_array[i, j]
1199  output[-i + length, j + length] = in_array[i, j]
1200  output[i + length, -j + length] = in_array[i, j]
1201  output[-i + length, -j + length] = in_array[i, j]
1202  return output
1203 
1204  @staticmethod
1205  def _convertImagelikeToFloatImage(imagelikeObject):
1206  """Turn an exposure or masked image of any type into an ImageF."""
1207  for attr in ("getMaskedImage", "getImage"):
1208  if hasattr(imagelikeObject, attr):
1209  imagelikeObject = getattr(imagelikeObject, attr)()
1210  try:
1211  floatImage = imagelikeObject.convertF()
1212  except AttributeError:
1213  raise RuntimeError("Failed to convert image to float")
1214  return floatImage
1215 
1216 
1217 def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False,
1218  correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10):
1219  """Calculate the bias induced when sigma-clipping non-Gassian distributions.
1220 
1221  Fill image-pairs of the specified size with Poisson-distributed values,
1222  adding correlations as necessary. Then calculate the cross correlation,
1223  and calculate the bias induced using the cross-correlation image
1224  and the image means.
1225 
1226  Parameters:
1227  -----------
1228  fluxLevels : `list` of `int`
1229  The mean flux levels at which to simulate.
1230  Nominal values might be something like [70000, 90000, 110000]
1231  imageShape : `tuple` of `int`
1232  The shape of the image array to simulate, nx by ny pixels.
1233  repeats : `int`, optional
1234  Number of repeats to perform so that results
1235  can be averaged to improve SNR.
1236  seed : `int`, optional
1237  The random seed to use for the Poisson points.
1238  addCorrelations : `bool`, optional
1239  Whether to add brighter-fatter-like correlations to the simulated images
1240  If true, a correlation between x_{i,j} and x_{i+1,j+1} is introduced
1241  by adding a*x_{i,j} to x_{i+1,j+1}
1242  correlationStrength : `float`, optional
1243  The strength of the correlations.
1244  This is the value of the coefficient `a` in the above definition.
1245  maxLag : `int`, optional
1246  The maximum lag to work to in pixels
1247  nSigmaClip : `float`, optional
1248  Number of sigma to clip to when calculating the sigma-clipped mean.
1249  border : `int`, optional
1250  Number of border pixels to mask
1251 
1252  Returns:
1253  --------
1254  biases : `dict` of `list` of `float`
1255  A dictionary, keyed by flux level, containing a list of the biases
1256  for each repeat at that flux level
1257  means : `dict` of `list` of `float`
1258  A dictionary, keyed by flux level, containing a list of the average mean
1259  fluxes (average of the mean of the two images)
1260  for the image pairs at that flux level
1261  xcorrs : `dict` of `list` of `np.ndarray`
1262  A dictionary, keyed by flux level, containing a list of the xcorr
1263  images for the image pairs at that flux level
1264  """
1265  means = {f: [] for f in fluxLevels}
1266  xcorrs = {f: [] for f in fluxLevels}
1267  biases = {f: [] for f in fluxLevels}
1268 
1269  config = MakeBrighterFatterKernelTaskConfig()
1270  config.isrMandatorySteps = [] # no isr but the validation routine is still run
1271  config.isrForbiddenSteps = []
1272  config.nSigmaClipXCorr = nSigmaClip
1273  config.nPixBorderXCorr = border
1274  config.maxLag = maxLag
1275  task = MakeBrighterFatterKernelTask(config=config)
1276 
1277  im0 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1278  im1 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
1279 
1280  random = np.random.RandomState(seed)
1281 
1282  for rep in range(repeats):
1283  for flux in fluxLevels:
1284  data0 = random.poisson(flux, (imageShape)).astype(float)
1285  data1 = random.poisson(flux, (imageShape)).astype(float)
1286  if addCorrelations:
1287  data0[1:, 1:] += correlationStrength*data0[: -1, : -1]
1288  data1[1:, 1:] += correlationStrength*data1[: -1, : -1]
1289  im0.image.array[:, :] = data0
1290  im1.image.array[:, :] = data1
1291 
1292  _xcorr, _means = task._crossCorrelate(im0, im1, runningBiasCorrSim=True)
1293 
1294  means[flux].append(_means)
1295  xcorrs[flux].append(_xcorr)
1296  if addCorrelations:
1297  bias = xcorrs[flux][-1][1, 1]/means[flux][-1]*(1 + correlationStrength)/correlationStrength
1298  print("Simulated/expected avg. flux: %.1f, %.1f" % (flux, means[flux][-1]/2))
1299  print("Bias: %.6f" % bias)
1300  else:
1301  bias = xcorrs[flux][-1][0, 0]/means[flux][-1]
1302  print("Simulated/expected avg. flux: %.1f, %.1f" % (flux, means[flux][-1]/2))
1303  print("Bias: %.6f" % bias)
1304  biases[flux].append(bias)
1305 
1306  return biases, means, xcorrs
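
# A hedged usage sketch (flux levels, image shape, and repeats are
# illustrative, not recommendations):
#
#   biases, means, xcorrs = calcBiasCorr([70000, 90000, 110000], (2000, 4000),
#                                        repeats=5, nSigmaClip=5, border=10)
#   meanBias = np.mean([b for flux in biases for b in biases[flux]])
#   # averaging like this is how an empirical config.biasCorr value
#   # such as 0.9241 would be determined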