LSSTApplications  16.0-10-g0ee56ad+5,16.0-11-ga33d1f2+5,16.0-12-g3ef5c14+3,16.0-12-g71e5ef5+18,16.0-12-gbdf3636+3,16.0-13-g118c103+3,16.0-13-g8f68b0a+3,16.0-15-gbf5c1cb+4,16.0-16-gfd17674+3,16.0-17-g7c01f5c+3,16.0-18-g0a50484+1,16.0-20-ga20f992+8,16.0-21-g0e05fd4+6,16.0-21-g15e2d33+4,16.0-22-g62d8060+4,16.0-22-g847a80f+4,16.0-25-gf00d9b8+1,16.0-28-g3990c221+4,16.0-3-gf928089+3,16.0-32-g88a4f23+5,16.0-34-gd7987ad+3,16.0-37-gc7333cb+2,16.0-4-g10fc685+2,16.0-4-g18f3627+26,16.0-4-g5f3a788+26,16.0-5-gaf5c3d7+4,16.0-5-gcc1f4bb+1,16.0-6-g3b92700+4,16.0-6-g4412fcd+3,16.0-6-g7235603+4,16.0-69-g2562ce1b+2,16.0-8-g14ebd58+4,16.0-8-g2df868b+1,16.0-8-g4cec79c+6,16.0-8-gadf6c7a+1,16.0-8-gfc7ad86,16.0-82-g59ec2a54a+1,16.0-9-g5400cdc+2,16.0-9-ge6233d7+5,master-g2880f2d8cf+3,v17.0.rc1
LSSTDataManagementBasePackage
makeBrighterFatterKernel.py
Go to the documentation of this file.
1 #
2 # LSST Data Management System
3 #
4 # Copyright 2008-2017 AURA/LSST.
5 #
6 # This product includes software developed by the
7 # LSST Project (http://www.lsst.org/).
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the LSST License Statement and
20 # the GNU General Public License along with this program. If not,
21 # see <https://www.lsstcorp.org/LegalNotices/>.
22 #
23 """Calculation of brighter-fatter effect correlations and kernels."""
24 
25 __all__ = ['MakeBrighterFatterKernelTaskConfig',
26  'MakeBrighterFatterKernelTask',
27  'calcBiasCorr']
28 
29 import os
30 from scipy import stats
31 import numpy as np
32 import matplotlib.pyplot as plt
33 # the following import is required for 3d projection
34 from mpl_toolkits.mplot3d import axes3d # noqa: F401
35 
36 import lsstDebug
37 import lsst.afw.image as afwImage
38 import lsst.afw.math as afwMath
39 import lsst.afw.display as afwDisp
40 from lsst.ip.isr import IsrTask
41 import lsst.pex.config as pexConfig
42 import lsst.pipe.base as pipeBase
43 
44 
class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
    """Config class for brighter-fatter effect coefficient calculation."""

    # The ISR subtask is fully configurable; validateIsrConfig() on the task
    # checks the selected steps against the three step-lists below.
    isr = pexConfig.ConfigurableField(
        target=IsrTask,
        doc="""Task to perform instrumental signature removal""",
    )
    isrMandatorySteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must be performed for valid results. Raises if any of these are False",
        default=['doAssembleCcd']
    )
    isrForbiddenSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that must NOT be performed for valid results. Raises if any of these are True",
        default=['doFlat', 'doFringe', 'doAddDistortionModel', 'doBrighterFatter', 'doUseOpticsTransmission',
                 'doUseFilterTransmission', 'doUseSensorTransmission', 'doUseAtmosphereTransmission']
    )
    isrDesirableSteps = pexConfig.ListField(
        dtype=str,
        doc="isr operations that it is advisable to perform, but are not mission-critical." +
        " WARNs are logged for any of these found to be False.",
        default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
    )
    doCalcGains = pexConfig.Field(
        dtype=bool,
        doc="Measure the per-amplifier gains (using the photon transfer curve method)?",
        default=True,
    )
    ccdKey = pexConfig.Field(
        dtype=str,
        doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'",
        default='ccd',
    )
    # Fitting / clipping controls.
    maxIterRegression = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for the regression fitter",
        default=10
    )
    nSigmaClipGainCalc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip the pixel value distribution to during gain calculation",
        default=5
    )
    nSigmaClipRegression = pexConfig.Field(
        dtype=int,
        doc="Number of sigma to clip outliers from the line of best fit to during iterative regression",
        default=3
    )
    xcorrCheckRejectLevel = pexConfig.Field(
        dtype=float,
        doc="Sanity check level for the sum of the input cross-correlations. Arrays which " +
        "sum to greater than this are discarded before the clipped mean is calculated.",
        default=2.0
    )
    # Successive over-relaxation (SOR) solver controls, used during kernel
    # generation.
    maxIterSuccessiveOverRelaxation = pexConfig.Field(
        dtype=int,
        doc="The maximum number of iterations allowed for the successive over-relaxation method",
        default=10000
    )
    eLevelSuccessiveOverRelaxation = pexConfig.Field(
        dtype=float,
        doc="The target residual error for the successive over-relaxation method",
        default=5.0e-14
    )
    nSigmaClipKernelGen = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See " +
        "the generateKernel docstring for more info.",
        default=4
    )
    nSigmaClipXCorr = pexConfig.Field(
        dtype=float,
        doc="Number of sigma to clip when calculating means for the cross-correlation",
        default=5
    )
    maxLag = pexConfig.Field(
        dtype=int,
        doc="The maximum lag (in pixels) to use when calculating the cross-correlation/kernel",
        default=8
    )
    nPixBorderGainCalc = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the gain",
        default=10
    )
    nPixBorderXCorr = pexConfig.Field(
        dtype=int,
        doc="The number of border pixels to exclude when calculating the cross-correlation and kernel",
        default=10
    )
    biasCorr = pexConfig.Field(
        dtype=float,
        doc="An empirically determined correction factor, used to correct for the sigma-clipping of" +
        " a non-Gaussian distribution. Post DM-15277, code will exist here to calulate appropriate values",
        default=0.9241
    )
    backgroundBinSize = pexConfig.Field(
        dtype=int,
        doc="Size of the background bins",
        default=128
    )
    fixPtcThroughOrigin = pexConfig.Field(
        dtype=bool,
        doc="Constrain the fit of the photon transfer curve to go through the origin when measuring" +
        "the gain?",
        default=True
    )
    level = pexConfig.ChoiceField(
        doc="The level at which to calculate the brighter-fatter kernels",
        dtype=str, default="DETECTOR",
        allowed={
            "AMP": "Every amplifier treated separately",
            "DETECTOR": "One kernel per detector",
        }
    )
    backgroundWarnLevel = pexConfig.Field(
        dtype=float,
        doc="Log warnings if the mean of the fitted background is found to be above this level after " +
        "differencing image pair.",
        default=0.1
    )
167 
168 
class MakeBrighterFatterKernelTaskRunner(pipeBase.TaskRunner):
    """Subclass of TaskRunner for the MakeBrighterFatterKernelTask.

    This transforms the processed arguments generated by the ArgumentParser
    into the arguments expected by makeBrighterFatterKernelTask.run().

    makeBrighterFatterKernelTask.run() takes two arguments,
    one of which is the dataRef (as usual), and the other is the list
    of visit-pairs, in the form of a list of tuples.
    This list is supplied on the command line as documented,
    and this class parses that, and passes the parsed version
    to the run() method.

    See pipeBase.TaskRunner for more information.
    """

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Parse the visit list and pass through explicitly."""
        visitPairs = []
        for pairString in parsedCmd.visitPairs:
            # Each command-line token must be exactly "INT,INT".
            pieces = pairString.split(",")
            if len(pieces) != 2:
                raise RuntimeError("Found {} visits in {} instead of 2".format(len(pieces),
                                                                               pairString))
            try:
                pair = [int(piece) for piece in pieces]
            except Exception:
                raise RuntimeError("Could not parse {} as two integer visit numbers".format(pairString))
            visitPairs.append(pair)

        return pipeBase.TaskRunner.getTargetList(parsedCmd, visitPairs=visitPairs, **kwargs)
201 
202 
class BrighterFatterKernelTaskDataIdContainer(pipeBase.DataIdContainer):
    """A DataIdContainer for the MakeBrighterFatterKernelTask."""

    def makeDataRefList(self, namespace):
        """Compute refList based on idList.

        This method must be defined as the dataset does not exist before this
        task is run.

        Parameters
        ----------
        namespace
            Results of parsing the command-line.

        Notes
        -----
        Not called if ``add_id_argument`` called
        with ``doMakeDataRefList=False``.
        Note that this is almost a copy-and-paste of the vanilla
        implementation, but without checking if the datasets already exist,
        as this task exists to make them.
        """
        if self.datasetType is None:
            raise RuntimeError("Must call setDatasetType first")
        butler = namespace.butler
        for dataId in self.idList:
            # NB: deliberately no existence check here (see Notes above);
            # an empty subset is a warning, not an error.
            refs = list(butler.subset(datasetType=self.datasetType, level=self.level, dataId=dataId))
            if not refs:
                namespace.log.warn("No data found for dataId=%s", dataId)
                continue
            self.refList += refs
236 
237 
class BrighterFatterKernel:
    """A (very) simple class to hold the kernel(s) generated.

    The kernel.kernel is a dictionary holding the kernels themselves.
    One kernel if the level is 'DETECTOR' or,
    nAmps in length, if level is 'AMP'.
    The dict is keyed by either the detector ID or the amplifier IDs.

    The level is the level for which the kernel(s) were generated so that one
    can know how to access the kernels without having to query the shape of
    the dictionary holding the kernel(s).

    Parameters
    ----------
    level : `str`
        The level at which the kernels were calculated,
        either 'DETECTOR' or 'AMP'.
    kernelDict : `dict`
        The kernels themselves, keyed by detector ID (one entry) for
        'DETECTOR' level, or by amplifier name (several entries) for
        'AMP' level.
    """

    # NB: the `class BrighterFatterKernel:` statement was missing from the
    # extracted source (the docstring and __init__ were orphaned); it is
    # restored here, as the name is used by runDataRef().
    def __init__(self, level, kernelDict):
        # isinstance() is preferred over the original type() == comparisons;
        # behavior is unchanged for the intended str/dict arguments.
        assert isinstance(level, str)
        assert isinstance(kernelDict, dict)
        if level == 'DETECTOR':
            # One detector -> exactly one kernel.
            assert len(kernelDict.keys()) == 1
        if level == 'AMP':
            # Amp-level calculation implies more than one amplifier.
            assert len(kernelDict.keys()) > 1

        self.level = level
        self.kernel = kernelDict
261 
262 
class MakeBrighterFatterKernelTask(pipeBase.CmdLineTask):
    """Brighter-fatter effect correction-kernel calculation task.

    A command line task for calculating the brighter-fatter correction
    kernel from pairs of flat-field images (with the same exposure length).

    The following operations are performed:

    - The configurable isr task is called, which unpersists and assembles the
      raw images, and performs the selected instrument signature removal
      tasks. For the purpose of brighter-fatter coefficient calculation it is
      essential that certain components of isr are *not* performed, and
      recommended that certain others are. The task checks the selected isr
      configuration before it is run, and if forbidden components have been
      selected task will raise, and if recommended ones have not been
      selected, warnings are logged.

    - The gain of the each amplifier in the detector is calculated using
      the photon transfer curve (PTC) method and used to correct the images
      so that all calculations are done in units of electrons, and so that
      the level across amplifier boundaries is continuous.
      Outliers in the PTC are iteratively rejected
      before fitting, with the nSigma rejection level set by
      config.nSigmaClipRegression. Individual pixels are ignored in the input
      images the image based on config.nSigmaClipGainCalc.

    - Each image is then cross-correlated with the one it's paired with
      (with the pairing defined by the --visit-pairs command line argument),
      which is done either the whole-image to whole-image,
      or amplifier-by-amplifier, depending on config.level.

    - Once the cross-correlations have been calculated for each visit pair,
      these are used to generate the correction kernel.
      The maximum lag used, in pixels, and hence the size of the half-size
      of the kernel generated, is given by config.maxLag,
      i.e. a value of 10 will result in a kernel of size 2n-1 = 19x19 pixels.
      Outlier values in these cross-correlations are rejected by using a
      pixel-wise sigma-clipped thresholding to each cross-correlation in
      the visit-pairs-length stack of cross-correlations.
      The number of sigma clipped to is set by config.nSigmaClipKernelGen.

    - Once DM-15277 has been completed, a method will exist to calculate the
      empirical correction factor, config.biasCorr.
      TODO: DM-15277 update this part of the docstring once the ticket is
      done.
    """

    # Custom runner: parses --visit-pairs into a list of (int, int) pairs.
    RunnerClass = MakeBrighterFatterKernelTaskRunner
    ConfigClass = MakeBrighterFatterKernelTaskConfig
    _DefaultName = "makeBrighterFatterKernel"
312 
    def __init__(self, *args, **kwargs):
        # Construct the task: create the isr subtask, set up debug/display
        # state, then validate and freeze the configuration.
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("isr")

        self.debug = lsstDebug.Info(__name__)
        if self.debug.enabled:
            self.log.info("Running with debug enabled...")
            # If we're displaying, test it works and save displays for later.
            # It's worth testing here as displays are flaky and sometimes
            # can't be contacted, and given processing takes a while,
            # it's a shame to fail late due to display issues.
            if self.debug.display:
                try:
                    afwDisp.setDefaultBackend(self.debug.displayBackend)
                    afwDisp.Display.delAllDisplays()
                    self.disp1 = afwDisp.Display(0, open=True)
                    self.disp2 = afwDisp.Display(1, open=True)

                    # Push a trivial 1x1 image to prove the display is
                    # reachable before investing in processing.
                    im = afwImage.ImageF(1, 1)
                    im.array[:] = [[1]]
                    self.disp1.mtv(im)
                    self.disp1.erase()
                except NameError:
                    # NOTE(review): only NameError is caught here; a display
                    # backend failing for other reasons (e.g. connection
                    # errors) will still propagate — confirm this is intended.
                    self.debug.display = False
                    self.log.warn('Failed to setup/connect to display! Debug display has been disabled')

        plt.interactive(False)  # stop windows popping up when plotting. When headless, use 'agg' backend too
        self.validateIsrConfig()
        self.config.validate()
        self.config.freeze()
343 
344  @classmethod
345  def _makeArgumentParser(cls):
346  """Augment argument parser for the MakeBrighterFatterKernelTask."""
347  parser = pipeBase.ArgumentParser(name=cls._DefaultName)
348  parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
349  help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
350  parser.add_id_argument("--id", datasetType="brighterFatterKernel",
351  ContainerClass=BrighterFatterKernelTaskDataIdContainer,
352  help="The ccds to use, e.g. --id ccd=0..100")
353  return parser
354 
355  def validateIsrConfig(self):
356  """Check that appropriate ISR settings are being used
357  for brighter-fatter kernel calculation."""
358 
359  # How should we handle saturation/bad regions?
360  # 'doSaturationInterpolation': True
361  # 'doNanInterpAfterFlat': False
362  # 'doSaturation': True
363  # 'doSuspect': True
364  # 'doWidenSaturationTrails': True
365  # 'doSetBadRegions': True
366 
367  configDict = self.config.isr.toDict()
368 
369  for configParam in self.config.isrMandatorySteps:
370  if configDict[configParam] is False:
371  raise RuntimeError('Must set config.isr.%s to True '
372  'for brighter-fatter kernel calulation' % configParam)
373 
374  for configParam in self.config.isrForbiddenSteps:
375  if configDict[configParam] is True:
376  raise RuntimeError('Must set config.isr.%s to False '
377  'for brighter-fatter kernel calulation' % configParam)
378 
379  for configParam in self.config.isrDesirableSteps:
380  if configParam not in configDict:
381  self.log.info('Failed to find key %s in the isr config dict. You probably want ' +
382  'to set the equivalent for your obs_package to True.' % configParam)
383  continue
384  if configDict[configParam] is False:
385  self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calulation. '
386  'It is probably desirable to have this set to True' % configParam)
387 
388  # subtask settings
389  if not self.config.isr.assembleCcd.doTrim:
390  raise RuntimeError('Must trim when assembling CCDs. Set config.isr.assembleCcd.doTrim to True')
391 
    @pipeBase.timeMethod
    def runDataRef(self, dataRef, visitPairs):
        """Run the brighter-fatter measurement task.

        For a dataRef (which is each detector here),
        and given a list of visit pairs, calulate the
        brighter-fatter kernel for the detector.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            dataRef for the detector for the visits to be fit.
        visitPairs : `iterable` of `tuple` of `int`
            Pairs of visit numbers to be processed together

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Struct with a single attribute, ``exitStatus``, set to 0.
        """
        xcorrs = {}  # dict of lists keyed by either amp or detector depending on config.level
        means = {}
        kernels = {}

        # setup necessary objects
        detNum = dataRef.dataId[self.config.ccdKey]
        if self.config.level == 'DETECTOR':
            # single-key dicts: everything is accumulated per-detector
            xcorrs = {detNum: []}
            means = {detNum: []}
        elif self.config.level == 'AMP':
            # NB: don't use dataRef.get('raw_detector')
            # this currently doesn't work for composites because of the way
            # composite objects (i.e. LSST images) are handled/constructed
            # these need to be retrieved from the camera and dereferenced
            # rather than accessed directly
            detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
            ampInfoCat = detector.getAmpInfoCatalog()
            ampNames = [amp.getName() for amp in ampInfoCat]
            xcorrs = {key: [] for key in ampNames}
            means = {key: [] for key in ampNames}
        else:
            raise RuntimeError("Unsupported level: {}".format(self.config.level))

        # calculate or retrieve the gains
        if self.config.doCalcGains:
            self.log.info('Compute gains for detector %s' % detNum)
            gains, nomGains = self.estimateGains(dataRef, visitPairs)
            # persist so later runs can use doCalcGains=False
            dataRef.put(gains, datasetType='brighterFatterGain')
            self.log.debug('Finished gain estimation for detector %s' % detNum)
        else:
            gains = dataRef.get('brighterFatterGain')
            if not gains:
                raise RuntimeError('Failed to retrieved gains for detector %s' % detNum)
            self.log.info('Retrieved stored gain for detector %s' % detNum)
        self.log.debug('Detector %s has gains %s' % (detNum, gains))

        # Loop over pairs of visits
        # calculating the cross-correlations at the required level
        for (v1, v2) in visitPairs:

            # run isr on each visit of the pair in turn, reusing the dataRef
            # by temporarily injecting the visit number into its dataId
            dataRef.dataId['visit'] = v1
            exp1 = self.isr.runDataRef(dataRef).exposure
            dataRef.dataId['visit'] = v2
            exp2 = self.isr.runDataRef(dataRef).exposure
            del dataRef.dataId['visit']
            self._checkExpLengthEqual(exp1, exp2, v1, v2)

            self.log.info('Preparing images for cross-correlation calculation for detector %s' % detNum)
            # note the shape of these returns depends on level
            _scaledMaskedIms1, _means1 = self._makeCroppedExposures(exp1, gains, self.config.level)
            _scaledMaskedIms2, _means2 = self._makeCroppedExposures(exp2, gains, self.config.level)

            # Compute the cross-correlation and means
            # at the appropriate config.level:
            # - "DETECTOR": one key, so compare the two visits to each other
            # - "AMP": n_amp keys, comparing each amplifier of one visit
            #          to the same amplifier in the visit its paired with
            for det_object in _scaledMaskedIms1.keys():
                _xcorr, _ = self._crossCorrelate(_scaledMaskedIms1[det_object],
                                                 _scaledMaskedIms2[det_object])
                xcorrs[det_object].append(_xcorr)
                means[det_object].append([_means1[det_object], _means2[det_object]])

            # TODO: DM-15305 improve debug functionality here.
            # This is position 1 for the removed code.

        # generate the kernel(s)
        self.log.info('Generating kernel(s) for %s' % detNum)
        for det_object in xcorrs.keys():  # looping over either detectors or amps
            if self.config.level == 'DETECTOR':
                objId = 'detector %s' % det_object
            elif self.config.level == 'AMP':
                objId = 'detector %s AMP %s' % (detNum, det_object)
            kernels[det_object] = self.generateKernel(xcorrs[det_object], means[det_object], objId)
        dataRef.put(BrighterFatterKernel(self.config.level, kernels))

        self.log.info('Finished generating kernel(s) for %s' % detNum)
        return pipeBase.Struct(exitStatus=0)
485 
    def _makeCroppedExposures(self, exp, gains, level):
        """Prepare exposure for cross-correlation calculation.

        For each amp, crop by the border amount, specified by
        config.nPixBorderXCorr, then rescale by the gain
        and subtract the sigma-clipped mean.
        If the level is 'DETECTOR' then this is done
        to the whole image so that it can be cross-correlated, with a copy
        being returned.
        If the level is 'AMP' then this is done per-amplifier,
        and a copy of each prepared amp-image returned.

        Parameters:
        -----------
        exp : `lsst.afw.image.exposure.ExposureF`
            The exposure to prepare
        gains : `dict` of `float`
            Dictionary of the amplifier gain values, keyed by amplifier name
        level : `str`
            Either `AMP` or `DETECTOR`

        Returns:
        --------
        scaledMaskedIms : `dict` of `lsst.afw.image.maskedImage.MaskedImageF`
            Depending on level, this is either one item, or n_amp items,
            keyed by detectorId or ampName
        means : `dict` of `float`
            The sigma-clipped, border-excluded means, keyed the same way as
            scaledMaskedIms

        Notes:
        ------
        This function is controlled by the following config parameters:
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        """
        assert(isinstance(exp, afwImage.ExposureF))

        local_exp = exp.clone()  # we don't want to modify the image passed in
        del exp  # ensure we don't make mistakes!

        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        means = {}
        returnAreas = {}

        detector = local_exp.getDetector()
        ampInfoCat = detector.getAmpInfoCatalog()

        mi = local_exp.getMaskedImage()  # makeStatistics does not seem to take exposures
        temp = mi.clone()

        # Rescale each amp by the appropriate gain and subtract the mean.
        # NB these are views modifying the image in-place
        for amp in ampInfoCat:
            ampName = amp.getName()
            rescaleIm = mi[amp.getBBox()]  # the soon-to-be scaled, mean subtracted, amp image
            rescaleTemp = temp[amp.getBBox()]
            # the clipped mean is taken *before* gain scaling, then scaled
            mean = afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP, sctrl).getValue()
            gain = gains[ampName]
            rescaleIm *= gain
            rescaleTemp *= gain
            self.log.debug("mean*gain = %s, clipped mean = %s" %
                           (mean*gain, afwMath.makeStatistics(rescaleIm, afwMath.MEANCLIP,
                                                              sctrl).getValue()))
            rescaleIm -= mean*gain

            if level == 'AMP':  # build the dicts if doing amp-wise
                means[ampName] = afwMath.makeStatistics(rescaleTemp[border: -border, border: -border,
                                                        afwImage.LOCAL], afwMath.MEANCLIP, sctrl).getValue()
                returnAreas[ampName] = rescaleIm

        if level == 'DETECTOR':  # else just average the whole detector
            detName = local_exp.getDetector().getId()
            means[detName] = afwMath.makeStatistics(temp[border: -border, border: -border, afwImage.LOCAL],
                                                    afwMath.MEANCLIP, sctrl).getValue()
            # NOTE(review): rescaleIm at this point is the view of the *last*
            # amp processed by the loop above, not the full-detector image
            # (mi). If the whole detector area is intended here this looks
            # suspect — confirm against callers before changing.
            returnAreas[detName] = rescaleIm

        return returnAreas, means
568 
    def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None):
        """Calculate the cross-correlation of an area.

        If the area in question contains multiple amplifiers then they must
        have been gain corrected.

        Parameters:
        -----------
        maskedIm0 : `lsst.afw.image.MaskedImageF`
            The first image area
        maskedIm1 : `lsst.afw.image.MaskedImageF`
            The second image area
        frameId : `str`, optional
            The frame identifier for use in the filename
            if writing debug outputs.
        detId : `str`, optional
            The detector identifier (detector, or detector+amp,
            depending on config.level) for use in the filename
            if writing debug outputs.
        runningBiasCorrSim : `bool`
            Set to true when using this function to calculate the amount of
            bias introduced by the sigma clipping. If False, the biasCorr
            parameter is divided by to remove the bias, but this is, of
            course, not appropriate when this is the parameter being measured.

        Returns:
        --------
        xcorr : `np.ndarray`
            The quarter-image cross-correlation
        mean : `float`
            The sum of the means of the input images,
            sigma-clipped, and with borders applied.
            This is used when using this function with simulations to
            calculate the biasCorr parameter.

        Notes:
        ------
        This function is controlled by the following config parameters:
        maxLag : `int`
            The maximum lag to use in the cross-correlation calculation
        nPixBorderXCorr : `int`
            The number of border pixels to exclude
        nSigmaClipXCorr : `float`
            The number of sigma to be clipped to
        biasCorr : `float`
            Parameter used to correct from the bias introduced
            by the sigma cuts.
        """
        maxLag = self.config.maxLag
        border = self.config.nPixBorderXCorr
        sigma = self.config.nSigmaClipXCorr
        biasCorr = self.config.biasCorr

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)

        # the returned "mean" is the sum of the two per-image clipped means,
        # each measured inside the border
        mean = afwMath.makeStatistics(maskedIm0.getImage()[border: -border, border: -border, afwImage.LOCAL],
                                      afwMath.MEANCLIP, sctrl).getValue()
        mean += afwMath.makeStatistics(maskedIm1.getImage()[border: -border, border: -border, afwImage.LOCAL],
                                       afwMath.MEANCLIP, sctrl).getValue()

        # Diff the images, and apply border
        diff = maskedIm0.clone()
        diff -= maskedIm1.getImage()
        diff = diff[border: -border, border: -border, afwImage.LOCAL]

        if self.debug.writeDiffImages:
            filename = '_'.join(['diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))

        # Subtract background. It should be a constant, but it isn't always
        binsize = self.config.backgroundBinSize
        nx = diff.getWidth()//binsize
        ny = diff.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(diff, bctrl)
        bgImg = bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE, afwMath.REDUCE_INTERP_ORDER)
        bgMean = np.mean(bgImg.getArray())
        if abs(bgMean) >= self.config.backgroundWarnLevel:
            # NOTE(review): the message names config.maxBackground but the
            # config option actually checked is backgroundWarnLevel — confirm
            # and align the wording.
            self.log.warn('Mean of background = %s > config.maxBackground' % bgMean)

        diff -= bgImg

        if self.debug.writeDiffImages:
            filename = '_'.join(['bgSub', 'diff', 'detector', detId, frameId, '.fits'])
            diff.writeFits(os.path.join(self.debug.debugDataPath, filename))
        if self.debug.display:
            self.disp1.mtv(diff, title=frameId)

        self.log.debug("Median and variance of diff:")
        self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.MEDIAN, sctrl).getValue())
        # NOTE(review): np.var(...) is passed as a second positional argument
        # to log.debug alongside an already-%-formatted string; if the logger
        # applies %-formatting to its args this will raise at debug
        # verbosity. It looks like it was meant to be part of the message —
        # confirm before relying on this line.
        self.log.debug("%s" % afwMath.makeStatistics(diff, afwMath.VARIANCECLIP,
                                                     sctrl).getValue(), np.var(diff.getImage().getArray()))

        # Measure the correlations
        # dim0 is the zero-lag reference region; each lagged region is
        # mean-subtracted, multiplied in, and the clipped mean taken
        dim0 = diff[0: -maxLag, : -maxLag, afwImage.LOCAL]
        dim0 -= afwMath.makeStatistics(dim0, afwMath.MEANCLIP, sctrl).getValue()
        width, height = dim0.getDimensions()
        xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

        for xlag in range(maxLag + 1):
            for ylag in range(maxLag + 1):
                dim_xy = diff[xlag:xlag + width, ylag: ylag + height, afwImage.LOCAL].clone()
                dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                dim_xy *= dim0
                xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                if not runningBiasCorrSim:
                    xcorr[xlag, ylag] /= biasCorr

        # TODO: DM-15305 improve debug functionality here.
        # This is position 2 for the removed code.

        return xcorr, mean
682 
683  def estimateGains(self, dataRef, visitPairs):
684  """Estimate the amplifier gains using the specified visits.
685 
686  Given a dataRef and list of flats of varying intensity,
687  calculate the gain for each amplifier in the detector
688  using the photon transfer curve (PTC) method.
689 
690  The config.fixPtcThroughOrigin option determines whether the iterative
691  fitting is forced to go through the origin or not.
692  This defaults to True, fitting var=1/gain * mean.
693  If set to False then var=1/g * mean + const is fitted.
694 
695  This is really a photo transfer curve (PTC) gain measurement task.
696  See DM-14063 for results from of a comparison between
697  this task's numbers and the gain values in the HSC camera model,
698  and those measured by the PTC task in eotest.
699 
700  Parameters
701  ----------
702  dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
703  dataRef for the detector for the flats to be used
704  visitPairs : `list` of `tuple`
705  List of visit-pairs to use, as [(v1,v2), (v3,v4)...]
706 
707  Returns
708  -------
709  gains : `dict` of `float`
710  Dict of the as-calculated amplifier gain values,
711  keyed by amplifier name
712  nominalGains : `dict` of `float`
713  Dict of the amplifier gains, as reported by the `detector` object,
714  keyed by amplifier name
715  """
716  # NB: don't use dataRef.get('raw_detector') due to composites
717  detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
718  ampInfoCat = detector.getAmpInfoCatalog()
719  ampNames = [amp.getName() for amp in ampInfoCat]
720 
721  ampMeans = {key: [] for key in ampNames} # these get turned into np.arrays later
722  ampCoVariances = {key: [] for key in ampNames}
723  ampVariances = {key: [] for key in ampNames}
724 
725  # Loop over the amps in the detector,
726  # calculating a PTC for each amplifier.
727  # The amplifier iteration is performed in _calcMeansAndVars()
728  # NB: no gain correction is applied
729  for visPairNum, visPair in enumerate(visitPairs):
730  _means, _vars, _covars = self._calcMeansAndVars(dataRef, visPair[0], visPair[1])
731 
732  # Do sanity checks; if these are failed more investigation is needed
733  breaker = 0
734  for amp in detector:
735  ampName = amp.getName()
736  if _means[ampName]*10 < _vars[ampName] or _means[ampName]*10 < _covars[ampName]:
737  msg = 'Sanity check failed; check visit pair %s amp %s' % (visPair, ampName)
738  self.log.warn(msg)
739  breaker += 1
740  if breaker:
741  continue
742 
743  # having made sanity checks
744  # pull the values out into the respective dicts
745  for k in _means.keys(): # keys are necessarily the same
746  if _vars[k]*1.3 < _covars[k] or _vars[k]*0.7 > _covars[k]:
747  self.log.warn('Dropped a value')
748  continue
749  ampMeans[k].append(_means[k])
750  ampVariances[k].append(_vars[k])
751  ampCoVariances[k].append(_covars[k])
752 
753  gains = {}
754  nomGains = {}
755  for amp in detector:
756  ampName = amp.getName()
757  nomGains[ampName] = amp.getGain()
758  slopeRaw, interceptRaw, rVal, pVal, stdErr = \
759  stats.linregress(np.asarray(ampMeans[ampName]), np.asarray(ampCoVariances[ampName]))
760  slopeFix, _ = self._iterativeRegression(np.asarray(ampMeans[ampName]),
761  np.asarray(ampCoVariances[ampName]),
762  fixThroughOrigin=True)
763  slopeUnfix, intercept = self._iterativeRegression(np.asarray(ampMeans[ampName]),
764  np.asarray(ampCoVariances[ampName]),
765  fixThroughOrigin=False)
766  self.log.info("Slope of raw fit: %s, intercept: %s p value: %s" % (slopeRaw,
767  interceptRaw, pVal))
768  self.log.info("slope of fixed fit: %s, difference vs raw:%s" % (slopeFix,
769  slopeFix - slopeRaw))
770  self.log.info("slope of unfixed fit: %s, difference vs fix:%s" % (slopeUnfix,
771  slopeFix - slopeUnfix))
772  if self.config.fixPtcThroughOrigin:
773  slopeToUse = slopeFix
774  else:
775  slopeToUse = slopeUnfix
776 
777  if self.debug.enabled:
778  fig = plt.figure()
779  ax = fig.add_subplot(111)
780  ax.plot(np.asarray(ampMeans[ampName]),
781  np.asarray(ampCoVariances[ampName]), linestyle='None', marker='x', label='data')
782  if self.config.fixPtcThroughOrigin:
783  ax.plot(np.asarray(ampMeans[ampName]),
784  np.asarray(ampMeans[ampName])*slopeToUse, label='Fit through origin')
785  else:
786  ax.plot(np.asarray(ampMeans[ampName]),
787  np.asarray(ampMeans[ampName])*slopeToUse + intercept,
788  label='Fit (intercept unconstrained')
789 
790  dataRef.put(fig, "plotBrighterFatterPtc", amp=ampName)
791  self.log.info('Saved PTC for detector %s amp %s' % (detector.getId(), ampName))
792  gains[ampName] = 1.0/slopeToUse
793  return gains, nomGains
794 
795  @staticmethod
796  def _checkExpLengthEqual(exp1, exp2, v1=None, v2=None):
797  """Check the exposure lengths of two exposures are equal.
798 
799  Parameters:
800  -----------
801  exp1 : `lsst.afw.image.exposure.ExposureF`
802  First exposure to check
803  exp2 : `lsst.afw.image.exposure.ExposureF`
804  Second exposure to check
805  v1 : `int` or `str`, optional
806  First visit of the visit pair
807  v2 : `int` or `str`, optional
808  Second visit of the visit pair
809 
810  Raises:
811  -------
812  RuntimeError
813  Raised if the exposure lengths of the two exposures are not equal
814  """
815  expTime1 = exp1.getInfo().getVisitInfo().getExposureTime()
816  expTime2 = exp2.getInfo().getVisitInfo().getExposureTime()
817  if expTime1 != expTime2:
818  msg = "Exposure lengths for visit pairs must be equal. " + \
819  "Found %s and %s" % (expTime1, expTime2)
820  if v1 and v2:
821  msg += " for visit pair %s, %s" % (v1, v2)
822  raise RuntimeError(msg)
823 
    def _calcMeansAndVars(self, dataRef, v1, v2):
        """Calculate the means, vars, covars, and retrieve the nominal gains,
        for each amp in each detector.

        This code runs using two visit numbers, and for the detector specified.
        It calculates the correlations in the individual amps without
        rescaling any gains. This allows a photon transfer curve
        to be generated and the gains measured.

        Images are assembled using the isrTask, and basic isr is performed.

        Parameters:
        -----------
        dataRef : `lsst.daf.persistence.butler.Butler.dataRef`
            dataRef for the detector for the repo containing the flats to be used
        v1 : `int`
            First visit of the visit pair
        v2 : `int`
            Second visit of the visit pair

        Returns
        -------
        means, vars, covars : `tuple` of `dicts`
            Three dicts, keyed by ampName,
            containing the sum of the image-means,
            the variance, and the quarter-image of the xcorr.
        """
        sigma = self.config.nSigmaClipGainCalc
        maxLag = self.config.maxLag
        border = self.config.nPixBorderGainCalc
        biasCorr = self.config.biasCorr

        # NB: don't use dataRef.get('raw_detector') due to composites
        detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]

        # per-amp list of the two (one per visit) clipped image means
        ampMeans = {}

        # manipulate the dataId to get a postISR exposure for each visit
        # from the detector obj, restoring its original state afterwards
        originalDataId = dataRef.dataId.copy()
        dataRef.dataId['visit'] = v1
        exp1 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId['visit'] = v2
        exp2 = self.isr.runDataRef(dataRef).exposure
        dataRef.dataId = originalDataId
        exps = [exp1, exp2]
        # mismatched exposure times would invalidate the pair; raises on failure
        self._checkExpLengthEqual(exp1, exp2, v1, v2)

        detector = exps[0].getDetector()
        ims = [self._convertImagelikeToFloatImage(exp) for exp in exps]

        if self.debug.display:
            self.disp1.mtv(ims[0], title=str(v1))
            self.disp2.mtv(ims[1], title=str(v2))

        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(sigma)
        for imNum, im in enumerate(ims):

            # calculate the sigma-clipped mean, excluding the borders
            # safest to apply borders to all amps regardless of edges
            # easier, camera-agnostic, and mitigates potentially dodgy
            # overscan-biases around edges as well
            for amp in detector:
                ampName = amp.getName()
                ampIm = im[amp.getBBox()]
                mean = afwMath.makeStatistics(ampIm[border: -border, border: -border, afwImage.LOCAL],
                                              afwMath.MEANCLIP, sctrl).getValue()
                if ampName not in ampMeans.keys():
                    ampMeans[ampName] = []
                ampMeans[ampName].append(mean)
                # subtract the mean in place so the difference image below
                # is of mean-subtracted data
                ampIm -= mean

        # form the difference image of the mean-subtracted pair
        diff = ims[0].clone()
        diff -= ims[1]

        temp = diff[border: -border, border: -border, afwImage.LOCAL]

        # Subtract background. It should be a constant,
        # but it isn't always (e.g. some SuprimeCam flats)
        # TODO: Check how this looks, and if this is the "right" way to do this
        binsize = self.config.backgroundBinSize
        nx = temp.getWidth()//binsize
        ny = temp.getHeight()//binsize
        bctrl = afwMath.BackgroundControl(nx, ny, sctrl, afwMath.MEANCLIP)
        bkgd = afwMath.makeBackground(temp, bctrl)

        # apply the background subtraction only inside the border region
        box = diff.getBBox()
        box.grow(-border)
        diff[box, afwImage.LOCAL] -= bkgd.getImageF(afwMath.Interpolate.CUBIC_SPLINE,
                                                    afwMath.REDUCE_INTERP_ORDER)

        variances = {}
        coVars = {}
        for amp in detector:
            ampName = amp.getName()

            diffAmpIm = diff[amp.getBBox()].clone()
            # crop by maxLag as well so that the shifted images below
            # stay within the amp's bounding box for every lag
            diffAmpImCrop = diffAmpIm[border: -border - maxLag, border: -border - maxLag, afwImage.LOCAL]
            diffAmpImCrop -= afwMath.makeStatistics(diffAmpImCrop, afwMath.MEANCLIP, sctrl).getValue()
            w, h = diffAmpImCrop.getDimensions()
            xcorr = np.zeros((maxLag + 1, maxLag + 1), dtype=np.float64)

            # calculate the cross-correlation: for each lag pair, multiply a
            # shifted, mean-subtracted copy into the cropped image and take
            # the clipped mean, corrected by the sigma-clipping bias factor
            for xlag in range(maxLag + 1):
                for ylag in range(maxLag + 1):
                    dim_xy = diffAmpIm[border + xlag: border + xlag + w,
                                       border + ylag: border + ylag + h,
                                       afwImage.LOCAL].clone()
                    dim_xy -= afwMath.makeStatistics(dim_xy, afwMath.MEANCLIP, sctrl).getValue()
                    dim_xy *= diffAmpImCrop
                    xcorr[xlag, ylag] = afwMath.makeStatistics(dim_xy,
                                                               afwMath.MEANCLIP, sctrl).getValue()/(biasCorr)

            # zero-lag term is the variance; the covariance is the sum over
            # the full (mirrored) correlation image
            variances[ampName] = xcorr[0, 0]
            xcorr_full = self._tileArray(xcorr)
            coVars[ampName] = np.sum(xcorr_full)

            msg = "M1: " + str(ampMeans[ampName][0])
            msg += " M2 " + str(ampMeans[ampName][1])
            msg += " M_sum: " + str((ampMeans[ampName][0]) + ampMeans[ampName][1])
            msg += " Var " + str(variances[ampName])
            msg += " coVar: " + str(coVars[ampName])
            self.log.debug(msg)

        # the returned mean is the sum of the two per-visit means
        means = {}
        for amp in detector:
            ampName = amp.getName()
            means[ampName] = ampMeans[ampName][0] + ampMeans[ampName][1]

        return means, variances, coVars
955 
956  def _plotXcorr(self, xcorr, mean, zmax=0.05, title=None, fig=None, saveToFileName=None):
957  """Plot the correlation functions."""
958  try:
959  xcorr = xcorr.getArray()
960  except Exception:
961  pass
962 
963  xcorr /= float(mean)
964  # xcorr.getArray()[0,0]=abs(xcorr.getArray()[0,0]-1)
965 
966  if fig is None:
967  fig = plt.figure()
968  else:
969  fig.clf()
970 
971  ax = fig.add_subplot(111, projection='3d')
972  ax.azim = 30
973  ax.elev = 20
974 
975  nx, ny = np.shape(xcorr)
976 
977  xpos, ypos = np.meshgrid(np.arange(nx), np.arange(ny))
978  xpos = xpos.flatten()
979  ypos = ypos.flatten()
980  zpos = np.zeros(nx*ny)
981  dz = xcorr.flatten()
982  dz[dz > zmax] = zmax
983 
984  ax.bar3d(xpos, ypos, zpos, 1, 1, dz, color='b', zsort='max', sort_zpos=100)
985  if xcorr[0, 0] > zmax:
986  ax.bar3d([0], [0], [zmax], 1, 1, 1e-4, color='c')
987 
988  ax.set_xlabel("row")
989  ax.set_ylabel("column")
990  ax.set_zlabel(r"$\langle{(F_i - \bar{F})(F_i - \bar{F})}\rangle/\bar{F}$")
991 
992  if title:
993  fig.suptitle(title)
994  if saveToFileName:
995  fig.savefig(saveToFileName)
996 
997  def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None):
998  """Use linear regression to fit a line, iteratively removing outliers.
999 
1000  Useful when you have a sufficiently large numbers of points on your PTC.
1001  This function iterates until either there are no outliers of
1002  config.nSigmaClip magnitude, or until the specified maximum number
1003  of iterations has been performed.
1004 
1005  Parameters:
1006  -----------
1007  x : `numpy.array`
1008  The independent variable. Must be a numpy array, not a list.
1009  y : `numpy.array`
1010  The dependent variable. Must be a numpy array, not a list.
1011  fixThroughOrigin : `bool`, optional
1012  Whether to fix the PTC through the origin or allow an y-intercept.
1013  nSigmaClip : `float`, optional
1014  The number of sigma to clip to.
1015  Taken from the task config if not specified.
1016  maxIter : `int`, optional
1017  The maximum number of iterations allowed.
1018  Taken from the task config if not specified.
1019 
1020  Returns:
1021  --------
1022  slope : `float`
1023  The slope of the line of best fit
1024  intercept : `float`
1025  The y-intercept of the line of best fit
1026  """
1027  if not maxIter:
1028  maxIter = self.config.maxIterRegression
1029  if not nSigmaClip:
1030  nSigmaClip = self.config.nSigmaClipRegression
1031 
1032  nIter = 0
1033  sctrl = afwMath.StatisticsControl()
1034  sctrl.setNumSigmaClip(nSigmaClip)
1035 
1036  if fixThroughOrigin:
1037  while nIter < maxIter:
1038  nIter += 1
1039  self.log.debug("Origin fixed, iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
1040  TEST = x[:, np.newaxis]
1041  slope, _, _, _ = np.linalg.lstsq(TEST, y)
1042  slope = slope[0]
1043  res = y - slope * x
1044  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
1045  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
1046  index = np.where((res > (resMean + nSigmaClip*resStd)) |
1047  (res < (resMean - nSigmaClip*resStd)))
1048  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
1049  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points or iters
1050  break
1051  x = np.delete(x, index)
1052  y = np.delete(y, index)
1053 
1054  return slope, 0
1055 
1056  while nIter < maxIter:
1057  nIter += 1
1058  self.log.debug("Iteration # %s using %s elements:" % (nIter, np.shape(x)[0]))
1059  xx = np.vstack([x, np.ones(len(x))]).T
1060  ret, _, _, _ = np.linalg.lstsq(xx, y)
1061  slope, intercept = ret
1062  res = y - slope*x - intercept
1063  resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
1064  resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
1065  index = np.where((res > (resMean + nSigmaClip * resStd)) | (res < resMean - nSigmaClip * resStd))
1066  self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
1067  if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points, or iterations
1068  break
1069  x = np.delete(x, index)
1070  y = np.delete(y, index)
1071 
1072  return slope, intercept
1073 
    def generateKernel(self, corrs, means, objId, rejectLevel=None):
        """Generate the full kernel from a list of cross-correlations and means.

        Taking a list of quarter-image, gain-corrected cross-correlations,
        do a pixel-wise sigma-clipped mean of each,
        and tile into the full-sized kernel image.

        Each corr in corrs is one quarter of the full cross-correlation,
        and has been gain-corrected. Each mean in means is a tuple of the means
        of the two individual images, corresponding to that corr.

        Parameters:
        -----------
        corrs : `list` of `numpy.ndarray`, (Ny, Nx)
            A list of the quarter-image cross-correlations.
            NOTE(review): these arrays are modified in place below
            (``corr[0, 0] -= ...`` and ``corr /= ...``) — callers should not
            rely on their contents afterwards.
        means : `list` of `tuple` of `float`
            The means of the two input images for each corr in corrs.
            NOTE(review): previously documented as a `dict` of `tuples`, but
            ``zip(means, corrs)`` below unpacks each element directly as a
            ``(mean1, mean2)`` pair, so a sequence of pairs is expected —
            confirm against callers.
        objId : `str` or `int`
            Identifier for the object being processed; only used in the
            warning message when a cross-correlation is rejected.
        rejectLevel : `float`, optional
            This is essentially is a sanity check parameter.
            If this condition is violated there is something unexpected
            going on in the image, and it is discarded from the stack before
            the clipped-mean is calculated.
            If not provided then config.xcorrCheckRejectLevel is used

        Returns:
        --------
        kernel : `numpy.ndarray`, (Ny, Nx)
            The output kernel
        """
        if not rejectLevel:
            rejectLevel = self.config.xcorrCheckRejectLevel

        # Try to average over a set of possible inputs.
        # This generates a simple function of the kernel that
        # should be constant across the images, and averages that.
        xcorrList = []
        sctrl = afwMath.StatisticsControl()
        sctrl.setNumSigmaClip(self.config.nSigmaClipKernelGen)

        for corrNum, ((mean1, mean2), corr) in enumerate(zip(means, corrs)):
            # turn the zero-lag term into (variance - mean); for well-behaved
            # data this is expected to be <= 0, so a positive value is suspect
            corr[0, 0] -= (mean1 + mean2)
            if corr[0, 0] > 0:
                self.log.warn('Skipped item %s due to unexpected value of (variance-mean)' % corrNum)
                continue
            corr /= -1.0*(mean1**2 + mean2**2)

            fullCorr = self._tileArray(corr)

            # sanity check: the net sum of the xcorr should be small relative
            # to the sum of its absolute values
            xcorrCheck = np.abs(np.sum(fullCorr))/np.sum(np.abs(fullCorr))
            if xcorrCheck > rejectLevel:
                self.log.warn("Sum of the xcorr is unexpectedly high. Investigate item num %s for %s. \n"
                              "value = %s" % (corrNum, objId, xcorrCheck))
                continue
            xcorrList.append(fullCorr)

        if not xcorrList:
            raise RuntimeError("Cannot generate kernel because all inputs were discarded. "
                               "Either the data is bad, or config.xcorrCheckRejectLevel is too low")

        # stack the individual xcorrs and apply a per-pixel clipped-mean
        # (the transpose makes xcorrList[i, j] the vector of values at pixel
        # (j, i) across the stack; symmetric under i<->j for these arrays)
        meanXcorr = np.zeros_like(fullCorr)
        xcorrList = np.transpose(xcorrList)
        for i in range(np.shape(meanXcorr)[0]):
            for j in range(np.shape(meanXcorr)[1]):
                meanXcorr[i, j] = afwMath.makeStatistics(xcorrList[i, j], afwMath.MEANCLIP, sctrl).getValue()

        return self.successiveOverRelax(meanXcorr)
1141 
1142  def successiveOverRelax(self, source, maxIter=None, eLevel=None):
1143  """An implementation of the successive over relaxation (SOR) method.
1144 
1145  A numerical method for solving a system of linear equations
1146  with faster convergence than the Gauss-Seidel method.
1147 
1148  Parameters:
1149  -----------
1150  source : `numpy.ndarray`
1151  The input array
1152  maxIter : `int`, optional
1153  Maximum number of iterations to attempt before aborting
1154  eLevel : `float`, optional
1155  The target error level at which we deem convergence to have occured
1156 
1157  Returns:
1158  --------
1159  output : `numpy.ndarray`
1160  The solution
1161  """
1162  if not maxIter:
1163  maxIter = self.config.maxIterSuccessiveOverRelaxation
1164  if not eLevel:
1165  eLevel = self.config.eLevelSuccessiveOverRelaxation
1166 
1167  assert source.shape[0] == source.shape[1], "Input array must be square"
1168  # initialise, and set boundary conditions
1169  func = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1170  resid = np.zeros([source.shape[0] + 2, source.shape[1] + 2])
1171  rhoSpe = np.cos(np.pi/source.shape[0]) # Here a square grid is assummed
1172 
1173  # Calculate the initial error
1174  for i in range(1, func.shape[0] - 1):
1175  for j in range(1, func.shape[1] - 1):
1176  resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1177  func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
1178  inError = np.sum(np.abs(resid))
1179 
1180  # Iterate until convergence
1181  # We perform two sweeps per cycle,
1182  # updating 'odd' and 'even' points separately
1183  nIter = 0
1184  omega = 1.0
1185  dx = 1.0
1186  while nIter < maxIter*2:
1187  outError = 0
1188  if nIter%2 == 0:
1189  for i in range(1, func.shape[0] - 1, 2):
1190  for j in range(1, func.shape[1] - 1, 2):
1191  resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j] +
1192  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1193  func[i, j] += omega*resid[i, j]*.25
1194  for i in range(2, func.shape[0] - 1, 2):
1195  for j in range(2, func.shape[1] - 1, 2):
1196  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1197  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1198  func[i, j] += omega*resid[i, j]*.25
1199  else:
1200  for i in range(1, func.shape[0] - 1, 2):
1201  for j in range(2, func.shape[1] - 1, 2):
1202  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1203  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1204  func[i, j] += omega*resid[i, j]*.25
1205  for i in range(2, func.shape[0] - 1, 2):
1206  for j in range(1, func.shape[1] - 1, 2):
1207  resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
1208  func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
1209  func[i, j] += omega*resid[i, j]*.25
1210  outError = np.sum(np.abs(resid))
1211  if outError < inError*eLevel:
1212  break
1213  if nIter == 0:
1214  omega = 1.0/(1 - rhoSpe*rhoSpe/2.0)
1215  else:
1216  omega = 1.0/(1 - rhoSpe*rhoSpe*omega/4.0)
1217  nIter += 1
1218 
1219  if nIter >= maxIter*2:
1220  self.log.warn("Failure: SuccessiveOverRelaxation did not converge in %s iterations."
1221  "\noutError: %s, inError: %s," % (nIter//2, outError, inError*eLevel))
1222  else:
1223  self.log.info("Success: SuccessiveOverRelaxation converged in %s iterations."
1224  "\noutError: %s, inError: %s", nIter//2, outError, inError*eLevel)
1225  return func[1: -1, 1: -1]
1226 
1227  @staticmethod
1228  def _tileArray(in_array):
1229  """Given an input quarter-image, tile/mirror it and return full image.
1230 
1231  Given a square input of side-length n, of the form
1232 
1233  input = array([[1, 2, 3],
1234  [4, 5, 6],
1235  [7, 8, 9]])
1236 
1237  return an array of size 2n-1 as
1238 
1239  output = array([[ 9, 8, 7, 8, 9],
1240  [ 6, 5, 4, 5, 6],
1241  [ 3, 2, 1, 2, 3],
1242  [ 6, 5, 4, 5, 6],
1243  [ 9, 8, 7, 8, 9]])
1244 
1245  Parameters:
1246  -----------
1247  input : `np.array`
1248  The square input quarter-array
1249 
1250  Returns:
1251  --------
1252  output : `np.array`
1253  The full, tiled array
1254  """
1255  assert(in_array.shape[0] == in_array.shape[1])
1256  length = in_array.shape[0] - 1
1257  output = np.zeros((2*length + 1, 2*length + 1))
1258 
1259  for i in range(length + 1):
1260  for j in range(length + 1):
1261  output[i + length, j + length] = in_array[i, j]
1262  output[-i + length, j + length] = in_array[i, j]
1263  output[i + length, -j + length] = in_array[i, j]
1264  output[-i + length, -j + length] = in_array[i, j]
1265  return output
1266 
1267  @staticmethod
1268  def _convertImagelikeToFloatImage(imagelikeObject):
1269  """Turn an exposure or masked image of any type into an ImageF."""
1270  for attr in ("getMaskedImage", "getImage"):
1271  if hasattr(imagelikeObject, attr):
1272  imagelikeObject = getattr(imagelikeObject, attr)()
1273  try:
1274  floatImage = imagelikeObject.convertF()
1275  except AttributeError:
1276  raise RuntimeError("Failed to convert image to float")
1277  return floatImage
1278 
1279 
def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False,
                 correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10):
    """Calculate the bias induced when sigma-clipping non-Gaussian distributions.

    Fill image-pairs of the specified size with Poisson-distributed values,
    adding correlations as necessary. Then calculate the cross correlation,
    and calculate the bias induced using the cross-correlation image
    and the image means.

    Parameters:
    -----------
    fluxLevels : `list` of `int`
        The mean flux levels at which to simulate.
        Nominal values might be something like [70000, 90000, 110000]
    imageShape : `tuple` of `int`
        The shape of the image array to simulate, nx by ny pixels.
    repeats : `int`, optional
        Number of repeats to perform so that results
        can be averaged to improve SNR.
    seed : `int`, optional
        The random seed to use for the Poisson points.
    addCorrelations : `bool`, optional
        Whether to add brighter-fatter-like correlations to the simulated images
        If true, a correlation between x_{i,j} and x_{i+1,j+1} is introduced
        by adding a*x_{i,j} to x_{i+1,j+1}
    correlationStrength : `float`, optional
        The strength of the correlations.
        This is the value of the coefficient `a` in the above definition.
    maxLag : `int`, optional
        The maximum lag to work to in pixels
    nSigmaClip : `float`, optional
        Number of sigma to clip to when calculating the sigma-clipped mean.
    border : `int`, optional
        Number of border pixels to mask

    Returns:
    --------
    biases : `dict` of `list` of `float`
        A dictionary, keyed by flux level, containing a list of the biases
        for each repeat at that flux level
    means : `dict` of `list` of `float`
        A dictionary, keyed by flux level, containing a list of the average mean
        fluxes (average of the mean of the two images)
        for the image pairs at that flux level
    xcorrs : `dict` of `list` of `np.ndarray`
        A dictionary, keyed by flux level, containing a list of the xcorr
        images for the image pairs at that flux level
    """
    means = {f: [] for f in fluxLevels}
    xcorrs = {f: [] for f in fluxLevels}
    biases = {f: [] for f in fluxLevels}

    # NOTE(review): `config` was referenced here without being defined,
    # raising a NameError at runtime; construct the task config explicitly
    config = MakeBrighterFatterKernelTaskConfig()
    config.isrMandatorySteps = []  # no isr but the validation routine is still run
    config.isrForbiddenSteps = []
    config.nSigmaClipXCorr = nSigmaClip
    config.nPixBorderXCorr = border
    config.maxLag = maxLag
    task = MakeBrighterFatterKernelTask(config=config)

    im0 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])
    im1 = afwImage.maskedImage.MaskedImageF(imageShape[1], imageShape[0])

    random = np.random.RandomState(seed)

    for rep in range(repeats):
        for flux in fluxLevels:
            data0 = random.poisson(flux, (imageShape)).astype(float)
            data1 = random.poisson(flux, (imageShape)).astype(float)
            if addCorrelations:
                # correlate each pixel with its lower-right diagonal neighbour
                data0[1:, 1:] += correlationStrength*data0[: -1, : -1]
                data1[1:, 1:] += correlationStrength*data1[: -1, : -1]
            im0.image.array[:, :] = data0
            im1.image.array[:, :] = data1

            _xcorr, _means = task._crossCorrelate(im0, im1, runningBiasCorrSim=True)

            means[flux].append(_means)
            xcorrs[flux].append(_xcorr)
            if addCorrelations:
                # with correlations the [1, 1] lag carries the injected signal
                bias = xcorrs[flux][-1][1, 1]/means[flux][-1]*(1 + correlationStrength)/correlationStrength
                print("Simulated/expected avg. flux: %.1f, %.1f" % (flux, means[flux][-1]/2))
                print("Bias: %.6f" % bias)
            else:
                bias = xcorrs[flux][-1][0, 0]/means[flux][-1]
                print("Simulated/expected avg. flux: %.1f, %.1f" % (flux, means[flux][-1]/2))
                print("Bias: %.6f" % bias)
            biases[flux].append(bias)

    return biases, means, xcorrs
Angle abs(Angle const &a)
Definition: Angle.h:106
def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, maxIter=None)
def erase(frame=None)
Definition: ds9.py:97
std::shared_ptr< Background > makeBackground(ImageT const &img, BackgroundControl const &bgCtrl)
A convenience function that uses function overloading to make the correct type of Background...
Definition: Background.h:566
std::shared_ptr< FrameSet > append(FrameSet const &first, FrameSet const &second)
Construct a FrameSet that performs two transformations in series.
Definition: functional.cc:33
Pass parameters to a Background object.
Definition: Background.h:57
Statistics makeStatistics(lsst::afw::math::MaskedVector< EntryT > const &mv, std::vector< WeightPixel > const &vweights, int const flags, StatisticsControl const &sctrl=StatisticsControl())
The makeStatistics() overload to handle lsst::afw::math::MaskedVector<>
Definition: Statistics.h:520
Pass parameters to a Statistics object.
Definition: Statistics.h:93
def _crossCorrelate(self, maskedIm0, maskedIm1, runningBiasCorrSim=False, frameId=None, detId=None)
table::Key< int > type
Definition: Detector.cc:164
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
Definition: history.py:168
def mtv(data, frame=None, title="", wcs=None, args, kwargs)
Definition: ds9.py:93
daf::base::PropertyList * list
Definition: fits.cc:833
def calcBiasCorr(fluxLevels, imageShape, repeats=1, seed=0, addCorrelations=False, correlationStrength=0.1, maxLag=10, nSigmaClip=5, border=10)