LSST Applications  21.0.0-131-g8cabc107+528f53ee53,22.0.0+00495a2688,22.0.0+0ef2527977,22.0.0+11a2aa21cd,22.0.0+269b7e55e3,22.0.0+2c6b6677a3,22.0.0+64c1bc5aa5,22.0.0+7b3a3f865e,22.0.0+e1b6d2281c,22.0.0+ff3c34362c,22.0.1-1-g1b65d06+c95cbdf3df,22.0.1-1-g7058be7+1cf78af69b,22.0.1-1-g7dab645+2a65e40b06,22.0.1-1-g8760c09+64c1bc5aa5,22.0.1-1-g949febb+64c1bc5aa5,22.0.1-1-ga324b9c+269b7e55e3,22.0.1-1-gf9d8b05+ff3c34362c,22.0.1-10-g781e53d+9b51d1cd24,22.0.1-10-gba590ab+b9624b875d,22.0.1-13-g76f9b8d+2c6b6677a3,22.0.1-14-g22236948+57af756299,22.0.1-18-g3db9cf4b+9b7092c56c,22.0.1-18-gb17765a+2264247a6b,22.0.1-2-g8ef0a89+2c6b6677a3,22.0.1-2-gcb770ba+c99495d3c6,22.0.1-24-g2e899d296+4206820b0d,22.0.1-3-g7aa11f2+2c6b6677a3,22.0.1-3-g8c1d971+f253ffa91f,22.0.1-3-g997b569+ff3b2f8649,22.0.1-4-g1930a60+6871d0c7f6,22.0.1-4-g5b7b756+6b209d634c,22.0.1-6-ga02864e+6871d0c7f6,22.0.1-7-g3402376+a1a2182ac4,22.0.1-7-g65f59fa+54b92689ce,master-gcc5351303a+e1b6d2281c,w.2021.32
LSST Data Management Base Package
cpCombine.py
Go to the documentation of this file.
1 # This file is part of cp_pipe.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <http://www.gnu.org/licenses/>.
21 import numpy as np
22 import time
23 
24 import lsst.pex.config as pexConfig
25 import lsst.pipe.base as pipeBase
26 import lsst.pipe.base.connectionTypes as cT
27 import lsst.afw.math as afwMath
28 import lsst.afw.image as afwImage
29 
30 from lsst.geom import Point2D
31 from lsst.log import Log
32 from astro_metadata_translator import merge_headers, ObservationGroup
33 from astro_metadata_translator.serialize import dates_to_fits
34 
35 
36 # CalibStatsConfig/CalibStatsTask from pipe_base/constructCalibs.py
class CalibStatsConfig(pexConfig.Config):
    """Parameters controlling the measurement of background statistics.
    """
    # Which afwMath statistic to compute, and how aggressively to clip.
    stat = pexConfig.Field(
        doc="Statistic name to use to estimate background (from lsst.afw.math)",
        dtype=str,
        default='MEANCLIP',
    )
    clip = pexConfig.Field(
        doc="Clipping threshold for background",
        dtype=float,
        default=3.0,
    )
    nIter = pexConfig.Field(
        doc="Clipping iterations for background",
        dtype=int,
        default=3,
    )
    # Pixels carrying any of these mask planes are excluded from the statistic.
    mask = pexConfig.ListField(
        doc="Mask planes to reject",
        dtype=str,
        default=["DETECTED", "BAD", "NO_DATA"],
    )
60 
61 
class CalibStatsTask(pipeBase.Task):
    """Measure statistics on the background.

    This can be useful for scaling the background, e.g., for flats and fringe
    frames.
    """
    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """Measure a particular statistic on an image (of some sort).

        Parameters
        ----------
        exposureOrImage : `lsst.afw.image.Exposure`, `lsst.afw.image.MaskedImage`, or `lsst.afw.image.Image`
            Exposure or image to calculate statistics on.

        Returns
        -------
        results : `float`
            Resulting statistic value.
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        # Duck-type down the hierarchy: Exposure -> MaskedImage -> plain Image.
        # Catch only AttributeError (missing accessor) so genuine failures in
        # the accessors are not silently swallowed, as the previous broad
        # ``except Exception`` could do.
        try:
            image = exposureOrImage.getMaskedImage()
        except AttributeError:
            try:
                image = exposureOrImage.getImage()
            except AttributeError:
                image = exposureOrImage
        statType = afwMath.stringToStatisticsProperty(self.config.stat)
        return afwMath.makeStatistics(image, statType, stats).getValue()
93 
94 
class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    """Connections for combining per-exposure calibration inputs into a
    single per-detector proposed calibration.
    """
    inputExps = cT.Input(
        name="cpInputs",
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
    )
    inputScales = cT.Input(
        name="cpScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
        multiple=False,
    )

    outputData = cT.Output(
        # Fixed doubled period in the doc string ("certified..").
        name="cpProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # The scale list is only consumed by 'InputList' scaling; drop the
        # connection otherwise so the butler does not require the dataset.
        if config and config.exposureScaling != 'InputList':
            self.inputs.discard("inputScales")
125 
126 
127 # CalibCombineConfig/CalibCombineTask from pipe_base/constructCalibs.py
class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """
    calibrationType = pexConfig.Field(
        doc="Name of calibration to be generated.",
        dtype=str,
        default="calibration",
    )

    # How each input is normalized before stacking.
    exposureScaling = pexConfig.ChoiceField(
        doc="Scaling to be applied to each input exposure.",
        dtype=str,
        default="Unity",
        allowed={
            "Unity": "Do not scale inputs. Scale factor is 1.0.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
    )
    scalingLevel = pexConfig.ChoiceField(
        doc="Region to scale.",
        dtype=str,
        default="DETECTOR",
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
        dtype=int,
        default=5,
    )

    doVignette = pexConfig.Field(
        doc="Copy vignette polygon to output and censor vignetted pixels?",
        dtype=bool,
        default=False,
    )

    # Stacking controls passed to afwMath.StatisticsControl.
    mask = pexConfig.ListField(
        doc="Mask planes to respect",
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
    )
    combine = pexConfig.Field(
        doc="Statistic name to use for combination (from lsst.afw.math)",
        dtype=str,
        default='MEANCLIP',
    )
    clip = pexConfig.Field(
        doc="Clipping threshold for combination",
        dtype=float,
        default=3.0,
    )
    nIter = pexConfig.Field(
        doc="Clipping iterations for combination",
        dtype=int,
        default=3,
    )
    stats = pexConfig.ConfigurableField(
        doc="Background statistics configuration",
        target=CalibStatsTask,
    )
195 
196 
class CalibCombineTask(pipeBase.PipelineTask,
                       pipeBase.CmdLineTask):
    """Task to combine calib exposures."""
    ConfigClass = CalibCombineConfig
    _DefaultName = 'cpCombine'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("stats")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Fetch inputs from the butler, attach their dimension records,
        delegate to `run`, and persist the result.
        """
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputExps]
        inputs['inputDims'] = dimensions

        # Fixed: was ``self.runrun(**inputs)``, which raises AttributeError.
        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExps, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExps : `list` [`lsst.afw.image.Exposure`]
            Input list of exposures to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`int`), and exposure (`int`).  Used for
            'InputList' scaling.
        inputDims : `list` [`dict`], optional
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting struct, with attribute:

            ``outputData``
                Final combined exposure generated from the inputs
                (`lsst.afw.image.Exposure`).

        Raises
        ------
        RuntimeError
            Raised if no input data is found.  Also raised if
            config.exposureScaling == InputList, and a necessary scale
            was not found, or if the inputs span multiple detectors.
        """
        # Fixed: was ``self.getDimensionsgetDimensions``.
        width, height = self.getDimensions(inputExps)
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        numExps = len(inputExps)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        # With few inputs the per-pixel spread is a poor variance estimator;
        # propagate the input variance planes instead.
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        # Check that all inputs share a single detector (by both id and
        # serial), or that no inputs have any detector.
        detectorList = [exp.getDetector() for exp in inputExps]
        if None in detectorList:
            self.log.warn("Not all input detectors defined.")
        detectorIds = [det.getId() if det is not None else None for det in detectorList]
        # Fixed: serials previously re-used getId(), so the serial check was
        # a no-op; and comparing len(set([nIds, nSerials])) passed whenever
        # the two counts merely agreed, even if both were > 1.
        detectorSerials = [det.getSerial() if det is not None else None for det in detectorList]
        if len(set(detectorIds)) != 1 or len(set(detectorSerials)) != 1:
            raise RuntimeError("Input data contains multiple detectors.")

        inputDetector = inputExps[0].getDetector()

        # Create output exposure for combined data.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Apply scaling:
        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExps]

        for index, (exp, dims) in enumerate(zip(inputExps, inputDims)):
            scale = 1.0
            if exp is None:
                self.log.warn("Input %d is None (%s); unable to scale exp.", index, dims)
                continue

            if self.config.exposureScaling == "ExposureTime":
                scale = exp.getInfo().getVisitInfo().getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = exp.getInfo().getVisitInfo().getDarkTime()
            elif self.config.exposureScaling == "MeanStats":
                scale = self.stats.run(exp)
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get('exposure', None)
                detectorId = dims.get('detector', None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales['expScale']:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel == "DETECTOR":
                    if visitId not in inputScales['expScale'][detectorId]:
                        # Fixed missing space before "detector" in the message.
                        raise RuntimeError(f"Could not identify a scaling for input {index}"
                                           f" detector {detectorId} visit {visitId}")
                    scale = inputScales['expScale'][detectorId][visitId]
                elif self.config.scalingLevel == 'AMP':
                    scale = [inputScales['expScale'][detectorId][amp.getName()][visitId]
                             for amp in exp.getDetector()]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == 'Unity':
                scale = 1.0
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)
            # Fixed: was ``self.applyScaleapplyScale``.
            self.applyScale(exp, scale)

        # Fixed: was ``self.combinecombine``.
        self.combine(combined, inputExps, stats)

        # Fixed: was ``self.interpolateNansinterpolateNans``.
        self.interpolateNans(combined)

        if self.config.doVignette:
            polygon = inputExps[0].getInfo().getValidPolygon()
            VignetteExposure(combined, polygon=polygon, doUpdateMask=True,
                             doSetValue=True, vignetteValue=0.0)

        # Combine headers.  Fixed: was ``self.combineHeaderscombineHeaders``.
        self.combineHeaders(inputExps, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )

    def getDimensions(self, expList):
        """Get dimensions of the inputs.

        Parameters
        ----------
        expList : `list` [`lsst.afw.image.Exposure`]
            Exps to check the sizes of.

        Returns
        -------
        width, height : `int`
            Unique set of input dimensions.
        """
        dimList = [exp.getDimensions() for exp in expList if exp is not None]
        # Fixed: was ``self.getSizegetSize``.
        return self.getSize(dimList)

    def getSize(self, dimList):
        """Determine a consistent size, given a list of image sizes.

        Parameters
        ----------
        dimList : iterable of `tuple` (`int`, `int`)
            List of dimensions.

        Returns
        -------
        width, height : `int`
            Common dimensions.

        Raises
        ------
        RuntimeError
            If input dimensions are inconsistent.
        """
        dim = set((w, h) for w, h in dimList)
        if len(dim) != 1:
            raise RuntimeError("Inconsistent dimensions: %s" % dim)
        return dim.pop()

    def applyScale(self, exposure, scale=None):
        """Apply scale to input exposure.

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure to scale.
        scale : `float` or `list` [`float`], optional
            Constant scale to divide the exposure by.  A list is
            interpreted as per-amplifier scales, in detector amp order.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            if isinstance(scale, list):
                for amp, ampScale in zip(exposure.getDetector(), scale):
                    ampIm = mi[amp.getBBox()]
                    ampIm /= ampScale
            else:
                mi /= scale

    def combine(self, target, expList, stats):
        """Combine multiple images.

        Parameters
        ----------
        target : `lsst.afw.image.Exposure`
            Output exposure to construct.
        expList : `list` [`lsst.afw.image.Exposure`]
            Input exposures to combine.
        stats : `lsst.afw.math.StatisticsControl`
            Control explaining how to combine the input images.
        """
        images = [img.getMaskedImage() for img in expList if img is not None]
        combineType = afwMath.stringToStatisticsProperty(self.config.combine)
        afwMath.statisticsStack(target, images, combineType, stats)

    def combineHeaders(self, expList, calib, calibType="CALIB", scales=None):
        """Combine input headers to determine the set of common headers,
        supplemented by calibration inputs.

        Parameters
        ----------
        expList : `list` of `lsst.afw.image.Exposure`
            Input list of exposures to combine.
        calib : `lsst.afw.image.Exposure`
            Output calibration to construct headers for.
        calibType : `str`, optional
            OBSTYPE the output should claim.
        scales : `list` of `float`, optional
            Scale values applied to each input to record.

        Returns
        -------
        header : `lsst.daf.base.PropertyList`
            Constructed header.
        """
        # Header
        header = calib.getMetadata()
        header.set("OBSTYPE", calibType)

        # Keywords we care about
        comments = {"TIMESYS": "Time scale for all dates",
                    "DATE-OBS": "Start date of earliest input observation",
                    "MJD-OBS": "[d] Start MJD of earliest input observation",
                    "DATE-END": "End date of oldest input observation",
                    "MJD-END": "[d] End MJD of oldest input observation",
                    "MJD-AVG": "[d] MJD midpoint of all input observations",
                    "DATE-AVG": "Midpoint date of all input observations"}

        # Creation date
        now = time.localtime()
        calibDate = time.strftime("%Y-%m-%d", now)
        calibTime = time.strftime("%X %Z", now)
        header.set("CALIB_CREATE_DATE", calibDate)
        header.set("CALIB_CREATE_TIME", calibTime)

        # Merge input headers, keeping comments from the first input where
        # available.
        inputHeaders = [exp.getMetadata() for exp in expList if exp is not None]
        merged = merge_headers(inputHeaders, mode='drop')
        for k, v in merged.items():
            if k not in header:
                md = expList[0].getMetadata()
                comment = md.getComment(k) if k in md else None
                header.set(k, v, comment=comment)

        # Construct list of visits
        visitInfoList = [exp.getInfo().getVisitInfo() for exp in expList if exp is not None]
        for i, visit in enumerate(visitInfoList):
            if visit is None:
                continue
            header.set("CPP_INPUT_%d" % (i,), visit.getExposureId())
            header.set("CPP_INPUT_DATE_%d" % (i,), str(visit.getDate()))
            header.set("CPP_INPUT_EXPT_%d" % (i,), visit.getExposureTime())
            if scales is not None:
                header.set("CPP_INPUT_SCALE_%d" % (i,), scales[i])

        # Not yet working: DM-22302
        # Create an observation group so we can add some standard headers
        # independent of the form in the input files.
        # Use try block in case we are dealing with unexpected data headers
        try:
            group = ObservationGroup(visitInfoList, pedantic=False)
        except Exception:
            self.log.warn("Exception making an obs group for headers. Continuing.")
            # Fall back to setting a DATE-OBS from the calibDate
            dateCards = {"DATE-OBS": "{}T00:00:00.00".format(calibDate)}
            comments["DATE-OBS"] = "Date of start of day of calibration midpoint"
        else:
            oldest, newest = group.extremes()
            dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)

        for k, v in dateCards.items():
            header.set(k, v, comment=comments.get(k, None))

        return header

    def interpolateNans(self, exp):
        """Interpolate over NANs in the combined image.

        NANs can result from masked areas on the CCD.  We don't want them
        getting into our science images, so we replace them with the median
        of the image.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exp to check for NaNs.
        """
        array = exp.getImage().getArray()
        bad = np.isnan(array)

        # Fixed: previously counted the NON-NaN pixels, so the warning fired
        # on every image with a misleading count.
        count = np.sum(bad)
        if count > 0:
            # Median of the good pixels only; guarded so we never take the
            # median of an empty selection when there are no NaNs.
            median = np.median(array[np.logical_not(bad)])
            array[bad] = median
            self.log.warn("Found %s NAN pixels", count)
518 
519 
520 # Create versions of the Connections, Config, and Task that support filter constraints.
class CalibCombineByFilterConnections(CalibCombineConnections,
                                      dimensions=("instrument", "detector", "physical_filter")):
    """Connections for combining calibrations that are constrained by
    physical filter.  Restores the class statement that was missing here.
    """
    inputScales = cT.Input(
        name="cpFilterScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpFilterProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # As in the base class: only 'InputList' scaling consumes the scales.
        if config and config.exposureScaling != 'InputList':
            self.inputs.discard("inputScales")
544 
545 
class CalibCombineByFilterConfig(CalibCombineConfig,
                                 pipelineConnections=CalibCombineByFilterConnections):
    """Configuration for filter-constrained calib combination; identical to
    `CalibCombineConfig` apart from the connections.  Restores the class
    statement that was missing here.
    """
    pass
549 
550 
class CalibCombineByFilterTask(CalibCombineTask):
    """Task to combine calib exposures."""
    # Restores the class statement that was missing here; also drops the
    # redundant trailing ``pass`` (the class body already has statements).
    ConfigClass = CalibCombineByFilterConfig
    _DefaultName = 'cpFilterCombine'
556 
557 
def VignetteExposure(exposure, polygon=None,
                     doUpdateMask=True, maskPlane="NO_DATA",
                     doSetValue=False, vignetteValue=0.0,
                     log=None):
    """Apply vignetted polygon to image pixels.

    Parameters
    ----------
    exposure : `lsst.afw.image.Exposure`
        Image to be updated.
    polygon : `lsst.afw.geom.Polygon`, optional
        Polygon bounding the illuminated region.  If not supplied, the
        exposure's valid polygon is used.
    doUpdateMask : `bool`, optional
        Update the exposure mask for vignetted area?
    maskPlane : `str`, optional
        Mask plane to assign.
    doSetValue : `bool`, optional
        Set image value for vignetted area?
    vignetteValue : `float`, optional
        Value to assign.
    log : `lsst.log.Log`, optional
        Log to write to.

    Raises
    ------
    RuntimeError
        Raised if no valid polygon exists.
    """
    polygon = polygon if polygon else exposure.getInfo().getValidPolygon()
    if not polygon:
        raise RuntimeError("Could not find valid polygon!")
    log = log if log else Log.getLogger(__name__.partition(".")[2])

    # Fully illuminated iff every bounding-box corner lies inside the polygon.
    fullyIlluminated = all(polygon.contains(Point2D(corner))
                           for corner in exposure.getBBox().getCorners())

    log.info("Exposure is fully illuminated? %s", fullyIlluminated)

    if not fullyIlluminated:
        # Scan pixels.
        mask = exposure.getMask()
        numPixels = mask.getBBox().getArea()

        xx, yy = np.meshgrid(np.arange(0, mask.getWidth(), dtype=int),
                             np.arange(0, mask.getHeight(), dtype=int))

        # Per-pixel containment test; True where the pixel is vignetted.
        vignMask = np.array([not polygon.contains(Point2D(x, y)) for x, y in
                             zip(xx.reshape(numPixels), yy.reshape(numPixels))])
        vignMask = vignMask.reshape(mask.getHeight(), mask.getWidth())

        if doUpdateMask:
            bitMask = mask.getPlaneBitMask(maskPlane)
            maskArray = mask.getArray()
            maskArray[vignMask] |= bitMask
        if doSetValue:
            imageArray = exposure.getImage().getArray()
            imageArray[vignMask] = vignetteValue
        log.info("Exposure contains %d vignetted pixels.",
                 np.count_nonzero(vignMask))
Pass parameters to a Statistics object.
Definition: Statistics.h:93
def applyScale(self, exposure, scale=None)
Definition: cpCombine.py:381
def runQuantum(self, butlerQC, inputRefs, outputRefs)
Definition: cpCombine.py:207
def run(self, inputExps, inputScales=None, inputDims=None)
Definition: cpCombine.py:216
def combine(self, target, expList, stats)
Definition: cpCombine.py:403
def combineHeaders(self, expList, calib, calibType="CALIB", scales=None)
Definition: cpCombine.py:419
def run(self, exposureOrImage)
Definition: cpCombine.py:69
daf::base::PropertySet * set
Definition: fits.cc:912
Backwards-compatibility support for depersisting the old Calib (FluxMag0/FluxMag0Err) objects.
std::shared_ptr< Exposure< ImagePixelT, MaskPixelT, VariancePixelT > > makeExposure(MaskedImage< ImagePixelT, MaskPixelT, VariancePixelT > &mimage, std::shared_ptr< geom::SkyWcs const > wcs=std::shared_ptr< geom::SkyWcs const >())
A function to return an Exposure of the correct type (cf.
Definition: Exposure.h:462
Statistics makeStatistics(lsst::afw::image::Image< Pixel > const &img, lsst::afw::image::Mask< image::MaskPixel > const &msk, int const flags, StatisticsControl const &sctrl=StatisticsControl())
Handle a watered-down front-end to the constructor (no variance)
Definition: Statistics.h:360
Property stringToStatisticsProperty(std::string const property)
Conversion function to switch a string to a Property (see Statistics.h)
Definition: Statistics.cc:747
std::shared_ptr< lsst::afw::image::Image< PixelT > > statisticsStack(std::vector< std::shared_ptr< lsst::afw::image::Image< PixelT >>> &images, Property flags, StatisticsControl const &sctrl=StatisticsControl(), std::vector< lsst::afw::image::VariancePixel > const &wvector=std::vector< lsst::afw::image::VariancePixel >(0))
A function to compute some statistics of a stack of Images.
def VignetteExposure(exposure, polygon=None, doUpdateMask=True, maskPlane="NO_DATA", doSetValue=False, vignetteValue=0.0, log=None)
Definition: cpCombine.py:561
Point< double, 2 > Point2D
Definition: Point.h:324
Definition: Log.h:717
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
Definition: history.py:174