multiBandUtils.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import lsst.afw.table as afwTable

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField
from lsst.obs.base import ExposureIdInfo


class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`.

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Thrown if both `parsedCmd` and `args` are `None`
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Thrown when multiple references are provided for the same
            combination of tract, patch and filter
        """
        refDict = {}  # Will index this as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict

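    # For example (hypothetical dataIds): two bands of a single patch parse into
    #   refDict == {12345: {"1,2": {"g": refG, "r": refR}}}
    # and getTargetList below then yields [([refG, refR], kwargs)], i.e. one
    # target per (tract, patch) carrying all of its per-band references.
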
    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each patch, tract, filter combo.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple pairs the list of dataRefs
            for one tract and patch with the keyword arguments.
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
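
# A minimal usage sketch (hypothetical class name, not from this module): a task
# adopts the generated method by assigning the factory's result at class scope.
#
#   class ExampleMergeTask(CmdLineTask):
#       getSchemaCatalogs = _makeGetSchemaCatalogs("det")
#
# With config.coaddName == "deep", the returned dict is keyed "deepCoadd_det".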


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
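
# For example (hypothetical executable and repository path), a merge task built
# on this parser could be invoked as
#   mergeCoaddDetections.py /repo --id tract=12345 patch=1,2 filter=g^r^i
# where the "^"-separated filters expand into one data reference per band.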


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] task    the task requesting the schema; supplies the coadd name and input dataset
    @param[in] butler  butler reference to obtain the input schema from
    @param[in] schema  the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
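
# For example, with config.coaddName == "deep" and a task whose inputDataset is
# "det" (an assumed value, for illustration), the schema would be read from the
# "deepCoadd_det_schema" dataset.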


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] patchRef  data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s", len(catalog), band, patchRef.dataId)
    return band, catalog


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
      AND (peakRank >= rankSufficient)
      AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        info = ExposureIdInfo(
            int(dataRef.get(self.config.coaddName + datasetName)),
            dataRef.get(self.config.coaddName + datasetName + "_bits")
        )
        return info.makeSourceIdFactory()
    return makeIdFactory
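
# A minimal usage sketch (hypothetical class name): as with _makeGetSchemaCatalogs,
# the generated method is installed by class-scope assignment, here using the
# "CoaddId" dataset from the docstring example above.
#
#   class ExampleMeasureTask(CmdLineTask):
#       makeIdFactory = _makeMakeIdFactory("CoaddId")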