LSST Applications  21.0.0-147-g0e635eb1+1acddb5be5,22.0.0+052faf71bd,22.0.0+1ea9a8b2b2,22.0.0+6312710a6c,22.0.0+729191ecac,22.0.0+7589c3a021,22.0.0+9f079a9461,22.0.1-1-g7d6de66+b8044ec9de,22.0.1-1-g87000a6+536b1ee016,22.0.1-1-g8e32f31+6312710a6c,22.0.1-10-gd060f87+016f7cdc03,22.0.1-12-g9c3108e+df145f6f68,22.0.1-16-g314fa6d+c825727ab8,22.0.1-19-g93a5c75+d23f2fb6d8,22.0.1-19-gb93eaa13+aab3ef7709,22.0.1-2-g8ef0a89+b8044ec9de,22.0.1-2-g92698f7+9f079a9461,22.0.1-2-ga9b0f51+052faf71bd,22.0.1-2-gac51dbf+052faf71bd,22.0.1-2-gb66926d+6312710a6c,22.0.1-2-gcb770ba+09e3807989,22.0.1-20-g32debb5+b8044ec9de,22.0.1-23-gc2439a9a+fb0756638e,22.0.1-3-g496fd5d+09117f784f,22.0.1-3-g59f966b+1e6ba2c031,22.0.1-3-g849a1b8+f8b568069f,22.0.1-3-gaaec9c0+c5c846a8b1,22.0.1-32-g5ddfab5d3+60ce4897b0,22.0.1-4-g037fbe1+64e601228d,22.0.1-4-g8623105+b8044ec9de,22.0.1-5-g096abc9+d18c45d440,22.0.1-5-g15c806e+57f5c03693,22.0.1-7-gba73697+57f5c03693,master-g6e05de7fdc+c1283a92b8,master-g72cdda8301+729191ecac,w.2021.39
LSST Data Management Base Package
multiBandUtils.py
Go to the documentation of this file.
1 # This file is part of pipe_tasks.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 import lsst.afw.table as afwTable
22 
23 from lsst.coadd.utils import ExistingCoaddDataIdContainer
24 from lsst.pipe.base import TaskRunner, ArgumentParser
25 from lsst.pex.config import Config, RangeField
26 from lsst.obs.base import ExposureIdInfo
27 
28 
class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` `MergeMeasurementTask`

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (un-used)

        Raises
        ------
        RuntimeError
            Thrown if both `parsedCmd` & `args` are `None`
        """
        # The butler can come from either source; prefer the parsed command.
        if parsedCmd is not None:
            return self.TaskClass(config=self.config, log=self.log, butler=parsedCmd.butler)
        if args is not None:
            dataRefs, _ = args
            return self.TaskClass(config=self.config, log=self.log, butler=dataRefs[0].getButler())
        raise RuntimeError("Neither parsedCmd or args specified")

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {patch: {tract: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Thrown when multiple references are provided for the same
            combination of tract, patch and filter
        """
        # Indexed as refsByTract[tract][patch][filter] = dataRef.
        refsByTract = {}
        for dataRef in parsedCmd.id.refList:
            dataId = dataRef.dataId
            patchDict = refsByTract.setdefault(dataId["tract"], {})
            bandDict = patchDict.setdefault(dataId["patch"], {})
            # Exactly one reference is allowed per (tract, patch, filter).
            if dataId["filter"] in bandDict:
                raise RuntimeError("Multiple versions of %s" % (dataRef.dataId,))
            bandDict[dataId["filter"]] = dataRef
        return refsByTract

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each patch, tract, filter combo.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (dataRef, kwargs) pair.
        """
        refsByTract = MergeSourcesRunner.buildRefDict(parsedCmd)
        targets = []
        for patchesInTract in refsByTract.values():
            for refsByBand in patchesInTract.values():
                targets.append((list(refsByBand.values()), kwargs))
        return targets
111 
112 
113 def _makeGetSchemaCatalogs(datasetSuffix):
114  """Construct a getSchemaCatalogs instance method
115 
116  These are identical for most of the classes here, so we'll consolidate
117  the code.
118 
119  datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
120  """
121 
122  def getSchemaCatalogs(self):
123  """Return a dict of empty catalogs for each catalog dataset produced by this task."""
124  src = afwTable.SourceCatalog(self.schema)
125  if hasattr(self, "algMetadata"):
126  src.getTable().setMetadata(self.algMetadata)
127  return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
128  return getSchemaCatalogs
129 
130 
def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We use the ArgumentParser to provide a list of data references for
    patches; the RunnerClass will sort them into lists of data references
    for the same patch.
    """
    argumentParser = ArgumentParser(name)
    argumentParser.add_id_argument(
        "--id",
        "deepCoadd_" + dataset,
        ContainerClass=ExistingCoaddDataIdContainer,
        help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i",
    )
    return argumentParser
144 
145 
def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] task    task whose ``config.coaddName`` and ``inputDataset``
                       name the schema dataset to fetch
    @param[in] butler  butler reference to obtain the input schema from
    @param[in] schema  the input schema; returned unchanged when provided
    @return the input schema

    @throw RuntimeError if neither ``butler`` nor ``schema`` is provided
    """
    if schema is not None:
        return schema
    # Explicit raise instead of `assert`: assertions are stripped when
    # Python runs with optimization enabled (-O), which would turn this
    # into an obscure AttributeError on `None` below.
    if butler is None:
        raise RuntimeError("Neither butler nor schema specified")
    datasetName = task.config.coaddName + "Coadd_" + task.inputDataset + "_schema"
    return butler.get(datasetName, immediate=True).schema
158 
159 
def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] patchRef data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    coaddPrefix = task.config.coaddName + "Coadd_"
    # The band name comes from the coadd's filter label, not the dataId.
    band = patchRef.get(coaddPrefix + "filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(coaddPrefix + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s", len(catalog), band, patchRef.dataId)
    return band, catalog
174 
175 
class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
      AND (rank >= rankSufficient)
      AND (rank >= rankConsidered OR rankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))
209 
210 
211 def _makeMakeIdFactory(datasetName):
212  """Construct a makeIdFactory instance method
213 
214  These are identical for all the classes here, so this consolidates
215  the code.
216 
217  datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
218  """
219 
220  def makeIdFactory(self, dataRef):
221  """Return an IdFactory for setting the detection identifiers
222 
223  The actual parameters used in the IdFactory are provided by
224  the butler (through the provided data reference.
225  """
226  info = ExposureIdInfo(
227  int(dataRef.get(self.config.coaddName + datasetName)),
228  dataRef.get(self.config.coaddName + datasetName + "_bits")
229  )
230  return info.makeSourceIdFactory()
231  return makeIdFactory
def makeTask(self, parsedCmd=None, args=None)
daf::base::PropertyList * list
Definition: fits.cc:913
def makeMergeArgumentParser(name, dataset)
Create a suitable ArgumentParser.
def getInputSchema(task, butler=None, schema=None)
Obtain the input schema either directly or from a butler reference.
def readCatalog(task, patchRef)
Read input catalog.