LSST Data Management Base Package
multiBandUtils.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import lsst.afw.table as afwTable

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.coadd.utils.getGen3CoaddExposureId import getGen3CoaddExposureId
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField
from lsst.obs.base import ExposureIdInfo

class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`.

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Raised if both `parsedCmd` and `args` are `None`
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references.

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Raised when multiple references are provided for the same
            combination of tract, patch and filter
        """
        refDict = {}  # Will index this as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references grouped by tract and patch.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of (dataRefList, kwargs) tuples, one per tract/patch
            combination, where dataRefList holds the data references for
            all filters of that patch.
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]

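# The following is an editor's sketch (not part of the original module) showing
# how buildRefDict and getTargetList group data references; the SimpleNamespace
# objects below are hypothetical stand-ins for real butler data references and
# a real parsed command line.
def _demoMergeSourcesGrouping():
    """Illustrate the {tract: {patch: {filter: dataRef}}} grouping."""
    from types import SimpleNamespace

    refs = [SimpleNamespace(dataId={"tract": 0, "patch": "1,1", "filter": band})
            for band in ("g", "r", "i")]
    parsedCmd = SimpleNamespace(id=SimpleNamespace(refList=refs))
    refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
    assert set(refDict[0]["1,1"]) == {"g", "r", "i"}
    # One target per tract/patch; its first element lists all three refs.
    targets = MergeSourcesRunner.getTargetList(parsedCmd)
    assert len(targets) == 1 and len(targets[0][0]) == 3
    return targets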

def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs

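# Editor's sketch (not in the original source): the factory above is intended
# to be bound as a class attribute at class-definition time. _DemoMergeTask and
# the "det" suffix are hypothetical; a real task would inherit from a pipeline
# Task and receive `schema`, `config`, and `algMetadata` in its constructor.
class _DemoMergeTask:
    getSchemaCatalogs = _makeGetSchemaCatalogs("det")

    def __init__(self, schema, config):
        self.schema = schema  # afwTable.Schema describing the output catalog
        self.config = config  # must provide config.coaddName, e.g. "deep"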

def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser

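# Editor's sketch: building a parser for a hypothetical command-line task named
# "mergeCoaddDetections" that merges the "det" dataset; the resulting parser
# accepts data IDs such as --id tract=12345 patch=1,2 filter=g^r^i.
def _demoMergeArgumentParser():
    return makeMergeArgumentParser("mergeCoaddDetections", "det")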

def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] task the task whose input schema is desired
    @param[in] butler butler reference to obtain the input schema from
    @param[in] schema the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema

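# Editor's sketch of the two call modes: an explicit schema is returned as-is,
# while omitting it fetches, e.g., the "deepCoadd_det_schema" dataset from the
# butler (assuming coaddName "deep" and inputDataset "det").
def _demoGetInputSchema(task, butler):
    fromButler = getInputSchema(task, butler=butler)
    passedThrough = getInputSchema(task, schema=fromButler)
    assert passedThrough is fromButler
    return fromButler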

def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] task the task whose input catalog is desired
    @param[in] patchRef data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s", len(catalog), band, patchRef.dataId)
    return band, catalog

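# Editor's sketch: the usual call pattern, accumulating per-band catalogs.
# For coaddName "deep" and inputDataset "det" this reads the
# "deepCoadd_filterLabel" and "deepCoadd_det" datasets for each patch.
def _demoReadCatalogs(task, patchRefList):
    catalogs = {}
    for patchRef in patchRefList:
        band, catalog = readCatalog(task, patchRef)
        catalogs[band] = catalog
    return catalogs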

class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

        nBands < nBandsSufficient
        AND (peakRank >= rankSufficient)
        AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))

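# Editor's sketch (hypothetical helper, not part of the original module):
# the cull formula from the CullPeaksConfig docstring written as a predicate.
# familySize is the total number of peaks in the peak's family.
def _shouldCullPeak(nBands, peakRank, familySize, config):
    peakRankNormalized = peakRank / familySize
    return (nBands < config.nBandsSufficient
            and peakRank >= config.rankSufficient
            and (peakRank >= config.rankConsidered
                 or peakRankNormalized >= config.rankNormalizedConsidered))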

def _makeMakeIdFactory(datasetName, includeBand=True):
    """Construct a makeIdFactory instance method

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expId = getGen3CoaddExposureId(dataRef, coaddName=self.config.coaddName, includeBand=includeBand,
                                       log=self.log)
        info = ExposureIdInfo(expId, dataRef.get(self.config.coaddName + datasetName + "_bits"))
        return info.makeSourceIdFactory()
    return makeIdFactory
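
# Editor's sketch (hypothetical task, not in the original source): like
# _makeGetSchemaCatalogs, the factory above is bound at class-definition time.
# With datasetName "CoaddId" and coaddName "deep", the resulting method reads
# the "deepCoaddId_bits" dataset to size the exposure ID bit field.
class _DemoMeasureTask:
    makeIdFactory = _makeMakeIdFactory("CoaddId")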