from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pex.config import Config, RangeField
import lsst.afw.table as afwTable
11 """Task runner for `MergeDetectionTask` `MergeMeasurementTask`
13 Required because the run method requires a list of
14 dataRefs rather than a single dataRef.
17 """Provide a butler to the Task constructor.
Tuple of a list of data references and kwargs (unused)
Thrown if both `parsedCmd` and `args` are `None`
if parsedCmd is not None:
    butler = parsedCmd.butler
elif args is not None:
    dataRefList, kwargs = args
    butler = dataRefList[0].getButler()
else:
    raise RuntimeError("Neither parsedCmd nor args specified")
# Hand the butler through to the task constructor, as documented above
# (TaskClass, config and log are standard TaskRunner attributes).
return self.TaskClass(config=self.config, log=self.log, butler=butler)
42 """Build a hierarchical dictionary of patch references
A reference dictionary of the form {tract: {patch: {filter: dataRef}}}
Thrown when multiple references are provided for the same
combination of tract, patch and filter
refDict = {}  # will be indexed as refDict[tract][patch][filter] = ref
for ref in parsedCmd.id.refList:
    tract = ref.dataId["tract"]
    patch = ref.dataId["patch"]
    filter = ref.dataId["filter"]
    if tract not in refDict:
        refDict[tract] = {}
    if patch not in refDict[tract]:
        refDict[tract][patch] = {}
    if filter in refDict[tract][patch]:
        raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
    refDict[tract][patch][filter] = ref
return refDict
76 """Provide a list of patch references for each patch, tract, filter combo.
Keyword arguments passed to the task
List of tuples, where each tuple is a (list of dataRefs, kwargs) pair.
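# A sketch of the flattening performed below (hypothetical data IDs): a nested refDict of the
# form {tract: {patch: {filter: dataRef}}}, e.g.
#   {9813: {"4,4": {"g": gRef, "r": rRef, "i": iRef}}},
# becomes one target per patch, [([gRef, rRef, iRef], kwargs)], so each task invocation
# receives all of that patch's per-band references together.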
refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]
def _makeGetSchemaCatalogs(datasetSuffix):
95 """Construct a getSchemaCatalogs instance method
97 These are identical for most of the classes here, so we'll consolidate
100 datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        # Empty catalog carrying the task's output schema (self.schema is assumed to be
        # set up by the task before this is called).
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
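

# Illustrative use of this factory (hypothetical task class shown): the merge/measurement task
# classes would bind the generated method as a class attribute, e.g.
#
#     class ExampleMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("meas")
#
# so the task advertises an (empty) "<coaddName>Coadd_meas" schema catalog to the butler.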
@brief Create a suitable ArgumentParser.

We will use the ArgumentParser to provide a list of data
references for patches; the RunnerClass will sort them into lists
of data references for the same patch.
parser.add_id_argument("--id", "deepCoadd_" + dataset,
                       ContainerClass=ExistingCoaddDataIdContainer,
                       help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
@brief Obtain the input schema either directly or from a butler reference.

@param[in]  butler   butler reference to obtain the input schema from
@param[in]  schema   the input schema
if schema is None:
    assert butler is not None, "Neither butler nor schema specified"
    schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                        immediate=True).schema
return schema
142 """Given a longer, camera-specific filter name (e.g. "HSC-I") return its shorthand name ("i").
@brief Read input catalog.

We read the input dataset provided by the 'inputDataset' class variable.

@param[in]  patchRef   data reference for patch
@return tuple consisting of the filter name and the catalog
filterName = patchRef.dataId["filter"]
catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
task.log.info("Read %d sources for filter %s: %s" % (len(catalog), filterName, patchRef.dataId))
return filterName, catalog
class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
     - nBands: the number of bands in which the peak was detected
     - peakRank: the position of the peak within its family, sorted from brightest to faintest.
     - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
        AND (peakRank >= rankSufficient)
        AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1. (An illustrative helper expressing
    this condition is sketched after the config fields below.)
    """
    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))
def _makeMakeIdFactory(datasetName):
205 """Construct a makeIdFactory instance method
207 These are identical for all the classes here, so this consolidates
210 datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory
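

# Illustrative use of this factory (hypothetical task class): a coadd processing task would
# bind the generated method as a class attribute, e.g. makeIdFactory = _makeMakeIdFactory("CoaddId").
# The resulting IdFactory packs IDs as (coadd ID << reserved bits) + running counter; for example,
# if "deepCoaddId_bits" is 34, then 64 - 34 = 30 bits are reserved for per-source counters,
# keeping source IDs unique across patches.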