31 """Task runner for `MergeDetectionTask` `MergeMeasurementTask`
33 Required because the run method requires a list of
34 dataRefs rather than a single dataRef.
37 """Provide a butler to the Task constructor.
44 Tuple of a list of data references and kwargs (un-used)
49 Thrown
if both `parsedCmd` & `args` are `
None`
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)
62 """Build a hierarchical dictionary of patch references
72 A reference dictionary of the form {patch: {tract: {filter: dataRef}}}
77 Thrown when multiple references are provided for the same
78 combination of tract, patch
and filter
        refDict = {}  # Indexed as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict
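    # Illustrative sketch (tract/patch/filter values hypothetical): for a
    # parsed command whose id.refList covers one tract and patch observed in
    # bands g and r, the loop above yields
    #     {9813: {"5,5": {"g": <dataRef g>, "r": <dataRef r>}}}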
96 """Provide a list of patch references for each patch, tract, filter combo.
103 Keyword arguments passed to the task
108 List of tuples, where each tuple is a (dataRef, kwargs) pair.
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]
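    # Continuing the hypothetical refDict sketched above, the comprehension
    # flattens to one tuple per (tract, patch) combination:
    #     [([<dataRef g>, <dataRef r>], kwargs)]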
def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    Parameters
    ----------
    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
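# Hypothetical usage sketch: a task class attaches the generated method as a
# class attribute; ExampleMergeTask and the "det" suffix are illustrative.
#
#     class ExampleMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("det")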
def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in]  task    the task whose input schema is desired
    @param[in]  butler  butler reference to obtain the input schema from
    @param[in]  schema  the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
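# Illustrative call (hypothetical context): a merge task constructor might
# resolve its schema with
#
#     self.schema = getInputSchema(self, butler=butler, schema=schema)
#
# using whichever of butler/schema it was constructed with.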
def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset' class variable.

    @param[in]  task      the task whose input catalog is desired
    @param[in]  patchRef  data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s", len(catalog), band, patchRef.dataId)
    return band, catalog
class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
     - nBands: the number of bands in which the peak was detected
     - peakRank: the position of the peak within its family, sorted from brightest to faintest.
     - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

        nBands < nBandsSufficient
        AND (rank >= rankSufficient)
        AND (rank >= rankConsidered OR rankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """
203 doc="Always keep peaks detected in this many bands")
205 doc=
"Always keep this many peaks in each family")
207 doc=(
"Keep peaks with less than this rank that also match the "
208 "rankNormalizedConsidered condition."))
209 rankNormalizedConsidered =
RangeField(dtype=float, default=0.7, min=0.0,
210 doc=(
"Keep peaks with less than this normalized rank that"
211 " also match the rankConsidered condition."))
def _makeMakeIdFactory(datasetName, includeBand=True):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    Parameters
    ----------
    datasetName: Dataset name without the coadd name prefix, e.g.,
        "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        # getGen3CoaddExposureId is defined elsewhere in this module.
        expId = getGen3CoaddExposureId(dataRef, coaddName=self.config.coaddName,
                                       includeBand=includeBand, log=self.log)
        info = ExposureIdInfo(expId, dataRef.get(self.config.coaddName + datasetName + "_bits"))
        return info.makeSourceIdFactory()
    return makeIdFactory
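# Hypothetical usage sketch: a task class generates its method with, e.g.,
#
#     class ExampleMergeTask(CmdLineTask):
#         makeIdFactory = _makeMakeIdFactory("MergedCoaddId")
#
# (ExampleMergeTask and "MergedCoaddId" are illustrative names.)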