import lsst.pex.config
import lsst.afw.table
import lsst.coadd.utils
import lsst.pipe.base as pipeBase

from .references import MultiBandReferencesTask
from .forcedMeasurement import ForcedMeasurementTask
from .applyApCorr import ApplyApCorrTask
from .catalogCalculation import CatalogCalculationTask

__all__ = ("ForcedPhotCoaddConfig", "ForcedPhotCoaddTask")


class ForcedPhotCoaddRunner(pipeBase.ButlerInitializedTaskRunner):
    """Get the psfCache setting into ForcedPhotCoaddTask."""

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        return pipeBase.ButlerInitializedTaskRunner.getTargetList(parsedCmd,
                                                                  psfCache=parsedCmd.psfCache)


class ForcedPhotCoaddConnections(pipeBase.PipelineTaskConnections,
                                 dimensions=("band", "skymap", "tract", "patch"),
                                 defaultTemplates={"inputCoaddName": "deep",
                                                   "outputCoaddName": "deep"}):
    inputSchema = pipeBase.connectionTypes.InitInput(
        doc="Schema for the input measurement catalogs.",
        name="{inputCoaddName}Coadd_ref_schema",
        storageClass="SourceCatalog",
    )
    outputSchema = pipeBase.connectionTypes.InitOutput(
        doc="Schema for the output forced measurement catalogs.",
        name="{outputCoaddName}Coadd_forced_src_schema",
        storageClass="SourceCatalog",
    )
    exposure = pipeBase.connectionTypes.Input(
        doc="Input exposure to perform photometry on.",
        name="{inputCoaddName}Coadd",
        storageClass="ExposureF",
        dimensions=["band", "skymap", "tract", "patch"],
    )
    refCat = pipeBase.connectionTypes.Input(
        doc="Catalog of shapes and positions at which to force photometry.",
        name="{inputCoaddName}Coadd_ref",
        storageClass="SourceCatalog",
        dimensions=["skymap", "tract", "patch"],
    )
    refCatInBand = pipeBase.connectionTypes.Input(
        doc="Catalog of shapes and positions in the band having forced photometry done.",
        name="{inputCoaddName}Coadd_meas",
        storageClass="SourceCatalog",
        dimensions=("band", "skymap", "tract", "patch"),
    )
    refWcs = pipeBase.connectionTypes.Input(
        doc="Reference world coordinate system.",
        name="{inputCoaddName}Coadd.wcs",
        storageClass="Wcs",
        dimensions=["band", "skymap", "tract", "patch"],
    )
    measCat = pipeBase.connectionTypes.Output(
        doc="Output forced photometry catalog.",
        name="{outputCoaddName}Coadd_forced_src",
        storageClass="SourceCatalog",
        dimensions=["band", "skymap", "tract", "patch"],
    )


class ForcedPhotCoaddConfig(pipeBase.PipelineTaskConfig,
                            pipelineConnections=ForcedPhotCoaddConnections):
    references = lsst.pex.config.ConfigurableField(
        target=MultiBandReferencesTask,
        doc="subtask to retrieve reference source catalog"
    )
    measurement = lsst.pex.config.ConfigurableField(
        target=ForcedMeasurementTask,
        doc="subtask to do forced measurement"
    )
    coaddName = lsst.pex.config.Field(
        doc="coadd name: typically one of deep or goodSeeing",
        dtype=str,
        default="deep",
    )
    doApCorr = lsst.pex.config.Field(
        dtype=bool,
        default=True,
        doc="Run subtask to apply aperture corrections"
    )
    applyApCorr = lsst.pex.config.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )
    catalogCalculation = lsst.pex.config.ConfigurableField(
        target=CatalogCalculationTask,
        doc="Subtask to run catalogCalculation plugins on catalog"
    )
    footprintDatasetName = lsst.pex.config.Field(
        doc="Dataset (without coadd prefix) that should be used to obtain (Heavy)Footprints for sources. "
            "Must have IDs that match those of the reference catalog. "
            "If None, Footprints will be generated by transforming the reference Footprints.",
        dtype=str,
        default="meas",
        optional=True
    )
    hasFakes = lsst.pex.config.Field(
        dtype=bool,
        default=False,
        doc="Should be set to True if fake sources have been inserted into the input data."
    )

    def setDefaults(self):
        # Make catalogCalculation a no-op by default, as no modelFlux is set
        # up by default in ForcedMeasurementTask.
        super().setDefaults()
        self.catalogCalculation.plugins.names = []
        self.measurement.copyColumns["id"] = "id"
        self.measurement.copyColumns["parent"] = "parent"
        self.references.removePatchOverlaps = False
        self.measurement.plugins.names |= ['base_InputCount', 'base_Variance']
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['CLIPPED', 'SENSOR_EDGE',
                                                                       'REJECTED', 'INEXACT_PSF']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['CLIPPED', 'SENSOR_EDGE',
                                                                     'REJECTED', 'INEXACT_PSF']

    def validate(self):
        super().validate()
        if (self.measurement.doReplaceWithNoise and self.footprintDatasetName is not None
                and self.references.removePatchOverlaps):
            raise ValueError("Cannot use removePatchOverlaps=True with deblended footprints, as parent "
                             "sources may be rejected while their children are not.")


class ForcedPhotCoaddTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """A command-line driver for performing forced measurement on coadd images.

    Parameters
    ----------
    butler : `lsst.daf.persistence.butler.Butler`, optional
        A Butler which will be passed to the references subtask to allow it to
        load its schema from disk. Optional, but must be specified if
        ``refSchema`` is not; if both are specified, ``refSchema`` takes
        precedence.
    refSchema : `lsst.afw.table.Schema`, optional
        The schema of the reference catalog, passed to the constructor of the
        references subtask. Optional, but must be specified if ``butler`` is
        not; if both are specified, ``refSchema`` takes precedence.
    **kwds
        Keyword arguments are passed to the supertask constructor.
    """

    ConfigClass = ForcedPhotCoaddConfig
    RunnerClass = ForcedPhotCoaddRunner
    _DefaultName = "forcedPhotCoadd"
    dataPrefix = "deepCoadd_"

    def __init__(self, butler=None, refSchema=None, initInputs=None, **kwds):
        super().__init__(**kwds)

        if initInputs is not None:
            refSchema = initInputs['inputSchema'].schema

        self.makeSubtask("references", butler=butler, schema=refSchema)
        if refSchema is None:
            refSchema = self.references.schema
        self.makeSubtask("measurement", refSchema=refSchema)
        # The downstream subtasks are constructed with the schema owned by the
        # forced measurement subtask.
        if self.config.doApCorr:
            self.makeSubtask("applyApCorr", schema=self.measurement.schema)
        self.makeSubtask('catalogCalculation', schema=self.measurement.schema)
        self.outputSchema = lsst.afw.table.SourceCatalog(self.measurement.schema)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        refCatInBand = inputs.pop('refCatInBand')
        inputs['measCat'], inputs['exposureId'] = self.generateMeasCat(inputRefs.exposure.dataId,
                                                                       inputs['exposure'],
                                                                       inputs['refCat'],
                                                                       refCatInBand,
                                                                       inputs['refWcs'],
                                                                       "tract_patch")
        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)
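
    # Note on the quantum inputs handled above: ``butlerQC.get(inputRefs)``
    # returns a dict keyed by the connection names ("exposure", "refCat",
    # "refCatInBand", "refWcs").  ``refCatInBand`` is popped because ``run``
    # does not accept it; the generated ``measCat`` and ``exposureId`` are
    # added to ``inputs`` before ``run(**inputs)`` is called.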

    def generateMeasCat(self, exposureDataId, exposure, refCat, refCatInBand, refWcs, idPackerName):
        """Generate a measurement catalog for Gen3.

        Parameters
        ----------
        exposureDataId : `DataId`
            Butler dataId for this exposure.
        exposure : `lsst.afw.image.exposure.Exposure`
            Exposure to generate the catalog for.
        refCat : `lsst.afw.table.SourceCatalog`
            Catalog of shapes and positions at which to force photometry.
        refCatInBand : `lsst.afw.table.SourceCatalog`
            Catalog of shapes and positions in the band in which forced
            photometry is currently being performed.
        refWcs : `lsst.afw.image.SkyWcs`
            Reference world coordinate system.
        idPackerName : `str`
            Type of ID packer to construct from the registry.

        Returns
        -------
        measCat : `lsst.afw.table.SourceCatalog`
            Catalog of forced sources to measure.
        expId : `int`
            Unique binary ID associated with the input exposure.

        Raises
        ------
        LookupError
            Raised if a footprint with a given source id was in the reference
            catalog but not in the reference catalog in band (meaning there
            was some sort of mismatch in the two input catalogs).
        """
        expId, expBits = exposureDataId.pack(idPackerName, returnMaxBits=True)
        idFactory = lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)

        measCat = self.measurement.generateMeasCat(exposure, refCat, refWcs,
                                                   idFactory=idFactory)
        # Attach footprints from the in-band measurement catalog, matching on
        # source ID.
        for srcRecord in measCat:
            fpRecord = refCatInBand.find(srcRecord.getId())
            if fpRecord is None:
                raise LookupError("Cannot find Footprint for source {}; please check that {} "
                                  "IDs are compatible with reference source IDs"
                                  .format(srcRecord.getId(), self.config.connections.refCatInBand))
            srcRecord.setFootprint(fpRecord.getFootprint())
        return measCat, expId
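
    # Worked example of the ID packing used in ``generateMeasCat`` (numbers are
    # hypothetical): if ``exposureDataId.pack("tract_patch", returnMaxBits=True)``
    # returned ``expId=1234`` with ``expBits=26``, then
    # ``lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)`` stores 1234 in
    # the upper 26 bits of every 64-bit source ID and leaves the lower 38 bits
    # for a running per-source counter, so all IDs generated for this exposure
    # share the prefix ``1234 << 38``.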

    def runDataRef(self, dataRef, psfCache=None):
        """Perform forced measurement on a single exposure.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Passed to the ``references`` subtask to obtain the reference WCS,
            the ``getExposure`` method (implemented by derived classes) to
            read the measurement image, and the ``fetchReferences`` method to
            get the exposure and load the reference catalog (see
            :lsst-task:`lsst.meas.base.references.CoaddSrcReferencesTask`).
            Refer to derived class documentation for details of the datasets
            and data ID keys which are used.
        psfCache : `int`, optional
            Size of PSF cache, or `None`. The size of the PSF cache can have
            a significant effect upon the runtime for complicated PSF models.

        Notes
        -----
        Sources are generated with ``generateMeasCat`` in the ``measurement``
        subtask. These are passed to ``measurement``'s ``run`` method, which
        fills the source catalog with the forced measurement results. The
        sources are then passed to the ``writeOutputs`` method (implemented by
        derived classes) which writes the outputs.
        """
        refWcs = self.references.getWcs(dataRef)
        exposure = self.getExposure(dataRef)
        if psfCache is not None:
            exposure.getPsf().setCacheCapacity(psfCache)
        refCat = self.fetchReferences(dataRef, exposure)

        measCat = self.measurement.generateMeasCat(exposure, refCat, refWcs,
                                                   idFactory=self.makeIdFactory(dataRef))
        self.log.info("Performing forced measurement on %s" % (dataRef.dataId,))
        self.attachFootprints(measCat, refCat, exposure, refWcs, dataRef)

        exposureId = self.getExposureId(dataRef)

        forcedPhotResult = self.run(measCat, exposure, refCat, refWcs, exposureId=exposureId)

        self.writeOutput(dataRef, forcedPhotResult.measCat)

    def run(self, measCat, exposure, refCat, refWcs, exposureId=None):
        """Perform forced measurement on a single exposure.

        Parameters
        ----------
        measCat : `lsst.afw.table.SourceCatalog`
            The measurement catalog, based on the sources listed in the
            reference catalog.
        exposure : `lsst.afw.image.Exposure`
            The measurement image upon which to perform forced detection.
        refCat : `lsst.afw.table.SourceCatalog`
            The reference catalog of sources to measure.
        refWcs : `lsst.afw.image.SkyWcs`
            The WCS for the references.
        exposureId : `int`, optional
            Optional unique exposureId used for random seed in the measurement
            task.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Structure with fields:

            ``measCat``
                Catalog of forced measurement results
                (`lsst.afw.table.SourceCatalog`).
        """
        self.measurement.run(measCat, exposure, refCat, refWcs, exposureId=exposureId)
        if self.config.doApCorr:
            self.applyApCorr.run(
                catalog=measCat,
                apCorrMap=exposure.getInfo().getApCorrMap(),
            )
        self.catalogCalculation.run(measCat)

        return pipeBase.Struct(measCat=measCat)
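
    # Minimal end-to-end usage sketch (hypothetical: assumes an existing
    # ``butler`` and a coadd-level ``dataId``; dataset names follow the default
    # connection templates defined above):
    #
    #   exposure = butler.get("deepCoadd", dataId)
    #   refCat = butler.get("deepCoadd_ref", dataId)
    #   refCatInBand = butler.get("deepCoadd_meas", dataId)
    #   refWcs = butler.get("deepCoadd.wcs", dataId)
    #   task = ForcedPhotCoaddTask(refSchema=refCat.schema)
    #   measCat, expId = task.generateMeasCat(dataId, exposure, refCat,
    #                                         refCatInBand, refWcs, "tract_patch")
    #   result = task.run(measCat, exposure, refCat, refWcs, exposureId=expId)
    #   forcedCat = result.measCat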

    def makeIdFactory(self, dataRef):
        """Create an object that generates globally unique source IDs.

        Source IDs are created based on a per-CCD ID and the ID of the CCD
        itself.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference. The "CoaddId_bits" and "CoaddId" datasets
            are accessed. The data ID must have tract and patch keys.
        """
        # With the default configuration this IdFactory has no visible effect,
        # because the IDs it generates are immediately overwritten by the
        # reference-catalog IDs copied in via config.measurement.copyColumns
        # (see ForcedPhotCoaddConfig.setDefaults).
        expBits = dataRef.get(self.config.coaddName + "CoaddId_bits")
        expId = int(dataRef.get(self.config.coaddName + "CoaddId"))
        return lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)

    def getExposureId(self, dataRef):
        return int(dataRef.get(self.config.coaddName + "CoaddId"))

    def fetchReferences(self, dataRef, exposure):
        """Return an iterable of reference sources which overlap the exposure.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference corresponding to the image to be measured;
            should have tract, patch, and filter keys.
        exposure : `lsst.afw.image.Exposure`
            Unused.

        Notes
        -----
        All work is delegated to the references subtask; see
        `CoaddSrcReferencesTask` for information about the default behavior.
        """
        skyMap = dataRef.get(self.dataPrefix + "skyMap", immediate=True)
        tractInfo = skyMap[dataRef.dataId["tract"]]
        patch = tuple(int(v) for v in dataRef.dataId["patch"].split(","))
        patchInfo = tractInfo.getPatchInfo(patch)
        references = lsst.afw.table.SourceCatalog(self.references.schema)
        references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))
        return references

    def attachFootprints(self, sources, refCat, exposure, refWcs, dataRef):
        r"""Attach Footprints to source records.

        For coadd forced photometry, we use the deblended "heavy"
        `~lsst.afw.detection.Footprint`\ s from the single-band measurements
        of the same band - because we've guaranteed that the peaks (and hence
        child sources) will be consistent across all bands before we get to
        measurement, this should yield reasonable deblending for most sources.
        Its most likely limitation is that it will not provide good flux
        upper limits for sources that were not detected in this band but were
        blended with sources that were.
        """
        if self.config.footprintDatasetName is None:
            return self.measurement.attachTransformedFootprints(sources, refCat, exposure, refWcs)

        self.log.info("Loading deblended footprints for sources from %s, %s" %
                      (self.config.footprintDatasetName, dataRef.dataId))
        fpCat = dataRef.get("%sCoadd_%s" % (self.config.coaddName, self.config.footprintDatasetName),
                            immediate=True)
        for refRecord, srcRecord in zip(refCat, sources):
            fpRecord = fpCat.find(refRecord.getId())
            if fpRecord is None:
                raise LookupError("Cannot find Footprint for source %s; please check that %sCoadd_%s "
                                  "IDs are compatible with reference source IDs" %
                                  (srcRecord.getId(), self.config.coaddName,
                                   self.config.footprintDatasetName))
            srcRecord.setFootprint(fpRecord.getFootprint())
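
    # For example, with the defaults (``coaddName="deep"``,
    # ``footprintDatasetName="meas"``) the footprints above are read from
    # "%sCoadd_%s" % ("deep", "meas"), i.e. "deepCoadd_meas"; setting
    # ``footprintDatasetName=None`` falls back to transforming the reference
    # Footprints via ``attachTransformedFootprints``.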

    def getExposure(self, dataRef):
        """Read input exposure on which measurement will be performed.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference.
        """
        if self.config.hasFakes:
            name = "fakes_" + self.config.coaddName + "Coadd_calexp"
        else:
            name = self.config.coaddName + "Coadd_calexp"

        return dataRef.get(name) if dataRef.datasetExists(name) else None
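
    # For example, with ``hasFakes=True`` and the default ``coaddName="deep"``,
    # this reads the "fakes_deepCoadd_calexp" dataset; otherwise it reads
    # "deepCoadd_calexp".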

    def writeOutput(self, dataRef, sources):
        """Write forced source table.

        Parameters
        ----------
        dataRef : `lsst.daf.persistence.ButlerDataRef`
            Butler data reference. The forced_src dataset (with
            self.dataPrefix prepended) is all that will be modified.
        sources : `lsst.afw.table.SourceCatalog`
            Catalog of sources to save.
        """
        dataRef.put(sources, self.dataPrefix + "forced_src", flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS)

    def getSchemaCatalogs(self):
        """The schema catalogs that will be used by this task.

        Returns
        -------
        schemaCatalogs : `dict`
            Dictionary mapping dataset type to schema catalog.

        Notes
        -----
        There is only one schema for each type of forced measurement. The
        dataset type for this measurement is defined in the mapper.
        """
        catalog = lsst.afw.table.SourceCatalog(self.measurement.schema)
        catalog.getTable().setMetadata(self.measurement.algMetadata)
        datasetType = self.dataPrefix + "forced_src"
        return {datasetType: catalog}

    def _getConfigName(self):
        return self.dataPrefix + "forced_config"

    def _getMetadataName(self):
        return self.dataPrefix + "forced_metadata"

    @classmethod
    def _makeArgumentParser(cls):
        parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", "deepCoadd_forced_src", help="data ID, with raw CCD keys + tract",
                               ContainerClass=lsst.coadd.utils.CoaddDataIdContainer)
        parser.add_argument("--psfCache", type=int, default=100, help="Size of CoaddPsf cache")
        return parser
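

# Illustrative sketch only (not used by the task): a sample configuration that
# exercises the fields defined in ForcedPhotCoaddConfig above.  The override
# values are hypothetical.
def _exampleForcedPhotCoaddConfig():
    config = ForcedPhotCoaddConfig()
    config.coaddName = "goodSeeing"                     # typically "deep" or "goodSeeing"
    config.connections.outputCoaddName = "goodSeeing"   # output becomes "goodSeeingCoadd_forced_src"
    config.footprintDatasetName = None                  # transform reference Footprints instead of "meas"
    config.doApCorr = False                             # skip the aperture-correction subtask
    config.validate()                                   # raises ValueError if settings conflict
    return config

# Gen2 command-line sketch (repository path, data ID values, and the driver
# script name are hypothetical; the script follows the usual CmdLineTask
# naming convention):
#
#   forcedPhotCoadd.py /path/to/repo --id tract=0 patch=1,1 filter=HSC-I --psfCache 100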