|
LSST Applications g00d0e8bbd7+edbf708997,g03191d30f7+9ce8016dbd,g1955dfad08+0bd186d245,g199a45376c+5137f08352,g1fd858c14a+a888a50aa2,g262e1987ae+45f9aba685,g29ae962dfc+1c7d47a24f,g2cef7863aa+73c82f25e4,g35bb328faa+edbf708997,g3fd5ace14f+eed17d2c67,g47891489e3+6dc8069a4c,g53246c7159+edbf708997,g64539dfbff+c4107e45b5,g67b6fd64d1+6dc8069a4c,g74acd417e5+f452e9c21a,g786e29fd12+af89c03590,g7ae74a0b1c+a25e60b391,g7aefaa3e3d+2025e9ce17,g7cc15d900a+2d158402f9,g87389fa792+a4172ec7da,g89139ef638+6dc8069a4c,g8d4809ba88+c4107e45b5,g8d7436a09f+e96c132b44,g8ea07a8fe4+db21c37724,g98df359435+aae6d409c1,ga2180abaac+edbf708997,gac66b60396+966efe6077,gb632fb1845+88945a90f8,gbaa8f7a6c5+38b34f4976,gbf99507273+edbf708997,gca7fc764a6+6dc8069a4c,gd7ef33dd92+6dc8069a4c,gda68eeecaf+7d1e613a8d,gdab6d2f7ff+f452e9c21a,gdbb4c4dda9+c4107e45b5,ge410e46f29+6dc8069a4c,ge41e95a9f2+c4107e45b5,geaed405ab2+e194be0d2b,w.2025.47
LSST Data Management Base Package
|
Classes | |
| class | MergeDetectionsConnections |
Functions | |
| matchCatalogsExact (catalog1, catalog2, patch1=None, patch2=None) | |
Variables | |
| schema : `lsst.afw.table.Schema`, optional | |
| initInputs : `dict`, optional | |
| catalogs : `lsst.afw.table.SourceCatalog` | |
| mergedList : `lsst.afw.table.SourceCatalog` | |
| result : `lsst.pipe.base.Struct` | |
| catalog : `lsst.afw.table.SourceCatalog` | |
| mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key | |
| list | converted = [] |
| peak = oldFoot.getPeaks()[0] | |
| newFoot = afwDetect.Footprint(oldFoot.spans, schema) | |
| lsst.pipe.tasks.mergeDetections.matchCatalogsExact | ( | catalog1, | |
| catalog2, | |||
| patch1 = None, | |||
| patch2 = None ) |
Match two catalogs derived from the same mergeDet catalog.
When testing downstream features, like deblending methods/parameters
and measurement algorithms/parameters, it is useful to compare
the same sources in two catalogs. In most cases this must be done
by matching on either RA/DEC or XY positions, which occasionally
will mismatch one source with another.
For a more robust solution, as long as the downstream catalog is
derived from the same mergeDet catalog, exact source matching
can be done via the unique ``(parent, deblend_peakID)``
combination. So this function performs this exact matching for
all sources in both catalogs.
Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
The two catalogs to merge
patch1, patch2 : `array` of `int`
Patch for each row, converted into an integer.
Returns
-------
result : `list` of `lsst.afw.table.SourceMatch`
List of matches for each source (using an inner join).
Definition at line 42 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.catalog : `lsst.afw.table.SourceCatalog` |
# Convert distance to tract coordinate
# (config values are in arcseconds; divide by the tract pixel scale to
# get distances in tract pixels for the footprint merge below).
tractWcs = skyInfo.wcs
peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()
# Put catalogs, filters in priority order; bands missing from the input
# dict are simply skipped.
orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]
# Merge the per-band detection catalogs into a single source catalog;
# the two distances control peak association (exact semantics live in
# afw's FootprintMergeList — confirm there).
mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                self.schema, idFactory,
                                                samePeakDistance)
#
# Add extra sources that correspond to blank sky
#
skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
if skySourceFootprints:
    # Mark each appended sky source via the merge_footprint flag column
    # for the configured sky filter.
    key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
    for foot in skySourceFootprints:
        s = mergedList.addNew()
        s.setFootprint(foot)
        s.set(key, True)
# Sort Peaks from brightest to faintest
for record in mergedList:
    record.getFootprint().sortPeaks()
self.log.info("Merged to %d sources", len(mergedList))
# Attempt to remove garbage peaks
self.cullPeaks(mergedList)
return Struct(outputCatalog=mergedList)
def cullPeaks(self, catalog):
Definition at line 334 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.catalogs : `lsst.afw.table.SourceCatalog` |
# Configuration class consumed by this task.
ConfigClass = MergeDetectionsConfig
# Default label under which the task is registered/configured.
_DefaultName = "mergeCoaddDetections"
def __init__(self, schema=None, initInputs=None, **kwargs):
    """Initialize the merge task, resolving the detection schema.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`, optional
        Schema of the input detection catalogs.  Ignored when
        ``initInputs`` supplies one.
    initInputs : `dict`, optional
        Pipeline init inputs; when present, ``initInputs['schema'].schema``
        takes precedence over the ``schema`` argument.
    **kwargs
        Forwarded unchanged to the base-class constructor.

    Raises
    ------
    ValueError
        If neither ``schema`` nor ``initInputs`` provides a schema.
    """
    super().__init__(**kwargs)
    if initInputs is not None:
        schema = initInputs['schema'].schema
    if schema is None:
        raise ValueError("No input schema or initInputs['schema'] provided.")
    self.schema = schema
    self.makeSubtask("skyObjects")
    # Merge in configured priority order, with the sky filter last.
    bands = list(self.config.priorityList) + [self.config.skyFilterName]
    self.merged = afwDetect.FootprintMergeList(self.schema, bands)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())
def runQuantum(self, butlerQC, inputRefs, outputRefs):
    """Fetch inputs, build per-band catalogs and sky info, then run and store.

    Parameters
    ----------
    butlerQC : `lsst.pipe.base.QuantumContext`
        Butler interface for this quantum of work.
    inputRefs : `lsst.pipe.base.InputQuantizedConnection`
        Dataset references for the inputs.
    outputRefs : `lsst.pipe.base.OutputQuantizedConnection`
        Dataset references for the outputs.
    """
    inputs = butlerQC.get(inputRefs)
    # Deterministic IDs seeded from this quantum's data ID.
    idGenerator = self.config.idGenerator.apply(butlerQC.quantum.dataId)
    inputs["skySeed"] = idGenerator.catalog_id
    inputs["idFactory"] = idGenerator.make_table_id_factory()
    # Re-key the flat catalog list by band name.
    inputs["catalogs"] = {
        ref.dataId["band"]: catalog
        for ref, catalog in zip(inputRefs.catalogs, inputs["catalogs"])
    }
    skyMap = inputs.pop("skyMap")
    # Can use the first dataId to find the tract and patch being worked on
    firstDataId = inputRefs.catalogs[0].dataId
    tractInfo = skyMap[firstDataId["tract"]]
    patchInfo = tractInfo.getPatchInfo(firstDataId["patch"])
    inputs["skyInfo"] = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox(),
    )
    butlerQC.put(self.run(**inputs), outputRefs)
def run(self, catalogs, skyInfo, idFactory, skySeed):
Definition at line 284 of file mergeDetections.py.
| list lsst.pipe.tasks.mergeDetections.converted = [] |
Definition at line 383 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.initInputs : `dict`, optional |
Definition at line 220 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.mergedList : `lsst.afw.table.SourceCatalog` |
Definition at line 286 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key |
Definition at line 382 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema) |
Definition at line 387 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0] |
Definition at line 386 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.result : `lsst.pipe.base.Struct` |
Definition at line 291 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.schema : `lsst.afw.table.Schema`, optional |
# Flag keys of the form merge_peak_<band>; for a given peak, the number
# of set flags is the number of bands that detected it.
keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
totalPeaks = 0
culledPeaks = 0
for parentSource in catalog:
    # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
    # to it (which is easier than deleting as we iterate).
    keptPeaks = parentSource.getFootprint().getPeaks()
    oldPeaks = list(keptPeaks)
    keptPeaks.clear()
    familySize = len(oldPeaks)
    totalPeaks += familySize
    # Peaks were sorted brightest-first upstream, so `rank` is the peak's
    # brightness rank within its parent footprint.
    for rank, peak in enumerate(oldPeaks):
        # Keep a peak when any criterion holds: it ranks high enough
        # outright, it was detected in enough bands, or it passes both
        # the absolute and family-size-normalized rank cutoffs.
        if ((rank < self.config.cullPeaks.rankSufficient)
            or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
            or (rank < self.config.cullPeaks.rankConsidered
                and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
            keptPeaks.append(peak)
        else:
            culledPeaks += 1
self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)
def getSkySourceFootprints(self, mergedList, skyInfo, seed):
Definition at line 218 of file mergeDetections.py.