LSST Applications g0fba68d861+5b9ba6aee1,g1ec0fe41b4+e220e2fb2f,g1fd858c14a+120b017347,g35bb328faa+fcb1d3bbc8,g4d2262a081+57a06a8609,g53246c7159+fcb1d3bbc8,g56a49b3a55+8d793c2a3d,g60b5630c4e+4e8d433789,g60dcce3b99+6eff471efc,g67b6fd64d1+fad15079a7,g78460c75b0+2f9a1b4bcd,g786e29fd12+cf7ec2a62a,g8180f54f50+65cb53bb37,g8352419a5c+fcb1d3bbc8,g8852436030+ae791ba189,g89139ef638+fad15079a7,g9125e01d80+fcb1d3bbc8,g94187f82dc+4e8d433789,g989de1cb63+fad15079a7,g9ccd5d7f00+cce09d2c12,g9d31334357+4e8d433789,g9f33ca652e+323fd354f8,gabe3b4be73+1e0a283bba,gabf8522325+94c30d56e9,gb1101e3267+5e0f808207,gb58c049af0+f03b321e39,gb89ab40317+fad15079a7,gc0af124501+a88dc73679,gcf25f946ba+ae791ba189,gd6cbbdb0b4+8d7f1baacb,gdb1c4ca869+16879ca1a6,gde0f65d7ad+11b49afd66,ge1ad929117+4e8d433789,ge278dab8ac+4d6e48c014,ge410e46f29+fad15079a7,gf5e32f922b+fcb1d3bbc8,gf618743f1b+8ff1364817,gf67bdafdda+fad15079a7,w.2025.17
LSST Data Management Base Package
|
Classes | |
class | MergeDetectionsConnections |
Functions | |
matchCatalogsExact (catalog1, catalog2, patch1=None, patch2=None) | |
Variables | |
schema : `lsst.afw.table.Schema`, optional | |
initInputs : `dict`, optional | |
catalogs : `lsst.afw.table.SourceCatalog` | |
mergedList : `lsst.afw.table.SourceCatalog` | |
result : `lsst.pipe.base.Struct` | |
catalog : `lsst.afw.table.SourceCatalog` | |
mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key | |
list | converted = [] |
peak = oldFoot.getPeaks()[0] | |
newFoot = afwDetect.Footprint(oldFoot.spans, schema) | |
lsst.pipe.tasks.mergeDetections.matchCatalogsExact | ( | catalog1, | |
catalog2, | |||
patch1 = None, | |||
patch2 = None ) |
Match two catalogs derived from the same mergeDet catalog. When testing downstream features, like deblending methods/parameters and measurement algorithms/parameters, it is useful to compare the same sources in two catalogs. In most cases this must be done by matching on either RA/DEC or XY positions, which occasionally will mismatch one source with another. For a more robust solution, as long as the downstream catalog is derived from the same mergeDet catalog, exact source matching can be done via the unique ``(parent, deblend_peakID)`` combination. So this function performs this exact matching for all sources in both catalogs. Parameters ---------- catalog1, catalog2 : `lsst.afw.table.SourceCatalog` The two catalogs to merge patch1, patch2 : `array` of `int` Patch for each row, converted into an integer. Returns ------- result : `list` of `lsst.afw.table.SourceMatch` List of matches for each source (using an inner join).
Definition at line 42 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.catalog : `lsst.afw.table.SourceCatalog` |
# Convert distance to tract coordinate tractWcs = skyInfo.wcs peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds() samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds() # Put catalogs, filters in priority order orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()] orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()] mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance, self.schema, idFactory, samePeakDistance) # # Add extra sources that correspond to blank sky # skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed) if skySourceFootprints: key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key for foot in skySourceFootprints: s = mergedList.addNew() s.setFootprint(foot) s.set(key, True) # Sort Peaks from brightest to faintest for record in mergedList: record.getFootprint().sortPeaks() self.log.info("Merged to %d sources", len(mergedList)) # Attempt to remove garbage peaks self.cullPeaks(mergedList) return Struct(outputCatalog=mergedList) def cullPeaks(self, catalog):
Definition at line 334 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.catalogs : `lsst.afw.table.SourceCatalog` |
# NOTE(review): Doxygen fragment — class-body members of MergeDetectionsTask
# (class statement itself is outside this snippet); ends with the dangling
# header of ``run``, whose body appears in a separate snippet.

# Task configuration class and default task name (pipe_base conventions).
ConfigClass = MergeDetectionsConfig
_DefaultName = "mergeCoaddDetections"

def __init__(self, schema=None, initInputs=None, **kwargs):
    """Initialize the merge-detections task.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`, optional
        Input schema; ignored when ``initInputs`` supplies one.
    initInputs : `dict`, optional
        If not `None`, ``initInputs['schema'].schema`` overrides ``schema``.
    **kwargs
        Forwarded to the base-class constructor.

    Raises
    ------
    ValueError
        If neither ``schema`` nor ``initInputs`` provides a schema.
    """
    super().__init__(**kwargs)
    # A schema delivered through initInputs takes precedence.
    if initInputs is not None:
        schema = initInputs['schema'].schema
    if schema is None:
        raise ValueError("No input schema or initInputs['schema'] provided.")
    self.schema = schema
    self.makeSubtask("skyObjects")
    # Merge over the configured priority bands plus the synthetic sky filter.
    filterNames = list(self.config.priorityList)
    filterNames.append(self.config.skyFilterName)
    self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    # Fetch butler inputs, derive deterministic ids, and re-key the input
    # catalogs by band before delegating to run().
    inputs = butlerQC.get(inputRefs)
    idGenerator = self.config.idGenerator.apply(butlerQC.quantum.dataId)
    inputs["skySeed"] = idGenerator.catalog_id
    inputs["idFactory"] = idGenerator.make_table_id_factory()
    catalogDict = {ref.dataId['band']: cat for ref, cat in zip(inputRefs.catalogs,
                                                               inputs['catalogs'])}
    inputs['catalogs'] = catalogDict
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    tractNumber = inputRefs.catalogs[0].dataId['tract']
    tractInfo = skyMap[tractNumber]
    patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
    # Bundle geometry so run() does not need butler access.
    skyInfo = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox()
    )
    inputs['skyInfo'] = skyInfo
    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)

def run(self, catalogs, skyInfo, idFactory, skySeed):
Definition at line 284 of file mergeDetections.py.
list lsst.pipe.tasks.mergeDetections.converted = [] |
Definition at line 383 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.initInputs : `dict`, optional |
Definition at line 220 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.mergedList : `lsst.afw.table.SourceCatalog` |
Definition at line 286 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key |
Definition at line 382 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema) |
Definition at line 387 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0] |
Definition at line 386 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.result : `lsst.pipe.base.Struct` |
Definition at line 291 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.schema : `lsst.afw.table.Schema`, optional |
# NOTE(review): Doxygen fragment — body of MergeDetectionsTask.cullPeaks (its
# ``def`` header appears in an earlier snippet), followed by the dangling
# header of getSkySourceFootprints.

# Per-band "merge_peak_*" flag keys: which bands detected each peak.
keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
totalPeaks = 0
culledPeaks = 0
for parentSource in catalog:
    # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
    # to it (which is easier than deleting as we iterate).
    keptPeaks = parentSource.getFootprint().getPeaks()
    oldPeaks = list(keptPeaks)
    keptPeaks.clear()
    familySize = len(oldPeaks)
    totalPeaks += familySize
    for rank, peak in enumerate(oldPeaks):
        # Keep a peak if any of: it is bright enough (low rank, peaks are
        # pre-sorted brightest-first), it was detected in enough bands, or it
        # falls within the absolute and family-size-normalized rank limits.
        if ((rank < self.config.cullPeaks.rankSufficient)
                or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
                or (rank < self.config.cullPeaks.rankConsidered
                    and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
            keptPeaks.append(peak)
        else:
            culledPeaks += 1
self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)

def getSkySourceFootprints(self, mergedList, skyInfo, seed):
Definition at line 218 of file mergeDetections.py.