|
LSST Applications g00d0e8bbd7+8c5ae1fdc5,g013ef56533+603670b062,g083dd6704c+2e189452a7,g199a45376c+0ba108daf9,g1c5cce2383+bc9f6103a4,g1fd858c14a+cd69ed4fc1,g210f2d0738+c4742f2e9e,g262e1987ae+612fa42d85,g29ae962dfc+83d129e820,g2cef7863aa+aef1011c0b,g35bb328faa+8c5ae1fdc5,g3fd5ace14f+5eaa884f2a,g47891489e3+e32160a944,g53246c7159+8c5ae1fdc5,g5b326b94bb+dcc56af22d,g64539dfbff+c4742f2e9e,g67b6fd64d1+e32160a944,g74acd417e5+c122e1277d,g786e29fd12+668abc6043,g87389fa792+8856018cbb,g88cb488625+47d24e4084,g89139ef638+e32160a944,g8d7436a09f+d14b4ff40a,g8ea07a8fe4+b212507b11,g90f42f885a+e1755607f3,g97be763408+34be90ab8c,g98df359435+ec1fa61bf1,ga2180abaac+8c5ae1fdc5,ga9e74d7ce9+43ac651df0,gbf99507273+8c5ae1fdc5,gc2a301910b+c4742f2e9e,gca7fc764a6+e32160a944,gd7ef33dd92+e32160a944,gdab6d2f7ff+c122e1277d,gdb1e2cdc75+1b18322db8,ge410e46f29+e32160a944,ge41e95a9f2+c4742f2e9e,geaed405ab2+0d91c11c6d,w.2025.44
LSST Data Management Base Package
|
Classes | |
| class | MergeDetectionsConnections |
Functions | |
| matchCatalogsExact (catalog1, catalog2, patch1=None, patch2=None) | |
Variables | |
| schema : `lsst.afw.table.Schema`, optional | |
| initInputs : `dict`, optional | |
| catalogs : `lsst.afw.table.SourceCatalog` | |
| mergedList : `lsst.afw.table.SourceCatalog` | |
| result : `lsst.pipe.base.Struct` | |
| catalog : `lsst.afw.table.SourceCatalog` | |
| mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key | |
| list | converted = [] |
| peak = oldFoot.getPeaks()[0] | |
| newFoot = afwDetect.Footprint(oldFoot.spans, schema) | |
| lsst.pipe.tasks.mergeDetections.matchCatalogsExact | ( | catalog1, | |
| catalog2, | |||
| patch1 = None, | |||
| patch2 = None ) |
Match two catalogs derived from the same mergeDet catalog.
When testing downstream features, like deblending methods/parameters
and measurement algorithms/parameters, it is useful to compare
the same sources in two catalogs. In most cases this must be done
by matching on either RA/DEC or XY positions, which occasionally
will mismatch one source with another.
For a more robust solution, as long as the downstream catalog is
derived from the same mergeDet catalog, exact source matching
can be done via the unique ``(parent, deblend_peakID)``
combination. So this function performs this exact matching for
all sources in both catalogs.
Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
The two catalogs to merge
patch1, patch2 : `array` of `int`
Patch for each row, converted into an integer.
Returns
-------
result : `list` of `lsst.afw.table.SourceMatch`
List of matches for each source (using an inner join).
Definition at line 42 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.catalog : `lsst.afw.table.SourceCatalog` |
# NOTE(review): body of MergeDetectionsTask.run as rendered by the doc
# extractor; indentation has been flattened relative to the real source.
# Convert distance to tract coordinate
tractWcs = skyInfo.wcs
# The peak-matching radii are configured in arcseconds; dividing by the
# pixel scale expresses them in tract pixel units for the merge.
peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()
# Put catalogs, filters in priority order
orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]
mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
self.schema, idFactory,
samePeakDistance)
#
# Add extra sources that correspond to blank sky
#
skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
if skySourceFootprints:
# Mark each appended sky source via the merge_footprint flag column for
# the configured sky filter.
key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
for foot in skySourceFootprints:
s = mergedList.addNew()
s.setFootprint(foot)
s.set(key, True)
# Sort Peaks from brightest to faintest
for record in mergedList:
record.getFootprint().sortPeaks()
self.log.info("Merged to %d sources", len(mergedList))
# Attempt to remove garbage peaks
self.cullPeaks(mergedList)
return Struct(outputCatalog=mergedList)
def cullPeaks(self, catalog):
Definition at line 334 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.catalogs : `lsst.afw.table.SourceCatalog` |
# Task wiring used by the pipeline framework: the Config class that
# configures this task, and the default name it is registered under.
ConfigClass = MergeDetectionsConfig
_DefaultName = "mergeCoaddDetections"
def __init__(self, schema=None, initInputs=None, **kwargs):
    """Initialize the merge task.

    Parameters
    ----------
    schema : `lsst.afw.table.Schema`, optional
        Input schema for the merged catalog; ignored when ``initInputs``
        is supplied.
    initInputs : `dict`, optional
        If not `None`, ``initInputs['schema'].schema`` overrides ``schema``.
    **kwargs
        Forwarded to the base-class constructor.

    Raises
    ------
    ValueError
        If neither ``schema`` nor ``initInputs`` provides a schema.
    """
    super().__init__(**kwargs)
    # A schema handed in via initInputs (butler init) wins over the keyword.
    if initInputs is not None:
        schema = initInputs['schema'].schema
    if schema is None:
        raise ValueError("No input schema or initInputs['schema'] provided.")
    self.schema = schema
    self.makeSubtask("skyObjects")
    # Merge over the priority bands plus the pseudo-band for sky sources.
    mergeFilters = [*self.config.priorityList, self.config.skyFilterName]
    self.merged = afwDetect.FootprintMergeList(self.schema, mergeFilters)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())
def runQuantum(self, butlerQC, inputRefs, outputRefs):
    """Fetch inputs from the butler, build ``skyInfo``, run, and persist.

    Parameters
    ----------
    butlerQC : `lsst.pipe.base.QuantumContext`
        Butler context for this quantum.
    inputRefs, outputRefs : `lsst.pipe.base.connectionTypes` structs
        Dataset references for the quantum's inputs and outputs.
    """
    inputs = butlerQC.get(inputRefs)
    # Derive the sky seed and the source-id factory from the quantum data ID.
    idGen = self.config.idGenerator.apply(butlerQC.quantum.dataId)
    inputs["skySeed"] = idGen.catalog_id
    inputs["idFactory"] = idGen.make_table_id_factory()
    # Re-key the per-band input catalogs by band name.
    inputs['catalogs'] = {
        ref.dataId['band']: cat
        for ref, cat in zip(inputRefs.catalogs, inputs['catalogs'])
    }
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    firstDataId = inputRefs.catalogs[0].dataId
    tractInfo = skyMap[firstDataId['tract']]
    patchInfo = tractInfo.getPatchInfo(firstDataId['patch'])
    inputs['skyInfo'] = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox(),
    )
    butlerQC.put(self.run(**inputs), outputRefs)
def run(self, catalogs, skyInfo, idFactory, skySeed):
Definition at line 284 of file mergeDetections.py.
| list lsst.pipe.tasks.mergeDetections.converted = [] |
Definition at line 383 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.initInputs : `dict`, optional |
Definition at line 220 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.mergedList : `lsst.afw.table.SourceCatalog` |
Definition at line 286 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key |
Definition at line 382 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema) |
Definition at line 387 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0] |
Definition at line 386 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.result : `lsst.pipe.base.Struct` |
Definition at line 291 of file mergeDetections.py.
| lsst.pipe.tasks.mergeDetections.schema : `lsst.afw.table.Schema`, optional |
# NOTE(review): body of MergeDetectionsTask.cullPeaks as rendered by the doc
# extractor; indentation has been flattened relative to the real source.
# One merge_peak_* key per contributing band; a peak's flag records whether
# that band detected it.
keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
# NOTE(review): assert is stripped under `python -O`; an explicit raise
# would be more robust — confirm against project conventions.
assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
totalPeaks = 0
culledPeaks = 0
for parentSource in catalog:
# Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
# to it (which is easier than deleting as we iterate).
keptPeaks = parentSource.getFootprint().getPeaks()
oldPeaks = list(keptPeaks)
keptPeaks.clear()
familySize = len(oldPeaks)
totalPeaks += familySize
# rank is the position in the peak list, which run() sorts brightest-first
# via sortPeaks() before calling this method.
for rank, peak in enumerate(oldPeaks):
# Keep a peak if it is bright enough (low rank), detected in enough
# bands, or within both the absolute and family-size-normalized
# rank limits; otherwise cull it.
if ((rank < self.config.cullPeaks.rankSufficient)
or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
or (rank < self.config.cullPeaks.rankConsidered
and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
keptPeaks.append(peak)
else:
culledPeaks += 1
self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)
def getSkySourceFootprints(self, mergedList, skyInfo, seed):
Definition at line 218 of file mergeDetections.py.