LSST Data Management Base Package
Classes

    class MergeDetectionsConnections

Functions

    def matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)

    def write(self, patchRef, catalog)
        Write the output.

    def writeMetadata(self, dataRefList)
        No metadata to write, and not sure how to write it for a list of dataRefs.

Variables

    patch
    tract
    filter
    schema = self.merged.getPeakSchema()
    mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
    list converted = []
    peak = oldFoot.getPeaks()[0]
    newFoot = afwDetect.Footprint(oldFoot.spans, schema)
def lsst.pipe.tasks.mergeDetections.matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)
Match two catalogs derived from the same mergeDet catalog.

When testing downstream features, like deblending methods/parameters and
measurement algorithms/parameters, it is useful to compare the same sources
in two catalogs. In most cases this must be done by matching on either RA/Dec
or x/y positions, which occasionally mismatches one source with another. For
a more robust solution, as long as the downstream catalog is derived from the
same mergeDet catalog, exact source matching can be done via the unique
``(parent, deblend_peakID)`` combination. This function performs that exact
matching for all sources in both catalogs.

Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
    The two catalogs to match.
patch1, patch2 : array of int
    Patch for each row, converted into an integer. In the gen3 butler this is
    already done; in gen2 it is recommended to use `patch2Int`, assuming that
    the patches have the same structure as HSC, ranging from '0,0' to '9,9'.

Returns
-------
result : list of `lsst.afw.table.SourceMatch`
    List of matches for each source (using an inner join).
Definition at line 46 of file mergeDetections.py.
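As a minimal usage sketch (catalogA and catalogB are hypothetical `lsst.afw.table.SourceCatalog` objects, both derived from the same mergeDet catalog, e.g. two deblender configurations run on the same patch):

    from lsst.pipe.tasks.mergeDetections import matchCatalogsExact

    # Hypothetical inputs: two downstream catalogs built from the same mergeDet catalog.
    matches = matchCatalogsExact(catalogA, catalogB)
    for match in matches:
        # Each SourceMatch pairs the records sharing the same (parent, deblend_peakID);
        # match.first comes from catalogA and match.second from catalogB.
        print(match.first.getId(), match.second.getId())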
def lsst.pipe.tasks.mergeDetections.write(self, patchRef, catalog)
Write the output.
Parameters
----------
patchRef
    Data reference for patch.
catalog
    Catalog to write.

We write as the dataset provided by the 'outputDataset' class variable.
Definition at line 479 of file mergeDetections.py.
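Based on that description, a rough sketch of the gen2-era body (an assumption, not the verbatim source; `patchRef.put` and the dataset naming follow gen2 butler conventions):

    # Sketch only: persist the merged catalog as "<coaddName>Coadd_<outputDataset>",
    # i.e. "deepCoadd_mergeDet" with the default config.
    def write(self, patchRef, catalog):
        patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset)
        self.log.info("Wrote merged catalog: %s", patchRef.dataId)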
def lsst.pipe.tasks.mergeDetections.writeMetadata(self, dataRefList)
No metadata to write, and not sure how to write it for a list of dataRefs.
Definition at line 496 of file mergeDetections.py.
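Given that there is nothing to write, the override presumably just disables the default metadata persistence; a sketch:

    # Sketch: a deliberate no-op, since per-dataRef metadata is not written here.
    def writeMetadata(self, dataRefList):
        pass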
list lsst.pipe.tasks.mergeDetections.converted = []
Definition at line 468 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.filter
Definition at line 278 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
Definition at line 467 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema)
Definition at line 472 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.patch
Definition at line 278 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0]
Definition at line 471 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.schema = self.merged.getPeakSchema() |
ConfigClass = MergeDetectionsConfig
RunnerClass = MergeSourcesRunner
_DefaultName = "mergeCoaddDetections"
inputDataset = "det"
outputDataset = "mergeDet"
makeIdFactory = _makeMakeIdFactory("MergedCoaddId", includeBand=False)

@classmethod
def _makeArgumentParser(cls):
    return makeMergeArgumentParser(cls._DefaultName, cls.inputDataset)

def getInputSchema(self, butler=None, schema=None):
    return getInputSchema(self, butler, schema)

def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
    # Make PipelineTask-only wording less transitional after cmdlineTask is removed
    super().__init__(**kwargs)
    if initInputs is not None:
        schema = initInputs['schema'].schema
    self.makeSubtask("skyObjects")
    self.schema = self.getInputSchema(butler=butler, schema=schema)
    filterNames = list(self.config.priorityList)
    filterNames.append(self.config.skyFilterName)
    self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

def runDataRef(self, patchRefList):
    catalogs = dict(readCatalog(self, patchRef) for patchRef in patchRefList)
    skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=patchRefList[0])
    idFactory = self.makeIdFactory(patchRefList[0])
    skySeed = getGen3CoaddExposureId(patchRefList[0], coaddName=self.config.coaddName,
                                     includeBand=False, log=self.log)
    mergeCatalogStruct = self.run(catalogs, skyInfo, idFactory, skySeed)
    self.write(patchRefList[0], mergeCatalogStruct.outputCatalog)

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    inputs = butlerQC.get(inputRefs)
    exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "tract_patch")
    inputs["skySeed"] = exposureIdInfo.expId
    inputs["idFactory"] = exposureIdInfo.makeSourceIdFactory()
    catalogDict = {ref.dataId['band']: cat for ref, cat in zip(inputRefs.catalogs,
                                                               inputs['catalogs'])}
    inputs['catalogs'] = catalogDict
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    tractNumber = inputRefs.catalogs[0].dataId['tract']
    tractInfo = skyMap[tractNumber]
    patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
    skyInfo = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox()
    )
    inputs['skyInfo'] = skyInfo
    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)

def run(self, catalogs, skyInfo, idFactory, skySeed):
    # Convert distance to tract coordinate
    tractWcs = skyInfo.wcs
    peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
    samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()

    # Put catalogs, filters in priority order
    orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
    orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]

    mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                    self.schema, idFactory, samePeakDistance)

    #
    # Add extra sources that correspond to blank sky
    #
    skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
    if skySourceFootprints:
        key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
        for foot in skySourceFootprints:
            s = mergedList.addNew()
            s.setFootprint(foot)
            s.set(key, True)

    # Sort Peaks from brightest to faintest
    for record in mergedList:
        record.getFootprint().sortPeaks()
    self.log.info("Merged to %d sources", len(mergedList))
    # Attempt to remove garbage peaks
    self.cullPeaks(mergedList)
    return Struct(outputCatalog=mergedList)

def cullPeaks(self, catalog):
    keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
    assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
    totalPeaks = 0
    culledPeaks = 0
    for parentSource in catalog:
        # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
        # to it (which is easier than deleting as we iterate).
        keptPeaks = parentSource.getFootprint().getPeaks()
        oldPeaks = list(keptPeaks)
        keptPeaks.clear()
        familySize = len(oldPeaks)
        totalPeaks += familySize
        for rank, peak in enumerate(oldPeaks):
            if ((rank < self.config.cullPeaks.rankSufficient)
                    or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
                    or (rank < self.config.cullPeaks.rankConsidered
                        and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
                keptPeaks.append(peak)
            else:
                culledPeaks += 1
    self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)

def getSchemaCatalogs(self):
    mergeDet = afwTable.SourceCatalog(self.schema)
    peak = afwDetect.PeakCatalog(self.merged.getPeakSchema())
    return {self.config.coaddName + "Coadd_mergeDet": mergeDet,
            self.config.coaddName + "Coadd_peak": peak}

def getSkySourceFootprints(self, mergedList, skyInfo, seed):
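To make the peak-culling criteria in cullPeaks concrete, here is a small self-contained illustration (the numeric values stand in for the cullPeaks config fields and are hypothetical, not necessarily the package defaults): a peak survives if it is among the first rankSufficient peaks, is detected in at least nBandsSufficient bands, or has a rank that is small both absolutely and relative to its family size.

    # Illustration only; the names mirror the cullPeaks config fields with made-up values.
    rankSufficient, nBandsSufficient = 20, 2
    rankConsidered, rankNormalizedConsidered = 30, 0.7

    def kept(rank, nBands, familySize):
        # Same boolean structure as the keep/cull test in cullPeaks above.
        return (rank < rankSufficient
                or nBands >= nBandsSufficient
                or (rank < rankConsidered
                    and rank < rankNormalizedConsidered * familySize))

    print(kept(rank=25, nBands=1, familySize=40))  # True: 25 < 30 and 25 < 0.7*40 = 28
    print(kept(rank=25, nBands=1, familySize=30))  # False: 25 >= 0.7*30 = 21
    print(kept(rank=25, nBands=2, familySize=30))  # True: detected in enough bands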
Definition at line 466 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.tract
Definition at line 278 of file mergeDetections.py.