lsst.pipe.tasks.mergeDetections Namespace Reference

Classes
    class MergeDetectionsConnections

Functions
    def write(self, patchRef, catalog)
        Write the output.
    def writeMetadata(self, dataRefList)
        No metadata to write, and not sure how to write it for a list of dataRefs.

Variables
    patch
    tract
    filter
    schema
    mergeKey
    converted
    peak
    newFoot
def lsst.pipe.tasks.mergeDetections.write(self, patchRef, catalog)
Write the output.
Parameters
    [in]  patchRef  data reference for the patch
    [in]  catalog   the catalog to write

The catalog is written as the dataset named by the 'outputDataset' class variable.
Definition at line 388 of file mergeDetections.py.
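For orientation, a minimal sketch of such a write method. This assumes the Gen2 data-reference API (patchRef.put) and the coaddName + "Coadd_" + outputDataset naming visible in getSchemaCatalogs() below; it is illustrative, not the verbatim implementation:

    # Hedged sketch: patchRef.put and the dataset-name construction are
    # assumptions based on getSchemaCatalogs() below, not copied from
    # mergeDetections.py.
    def write(self, patchRef, catalog):
        # e.g. persists as "deepCoadd_mergeDet" when coaddName == "deep"
        patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset)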
def lsst.pipe.tasks.mergeDetections.writeMetadata(self, dataRefList)
No metadata to write, and not sure how to write it for a list of dataRefs.
Definition at line 405 of file mergeDetections.py.
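Given that docstring, the override is presumably a no-op that suppresses the default per-dataRef metadata write. A sketch consistent with the description above, not necessarily the exact source:

    # Sketch: suppress the default metadata write, since there is no sensible
    # way to write task metadata for a list of dataRefs.
    def writeMetadata(self, dataRefList):
        pass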
lsst.pipe.tasks.mergeDetections.converted
Definition at line 377 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.filter
Definition at line 187 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.mergeKey
Definition at line 376 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.newFoot
Definition at line 381 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.patch
Definition at line 187 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.peak
Definition at line 380 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.schema
    ConfigClass = MergeDetectionsConfig
    RunnerClass = MergeSourcesRunner
    _DefaultName = "mergeCoaddDetections"
    inputDataset = "det"
    outputDataset = "mergeDet"
    makeIdFactory = _makeMakeIdFactory("MergedCoaddId")

    @classmethod
    def _makeArgumentParser(cls):
        return makeMergeArgumentParser(cls._DefaultName, cls.inputDataset)

    def getInputSchema(self, butler=None, schema=None):
        return getInputSchema(self, butler, schema)

    def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
        # Make PipelineTask-only wording less transitional after cmdlineTask is removed
        super().__init__(**kwargs)
        if initInputs is not None:
            schema = initInputs['schema'].schema
        self.makeSubtask("skyObjects")
        self.schema = self.getInputSchema(butler=butler, schema=schema)

        filterNames = [getShortFilterName(name) for name in self.config.priorityList]
        filterNames += [self.config.skyFilterName]
        self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
        self.outputSchema = afwTable.SourceCatalog(self.schema)
        self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

    def runDataRef(self, patchRefList):
        catalogs = dict(readCatalog(self, patchRef) for patchRef in patchRefList)
        skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=patchRefList[0])
        idFactory = self.makeIdFactory(patchRefList[0])
        skySeed = patchRefList[0].get(self.config.coaddName + "MergedCoaddId")
        mergeCatalogStruct = self.run(catalogs, skyInfo, idFactory, skySeed)
        self.write(patchRefList[0], mergeCatalogStruct.outputCatalog)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)
        packedId, maxBits = butlerQC.quantum.dataId.pack("tract_patch", returnMaxBits=True)
        inputs["skySeed"] = packedId
        inputs["idFactory"] = afwTable.IdFactory.makeSource(packedId, 64 - maxBits)
        catalogDict = {ref.dataId['band']: cat for ref, cat in
                       zip(inputRefs.catalogs, inputs['catalogs'])}
        inputs['catalogs'] = catalogDict
        skyMap = inputs.pop('skyMap')
        # Can use the first dataId to find the tract and patch being worked on
        tractNumber = inputRefs.catalogs[0].dataId['tract']
        tractInfo = skyMap[tractNumber]
        patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
        skyInfo = Struct(
            skyMap=skyMap,
            tractInfo=tractInfo,
            patchInfo=patchInfo,
            wcs=tractInfo.getWcs(),
            bbox=patchInfo.getOuterBBox()
        )
        inputs['skyInfo'] = skyInfo
        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, catalogs, skyInfo, idFactory, skySeed):
        # Convert the configured minimum peak separations from arcseconds to
        # pixels in the tract coordinate system.
        tractWcs = skyInfo.wcs
        peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
        samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()

        # Put catalogs, filters in priority order
        orderedCatalogs = [catalogs[band] for band in self.config.priorityList
                           if band in catalogs.keys()]
        orderedBands = [getShortFilterName(band) for band in self.config.priorityList
                        if band in catalogs.keys()]

        mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands,
                                                        peakDistance, self.schema,
                                                        idFactory, samePeakDistance)

        # Add extra sources that correspond to blank sky
        skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
        if skySourceFootprints:
            key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
            for foot in skySourceFootprints:
                s = mergedList.addNew()
                s.setFootprint(foot)
                s.set(key, True)

        # Sort Peaks from brightest to faintest
        for record in mergedList:
            record.getFootprint().sortPeaks()
        self.log.info("Merged to %d sources" % len(mergedList))

        # Attempt to remove garbage peaks
        self.cullPeaks(mergedList)
        return Struct(outputCatalog=mergedList)

    def cullPeaks(self, catalog):
        keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
        assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
        totalPeaks = 0
        culledPeaks = 0
        for parentSource in catalog:
            # Make a list copy so we can clear the attached PeakCatalog and append
            # the ones we're keeping to it (which is easier than deleting as we iterate).
            keptPeaks = parentSource.getFootprint().getPeaks()
            oldPeaks = list(keptPeaks)
            keptPeaks.clear()
            familySize = len(oldPeaks)
            totalPeaks += familySize
            for rank, peak in enumerate(oldPeaks):
                if ((rank < self.config.cullPeaks.rankSufficient)
                        or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
                        or (rank < self.config.cullPeaks.rankConsidered
                            and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
                    keptPeaks.append(peak)
                else:
                    culledPeaks += 1
        self.log.info("Culled %d of %d peaks" % (culledPeaks, totalPeaks))

    def getSchemaCatalogs(self):
        mergeDet = afwTable.SourceCatalog(self.schema)
        peak = afwDetect.PeakCatalog(self.merged.getPeakSchema())
        return {self.config.coaddName + "Coadd_mergeDet": mergeDet,
                self.config.coaddName + "Coadd_peak": peak}

    def getSkySourceFootprints(self, mergedList, skyInfo, seed):
        ...  # body omitted
Definition at line 375 of file mergeDetections.py.
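The keep-or-cull rule in cullPeaks() above can be checked in isolation. In this standalone sketch the threshold values are invented for the example (they are not MergeDetectionsConfig defaults); a peak survives if it ranks brightly enough, is detected in enough bands, or ranks well within its blend family:

    # Standalone illustration of the cullPeaks predicate; all threshold values
    # below are made up for this example, not the package defaults.
    def keep_peak(rank, n_bands_detected, family_size,
                  rank_sufficient=20, n_bands_sufficient=2,
                  rank_considered=30, rank_normalized_considered=0.7):
        return (rank < rank_sufficient
                or n_bands_detected >= n_bands_sufficient
                or (rank < rank_considered
                    and rank < rank_normalized_considered * family_size))

    # A bright peak is kept regardless of band count; a faint single-band peak
    # deep in a large blend is culled.
    assert keep_peak(rank=3, n_bands_detected=1, family_size=100)
    assert not keep_peak(rank=80, n_bands_detected=1, family_size=100)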
lsst.pipe.tasks.mergeDetections.tract
Definition at line 187 of file mergeDetections.py.