LSST Applications (w.2021.48)
LSST Data Management Base Package
Classes

    class MergeDetectionsConnections

Functions

    def matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)

    def write(self, patchRef, catalog)
        Write the output.

    def writeMetadata(self, dataRefList)
        No metadata to write, and not sure how to write it for a list of dataRefs.

Variables

    patch
    tract
    filter
    schema = self.merged.getPeakSchema()
    mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
    list converted = []
    peak = oldFoot.getPeaks()[0]
    newFoot = afwDetect.Footprint(oldFoot.spans, schema)
def lsst.pipe.tasks.mergeDetections.matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)
Match two catalogs derived from the same mergeDet catalog.

When testing downstream features, like deblending methods/parameters
and measurement algorithms/parameters, it is useful to compare
the same sources in two catalogs. In most cases this must be done
by matching on either RA/Dec or XY positions, which occasionally
will mismatch one source with another.

For a more robust solution, as long as the downstream catalog is
derived from the same mergeDet catalog, exact source matching
can be done via the unique ``(parent, deblend_peakID)``
combination. So this function performs this exact matching for
all sources in both catalogs.

Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
    The two catalogs to match.
patch1, patch2 : array of `int`
    Patch for each row, converted into an integer.
    In the gen3 butler this is already done; in gen2
    it is recommended to use `patch2Int`, assuming that
    the patches have the same structure as HSC, ranging
    from '0,0' to '9,9'.

Returns
-------
result : list of `lsst.afw.table.SourceMatch`
    List of matches for each source (using an inner join).
 
Definition at line 45 of file mergeDetections.py.
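
Because the match key is just the ``(parent, deblend_peakID)`` pair, the idea can be sketched without the LSST stack. The helper below is a minimal, hypothetical illustration (not the library implementation), assuming each catalog row exposes ``parent`` and ``deblend_peakID`` fields as described above:

    def match_by_peak(catalog1, catalog2):
        """Inner-join two catalogs on their unique (parent, deblend_peakID) keys."""
        # Index the second catalog by its exact key.
        index2 = {(row["parent"], row["deblend_peakID"]): row for row in catalog2}
        matches = []
        for row in catalog1:
            key = (row["parent"], row["deblend_peakID"])
            if key in index2:  # inner join: keep only sources present in both
                matches.append((row, index2[key]))
        return matches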
def lsst.pipe.tasks.mergeDetections.write(self, patchRef, catalog)
Write the output.
Parameters
    [in]  patchRef  data reference for patch
    [in]  catalog   catalog to write
We write as the dataset provided by the 'outputDataset' class variable.
Definition at line 477 of file mergeDetections.py.
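
As a hedged illustration of that naming: the ``getSchemaCatalogs`` code later on this page composes dataset names as ``coaddName + "Coadd_" + <dataset>``, so with assumed values the written dataset would be:

    coaddName = "deep"          # assumed value of MergeDetectionsConfig.coaddName
    outputDataset = "mergeDet"  # class variable shown later on this page
    datasetName = coaddName + "Coadd_" + outputDataset
    print(datasetName)          # -> deepCoadd_mergeDet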
def lsst.pipe.tasks.mergeDetections.writeMetadata(self, dataRefList)
No metadata to write, and not sure how to write it for a list of dataRefs.
Definition at line 494 of file mergeDetections.py.
list lsst.pipe.tasks.mergeDetections.converted = []
Definition at line 466 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.filter
Definition at line 277 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
Definition at line 465 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema)
Definition at line 470 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.patch
Definition at line 277 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0]
Definition at line 469 of file mergeDetections.py.

lsst.pipe.tasks.mergeDetections.schema = self.merged.getPeakSchema()

Class context from mergeDetections.py, as captured in this listing:
ConfigClass = MergeDetectionsConfig
RunnerClass = MergeSourcesRunner
_DefaultName = "mergeCoaddDetections"
inputDataset = "det"
outputDataset = "mergeDet"
makeIdFactory = _makeMakeIdFactory("MergedCoaddId")

@classmethod
def _makeArgumentParser(cls):
    return makeMergeArgumentParser(cls._DefaultName, cls.inputDataset)

def getInputSchema(self, butler=None, schema=None):
    return getInputSchema(self, butler, schema)

def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
    # Make PipelineTask-only wording less transitional after cmdlineTask is removed.
    super().__init__(**kwargs)
    if initInputs is not None:
        schema = initInputs['schema'].schema
    self.makeSubtask("skyObjects")
    self.schema = self.getInputSchema(butler=butler, schema=schema)
    # Merge across every priority band plus the sky filter (appended last).
    filterNames = list(self.config.priorityList)
    filterNames.append(self.config.skyFilterName)
    self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

def runDataRef(self, patchRefList):
    catalogs = dict(readCatalog(self, patchRef) for patchRef in patchRefList)
    skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=patchRefList[0])
    idFactory = self.makeIdFactory(patchRefList[0])
    skySeed = patchRefList[0].get(self.config.coaddName + "MergedCoaddId")
    mergeCatalogStruct = self.run(catalogs, skyInfo, idFactory, skySeed)
    self.write(patchRefList[0], mergeCatalogStruct.outputCatalog)
def runQuantum(self, butlerQC, inputRefs, outputRefs):
    inputs = butlerQC.get(inputRefs)
    exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "tract_patch")
    inputs["skySeed"] = exposureIdInfo.expId
    inputs["idFactory"] = exposureIdInfo.makeSourceIdFactory()
    # Re-key the input catalogs by band, e.g. {"g": cat_g, "r": cat_r, ...}
    catalogDict = {ref.dataId['band']: cat for ref, cat in zip(inputRefs.catalogs,
                                                               inputs['catalogs'])}
    inputs['catalogs'] = catalogDict
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    tractNumber = inputRefs.catalogs[0].dataId['tract']
    tractInfo = skyMap[tractNumber]
    patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
    skyInfo = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox()
    )
    inputs['skyInfo'] = skyInfo
    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)
def run(self, catalogs, skyInfo, idFactory, skySeed):
    # Convert distance thresholds to tract pixel coordinates.
    tractWcs = skyInfo.wcs
    peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
    samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()
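    # Worked example with hypothetical numbers: if minNewPeak is 1.0 arcsec
    # and the tract pixel scale is 0.2 arcsec/pixel, then
    # peakDistance = 1.0 / 0.2 = 5.0 pixels; samePeakDistance converts the
    # maxSamePeak threshold from arcseconds to pixels the same way.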
    # Put catalogs, filters in priority order
    orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
    orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]
    mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                    self.schema, idFactory,
                                                    samePeakDistance)
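    # For example, a hypothetical priorityList of ["i", "r", "z", "g"] with
    # input catalogs for bands {"g", "r", "i"} gives
    # orderedBands = ["i", "r", "g"]: bands are taken in priority order and
    # bands without an input catalog are skipped.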
    #
    # Add extra sources that correspond to blank sky
    #
    skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
    if skySourceFootprints:
        key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
        for foot in skySourceFootprints:
            s = mergedList.addNew()
            s.setFootprint(foot)
            s.set(key, True)

    # Sort Peaks from brightest to faintest
    for record in mergedList:
        record.getFootprint().sortPeaks()
    self.log.info("Merged to %d sources", len(mergedList))
    # Attempt to remove garbage peaks
    self.cullPeaks(mergedList)
    return Struct(outputCatalog=mergedList)
def cullPeaks(self, catalog):
    keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
    assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
    totalPeaks = 0
    culledPeaks = 0
    for parentSource in catalog:
        # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
        # to it (which is easier than deleting as we iterate).
        keptPeaks = parentSource.getFootprint().getPeaks()
        oldPeaks = list(keptPeaks)
        keptPeaks.clear()
        familySize = len(oldPeaks)
        totalPeaks += familySize
        for rank, peak in enumerate(oldPeaks):
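            # Worked example with hypothetical config values: suppose
            # rankSufficient=20, nBandsSufficient=2, rankConsidered=30,
            # rankNormalizedConsidered=0.7, and a 40-peak family. The peak at
            # rank 25 is kept because 25 < 30 and 25 < 0.7 * 40 = 28; the peak
            # at rank 29 survives only if it was detected in at least 2 bands.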
            if ((rank < self.config.cullPeaks.rankSufficient)
                    or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
                    or (rank < self.config.cullPeaks.rankConsidered
                        and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
                keptPeaks.append(peak)
            else:
                culledPeaks += 1
    self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)
def getSchemaCatalogs(self):
    mergeDet = afwTable.SourceCatalog(self.schema)
    peak = afwDetect.PeakCatalog(self.merged.getPeakSchema())
    return {self.config.coaddName + "Coadd_mergeDet": mergeDet,
            self.config.coaddName + "Coadd_peak": peak}

def getSkySourceFootprints(self, mergedList, skyInfo, seed):
    # (method body omitted in this listing)
Definition at line 464 of file mergeDetections.py.
lsst.pipe.tasks.mergeDetections.tract
Definition at line 277 of file mergeDetections.py.