LSSTApplications  18.0.0+46,18.0.0+93,19.0.0,19.0.0+1,19.0.0+2,19.0.0+3,19.0.0+4,19.0.0-1-g20d9b18+2,19.0.0-1-g3dc8cbe+2,19.0.0-1-g425ff20,19.0.0-1-g5549ca4,19.0.0-1-g580fafe+2,19.0.0-1-g5db401e+3,19.0.0-1-g6fe20d0+1,19.0.0-1-g7011481+2,19.0.0-1-g8c57eb9+2,19.0.0-1-g9828021+2,19.0.0-1-gb5175dc+2,19.0.0-1-gd7f3e1b+2,19.0.0-1-gdc0e4a7+2,19.0.0-1-ge272bc4+2,19.0.0-2-g0d9f9cd+2,19.0.0-2-g1c703f9ef+1,19.0.0-2-g3d9e4fb2+2,19.0.0-2-gd955cfd+2,19.0.0-3-g2d13df8,19.0.0-3-g63079e6+2,19.0.0-7-g8a434f2+1,19.0.0-7-gf796fef9+3,w.2019.49
LSSTDataManagementBasePackage
Classes | Functions | Variables
lsst.pipe.tasks.mergeDetections Namespace Reference

Classes

class  MergeDetectionsConnections
 

Functions

def write (self, patchRef, catalog)
 Write the output. More...
 
def writeMetadata (self, dataRefList)
 No metadata to write, and not sure how to write it for a list of dataRefs. More...
 

Variables

 patch
 
 tract
 
 filter
 
 schema
 
 mergeKey
 
 converted
 
 peak
 
 newFoot
 

Function Documentation

◆ write()

def lsst.pipe.tasks.mergeDetections.write (   self,
  patchRef,
  catalog 
)

Write the output.

Parameters
[in]  patchRef   data reference for patch
[in]  catalog    catalog

We write as the dataset provided by the 'outputDataset' class variable.

Definition at line 388 of file mergeDetections.py.

388  def write(self, patchRef, catalog):
389  """!
390  @brief Write the output.
391 
392  @param[in] patchRef data reference for patch
393  @param[in] catalog catalog
394 
395  We write as the dataset provided by the 'outputDataset'
396  class variable.
397  """
398  patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset)
399  # since the filter isn't actually part of the data ID for the dataset we're saving,
400  # it's confusing to see it in the log message, even if the butler simply ignores it.
401  mergeDataId = patchRef.dataId.copy()
402  del mergeDataId["filter"]
403  self.log.info("Wrote merged catalog: %s" % (mergeDataId,))
404 
def write(self, patchRef, catalog)
Write the output.

◆ writeMetadata()

def lsst.pipe.tasks.mergeDetections.writeMetadata (   self,
  dataRefList 
)

No metadata to write, and not sure how to write it for a list of dataRefs.

Definition at line 405 of file mergeDetections.py.

405  def writeMetadata(self, dataRefList):
406  """!
407  @brief No metadata to write, and not sure how to write it for a list of dataRefs.
408  """
409  pass
410 
def writeMetadata(self, dataRefList)
No metadata to write, and not sure how to write it for a list of dataRefs.

Variable Documentation

◆ converted

lsst.pipe.tasks.mergeDetections.converted

Definition at line 377 of file mergeDetections.py.

◆ filter

lsst.pipe.tasks.mergeDetections.filter

Definition at line 187 of file mergeDetections.py.

◆ mergeKey

lsst.pipe.tasks.mergeDetections.mergeKey

Definition at line 376 of file mergeDetections.py.

◆ newFoot

lsst.pipe.tasks.mergeDetections.newFoot

Definition at line 381 of file mergeDetections.py.

◆ patch

lsst.pipe.tasks.mergeDetections.patch

Definition at line 187 of file mergeDetections.py.

◆ peak

lsst.pipe.tasks.mergeDetections.peak
Examples:
forEachPixel.cc.

Definition at line 380 of file mergeDetections.py.

◆ schema

lsst.pipe.tasks.mergeDetections.schema
ConfigClass = MergeDetectionsConfig
RunnerClass = MergeSourcesRunner
_DefaultName = "mergeCoaddDetections"
inputDataset = "det"
outputDataset = "mergeDet"
makeIdFactory = _makeMakeIdFactory("MergedCoaddId")

@classmethod
def _makeArgumentParser(cls):
    return makeMergeArgumentParser(cls._DefaultName, cls.inputDataset)

def getInputSchema(self, butler=None, schema=None):
    return getInputSchema(self, butler, schema)

def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
    # Make PipelineTask-only wording less transitional after cmdlineTask is removed
super().__init__(**kwargs)
if initInputs is not None:
    schema = initInputs['schema'].schema

self.makeSubtask("skyObjects")
self.schema = self.getInputSchema(butler=butler, schema=schema)

filterNames = [getShortFilterName(name) for name in self.config.priorityList]
filterNames += [self.config.skyFilterName]
self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
self.outputSchema = afwTable.SourceCatalog(self.schema)
self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

    def runDataRef(self, patchRefList):
catalogs = dict(readCatalog(self, patchRef) for patchRef in patchRefList)
skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=patchRefList[0])
idFactory = self.makeIdFactory(patchRefList[0])
skySeed = patchRefList[0].get(self.config.coaddName + "MergedCoaddId")
mergeCatalogStruct = self.run(catalogs, skyInfo, idFactory, skySeed)
self.write(patchRefList[0], mergeCatalogStruct.outputCatalog)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
inputs = butlerQC.get(inputRefs)
packedId, maxBits = butlerQC.quantum.dataId.pack("tract_patch", returnMaxBits=True)
inputs["skySeed"] = packedId
inputs["idFactory"] = afwTable.IdFactory.makeSource(packedId, 64 - maxBits)
catalogDict = {ref.dataId['abstract_filter']: cat for ref, cat in zip(inputRefs.catalogs,
               inputs['catalogs'])}
inputs['catalogs'] = catalogDict
skyMap = inputs.pop('skyMap')
# Can use the first dataId to find the tract and patch being worked on
tractNumber = inputRefs.catalogs[0].dataId['tract']
tractInfo = skyMap[tractNumber]
patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
skyInfo = Struct(
    skyMap=skyMap,
    tractInfo=tractInfo,
    patchInfo=patchInfo,
    wcs=tractInfo.getWcs(),
    bbox=patchInfo.getOuterBBox()
)
inputs['skyInfo'] = skyInfo

outputs = self.run(**inputs)
butlerQC.put(outputs, outputRefs)

    def run(self, catalogs, skyInfo, idFactory, skySeed):
r
# Convert distance to tract coordinate
tractWcs = skyInfo.wcs
peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()

# Put catalogs, filters in priority order
orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
orderedBands = [getShortFilterName(band) for band in self.config.priorityList
                if band in catalogs.keys()]

mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                self.schema, idFactory,
                                                samePeakDistance)

#
# Add extra sources that correspond to blank sky
#
skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
if skySourceFootprints:
    key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
    for foot in skySourceFootprints:
        s = mergedList.addNew()
        s.setFootprint(foot)
        s.set(key, True)

# Sort Peaks from brightest to faintest
for record in mergedList:
    record.getFootprint().sortPeaks()
self.log.info("Merged to %d sources" % len(mergedList))
# Attempt to remove garbage peaks
self.cullPeaks(mergedList)
return Struct(outputCatalog=mergedList)

    def cullPeaks(self, catalog):
keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
totalPeaks = 0
culledPeaks = 0
for parentSource in catalog:
    # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
    # to it (which is easier than deleting as we iterate).
    keptPeaks = parentSource.getFootprint().getPeaks()
    oldPeaks = list(keptPeaks)
    keptPeaks.clear()
    familySize = len(oldPeaks)
    totalPeaks += familySize
    for rank, peak in enumerate(oldPeaks):
        if ((rank < self.config.cullPeaks.rankSufficient) or
            (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient) or
            (rank < self.config.cullPeaks.rankConsidered and
             rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
            keptPeaks.append(peak)
        else:
            culledPeaks += 1
self.log.info("Culled %d of %d peaks" % (culledPeaks, totalPeaks))

    def getSchemaCatalogs(self):
mergeDet = afwTable.SourceCatalog(self.schema)
peak = afwDetect.PeakCatalog(self.merged.getPeakSchema())
return {self.config.coaddName + "Coadd_mergeDet": mergeDet,
        self.config.coaddName + "Coadd_peak": peak}

    def getSkySourceFootprints(self, mergedList, skyInfo, seed):

Definition at line 375 of file mergeDetections.py.

◆ tract

lsst.pipe.tasks.mergeDetections.tract

Definition at line 187 of file mergeDetections.py.