LSST Applications w.2022.27
LSST Data Management Base Package
lsst.pipe.tasks.mergeDetections Namespace Reference

Classes

class  MergeDetectionsConnections
 

Functions

def matchCatalogsExact (catalog1, catalog2, patch1=None, patch2=None)
 
def write (self, patchRef, catalog)
 Write the output.
 
def writeMetadata (self, dataRefList)
 No metadata to write, and not sure how to write it for a list of dataRefs.
 

Variables

 patch
 
 tract
 
 filter
 
 schema = self.merged.getPeakSchema()
 
 mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
 
list converted = []
 
 peak = oldFoot.getPeaks()[0]
 
 newFoot = afwDetect.Footprint(oldFoot.spans, schema)
 

Function Documentation

◆ matchCatalogsExact()

def lsst.pipe.tasks.mergeDetections.matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)
Match two catalogs derived from the same mergeDet catalog

When testing downstream features, like deblending methods/parameters
and measurement algorithms/parameters, it is useful to compare
the same sources in two catalogs. In most cases this must be done
by matching on either RA/DEC or XY positions, which occasionally
will mismatch one source with another.

For a more robust solution, as long as the downstream catalog is
derived from the same mergeDet catalog, exact source matching
can be done via the unique ``(parent, deblend_peakId)``
combination. So this function performs this exact matching for
all sources in both catalogs.

Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
    The two catalogs to match

patch1, patch2 : array of int
    Patch for each row, converted into an integer.
    In the gen3 butler this is done already; in gen2
    it is recommended to use `patch2Int`, assuming that
    the patches have the same structure as HSC, ranging
    from '0,0' to '9,9'.

Returns
-------
result : list of `lsst.afw.table.SourceMatch`
    List of matches for each source (using an inner join).

Definition at line 46 of file mergeDetections.py.

def matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None):
    """Match two catalogs derived from the same mergeDet catalog

    When testing downstream features, like deblending methods/parameters
    and measurement algorithms/parameters, it is useful to compare
    the same sources in two catalogs. In most cases this must be done
    by matching on either RA/DEC or XY positions, which occasionally
    will mismatch one source with another.

    For a more robust solution, as long as the downstream catalog is
    derived from the same mergeDet catalog, exact source matching
    can be done via the unique ``(parent, deblend_peakId)``
    combination. So this function performs this exact matching for
    all sources in both catalogs.

    Parameters
    ----------
    catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
        The two catalogs to match
    patch1, patch2 : array of int
        Patch for each row, converted into an integer.
        In the gen3 butler this is done already; in gen2
        it is recommended to use `patch2Int`, assuming that
        the patches have the same structure as HSC, ranging
        from '0,0' to '9,9'.

    Returns
    -------
    result : list of `lsst.afw.table.SourceMatch`
        List of matches for each source (using an inner join).
    """
    # Only match the individual sources; the parents will
    # already be matched by the mergeDet catalog
    sidx1 = catalog1["parent"] != 0
    sidx2 = catalog2["parent"] != 0

    # Create the keys used to merge the catalogs
    parents1 = np.array(catalog1["parent"][sidx1])
    peaks1 = np.array(catalog1["deblend_peakId"][sidx1])
    index1 = np.arange(len(catalog1))[sidx1]
    parents2 = np.array(catalog2["parent"][sidx2])
    peaks2 = np.array(catalog2["deblend_peakId"][sidx2])
    index2 = np.arange(len(catalog2))[sidx2]

    if patch1 is not None:
        if patch2 is None:
            msg = ("If the catalogs are from different patches then patch1 and patch2 must be specified"
                   ", got {} and {}").format(patch1, patch2)
            raise ValueError(msg)
        patch1 = patch1[sidx1]
        patch2 = patch2[sidx2]

        key1 = np.rec.array((parents1, peaks1, patch1, index1),
                            dtype=[('parent', np.int64), ('peakId', np.int32),
                                   ("patch", patch1.dtype), ("index", np.int32)])
        key2 = np.rec.array((parents2, peaks2, patch2, index2),
                            dtype=[('parent', np.int64), ('peakId', np.int32),
                                   ("patch", patch2.dtype), ("index", np.int32)])
        matchColumns = ("parent", "peakId", "patch")
    else:
        key1 = np.rec.array((parents1, peaks1, index1),
                            dtype=[('parent', np.int64), ('peakId', np.int32), ("index", np.int32)])
        key2 = np.rec.array((parents2, peaks2, index2),
                            dtype=[('parent', np.int64), ('peakId', np.int32), ("index", np.int32)])
        matchColumns = ("parent", "peakId")
    # Match the two keys.
    # This line performs an inner join on the structured
    # arrays `key1` and `key2`, which stores their indices
    # as columns in a structured array.
    matched = rec_join(matchColumns, key1, key2, jointype="inner")

    # Create the full index for both catalogs
    indices1 = matched["index1"]
    indices2 = matched["index2"]

    # Re-index the resulting catalogs
    matches = [
        afwTable.SourceMatch(catalog1[int(i1)], catalog2[int(i2)], 0.0)
        for i1, i2 in zip(indices1, indices2)
    ]

    return matches
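A minimal usage sketch follows. The file names and surrounding setup are hypothetical; both catalogs are assumed to derive from the same mergeDet catalog (e.g. two measurement runs with different deblender settings).

import lsst.afw.table as afwTable
from lsst.pipe.tasks.mergeDetections import matchCatalogsExact

# Hypothetical inputs: two measurement catalogs for the same patch,
# produced from the same mergeDet catalog.
catalog1 = afwTable.SourceCatalog.readFits("run1_deepCoadd_meas.fits")
catalog2 = afwTable.SourceCatalog.readFits("run2_deepCoadd_meas.fits")

# Same patch for both catalogs, so patch1/patch2 may be left as None.
matches = matchCatalogsExact(catalog1, catalog2)
for match in matches[:5]:
    # Each SourceMatch pairs one record from each catalog; the distance is
    # 0.0 because the join is exact rather than positional.
    print(match.first.getId(), match.second.getId())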
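For reference, the inner join at the heart of the function can be demonstrated standalone with plain numpy; this sketch is illustrative and not part of the LSST API.

import numpy as np
from numpy.lib.recfunctions import rec_join

# Two key arrays analogous to key1/key2 above: (parent, peakId) identifies a
# source; index records its row in the original catalog.
key1 = np.rec.array([(1, 10, 0), (1, 11, 1), (2, 12, 2)],
                    dtype=[("parent", np.int64), ("peakId", np.int32), ("index", np.int32)])
key2 = np.rec.array([(1, 11, 0), (2, 12, 1), (3, 13, 2)],
                    dtype=[("parent", np.int64), ("peakId", np.int32), ("index", np.int32)])

# Non-key columns that appear in both inputs are suffixed "1"/"2" in the
# result, which is how matchCatalogsExact recovers "index1" and "index2".
matched = rec_join(("parent", "peakId"), key1, key2, jointype="inner")
print(matched.dtype.names)  # ('parent', 'peakId', 'index1', 'index2')
print(len(matched))         # 2 rows: (1, 11) and (2, 12) appear in both keys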

◆ write()

def lsst.pipe.tasks.mergeDetections.write(self, patchRef, catalog)

Write the output.

Parameters
    [in]  patchRef  data reference for patch
    [in]  catalog   catalog to write

We write as the dataset provided by the 'outputDataset' class variable.

Definition at line 479 of file mergeDetections.py.

def write(self, patchRef, catalog):
    """!
    @brief Write the output.

    @param[in] patchRef data reference for patch
    @param[in] catalog catalog

    We write as the dataset provided by the 'outputDataset'
    class variable.
    """
    patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset)
    # since the filter isn't actually part of the data ID for the dataset we're saving,
    # it's confusing to see it in the log message, even if the butler simply ignores it.
    mergeDataId = patchRef.dataId.copy()
    del mergeDataId["filter"]
    self.log.info("Wrote merged catalog: %s", mergeDataId)
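The dataset name passed to put() is composed from the config and the class variable; a standalone illustration (the coadd name here is an assumption, "deep" being the conventional value):

# Standalone sketch of how write() composes the output dataset name.
coaddName = "deep"          # assumed value of self.config.coaddName
outputDataset = "mergeDet"  # the class variable noted elsewhere on this page
datasetName = coaddName + "Coadd_" + outputDataset
print(datasetName)  # deepCoadd_mergeDet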

◆ writeMetadata()

def lsst.pipe.tasks.mergeDetections.writeMetadata(self, dataRefList)

No metadata to write, and not sure how to write it for a list of dataRefs.

Definition at line 496 of file mergeDetections.py.

def writeMetadata(self, dataRefList):
    """!
    @brief No metadata to write, and not sure how to write it for a list of dataRefs.
    """
    pass

Variable Documentation

◆ converted

list lsst.pipe.tasks.mergeDetections.converted = []

Definition at line 468 of file mergeDetections.py.

◆ filter

lsst.pipe.tasks.mergeDetections.filter

Definition at line 278 of file mergeDetections.py.

◆ mergeKey

lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key

Definition at line 467 of file mergeDetections.py.

◆ newFoot

lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema)

Definition at line 472 of file mergeDetections.py.

◆ patch

lsst.pipe.tasks.mergeDetections.patch

Definition at line 278 of file mergeDetections.py.

◆ peak

lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0]

Definition at line 471 of file mergeDetections.py.

◆ schema

lsst.pipe.tasks.mergeDetections.schema = self.merged.getPeakSchema()
The enclosing task code fragment, as extracted here:

ConfigClass = MergeDetectionsConfig
RunnerClass = MergeSourcesRunner
_DefaultName = "mergeCoaddDetections"
inputDataset = "det"
outputDataset = "mergeDet"
makeIdFactory = _makeMakeIdFactory("MergedCoaddId", includeBand=False)

@classmethod
def _makeArgumentParser(cls):
    return makeMergeArgumentParser(cls._DefaultName, cls.inputDataset)

def getInputSchema(self, butler=None, schema=None):
    return getInputSchema(self, butler, schema)

def __init__(self, butler=None, schema=None, initInputs=None, **kwargs):
    # Make PipelineTask-only wording less transitional after cmdlineTask is removed
    super().__init__(**kwargs)
    if initInputs is not None:
        schema = initInputs['schema'].schema

    self.makeSubtask("skyObjects")
    self.schema = self.getInputSchema(butler=butler, schema=schema)

    filterNames = list(self.config.priorityList)
    filterNames.append(self.config.skyFilterName)
    self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

def runDataRef(self, patchRefList):
    catalogs = dict(readCatalog(self, patchRef) for patchRef in patchRefList)
    skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=patchRefList[0])
    idFactory = self.makeIdFactory(patchRefList[0])
    skySeed = getGen3CoaddExposureId(patchRefList[0], coaddName=self.config.coaddName,
                                     includeBand=False, log=self.log)
    mergeCatalogStruct = self.run(catalogs, skyInfo, idFactory, skySeed)
    self.write(patchRefList[0], mergeCatalogStruct.outputCatalog)

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    inputs = butlerQC.get(inputRefs)
    exposureIdInfo = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "tract_patch")
    inputs["skySeed"] = exposureIdInfo.expId
    inputs["idFactory"] = exposureIdInfo.makeSourceIdFactory()
    catalogDict = {ref.dataId['band']: cat for ref, cat in zip(inputRefs.catalogs,
                   inputs['catalogs'])}
    inputs['catalogs'] = catalogDict
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    tractNumber = inputRefs.catalogs[0].dataId['tract']
    tractInfo = skyMap[tractNumber]
    patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
    skyInfo = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox()
    )
    inputs['skyInfo'] = skyInfo

    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)

def run(self, catalogs, skyInfo, idFactory, skySeed):
    # Convert distance to tract coordinate
    tractWcs = skyInfo.wcs
    peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
    samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()

    # Put catalogs, filters in priority order
    orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
    orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]

    mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                    self.schema, idFactory,
                                                    samePeakDistance)

    #
    # Add extra sources that correspond to blank sky
    #
    skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
    if skySourceFootprints:
        key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
        for foot in skySourceFootprints:
            s = mergedList.addNew()
            s.setFootprint(foot)
            s.set(key, True)

    # Sort Peaks from brightest to faintest
    for record in mergedList:
        record.getFootprint().sortPeaks()
    self.log.info("Merged to %d sources", len(mergedList))
    # Attempt to remove garbage peaks
    self.cullPeaks(mergedList)
    return Struct(outputCatalog=mergedList)

def cullPeaks(self, catalog):
    keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
    assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
    totalPeaks = 0
    culledPeaks = 0
    for parentSource in catalog:
        # Make a list copy so we can clear the attached PeakCatalog and append the ones
        # we're keeping to it (which is easier than deleting as we iterate).
        keptPeaks = parentSource.getFootprint().getPeaks()
        oldPeaks = list(keptPeaks)
        keptPeaks.clear()
        familySize = len(oldPeaks)
        totalPeaks += familySize
        for rank, peak in enumerate(oldPeaks):
            if ((rank < self.config.cullPeaks.rankSufficient)
                    or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
                    or (rank < self.config.cullPeaks.rankConsidered
                        and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
                keptPeaks.append(peak)
            else:
                culledPeaks += 1
    self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)

def getSchemaCatalogs(self):
    mergeDet = afwTable.SourceCatalog(self.schema)
    peak = afwDetect.PeakCatalog(self.merged.getPeakSchema())
    return {self.config.coaddName + "Coadd_mergeDet": mergeDet,
            self.config.coaddName + "Coadd_peak": peak}

def getSkySourceFootprints(self, mergedList, skyInfo, seed):
Definition at line 466 of file mergeDetections.py.
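The peak-culling rule in cullPeaks() above can be isolated as a small predicate. The config values below are assumptions for illustration, not guaranteed defaults:

# Standalone sketch of the keep/cull decision applied to each ranked peak.
rankSufficient = 20              # assumed cullPeaks.rankSufficient
nBandsSufficient = 2             # assumed cullPeaks.nBandsSufficient
rankConsidered = 30              # assumed cullPeaks.rankConsidered
rankNormalizedConsidered = 0.7   # assumed cullPeaks.rankNormalizedConsidered

def keepPeak(rank, nBands, familySize):
    """Return True if a peak with this brightness rank and band count survives."""
    return (rank < rankSufficient
            or nBands >= nBandsSufficient
            or (rank < rankConsidered
                and rank < rankNormalizedConsidered * familySize))

print(keepPeak(rank=25, nBands=1, familySize=40))  # True: within both considered limits
print(keepPeak(rank=25, nBands=1, familySize=10))  # False: fails the normalized-rank limit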

◆ tract

lsst.pipe.tasks.mergeDetections.tract

Definition at line 278 of file mergeDetections.py.