LSST Data Management Base Package

lsst.pipe.tasks.mergeDetections Namespace Reference

Classes

class  MergeDetectionsConnections
 

Functions

 matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)
 

Variables

 schema : `lsst.afw.table.Schema`, optional
 
 initInputs : `dict`, optional
 
 catalogs : `lsst.afw.table.SourceCatalog`
 
 mergedList : `lsst.afw.table.SourceCatalog`
 
 result : `lsst.pipe.base.Struct`
 
 catalog : `lsst.afw.table.SourceCatalog`
 
 mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key
 
list converted = []
 
 peak = oldFoot.getPeaks()[0]
 
 newFoot = afwDetect.Footprint(oldFoot.spans, schema)
 

Function Documentation

◆ matchCatalogsExact()

lsst.pipe.tasks.mergeDetections.matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None)
Match two catalogs derived from the same mergeDet catalog.

When testing downstream features, like deblending methods/parameters
and measurement algorithms/parameters, it is useful to compare
the same sources in two catalogs. In most cases this must be done
by matching on either RA/Dec or XY positions, which occasionally
will mismatch one source with another.

For a more robust solution, as long as the downstream catalog is
derived from the same mergeDet catalog, exact source matching
can be done via the unique ``(parent, deblend_peakId)``
combination. This function performs that exact matching for
all sources in both catalogs.

Parameters
----------
catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
    The two catalogs to match.
patch1, patch2 : `array` of `int`, optional
    The patch for each row, converted into an integer.

Returns
-------
result : `list` of `lsst.afw.table.SourceMatch`
    List of matches for each source (using an inner join).

Definition at line 42 of file mergeDetections.py.
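
A minimal usage sketch before the full source below. The catalog names are hypothetical; the function's only requirement is that both catalogs descend from the same mergeDet catalog.

from lsst.pipe.tasks.mergeDetections import matchCatalogsExact

# `deblended1` and `deblended2` are hypothetical downstream catalogs
# (e.g. two deblender configurations) derived from one mergeDet catalog.
matches = matchCatalogsExact(deblended1, deblended2)
for match in matches:
    # Each SourceMatch pairs the same physical source in both catalogs.
    print(match.first.getId(), match.second.getId())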

# Module-level imports used below: numpy as np, rec_join from
# numpy.lib.recfunctions, and lsst.afw.table as afwTable.
def matchCatalogsExact(catalog1, catalog2, patch1=None, patch2=None):
    """Match two catalogs derived from the same mergeDet catalog.

    When testing downstream features, like deblending methods/parameters
    and measurement algorithms/parameters, it is useful to compare
    the same sources in two catalogs. In most cases this must be done
    by matching on either RA/Dec or XY positions, which occasionally
    will mismatch one source with another.

    For a more robust solution, as long as the downstream catalog is
    derived from the same mergeDet catalog, exact source matching
    can be done via the unique ``(parent, deblend_peakId)``
    combination. This function performs that exact matching for
    all sources in both catalogs.

    Parameters
    ----------
    catalog1, catalog2 : `lsst.afw.table.SourceCatalog`
        The two catalogs to match.
    patch1, patch2 : `array` of `int`, optional
        The patch for each row, converted into an integer.

    Returns
    -------
    result : `list` of `lsst.afw.table.SourceMatch`
        List of matches for each source (using an inner join).
    """
    # Only match the individual sources; the parents will
    # already be matched by the mergeDet catalog.
    sidx1 = catalog1["parent"] != 0
    sidx2 = catalog2["parent"] != 0

    # Create the keys used to merge the catalogs.
    parents1 = np.array(catalog1["parent"][sidx1])
    peaks1 = np.array(catalog1["deblend_peakId"][sidx1])
    index1 = np.arange(len(catalog1))[sidx1]
    parents2 = np.array(catalog2["parent"][sidx2])
    peaks2 = np.array(catalog2["deblend_peakId"][sidx2])
    index2 = np.arange(len(catalog2))[sidx2]

    if patch1 is not None:
        if patch2 is None:
            msg = ("If the catalogs are from different patches then patch1 and patch2 must be specified"
                   ", got {} and {}").format(patch1, patch2)
            raise ValueError(msg)
        patch1 = patch1[sidx1]
        patch2 = patch2[sidx2]

        key1 = np.rec.array((parents1, peaks1, patch1, index1),
                            dtype=[('parent', np.int64), ('peakId', np.int32),
                                   ("patch", patch1.dtype), ("index", np.int32)])
        key2 = np.rec.array((parents2, peaks2, patch2, index2),
                            dtype=[('parent', np.int64), ('peakId', np.int32),
                                   ("patch", patch2.dtype), ("index", np.int32)])
        matchColumns = ("parent", "peakId", "patch")
    else:
        key1 = np.rec.array((parents1, peaks1, index1),
                            dtype=[('parent', np.int64), ('peakId', np.int32), ("index", np.int32)])
        key2 = np.rec.array((parents2, peaks2, index2),
                            dtype=[('parent', np.int64), ('peakId', np.int32), ("index", np.int32)])
        matchColumns = ("parent", "peakId")
    # Match the two keys: an inner join on the structured arrays
    # `key1` and `key2`, with the row indices carried along as columns.
    matched = rec_join(matchColumns, key1, key2, jointype="inner")

    # Extract the original row indices for both catalogs.
    indices1 = matched["index1"]
    indices2 = matched["index2"]

    # Build a SourceMatch for each joined pair (distance set to 0.0).
    matches = [
        afwTable.SourceMatch(catalog1[int(i1)], catalog2[int(i2)], 0.0)
        for i1, i2 in zip(indices1, indices2)
    ]

    return matches
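For readers unfamiliar with `rec_join`, here is a self-contained sketch of the inner join used above, with made-up parent/peakId values. `rec_join` lives in `numpy.lib.recfunctions`; non-key columns that share a name ("index") come back suffixed as "index1"/"index2".

import numpy as np
from numpy.lib.recfunctions import rec_join

key1 = np.rec.array([(1, 10, 0), (1, 11, 1), (2, 12, 2)],
                    dtype=[('parent', np.int64), ('peakId', np.int32), ('index', np.int32)])
key2 = np.rec.array([(1, 11, 0), (2, 12, 1), (3, 13, 2)],
                    dtype=[('parent', np.int64), ('peakId', np.int32), ('index', np.int32)])
# Inner join on (parent, peakId): only (1, 11) and (2, 12) appear in both.
matched = rec_join(('parent', 'peakId'), key1, key2, jointype="inner")
print(matched['index1'], matched['index2'])  # -> [1 2] [0 1]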

Variable Documentation

◆ catalog

lsst.pipe.tasks.mergeDetections.catalog : `lsst.afw.table.SourceCatalog`
# Convert distances to tract coordinates
tractWcs = skyInfo.wcs
peakDistance = self.config.minNewPeak / tractWcs.getPixelScale().asArcseconds()
samePeakDistance = self.config.maxSamePeak / tractWcs.getPixelScale().asArcseconds()

# Put catalogs, filters in priority order
orderedCatalogs = [catalogs[band] for band in self.config.priorityList if band in catalogs.keys()]
orderedBands = [band for band in self.config.priorityList if band in catalogs.keys()]

mergedList = self.merged.getMergedSourceCatalog(orderedCatalogs, orderedBands, peakDistance,
                                                self.schema, idFactory,
                                                samePeakDistance)

#
# Add extra sources that correspond to blank sky
#
skySourceFootprints = self.getSkySourceFootprints(mergedList, skyInfo, skySeed)
if skySourceFootprints:
    key = mergedList.schema.find("merge_footprint_%s" % self.config.skyFilterName).key
    for foot in skySourceFootprints:
        s = mergedList.addNew()
        s.setFootprint(foot)
        s.set(key, True)

# Sort Peaks from brightest to faintest
for record in mergedList:
    record.getFootprint().sortPeaks()
self.log.info("Merged to %d sources", len(mergedList))
# Attempt to remove garbage peaks
self.cullPeaks(mergedList)
return Struct(outputCatalog=mergedList)

def cullPeaks(self, catalog):

Definition at line 334 of file mergeDetections.py.
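
The distance conversion above is plain division by the pixel scale. A toy restatement with made-up numbers (the real values come from the task config and the tract WCS):

minNewPeak = 1.0    # hypothetical config value, in arcseconds
pixelScale = 0.2    # hypothetical tract pixel scale, in arcsec/pixel
peakDistance = minNewPeak / pixelScale    # threshold in pixels: 5.0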

◆ catalogs

lsst.pipe.tasks.mergeDetections.catalogs : `lsst.afw.table.SourceCatalog`
ConfigClass = MergeDetectionsConfig
_DefaultName = "mergeCoaddDetections"

def __init__(self, schema=None, initInputs=None, **kwargs):
    super().__init__(**kwargs)

    if initInputs is not None:
        schema = initInputs['schema'].schema

    if schema is None:
        raise ValueError("No input schema or initInputs['schema'] provided.")

    self.schema = schema

    self.makeSubtask("skyObjects")

    filterNames = list(self.config.priorityList)
    filterNames.append(self.config.skyFilterName)
    self.merged = afwDetect.FootprintMergeList(self.schema, filterNames)
    self.outputSchema = afwTable.SourceCatalog(self.schema)
    self.outputPeakSchema = afwDetect.PeakCatalog(self.merged.getPeakSchema())

def runQuantum(self, butlerQC, inputRefs, outputRefs):
    inputs = butlerQC.get(inputRefs)
    idGenerator = self.config.idGenerator.apply(butlerQC.quantum.dataId)
    inputs["skySeed"] = idGenerator.catalog_id
    inputs["idFactory"] = idGenerator.make_table_id_factory()
    catalogDict = {ref.dataId['band']: cat for ref, cat in zip(inputRefs.catalogs,
                   inputs['catalogs'])}
    inputs['catalogs'] = catalogDict
    skyMap = inputs.pop('skyMap')
    # Can use the first dataId to find the tract and patch being worked on
    tractNumber = inputRefs.catalogs[0].dataId['tract']
    tractInfo = skyMap[tractNumber]
    patchInfo = tractInfo.getPatchInfo(inputRefs.catalogs[0].dataId['patch'])
    skyInfo = Struct(
        skyMap=skyMap,
        tractInfo=tractInfo,
        patchInfo=patchInfo,
        wcs=tractInfo.getWcs(),
        bbox=patchInfo.getOuterBBox()
    )
    inputs['skyInfo'] = skyInfo

    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)

def run(self, catalogs, skyInfo, idFactory, skySeed):

Definition at line 284 of file mergeDetections.py.
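
A minimal construction sketch, assuming the owning task class is exported as `MergeDetectionsTask`. The config values are hypothetical: `priorityList` must be non-empty for the footprint merge list to be built, and the band names here are made up.

import lsst.afw.table as afwTable
from lsst.pipe.tasks.mergeDetections import MergeDetectionsTask

config = MergeDetectionsTask.ConfigClass()
config.priorityList = ["i", "r", "g"]    # hypothetical band priority order

# Either pass a schema directly...
schema = afwTable.SourceTable.makeMinimalSchema()
task = MergeDetectionsTask(config=config, schema=schema)
# ...or let the task pull it from initInputs, as the middleware does:
# task = MergeDetectionsTask(config=config, initInputs={"schema": outputSchemaDataset})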

◆ converted

list lsst.pipe.tasks.mergeDetections.converted = []

Definition at line 383 of file mergeDetections.py.

◆ initInputs

lsst.pipe.tasks.mergeDetections.initInputs : `dict`, optional

Definition at line 220 of file mergeDetections.py.

◆ mergedList

lsst.pipe.tasks.mergeDetections.mergedList : `lsst.afw.table.SourceCatalog`

Definition at line 286 of file mergeDetections.py.

◆ mergeKey

lsst.pipe.tasks.mergeDetections.mergeKey = schema.find("merge_peak_%s" % self.config.skyFilterName).key

Definition at line 382 of file mergeDetections.py.

◆ newFoot

lsst.pipe.tasks.mergeDetections.newFoot = afwDetect.Footprint(oldFoot.spans, schema)

Definition at line 387 of file mergeDetections.py.

◆ peak

lsst.pipe.tasks.mergeDetections.peak = oldFoot.getPeaks()[0]

Definition at line 386 of file mergeDetections.py.

◆ result

lsst.pipe.tasks.mergeDetections.result : `lsst.pipe.base.Struct`

Definition at line 291 of file mergeDetections.py.

◆ schema

lsst.pipe.tasks.mergeDetections.schema : `lsst.afw.table.Schema`, optional
keys = [item.key for item in self.merged.getPeakSchema().extract("merge_peak_*").values()]
assert len(keys) > 0, "Error finding flags that associate peaks with their detection bands."
totalPeaks = 0
culledPeaks = 0
for parentSource in catalog:
    # Make a list copy so we can clear the attached PeakCatalog and append the ones we're keeping
    # to it (which is easier than deleting as we iterate).
    keptPeaks = parentSource.getFootprint().getPeaks()
    oldPeaks = list(keptPeaks)
    keptPeaks.clear()
    familySize = len(oldPeaks)
    totalPeaks += familySize
    for rank, peak in enumerate(oldPeaks):
        if ((rank < self.config.cullPeaks.rankSufficient)
            or (sum([peak.get(k) for k in keys]) >= self.config.cullPeaks.nBandsSufficient)
            or (rank < self.config.cullPeaks.rankConsidered
                and rank < self.config.cullPeaks.rankNormalizedConsidered * familySize)):
            keptPeaks.append(peak)
        else:
            culledPeaks += 1
self.log.info("Culled %d of %d peaks", culledPeaks, totalPeaks)

def getSkySourceFootprints(self, mergedList, skyInfo, seed):

Definition at line 218 of file mergeDetections.py.
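
A condensed restatement of the per-peak keep/cull rule applied above. The threshold names mirror the `cullPeaks` config fields; the values here are hypothetical.

# Hypothetical thresholds; the real values live in MergeDetectionsConfig.cullPeaks.
rankSufficient = 20               # always keep the brightest peaks in a family
nBandsSufficient = 2              # keep any peak detected in enough bands
rankConsidered = 30               # beyond this rank a peak also needs the fraction test
rankNormalizedConsidered = 0.7    # ...expressed as a fraction of the family size

def keepPeak(rank, nBands, familySize):
    """Return True if a peak of the given brightness rank survives culling."""
    return (rank < rankSufficient
            or nBands >= nBandsSufficient
            or (rank < rankConsidered
                and rank < rankNormalizedConsidered * familySize))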