import collections.abc

import numpy

import lsst.geom
from ._base import Catalog
from ._schemaMapper import SchemaMapper
from ._table import CoordKey, SourceRecord
31 """Initialize a multi-catalog match.
36 Schema shared by all catalogs to be included in the match.
38 Set of name: type
for all data ID keys (e.g. {
"visit":int,
40 coordField : `str`, optional
41 Prefix
for _ra
and _dec fields that contain the
42 coordinates to use
for the match.
43 idField : `str`, optional
44 Name of the field
in schema that contains unique object
47 Maximum separation
for a match. Defaults to 0.5 arcseconds.
49 Type of record to expect
in catalogs to be matched.
52 def __init__(self, schema, dataIdFormat, coordField="coord", idField="id", radius=None,
53 RecordClass=SourceRecord):
        if radius is None:
            radius = 0.5*lsst.geom.arcseconds
        elif not isinstance(radius, lsst.geom.Angle):
            raise ValueError("'radius' argument must be an Angle")
        self.radius = radius
        self.mapper = SchemaMapper(schema)
        self.mapper.addMinimalSchema(schema, True)
        self.coordKey = CoordKey(schema[coordField])
        self.idKey = schema.find(idField).key
        self.dataIdKeys = {}
        outSchema = self.mapper.editOutputSchema()
        outSchema.setAliasMap(self.mapper.getInputSchema().getAliasMap())
        self.objectKey = outSchema.addField(
            "object", type=numpy.int64, doc="Unique ID for joined sources")
        for name, dataType in dataIdFormat.items():
            self.dataIdKeys[name] = outSchema.addField(
                name, type=dataType, doc="'%s' data ID component" % name)
        # Catalog of joined records; ambiguous matches may appear more than once.
        self.result = None
        # One record per object, used as the reference for subsequent matches.
        self.reference = None
        # Object IDs involved in ambiguous matches.
        self.ambiguous = set()
        # Table used to allocate records for the output catalog.
        self.table = RecordClass.Table.make(self.mapper.getOutputSchema())
        # Counter used to assign the next object ID.
        self.nextObjId = 1
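
    # Illustrative construction (a sketch, not part of the original source);
    # any schema shared by the catalogs to be matched works the same way:
    #
    #   >>> import lsst.geom
    #   >>> import lsst.afw.table
    #   >>> schema = lsst.afw.table.SourceTable.makeMinimalSchema()
    #   >>> multi = MultiMatch(schema, dataIdFormat={"visit": numpy.int32},
    #   ...                    radius=1.0*lsst.geom.arcseconds)
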
88 """Create a new result record from the given input record, using the
89 given data ID and object ID to fill
in additional columns.
93 inputRecord : `lsst.afw.table.source.sourceRecord`
94 Record to use
as the reference
for the new result.
95 dataId : `DataId`
or `dict`
96 Data id describing the data.
98 Object id of the object to be added.
102 outputRecord : `lsst.afw.table.source.sourceRecord`
103 Newly generated record.
105 outputRecord = self.table.copyRecord(inputRecord, self.mapper)
107 outputRecord.set(key, dataId[name])
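
    # Added commentary: copyRecord uses the SchemaMapper configured in __init__
    # to transfer every input field into the wider output schema; the loop then
    # fills the extra data ID columns and the "object" column that identifies
    # the match group. A sketch of the effect for a hypothetical record matched
    # into object 42 from visit 17:
    #
    #   >>> out = multi.makeRecord(inputRecord, {"visit": 17}, objId=42)
    #   >>> out.get("visit"), out.get("object")
    #   (17, 42)
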
    def add(self, catalog, dataId):
        """Add a new catalog to the match, corresponding to the given data ID.
        The new catalog is appended to `self.result` and
        `self.reference`.

        Parameters
        ----------
        catalog : `lsst.afw.table.base.Catalog`
            Catalog to be added to the match result.
        dataId : `DataId` or `dict`
            Data id for the catalog to be added.
        """
        if self.result is None:
            # First catalog: every record starts its own object.
            self.result = Catalog(self.table)
            for record in catalog:
                self.result.append(self.makeRecord(record, dataId, objId=self.nextObjId))
                self.nextObjId += 1
            self.reference = self.result.copy(deep=False)
            return
        catalog.sort(self.idKey)  # pre-sort for fast by-ID lookup
        # IDs are removed from this set as they are matched.
        unmatchedIds = {record.get(self.idKey) for record in catalog}
        # Temporary dict mapping new source ID to the set of associated object IDs.
        newToObj = {}
        matches = lsst.afw.table.matchRaDec(self.reference, catalog, self.radius)
        matchedRefIds = set()
        matchedCatIds = set()
        for refRecord, newRecord, distance in matches:
            objId = refRecord.get(self.objectKey)
            if objId in matchedRefIds:
                # This object was already matched to another new source;
                # mark it as ambiguous.
                self.ambiguous.add(objId)
            matchedRefIds.add(objId)
            if newRecord.get(self.idKey) in matchedCatIds:
                # This new source was already matched to one or more other
                # objects; mark all involved objects as ambiguous.
                self.ambiguous.add(objId)
                self.ambiguous |= newToObj.get(newRecord.get(self.idKey), set())
            matchedCatIds.add(newRecord.get(self.idKey))
            unmatchedIds.discard(newRecord.get(self.idKey))
            newToObj.setdefault(newRecord.get(self.idKey), set()).add(objId)
            self.result.append(self.makeRecord(newRecord, dataId, objId))
        # Sources that did not match anything become new objects in both the
        # joined result catalog and the reference catalog.
        for objId in unmatchedIds:
            newRecord = catalog.find(objId, self.idKey)
            resultRecord = self.makeRecord(newRecord, dataId, self.nextObjId)
            self.nextObjId += 1
            self.result.append(resultRecord)
            self.reference.append(resultRecord)
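
    # Illustrative incremental use (a sketch; `catalogsByVisit` is a
    # hypothetical dict mapping visit number to a SourceCatalog that uses the
    # schema passed to the constructor):
    #
    #   >>> for visit, catalog in catalogsByVisit.items():
    #   ...     multi.add(catalog, {"visit": visit})
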
168 """Return the final match catalog, after sorting it by object, copying
169 it to ensure contiguousness, and optionally removing ambiguous
172 After calling
finish(), the
in-progress state of the matcher
173 is returned to the state it was just after construction,
with
174 the exception of the object ID counter (which
is not reset).
178 removeAmbiguous : `bool`, optional
179 Should ambiguous matches be removed
from the match
180 catalog? Defaults to
True.
184 result : `lsst.afw.table.base.Catalog`
185 Final match catalog, sorted by object.
189 for record
in self.
result:
191 result.append(record)
195 result = result.copy(deep=
True)
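
# The sketch below strings the pieces together. It is illustrative, not part of
# the original module: it assumes the LSST stack is set up and that
# `catalogsByVisit` is a hypothetical dict mapping visit numbers to
# SourceCatalogs sharing `schema`.
def _multiMatchExample(schema, catalogsByVisit):
    """Match several per-visit catalogs and return the merged, grouped catalog."""
    multi = MultiMatch(schema, dataIdFormat={"visit": numpy.int32},
                       radius=0.5*lsst.geom.arcseconds)
    for visit, catalog in sorted(catalogsByVisit.items()):
        multi.add(catalog, {"visit": visit})
    # finish() sorts by the "object" column, deep-copies for contiguity, and by
    # default drops matches flagged as ambiguous.
    return multi.finish(removeAmbiguous=True)
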
203 """A mapping (i.e. dict-like object) that provides convenient
204 operations on the concatenated catalogs returned by a MultiMatch
207 A GroupView provides access to a catalog of grouped objects, in
208 which the grouping
is indicated by a field
for which all records
209 in a group have the same value. Once constructed, it allows
210 operations similar to those supported by SQL
"GROUP BY", such
as
211 filtering
and aggregate calculation.
216 Catalog schema to use
for the grouped object catalog.
218 List of identifying keys
for the groups
in the catalog.
220 List of catalog subsets associated
with each key
in ids.
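
    # Rough SQL analogy (added commentary, not part of the original source): if
    # the concatenated catalog were a table matched(object, flux, ...), then
    #   GroupView.build(matched).aggregate(numpy.mean, field="flux")
    # plays roughly the role of
    #   SELECT AVG(flux) FROM matched GROUP BY object;
    # while where(predicate) acts like a HAVING clause that keeps or drops
    # whole groups.
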
    @classmethod
    def build(cls, catalog, groupField="object"):
        """Construct a GroupView from a concatenated catalog.

        Parameters
        ----------
        catalog : `lsst.afw.table.base.Catalog`
            Input catalog, containing records grouped by a field in
            which all records in the same group have the same value.
            Must be sorted by the group field.
        groupField : `str`, optional
            Name or Key for the field that indicates groups. Defaults
            to "object".

        Returns
        -------
        groupCatalog : `lsst.afw.table.multiMatch.GroupView`
            Constructed GroupView from the input concatenated catalog.
        """
        groupKey = catalog.schema.find(groupField).key
        ids, indices = numpy.unique(catalog.get(groupKey), return_index=True)
        groups = numpy.zeros(len(ids), dtype=object)
        ends = list(indices[1:]) + [len(catalog)]
        for n, (i1, i2) in enumerate(zip(indices, ends)):
            groups[n] = catalog[i1:i2]
            assert (groups[n].get(groupKey) == ids[n]).all()
        return cls(catalog.schema, ids, groups)
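
    # Illustrative use of build (a sketch): `matched` is assumed to be the
    # catalog returned by MultiMatch.finish(), which is already sorted by
    # the "object" field as build requires.
    #
    #   >>> groups = GroupView.build(matched, groupField="object")
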
    def __init__(self, schema, ids, groups):
        self.schema = schema
        self.ids = ids
        self.groups = groups
        self.count = sum(len(cat) for cat in self.groups)

    def __len__(self):
        return len(self.ids)

    def __iter__(self):
        return iter(self.ids)

    def __getitem__(self, key):
        index = numpy.searchsorted(self.ids, key)
        if self.ids[index] != key:
            raise KeyError("Group with ID {0} not found".format(key))
        return self.groups[index]
270 """Return a new GroupView that contains only groups for which the
271 given predicate function returns True.
273 The predicate function
is called once
for each group,
and
274 passed a single argument: the subset catalog
for that group.
279 Function to identify which groups should be included
in
285 Subset GroupView containing only groups that match the
288 mask = numpy.zeros(len(self), dtype=bool)
289 for i
in range(len(self)):
290 mask[i] = predicate(self.
groups[i])
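
    # Illustrative filter (a sketch): keep only objects detected in at least
    # three of the matched input catalogs.
    #
    #   >>> deep = groups.where(lambda cat: len(cat) >= 3)
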
294 """Run an aggregate function on each group, returning an array with
295 one element for each group.
300 Callable object that computes the aggregate value. If
301 `field`
is None, called
with the entire subset catalog
as an
302 argument. If `field`
is not None, called
with an array view
304 field : `str`, optional
305 A string name
or Key object that indicates a single field the aggregate
308 Data type of the output array.
312 result : Array of `dtype`
313 Aggregated values
for each group.
315 result = numpy.zeros(len(self), dtype=dtype)
316 if field
is not None:
317 key = self.
schema.find(field).key
320 return function(cat.get(key))
323 for i
in range(len(self)):
324 result[i] = f(self.
groups[i])
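
    # Illustrative aggregation (a sketch; "slot_PsfFlux_instFlux" stands in
    # for whatever flux field the matched schema actually carries):
    #
    #   >>> nObs = deep.aggregate(len)  # field=None: called with each subset catalog
    #   >>> meanFlux = deep.aggregate(numpy.mean, field="slot_PsfFlux_instFlux")
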
    def apply(self, function, field=None, dtype=float):
        """Run a non-aggregate function on each group, returning an array with
        one element for each record.

        Parameters
        ----------
        function : callable
            Callable object that computes the per-record values. If `field`
            is None, called with the entire subset catalog as an argument.
            If `field` is not None, called with an array view into that
            field.
        field : `str`, optional
            A string name or Key object that indicates a single field the
            function is computed over.
        dtype : data type, optional
            Data type for the output array.

        Returns
        -------
        result : `numpy.array` of `dtype`
            Result of the function calculated on an element-by-element basis.
        """
        result = numpy.zeros(self.count, dtype=dtype)
        if field is not None:
            key = self.schema.find(field).key

            def f(cat):
                return function(cat.get(key))
        else:
            f = function
        last = 0
        for i in range(len(self)):
            next = last + len(self.groups[i])
            result[last:next] = f(self.groups[i])
            last = next
        return result