finalizeCharacterization.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

22"""Task to run a finalized image characterization, using additional data.
23"""
24
__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']

import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
# Importing the piff extension registers the default 'piff' determiner in
# measAlg.psfDeterminerRegistry; the module is otherwise unused here.
import lsst.meas.extensions.piff.piffPsfDeterminer  # noqa: F401
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask


class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and is sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )

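
# Illustrative sketch (not used by the pipeline): the finalized_psf_ap_corr_cat
# output above is an ExposureCatalog whose record ids are detector ids, so a
# downstream consumer can attach the finalized PSF and aperture corrections to
# a calexp roughly as follows.  The function name is hypothetical.
def _example_attach_finalized_psf(exposure, finalized_psf_ap_corr_cat, detector_id):
    """Attach the finalized PSF and ApCorrMap for ``detector_id`` to ``exposure``."""
    for record in finalized_psf_ap_corr_cat:
        if record['id'] == detector_id:
            exposure.setPsf(record.getPsf())
            exposure.getInfo().setApCorrMap(record.getApCorrMap())
            return exposure
    raise LookupError(f'Detector {detector_id} not found in the finalized catalog.')
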
class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to reject flagged objects and to
        # apply a signal-to-noise cut.  Isolated, unresolved sources are
        # handled by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input.
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_FPPosition',
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off the measurement slots for centroid and the aperture and
        # calibration fluxes (for these we use the input src catalog
        # measurements).
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names

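
# Illustrative sketch (not used by the pipeline): the defaults above can be
# overridden per-pipeline in the usual pex_config way.  For example, a
# (hypothetical) override function could tighten the signal-to-noise cut used
# when selecting PSF candidate sources.
def _example_override_snr_cut(config, minimum=50.0):
    """Raise the source-selector S/N floor on a FinalizeCharacterizationConfig."""
    config.source_selector['science'].signalToNoise.minimum = minimum
    return config
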
class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number. Used in the output catalogs.
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence: ``psf_ap_corr_cat``
            (`lsst.afw.table.ExposureCatalog`) and ``output_table``
            (`np.ndarray`).

        Raises
        ------
        NoWorkFound
            Raised if the source selector returns no good sources for any
            detector in the visit.
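
        Notes
        -----
        A sketch (mirroring ``runQuantum`` above) of how the returned struct is
        persisted::

            struct = self.run(visit, band, isolated_star_cat_dict,
                              isolated_star_source_dict, src_dict, calexp_dict)
            butlerQC.put(struct.psf_ap_corr_cat, outputRefs.finalized_psf_ap_corr_cat)
            butlerQC.put(pd.DataFrame(struct.output_table), outputRefs.finalized_src_table)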
324 """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []
        measured_src_table = None

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            if measured_src is not None:
                record = psf_ap_corr_cat.addNew()
                record['id'] = int(detector)
                record['visit'] = visit
                if psf is not None:
                    record.setPsf(psf)
                if ap_corr_map is not None:
                    record.setApCorrMap(ap_corr_map)

                measured_src['visit'][:] = visit
                measured_src['detector'][:] = detector

                measured_src_tables.append(measured_src.asAstropy().as_array())

        if len(measured_src_tables) > 0:
            measured_src_table = np.concatenate(measured_src_tables)

        if measured_src_table is None:
            raise pipeBase.NoWorkFound(f'No good sources found for any detectors in visit {visit}')

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
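
        Notes
        -----
        The two returned tables are linked by index.  A minimal sketch of how
        the sources of isolated star ``i`` could be retrieved from the outputs
        (``band`` as passed to this method)::

            start = isolated_table[f'source_cat_index_{band}'][i]
            stop = start + isolated_table[f'nsource_{band}'][i]
            star_sources = isolated_source_table[start:stop]
            # Each source points back to its star.
            assert np.all(star_sources['obj_index'] == i)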
507 """
508 isolated_tables = []
509 isolated_sources = []
510 merge_cat_counter = 0
511 merge_source_counter = 0
512
513 for tract in isolated_star_cat_dict:
514 df_cat = isolated_star_cat_dict[tract].get()
515 table_cat = df_cat.to_records()
516
517 df_source = isolated_star_source_dict[tract].get(
518 parameters={'columns': [self.config.id_column,
519 'obj_index']}
520 )
521 table_source = df_source.to_records()
522
523 # Cut isolated star table to those observed in this band, and adjust indexes
524 (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()
525
526 if len(use_band) == 0:
527 # There are no sources in this band in this tract.
528 self.log.info("No sources found in %s band in tract %d.", band, tract)
529 continue
530
531 # With the following matching:
532 # table_source[b] <-> table_cat[use_band[a]]
533 obj_index = table_source['obj_index'][:]
534 a, b = esutil.numpy_util.match(use_band, obj_index)
535
536 # Update indexes and cut to band-selected stars/sources
537 table_source['obj_index'][b] = a
538 _, index_new = np.unique(a, return_index=True)
539 table_cat[f'source_cat_index_{band}'][use_band] = index_new
540
            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]:
            #   table_cat["source_cat_index_{band}"] + table_cat["nsource_{band}"]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the
            #   associated isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Exposure on which to compute the PSF model and aperture corrections.
        src : `lsst.afw.table.SourceCatalog`
            Single-detector source catalog from which to select PSF stars.
        isolated_source_table : `np.ndarray`
            Table of isolated star sources (with ``reserved`` flags) used to
            flag candidate and reserved stars.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF model, or `None` if the PSF fit failed.
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map, or `None` if it could not be computed.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture
            corrections, or `None` if no sources passed the selection.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)
        if sum(good_src.selected) == 0:
            self.log.warning('No good sources remain after cuts for visit %d, detector %d',
                             visit, detector)
            return None, None, None

        # Cut down input src to the selected sources
        # We use a separate schema/mapper here than for the output/measurement catalog because of
        # clashes between fields that were previously run and those that need to be rerun with
        # the new psf model. This may be slightly inefficient but keeps input
        # and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src
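

# Illustrative sketch (not used by the task itself): the finalized_src_table
# DataFrame written by runQuantum carries the calib_psf_candidate/used/reserved
# flags together with visit and detector columns, so the stars used in one
# detector's PSF fit can be recovered with a simple pandas selection.  The
# function name is hypothetical.
def _example_select_psf_used(finalized_src_table, detector_id):
    """Return the finalized-source rows used in the PSF fit of one detector."""
    mask = ((finalized_src_table['detector'] == detector_id)
            & finalized_src_table['calib_psf_used'])
    return finalized_src_table[mask]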