LSST Applications g063fba187b+cac8b7c890,g0f08755f38+6aee506743,g1653933729+a8ce1bb630,g168dd56ebc+a8ce1bb630,g1a2382251a+b4475c5878,g1dcb35cd9c+8f9bc1652e,g20f6ffc8e0+6aee506743,g217e2c1bcf+73dee94bd0,g28da252d5a+1f19c529b9,g2bbee38e9b+3f2625acfc,g2bc492864f+3f2625acfc,g3156d2b45e+6e55a43351,g32e5bea42b+1bb94961c2,g347aa1857d+3f2625acfc,g35bb328faa+a8ce1bb630,g3a166c0a6a+3f2625acfc,g3e281a1b8c+c5dd892a6c,g3e8969e208+a8ce1bb630,g414038480c+5927e1bc1e,g41af890bb2+8a9e676b2a,g7af13505b9+809c143d88,g80478fca09+6ef8b1810f,g82479be7b0+f568feb641,g858d7b2824+6aee506743,g89c8672015+f4add4ffd5,g9125e01d80+a8ce1bb630,ga5288a1d22+2903d499ea,gb58c049af0+d64f4d3760,gc28159a63d+3f2625acfc,gcab2d0539d+b12535109e,gcf0d15dbbd+46a3f46ba9,gda6a2b7d83+46a3f46ba9,gdaeeff99f8+1711a396fd,ge79ae78c31+3f2625acfc,gef2f8181fd+0a71e47438,gf0baf85859+c1f95f4921,gfa517265be+6aee506743,gfa999e8aa5+17cd334064,w.2024.51
LSST Data Management Base Package
Loading...
Searching...
No Matches
finalizeCharacterization.py
Go to the documentation of this file.
1# This file is part of pipe_tasks.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21
22"""Task to run a finalized image characterization, using additional data.
23"""
24
25__all__ = ['FinalizeCharacterizationConnections',
26 'FinalizeCharacterizationConfig',
27 'FinalizeCharacterizationTask']
28
29import numpy as np
30import esutil
31import pandas as pd
32
33import lsst.pex.config as pexConfig
34import lsst.pipe.base as pipeBase
35import lsst.daf.base as dafBase
36import lsst.afw.table as afwTable
37import lsst.meas.algorithms as measAlg
39from lsst.meas.algorithms import MeasureApCorrTask
40from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
41from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry
42
43from .reserveIsolatedStars import ReserveIsolatedStarsTask
44
45
class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    """Connections for FinalizeCharacterizationTask.

    One quantum is run per visit; per-detector inputs (``srcs``,
    ``calexps``) and per-tract isolated-star inputs are all deferred-load
    and multiple, so the task can choose what to read per detector/tract.
    """
    # Init-input: schema only, used to build the output schema mapper
    # before any data is read.
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_presource_associations',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_presources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    # Outputs are per-visit: one ExposureCatalog holding psf/apcorr per
    # detector, and one DataFrame of per-source measurements.
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and are sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )
102
103
class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        """Set task-specific defaults on top of the inherited ones.

        Configures the science source selector for s/n + flag cuts only,
        the aperture-correction selector, and the measurement plugin set.
        """
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to select out flagged objects
        # and signal-to-noise. Isolated, unresolved sources are handled
        # by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 50.0
        source_selector.signalToNoise.maximum = 1000.0

        # S/N is computed from the Gaussian flux of the input src catalog.
        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        # Imported for their plugin registration side effects only.
        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_FPPosition',
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
            'ext_shapeHSM_HigherOrderMomentsSource',
            'ext_shapeHSM_HigherOrderMomentsPSF',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # TODO: Remove in DM-44658, streak masking to happen only in ip_diffim
        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off slot setting for measurement for centroid and shape
        # (for which we use the input src catalog measurements)
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        # Convolved and GAaP fluxes may legitimately fail apcorr measurement
        # for some apertures; allow those failures rather than aborting.
        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names
237
238
class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        """Construct the task and its subtasks.

        Parameters
        ----------
        initInputs : `dict`, optional
            Init-input datasets; must contain ``src_schema`` whose schema
            seeds the output schema mapper.
        **kwargs
            Forwarded to `lsst.pipe.base.PipelineTask`.
        """
        super().__init__(initInputs=initInputs, **kwargs)

        # Build the measurement (output) schema once from the input src
        # schema; the mapper is reused for every detector's catalog.
        # NOTE: this line was dropped in the scraped copy of this file and
        # is restored here; without it the module is not valid Python.
        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Assemble per-detector/per-tract handle dicts and run the task."""
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number.  Used in the output catalogs.
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence.

        Raises
        ------
        NoWorkFound
            Raised if the selector returns no good sources.
        """
        # Check if we have the same inputs for each of the
        # src_dict and calexp_dict.
        src_detectors = set(src_dict.keys())
        calexp_detectors = set(calexp_dict.keys())

        if src_detectors != calexp_detectors:
            detector_keys = sorted(src_detectors.intersection(calexp_detectors))
            self.log.warning(
                "Input src and calexp have mismatched detectors; "
                "running intersection of %d detectors.",
                len(detector_keys),
            )
        else:
            detector_keys = sorted(src_detectors)

        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        if isolated_source_table is None:
            raise pipeBase.NoWorkFound(f'No good isolated sources found for any detectors in visit {visit}')

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []
        measured_src_table = None

        self.log.info("Running finalizeCharacterization on %d detectors.", len(detector_keys))
        for detector in detector_keys:
            self.log.info("Starting finalizeCharacterization on detector ID %d.", detector)
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            if measured_src is not None:
                record = psf_ap_corr_cat.addNew()
                record['id'] = int(detector)
                record['visit'] = visit
                if psf is not None:
                    record.setPsf(psf)
                if ap_corr_map is not None:
                    record.setApCorrMap(ap_corr_map)

                measured_src['visit'][:] = visit
                measured_src['detector'][:] = detector

                measured_src_tables.append(measured_src.asAstropy().as_array())

        if len(measured_src_tables) > 0:
            measured_src_table = np.concatenate(measured_src_tables)

        if measured_src_table is None:
            raise pipeBase.NoWorkFound(f'No good sources found for any detectors in visit {visit}')

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        # Note that the slot_CalibFlux mapping will copy over any
        # normalized compensated fluxes that are used for calibration.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        # Record the flux/err used for psf selection under stable names.
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        # Carry over only the slot aliases the output schema needs.
        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
            Returns None if there are no usable isolated catalogs.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
            Returns None if there are no usable isolated catalogs.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            if len(use_band) == 0:
                # There are no sources in this band in this tract.
                self.log.info("No sources found in %s band in tract %d.", band, tract)
                continue

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        if len(isolated_tables) > 0:
            isolated_table = np.concatenate(isolated_tables)
            isolated_source_table = np.concatenate(isolated_sources)
        else:
            isolated_table = None
            isolated_source_table = None

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Calibrated exposure; its psf is replaced by the new model.
        src : `lsst.afw.table.SourceCatalog`
            Input source catalog for the detector.
        isolated_source_table : `np.ndarray`
            Concatenated isolated-source table with ``reserved`` flags.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Extract footprints from the input src catalog for noise replacement.
        footprints = SingleFrameMeasurementTask.getFootprintsFromCatalog(src)

        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)
        if sum(good_src.selected) == 0:
            self.log.warning('No good sources remain after cuts for visit %d, detector %d',
                             visit, detector)
            return None, None, None

        # Cut down input src to the selected sources
        # We use a separate schema/mapper here than for the output/measurement catalog because of
        # clashes between fields that were previously run and those that need to be rerun with
        # the new psf model.  This may be slightly inefficient but keeps input
        # and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            # Best-effort per detector: log and return what we have.
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            # Pass the task metadata so the determiner can record
            # diagnostics; the flag key marks candidates actually used.
            # NOTE: the self.metadata argument was dropped in the scraped
            # copy of this file and is restored here.
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src
        # Verify that the PSF is usable by downstream tasks
        sigma = psf.computeShape(psf.getAveragePosition()).getDeterminantRadius()
        if np.isnan(sigma):
            self.log.warning('Failed to determine psf for visit %d, detector %d: '
                             'Computed final PSF size is NAN.',
                             visit, detector)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        # We use the full footprint list from the input src catalog for noise replacement.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure, footprints=footprints)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src
Mapping class that holds aliases for a Schema.
Definition AliasMap.h:36
Custom catalog class for ExposureRecord/Table.
Definition Exposure.h:310
A mapping between the keys of two Schemas, used to copy data between them.
Class for storing ordered metadata with comments.
run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict)
compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table)
concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict)