finalizeCharacterization.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""Task to run a finalized image characterization, using additional data.
"""

__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']

import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask

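# Overview: this task produces, for a single visit, a finalized per-detector
# PSF model and aperture correction map (persisted together as an
# ExposureCatalog keyed by detector id) plus a catalog of measurements for the
# PSF candidate, used, and reserved stars.  The inputs are the per-detector
# src catalogs and calexps and the per-tract isolated star catalogs, as
# declared in the connections class below.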
class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and is sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )


class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to select out flagged objects
        # and signal-to-noise. Isolated, unresolved sources are handled
        # by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 50.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_FPPosition',
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
            'ext_shapeHSM_HigherOrderMomentsSource',
            'ext_shapeHSM_HigherOrderMomentsPSF',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # TODO: Remove in DM-44658, streak masking to happen only in ip_diffim
        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off slot setting for measurement for centroid and shape
        # (for which we use the input src catalog measurements)
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names

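# Illustrative config override (not from this file): a pipeline definition or
# obs-package config could adjust the defaults set above, for example by
# relaxing the signal-to-noise floor used for PSF star selection.  The value
# below is an arbitrary example, not a recommended setting.
#
#   config.source_selector['science'].signalToNoise.minimum = 20.0
#   config.id_column = 'sourceId'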

class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

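    # Illustrative direct call (outside the pipeline middleware), mirroring how
    # runQuantum assembles its inputs above; the variable names here are
    # hypothetical and each dict value is a deferred-load butler handle:
    #
    #   src_dict = {detector_id: src_handle, ...}
    #   calexp_dict = {detector_id: calexp_handle, ...}
    #   isolated_star_cat_dict = {tract: cat_handle, ...}
    #   isolated_star_source_dict = {tract: source_handle, ...}
    #   struct = task.run(visit, band, isolated_star_cat_dict,
    #                     isolated_star_source_dict, src_dict, calexp_dict)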
    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number. Used in the output catalogs.
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence.

        Raises
        ------
        NoWorkFound
            Raised if the selector returns no good sources.
        """
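        # Overall flow of this method: (1) concatenate the per-tract isolated
        # star catalogs and mark reserved stars; (2) for each detector, select
        # sources, determine the PSF model and aperture correction map, and
        # re-measure the PSF stars; (3) collect the per-detector results into
        # a single ExposureCatalog plus one concatenated source table.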
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []
        measured_src_table = None

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            if measured_src is not None:
                record = psf_ap_corr_cat.addNew()
                record['id'] = int(detector)
                record['visit'] = visit
                if psf is not None:
                    record.setPsf(psf)
                if ap_corr_map is not None:
                    record.setApCorrMap(ap_corr_map)

                measured_src['visit'][:] = visit
                measured_src['detector'][:] = detector

                measured_src_tables.append(measured_src.asAstropy().as_array())

        if len(measured_src_tables) > 0:
            measured_src_table = np.concatenate(measured_src_tables)

        if measured_src_table is None:
            raise pipeBase.NoWorkFound(f'No good sources found for any detectors in visit {visit}')

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
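        # The output schema built here is the minimal source schema plus the
        # mapped centroid slot, circular aperture and ApFlux/CalibFlux slot
        # columns, the selection flux/error used for the signal-to-noise cut,
        # the calib_psf_candidate/used/reserved flags, and the visit and
        # detector identifier columns added below.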
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        # Note that the slot_CalibFlux mapping will copy over any
        # normalized compensated fluxes that are used for calibration.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
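        # Per-tract steps below: load the star and source tables, cut both to
        # stars observed in ``band`` while remapping the star/source
        # cross-indexes, append a boolean ``reserved`` column filled by the
        # reserve_selection subtask, and offset the indexes so the per-tract
        # tables can be concatenated into single visit-wide arrays.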
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            if len(use_band) == 0:
                # There are no sources in this band in this tract.
                self.log.info("No sources found in %s band in tract %d.", band, tract)
                continue

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}"]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Calibrated exposure to characterize.
        src : `lsst.afw.table.SourceCatalog`
            Input source catalog for the detector.
        isolated_source_table : `np.ndarray`
            Table of isolated sources (with reserved flags) for the visit.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Extract footprints from the input src catalog for noise replacement.
        footprints = SingleFrameMeasurementTask.getFootprintsFromCatalog(src)

        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)
        if sum(good_src.selected) == 0:
            self.log.warning('No good sources remain after cuts for visit %d, detector %d',
                             visit, detector)
            return None, None, None

        # Cut down input src to the selected sources
        # We use a schema/mapper here that is separate from the one for the
        # output/measurement catalog because of clashes between fields that were
        # previously run and those that need to be rerun with the new psf model.
        # This may be slightly inefficient but keeps input and output values
        # cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            # NOTE: the third positional argument (the task metadata) was lost in
            # extraction; ``self.metadata`` is the standard value here but is a
            # reconstruction.
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        # We use the full footprint list from the input src catalog for noise replacement.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure, footprints=footprints)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src
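# Downstream usage sketch (illustrative, not part of this module): each record
# of the output ExposureCatalog carries the detector id in record['id'] with
# the PSF and aperture correction map attached, so a consumer holding the
# catalog could retrieve them per detector.  Lookup via ``find`` by id is an
# assumption here, relying on the catalog being written sorted by detector id.
#
#   row = finalized_psf_ap_corr_cat.find(detector_id)
#   psf = row.getPsf()
#   ap_corr_map = row.getApCorrMap()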