finalizeCharacterization.py
# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""Task to run a finalized image characterization, using additional data.
"""

__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']

import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask


class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and is sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )


class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )
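
    # Example of overriding these fields from a pipeline or command-line
    # config (the values shown are illustrative only; the actual defaults are
    # set above and in setDefaults below):
    #
    #     config.id_column = 'sourceId'
    #     config.source_selector['science'].signalToNoise.minimum = 50.0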

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to select out flagged objects
        # and signal-to-noise. Isolated, unresolved sources are handled
        # by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input.
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_FPPosition',
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
            'ext_shapeHSM_HigherOrderMomentsSource',
            'ext_shapeHSM_HigherOrderMomentsPSF',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off the centroid, apFlux, and calibFlux slots for measurement;
        # for these we use the values carried over from the input src catalog.
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

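        # The convolved and GAaP flux variants registered above can fail for
        # individual target seeings/apertures (e.g. when the requested target
        # PSF is narrower than the image PSF), so their aperture corrections
        # are allowed to fail without aborting the whole detector.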
        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names


class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'
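
    # Usage sketch (illustrative only): this task is normally executed by the
    # pipeline middleware through runQuantum, but it can be driven directly
    # from butler dataset handles, e.g.
    #
    #     config = FinalizeCharacterizationConfig()
    #     task = FinalizeCharacterizationTask(
    #         initInputs={'src_schema': src_schema_catalog}, config=config)
    #     struct = task.run(visit, band,
    #                       isolated_star_cat_dict, isolated_star_source_dict,
    #                       src_dict, calexp_dict)
    #
    # Here ``src_schema_catalog`` and the per-tract/per-detector handle dicts
    # are placeholder names for objects retrieved from a butler.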

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number. Used in the output catalogs.
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence.

        Raises
        ------
        NoWorkFound
            Raised if the selector returns no good sources.
        """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []
        measured_src_table = None

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            if measured_src is not None:
                record = psf_ap_corr_cat.addNew()
                record['id'] = int(detector)
                record['visit'] = visit
                if psf is not None:
                    record.setPsf(psf)
                if ap_corr_map is not None:
                    record.setApCorrMap(ap_corr_map)

                measured_src['visit'][:] = visit
                measured_src['detector'][:] = detector

                measured_src_tables.append(measured_src.asAstropy().as_array())

        if len(measured_src_tables) > 0:
            measured_src_table = np.concatenate(measured_src_tables)

        if measured_src_table is None:
            raise pipeBase.NoWorkFound(f'No good sources found for any detectors in visit {visit}')

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            if len(use_band) == 0:
                # There are no sources in this band in this tract.
                self.log.info("No sources found in %s band in tract %d.", band, tract)
                continue

            # With the following matching:
            # table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)
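            # ``a`` and ``b`` are parallel index arrays satisfying
            # use_band[a] == obj_index[b], i.e. matched source row b[k]
            # belongs to the band-selected star at use_band[a[k]].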

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}"]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]
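            # For example (an illustrative sketch of the slice described above):
            #
            #     start = table_cat[f'source_cat_index_{band}'][i]
            #     nsrc = table_cat[f'nsource_{band}'][i]
            #     sources_of_star_i = table_source[start: start + nsrc]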

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
        src : `lsst.afw.table.SourceCatalog`
        isolated_source_table : `np.ndarray`

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)
        if sum(good_src.selected) == 0:
            self.log.warning('No good sources remain after cuts for visit %d, detector %d',
                             visit, detector)
            return None, None, None

        # Cut down input src to the selected sources
        # We use a separate schema/mapper here than for the output/measurement catalog because of
        # clashes between fields that were previously run and those that need to be rerun with
        # the new psf model. This may be slightly inefficient but keeps input
        # and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)
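        # The boolean cut above returns a non-contiguous view of the records;
        # the deep copy makes a contiguous catalog so that column access and
        # extend() below work as expected.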

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
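        # psf_selection_result.psfCandidates is parallel to goodStarCat, so
        # zipping against the negated reserved flag drops reserved stars from
        # the list passed to the determiner.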
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src