diaCalculationPlugins.py
1# This file is part of ap_association.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21
22"""Plugins for use in DiaSource summary statistics.
23
24Output columns must match the schema of the Apdb
25in both name and units.
26"""
27
28import functools
29import warnings
30
31from astropy.stats import median_absolute_deviation
32import numpy as np
33import pandas as pd
34from scipy.optimize import lsq_linear
35
36import lsst.geom as geom
37import lsst.pex.config as pexConfig
38import lsst.sphgeom as sphgeom
39from astropy.timeseries import LombScargle
40from astropy.timeseries import LombScargleMultiband
41import math
42import statistics
43
44from .diaCalculation import (
45 DiaObjectCalculationPluginConfig,
46 DiaObjectCalculationPlugin)
47from .pluginRegistry import register
48
49
50__all__ = ("MeanDiaPositionConfig", "MeanDiaPosition",
51 "HTMIndexDiaPosition", "HTMIndexDiaPositionConfig",
52 "NumDiaSourcesDiaPlugin", "NumDiaSourcesDiaPluginConfig",
53 "SimpleSourceFlagDiaPlugin", "SimpleSourceFlagDiaPluginConfig",
54 "WeightedMeanDiaPsfFluxConfig", "WeightedMeanDiaPsfFlux",
55 "PercentileDiaPsfFlux", "PercentileDiaPsfFluxConfig",
56 "SigmaDiaPsfFlux", "SigmaDiaPsfFluxConfig",
57 "Chi2DiaPsfFlux", "Chi2DiaPsfFluxConfig",
58 "MadDiaPsfFlux", "MadDiaPsfFluxConfig",
59 "SkewDiaPsfFlux", "SkewDiaPsfFluxConfig",
60 "MinMaxDiaPsfFlux", "MinMaxDiaPsfFluxConfig",
61 "MaxSlopeDiaPsfFlux", "MaxSlopeDiaPsfFluxConfig",
62 "ErrMeanDiaPsfFlux", "ErrMeanDiaPsfFluxConfig",
63 "LinearFitDiaPsfFlux", "LinearFitDiaPsfFluxConfig",
64 "StetsonJDiaPsfFlux", "StetsonJDiaPsfFluxConfig",
65 "WeightedMeanDiaTotFlux", "WeightedMeanDiaTotFluxConfig",
66 "SigmaDiaTotFlux", "SigmaDiaTotFluxConfig",
67 "LombScarglePeriodogram", "LombScarglePeriodogramConfig",
68 "LombScarglePeriodogramMulti", "LombScarglePeriodogramMultiConfig")
69
70
71def catchWarnings(_func=None, *, warns=[]):
72 """Decorator for generically catching numpy warnings.
73 """
74 def decoratorCatchWarnings(func):
75 @functools.wraps(func)
76 def wrapperCatchWarnings(*args, **kwargs):
77 with warnings.catch_warnings():
78 for val in warns:
79 warnings.filterwarnings("ignore", val)
80 return func(*args, **kwargs)
81 return wrapperCatchWarnings
82
83 if _func is None:
84 return decoratorCatchWarnings
85 else:
86 return decoratorCatchWarnings(_func)
87
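# A minimal usage sketch for ``catchWarnings`` (illustrative only; the
# function ``noisyRatio`` below is hypothetical and not part of this module):
#
#     @catchWarnings(warns=["invalid value encountered"])
#     def noisyRatio(numerator, denominator):
#         # A 0/0 element emits a numpy RuntimeWarning, which the decorator
#         # filters out while the wrapped function runs.
#         return np.nanmean(numerator / denominator)
#
# Each entry in ``warns`` is passed to ``warnings.filterwarnings`` as a
# message pattern, so only warnings whose text matches are ignored.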
88
89def typeSafePandasAssignment(
90    target,
91 source,
92 columns,
93 default_dtype=np.float64,
94 int_fill_value=0,
95 # TODO DM-53254: Remove the force_int_to_float hack.
96 force_int_to_float=False,
97):
98 """
99    Assign from a source dataframe to a target dataframe in a type-safe way.
100
101 Parameters
102 ----------
103 target : `pd.DataFrame`
104 Target pandas dataframe.
105 source : `pd.DataFrame` or `pd.Series`
106 Grouped source dataframe.
107 columns : `list` [`str`]
108 List of columns to transfer.
109 default_dtype : `np.dtype`, optional
110 Default datatype (if not in target).
111 int_fill_value : `int`, optional
112 Fill value for integer columns to avoid pandas insisting
113 that everything should be float-ified as nans.
114 force_int_to_float : `bool`, optional
115 Force integer columns to float columns? Use this option
116 for backwards compatibility for old pandas misfeatures which
117 are expected by some downstream processes.
118 """
119 is_series = isinstance(source, pd.Series)
120 for col in columns:
121 if is_series:
122 source_col = source
123 else:
124 source_col = source[col]
125
126 matched_length = False
127 if col in target.columns:
128 target_dtype = target[col].dtype
129 matched_length = len(target) == len(source)
130 else:
131 target_dtype = default_dtype
132
133 if (matched_length or pd.api.types.is_float_dtype(target_dtype)) and not force_int_to_float:
134 # If we have a matched length or float, we can do a
135 # straight assignment here.
136 target.loc[:, col] = source_col.astype(target_dtype)
137 else:
138 # With mis-matched integers, we must do this dance to preserve types.
139 # Note that this may lose precision with very large numbers.
140
141 # Convert to float
142 target[col] = target[col].astype(np.float64)
143 # Set the column, casting to float.
144 target.loc[:, col] = source_col.astype(np.float64)
145 if not force_int_to_float:
146 # Convert back to integer
147 target[col] = target[col].fillna(int_fill_value).astype(target_dtype)
148
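# A minimal usage sketch for ``typeSafePandasAssignment`` (illustrative only;
# the column name and values are made up):
#
#     objects = pd.DataFrame({"nDiaSources": np.zeros(3, dtype=np.int64)})
#     counts = pd.Series([4, 1, 7])
#     typeSafePandasAssignment(objects, counts, ["nDiaSources"])
#
# Because the source and target have the same length and the target column is
# integer, the values are assigned directly and the int64 dtype is preserved;
# with a shorter grouped result the helper instead round-trips through float
# and fills unmatched rows with ``int_fill_value`` before casting back.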
149
150def compute_optimized_periodogram_grid(x0, oversampling_factor=5, nyquist_factor=100):
151 """
152 Computes an optimized periodogram frequency grid for a given time series.
153
154 Parameters
155 ----------
156 x0 : `array`
157 The input time axis.
158 oversampling_factor : `int`, optional
159 The oversampling factor for frequency grid.
160 nyquist_factor : `int`, optional
161 The Nyquist factor for frequency grid.
162
163 Returns
164 -------
165 frequencies : `array`
166 The computed optimized periodogram frequency grid.
167 """
168
169 num_points = len(x0)
170 baseline = np.max(x0) - np.min(x0)
171
172 # Calculate the frequency resolution based on oversampling factor and baseline
173 frequency_resolution = 1. / baseline / oversampling_factor
174
175 num_frequencies = int(
176 0.5 * oversampling_factor * nyquist_factor * num_points)
177 frequencies = frequency_resolution + \
178 frequency_resolution * np.arange(num_frequencies)
179
180 return frequencies
181
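# A minimal usage sketch (illustrative only; the MJD values are made up):
#
#     mjd = np.array([60000.1, 60003.4, 60010.9, 60022.3, 60031.7])
#     f_grid = compute_optimized_periodogram_grid(mjd)
#
# With the defaults the grid runs from 1 / (oversampling_factor * baseline) up
# to roughly 0.5 * nyquist_factor * len(mjd) / baseline, i.e. it samples
# frequencies a factor ``oversampling_factor`` finer than 1 / baseline.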
182
183class LombScarglePeriodogramConfig(DiaObjectCalculationPluginConfig):
184    pass
185
186
187@register("ap_lombScarglePeriodogram")
189 """Compute the single-band period of a DiaObject given a set of DiaSources.
190 """
191 ConfigClass = LombScarglePeriodogramConfig
192
193 plugType = "multi"
194 outputCols = ["period", "power"]
195 needsFilter = True
196
197 @classmethod
200
201 @catchWarnings(warns=["All-NaN slice encountered"])
202 def calculate(self,
203 diaObjects,
204 diaSources,
205 filterDiaSources,
206 band):
207 """Compute the periodogram.
208
209 Parameters
210 ----------
211 diaObjects : `pandas.DataFrame`
212 Summary objects to store values in.
213 diaSources : `pandas.DataFrame` or `pandas.DataFrameGroupBy`
214 Catalog of DiaSources summarized by this DiaObject.
215 """
216
217 # Check and initialize output columns in diaObjects.
218 if (periodCol := f"{band}_period") not in diaObjects.columns:
219 diaObjects[periodCol] = np.nan
220 if (powerCol := f"{band}_power") not in diaObjects.columns:
221 diaObjects[powerCol] = np.nan
222
223 def _calculate_period(df, min_detections=5, nterms=1, oversampling_factor=5, nyquist_factor=100):
224 """Compute the Lomb-Scargle periodogram given a set of DiaSources.
225
226 Parameters
227 ----------
228 df : `pandas.DataFrame`
229 The input DataFrame.
230 min_detections : `int`, optional
231 The minimum number of detections.
232 nterms : `int`, optional
233 The number of terms in the Lomb-Scargle model.
234 oversampling_factor : `int`, optional
235 The oversampling factor for frequency grid.
236 nyquist_factor : `int`, optional
237 The Nyquist factor for frequency grid.
238
239 Returns
240 -------
241 pd_tab : `pandas.Series`
242 The output DataFrame with the Lomb-Scargle parameters.
243 """
244 tmpDf = df[~np.logical_or(np.isnan(df["psfFlux"]),
245 np.isnan(df["midpointMjdTai"]))]
246
247 if len(tmpDf) < min_detections:
248 return pd.Series({periodCol: np.nan, powerCol: np.nan})
249
250 time = tmpDf["midpointMjdTai"].to_numpy()
251 flux = tmpDf["psfFlux"].to_numpy()
252 flux_err = tmpDf["psfFluxErr"].to_numpy()
253
254 lsp = LombScargle(time, flux, dy=flux_err, nterms=nterms)
255            f_grid = compute_optimized_periodogram_grid(
256                time, oversampling_factor=oversampling_factor, nyquist_factor=nyquist_factor)
257 period = 1/f_grid
258 power = lsp.power(f_grid)
259
260 return pd.Series({periodCol: period[np.argmax(power)],
261 powerCol: np.max(power)})
262
263 diaObjects.loc[:, [periodCol, powerCol]
264 ] = filterDiaSources.apply(_calculate_period)
265
266
267class LombScarglePeriodogramMultiConfig(DiaObjectCalculationPluginConfig):
268    pass
269
270
271@register("ap_lombScarglePeriodogramMulti")
273 """Compute the multi-band LombScargle periodogram of a DiaObject given a set of DiaSources.
274 """
275 ConfigClass = LombScarglePeriodogramMultiConfig
276
277 plugType = "multi"
278 outputCols = ["multiPeriod", "multiPower",
279 "multiFap", "multiAmp", "multiPhase"]
280 needsFilter = True
281
282 @classmethod
285
286 @staticmethod
287 def calculate_baluev_fap(time, n, maxPeriod, zmax):
288 """Calculate the False-Alarm probability using the Baluev approximation.
289
290 Parameters
291 ----------
292 time : `array`
293 The input time axis.
294 n : `int`
295 The number of detections.
296 maxPeriod : `float`
297 The maximum period in the grid.
298 zmax : `float`
299 The maximum power in the grid.
300
301 Returns
302 -------
303 fap_estimate : `float`
304            The Baluev approximation to the false-alarm probability.
305
306        Notes
307        -----
308 .. [1] Baluev, R. V. 2008, MNRAS, 385, 1279
309 .. [2] Süveges, M., Guy, L.P., Eyer, L., et al. 2015, MNRAS, 450, 2052
310 """
311 if n <= 2:
312 return np.nan
313
314 gam_ratio = math.factorial(int((n - 1)/2)) / math.factorial(int((n - 2)/2))
315 fu = 1/maxPeriod
316 return gam_ratio * np.sqrt(
317 4*np.pi*statistics.variance(time)
318 ) * fu * (1-zmax)**((n-4)/2) * np.sqrt(zmax)
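    # For reference, the expression above follows the Baluev (2008) estimate
    #     FAP ~ gam_ratio * sqrt(4 * pi * Var(t)) * f_max
    #           * (1 - zmax)**((n - 4) / 2) * sqrt(zmax),
    # where gam_ratio approximates Gamma((n - 1) / 2) / Gamma((n - 2) / 2)
    # with integer factorials and f_max = 1 / maxPeriod is the highest
    # frequency searched (see the references in the docstring above).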
319
320 @staticmethod
321 def generate_lsp_params(lsp_model, fbest, bands):
322 """Generate the Lomb-Scargle parameters.
323 Parameters
324 ----------
325 lsp_model : `astropy.timeseries.LombScargleMultiband`
326 The Lomb-Scargle model.
327 fbest : `float`
328 The best period.
329 bands : `array`
330 The bands of the time series.
331
332 Returns
333 -------
334 Amp : `array`
335 The amplitude of the time series.
336 Ph : `array`
337 The phase of the time series.
338
339 Notes
340        -----
341 .. [1] VanderPlas, J. T., & Ivezic, Z. 2015, ApJ, 812, 18
342 """
343 best_params = lsp_model.model_parameters(fbest, units=True)
344
345 name_params = [f"theta_base_{i}" for i in range(3)]
346 name_params += [f"theta_band_{band}_{i}" for band in np.unique(bands) for i in range(3)]
347
348 df_params = pd.DataFrame([best_params], columns=name_params)
349
350 unique_bands = np.unique(bands)
351
352 amplitude_band = [np.sqrt(df_params[f"theta_band_{band}_1"]**2
353 + df_params[f"theta_band_{band}_2"]**2)
354 for band in unique_bands]
355 phase_bands = [np.arctan2(df_params[f"theta_band_{band}_2"],
356 df_params[f"theta_band_{band}_1"]) for band in unique_bands]
357
358 amp = [a[0] for a in amplitude_band]
359 ph = [p[0] for p in phase_bands]
360
361 return amp, ph
362
363 @catchWarnings(warns=["All-NaN slice encountered"])
364 def calculate(self,
365 diaObjects,
366 diaSources,
367 **kwargs):
368 """Compute the multi-band LombScargle periodogram of a DiaObject given
369 a set of DiaSources.
370
371 Parameters
372 ----------
373 diaObjects : `pandas.DataFrame`
374 Summary objects to store values in.
375 diaSources : `pandas.DataFrame` or `pandas.DataFrameGroupBy`
376 Catalog of DiaSources summarized by this DiaObject.
377 **kwargs : `dict`
378 Unused kwargs that are always passed to a plugin.
379 """
380
381 bands_arr = diaSources['band'].unique().values
382 unique_bands = np.unique(np.concatenate(bands_arr))
383 # Check and initialize output columns in diaObjects.
384 if (periodCol := "multiPeriod") not in diaObjects.columns:
385 diaObjects[periodCol] = np.nan
386 if (powerCol := "multiPower") not in diaObjects.columns:
387 diaObjects[powerCol] = np.nan
388 if (fapCol := "multiFap") not in diaObjects.columns:
389 diaObjects[fapCol] = np.nan
390 ampCol = "multiAmp"
391 phaseCol = "multiPhase"
392 for i in range(len(unique_bands)):
393 ampCol_band = f"{unique_bands[i]}_{ampCol}"
394 if ampCol_band not in diaObjects.columns:
395 diaObjects[ampCol_band] = np.nan
396 phaseCol_band = f"{unique_bands[i]}_{phaseCol}"
397 if phaseCol_band not in diaObjects.columns:
398 diaObjects[phaseCol_band] = np.nan
399
400 def _calculate_period_multi(df, all_unique_bands,
401 min_detections=9, oversampling_factor=5, nyquist_factor=100):
402 """Calculate the multi-band Lomb-Scargle periodogram.
403
404 Parameters
405 ----------
406 df : `pandas.DataFrame`
407 The input DataFrame.
408 all_unique_bands : `list` of `str`
409 List of all bands present in the diaSource table that is being worked on.
410 min_detections : `int`, optional
411 The minimum number of detections, including all bands.
412 oversampling_factor : `int`, optional
413 The oversampling factor for frequency grid.
414 nyquist_factor : `int`, optional
415 The Nyquist factor for frequency grid.
416
417 Returns
418 -------
419 pd_tab : `pandas.Series`
420 The output DataFrame with the Lomb-Scargle parameters.
421 """
422 tmpDf = df[~np.logical_or(np.isnan(df["psfFlux"]),
423 np.isnan(df["midpointMjdTai"]))]
424
425 if (len(tmpDf)) < min_detections:
426 pd_tab_nodet = pd.Series({periodCol: np.nan,
427 powerCol: np.nan,
428 fapCol: np.nan})
429 for band in all_unique_bands:
430 pd_tab_nodet[f"{band}_{ampCol}"] = np.nan
431 pd_tab_nodet[f"{band}_{phaseCol}"] = np.nan
432
433 return pd_tab_nodet
434
435 time = tmpDf["midpointMjdTai"].to_numpy()
436 flux = tmpDf["psfFlux"].to_numpy()
437 flux_err = tmpDf["psfFluxErr"].to_numpy()
438 bands = tmpDf["band"].to_numpy()
439
440 lsp = LombScargleMultiband(time, flux, bands, dy=flux_err,
441 nterms_base=1, nterms_band=1)
442
443            f_grid = compute_optimized_periodogram_grid(
444                time, oversampling_factor=oversampling_factor, nyquist_factor=nyquist_factor)
445 period = 1/f_grid
446 power = lsp.power(f_grid)
447
448 fap_estimate = self.calculate_baluev_fap(
449 time, len(time), period[np.argmax(power)], np.max(power))
450
451 params_table_new = self.generate_lsp_params(lsp, f_grid[np.argmax(power)], bands)
452
453 pd_tab = pd.Series({periodCol: period[np.argmax(power)],
454 powerCol: np.max(power),
455 fapCol: fap_estimate
456 })
457
458 # Initialize the per-band amplitude/phase columns as NaNs
459 for band in all_unique_bands:
460 pd_tab[f"{band}_{ampCol}"] = np.nan
461 pd_tab[f"{band}_{phaseCol}"] = np.nan
462
463 # Populate the values of only the bands that have data for this diaSource
464 unique_bands = np.unique(bands)
465 for i in range(len(unique_bands)):
466 pd_tab[f"{unique_bands[i]}_{ampCol}"] = params_table_new[0][i]
467 pd_tab[f"{unique_bands[i]}_{phaseCol}"] = params_table_new[1][i]
468
469 return pd_tab
470
471 columns_list = [periodCol, powerCol, fapCol]
472 for i in range(len(unique_bands)):
473 columns_list.append(f"{unique_bands[i]}_{ampCol}")
474 columns_list.append(f"{unique_bands[i]}_{phaseCol}")
475
476 diaObjects.loc[:, columns_list
477 ] = diaSources.apply(_calculate_period_multi, unique_bands)
478
479
480class MeanDiaPositionConfig(DiaObjectCalculationPluginConfig):
481    pass
482
483
484@register("ap_meanPosition")
486 """Compute the mean position of a DiaObject given a set of DiaSources.
487 """
488
489 ConfigClass = MeanDiaPositionConfig
490
491 plugType = 'multi'
492
493 outputCols = ["ra", "dec"]
494 needsFilter = False
495
496 @classmethod
499
500 def calculate(self, diaObjects, diaSources, **kwargs):
501 """Compute the mean ra/dec position of the diaObject given the
502 diaSource locations.
503
504 Parameters
505 ----------
506 diaObjects : `pandas.DataFrame`
507 Summary objects to store values in.
508 diaSources : `pandas.DataFrame` or `pandas.DataFrameGroupBy`
509 Catalog of DiaSources summarized by this DiaObject.
510 **kwargs
511 Any additional keyword arguments that may be passed to the plugin.
512 """
513 for outCol in self.outputCols:
514 if outCol not in diaObjects.columns:
515 diaObjects[outCol] = np.nan
516
517 def _computeMeanPos(df):
518 aveCoord = geom.averageSpherePoint(
519 list(geom.SpherePoint(src["ra"], src["dec"], geom.degrees)
520 for idx, src in df.iterrows()))
521
522 return pd.Series({"ra": aveCoord.getRa().asDegrees(),
523 "dec": aveCoord.getDec().asDegrees()})
524
525 ans = diaSources.apply(_computeMeanPos)
526 typeSafePandasAssignment(diaObjects, ans, ["ra", "dec"])
527
528
529class HTMIndexDiaPositionConfig(DiaObjectCalculationPluginConfig):
530
531 htmLevel = pexConfig.Field(
532 dtype=int,
533 doc="Level of the HTM pixelization.",
534 default=20,
535 )
536
537
538@register("ap_HTMIndex")
540 """Compute the mean position of a DiaObject given a set of DiaSources.
541
542 Notes
543 -----
544 This plugin was implemented to satisfy requirements of old APDB interface
545 which required ``pixelId`` column in DiaObject with HTM20 index. APDB
546 interface had migrated to not need that information, but we keep this
547 plugin in case it may be useful for something else.
548 """
549 ConfigClass = HTMIndexDiaPositionConfig
550
551 plugType = 'single'
552
553 inputCols = ["ra", "dec"]
554 outputCols = ["pixelId"]
555 needsFilter = False
556
557 def __init__(self, config, name, metadata):
558 DiaObjectCalculationPlugin.__init__(self, config, name, metadata)
559        self.pixelator = sphgeom.HtmPixelization(self.config.htmLevel)
560
561 @classmethod
562    def getExecutionOrder(cls):
563        return cls.FLUX_MOMENTS_CALCULATED
564
565 def calculate(self, diaObjects, diaObjectId, **kwargs):
566 """Compute the mean position of a DiaObject given a set of DiaSources
567
568 Parameters
569 ----------
570 diaObjects : `pandas.dataFrame`
571 Summary objects to store values in and read ra/dec from.
572 diaObjectId : `int`
573 Id of the diaObject to update.
574 **kwargs
575 Any additional keyword arguments that may be passed to the plugin.
576 """
577 sphPoint = geom.SpherePoint(
578 diaObjects.at[diaObjectId, "ra"] * geom.degrees,
579 diaObjects.at[diaObjectId, "dec"] * geom.degrees)
580 diaObjects.at[diaObjectId, "pixelId"] = self.pixelator.index(
581 sphPoint.getVector())
582
583
584class NumDiaSourcesDiaPluginConfig(DiaObjectCalculationPluginConfig):
585    pass
586
587
588@register("ap_nDiaSources")
590 """Compute the total number of DiaSources associated with this DiaObject.
591 """
592
593 ConfigClass = NumDiaSourcesDiaPluginConfig
594 outputCols = ["nDiaSources"]
595 plugType = "multi"
596 needsFilter = False
597
598 @classmethod
601
602 def calculate(self, diaObjects, diaSources, **kwargs):
603 """Compute the total number of DiaSources associated with this DiaObject.
604
605 Parameters
606 ----------
607        diaObjects : `pandas.DataFrame`
608            Summary objects to store values in.
609 **kwargs
610 Any additional keyword arguments that may be passed to the plugin.
611 """
612 typeSafePandasAssignment(diaObjects, diaSources.diaObjectId.count(), ["nDiaSources"])
613
614
615class SimpleSourceFlagDiaPluginConfig(DiaObjectCalculationPluginConfig):
616    pass
617
618
619@register("ap_diaObjectFlag")
621 """Find if any DiaSource is flagged.
622
623 Set the DiaObject flag if any DiaSource is flagged.
624 """
625
626 ConfigClass = NumDiaSourcesDiaPluginConfig
627 outputCols = ["flags"]
628 plugType = "multi"
629 needsFilter = False
630
631 @classmethod
634
635 def calculate(self, diaObjects, diaSources, **kwargs):
636 """Find if any DiaSource is flagged.
637
638 Set the DiaObject flag if any DiaSource is flagged.
639
640 Parameters
641 ----------
642        diaObjects : `pandas.DataFrame`
643            Summary objects to store values in.
644 **kwargs
645 Any additional keyword arguments that may be passed to the plugin.
646 """
647 typeSafePandasAssignment(diaObjects, diaSources.flags.any(), ["flags"], default_dtype=np.uint64)
648
649
656 """Compute the weighted mean and mean error on the point source fluxes
657 of the DiaSource measured on the difference image.
658
659 Additionally store number of usable data points.
660 """
661
662 ConfigClass = WeightedMeanDiaPsfFluxConfig
663 outputCols = ["psfFluxMean", "psfFluxMeanErr", "psfFluxNdata"]
664 plugType = "multi"
665 needsFilter = True
666
667 @classmethod
670
671 @catchWarnings(warns=["invalid value encountered",
672 "divide by zero"])
673 def calculate(self,
674 diaObjects,
675 diaSources,
676 filterDiaSources,
677 band,
678 **kwargs):
679 """Compute the weighted mean and mean error of the point source flux.
680
681 Parameters
682 ----------
683 diaObject : `dict`
684 Summary object to store values in.
685 diaSources : `pandas.DataFrame`
686 DataFrame representing all diaSources associated with this
687 diaObject.
688 filterDiaSources : `pandas.DataFrame`
689 DataFrame representing diaSources associated with this
690 diaObject that are observed in the band pass ``band``.
691 band : `str`
692 Simple, string name of the filter for the flux being calculated.
693 **kwargs
694 Any additional keyword arguments that may be passed to the plugin.
695 """
696 meanName = "{}_psfFluxMean".format(band)
697 errName = "{}_psfFluxMeanErr".format(band)
698 nDataName = "{}_psfFluxNdata".format(band)
699 if meanName not in diaObjects.columns:
700 diaObjects[meanName] = np.nan
701 if errName not in diaObjects.columns:
702 diaObjects[errName] = np.nan
703 if nDataName not in diaObjects.columns:
704 diaObjects[nDataName] = 0
705
706 def _weightedMean(df):
707 tmpDf = df[~np.logical_or(np.isnan(df["psfFlux"]),
708 np.isnan(df["psfFluxErr"]))]
709 tot_weight = np.nansum(1 / tmpDf["psfFluxErr"] ** 2)
710 fluxMean = np.nansum(tmpDf["psfFlux"]
711 / tmpDf["psfFluxErr"] ** 2)
712 fluxMean /= tot_weight
713 if tot_weight > 0:
714 fluxMeanErr = np.sqrt(1 / tot_weight)
715 else:
716 fluxMeanErr = np.nan
717 nFluxData = len(tmpDf)
718
719 return pd.Series({meanName: fluxMean,
720 errName: fluxMeanErr,
721 nDataName: nFluxData},
722 dtype="object")
723 df = filterDiaSources.apply(_weightedMean).astype(diaObjects.dtypes[[meanName, errName, nDataName]])
724 # TODO DM-53254: Remove the force_int_to_float hack.
725 typeSafePandasAssignment(diaObjects, df, [meanName, errName, nDataName], force_int_to_float=True)
726
727
728class PercentileDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
729    percentiles = pexConfig.ListField(
730 dtype=int,
731 default=[5, 25, 50, 75, 95],
732 doc="Percentiles to calculate to compute values for. Should be "
733 "integer values."
734 )
735
736
737@register("ap_percentileFlux")
739 """Compute percentiles of diaSource fluxes.
740 """
741
742 ConfigClass = PercentileDiaPsfFluxConfig
743 # Output columns are created upon instantiation of the class.
744 outputCols = []
745 plugType = "multi"
746 needsFilter = True
747
748 def __init__(self, config, name, metadata, **kwargs):
749 DiaObjectCalculationPlugin.__init__(self,
750 config,
751 name,
752 metadata,
753 **kwargs)
754 self.outputCols = ["psfFluxPercentile{:02d}".format(percent)
755 for percent in self.config.percentiles]
756
757 @classmethod
760
761 @catchWarnings(warns=["All-NaN slice encountered"])
762 def calculate(self,
763 diaObjects,
764 diaSources,
765 filterDiaSources,
766 band,
767 **kwargs):
768 """Compute the percentile fluxes of the point source flux.
769
770 Parameters
771 ----------
772 diaObject : `dict`
773 Summary object to store values in.
774 diaSources : `pandas.DataFrame`
775 DataFrame representing all diaSources associated with this
776 diaObject.
777 filterDiaSources : `pandas.DataFrame`
778 DataFrame representing diaSources associated with this
779 diaObject that are observed in the band pass ``band``.
780 band : `str`
781 Simple, string name of the filter for the flux being calculated.
782 **kwargs
783 Any additional keyword arguments that may be passed to the plugin.
784 """
785 pTileNames = []
786 for tilePercent in self.config.percentiles:
787 pTileName = "{}_psfFluxPercentile{:02d}".format(band,
788 tilePercent)
789 pTileNames.append(pTileName)
790 if pTileName not in diaObjects.columns:
791 diaObjects[pTileName] = np.nan
792
793 def _fluxPercentiles(df):
794 pTiles = np.nanpercentile(df["psfFlux"], self.config.percentiles)
795 return pd.Series(
796 dict((tileName, pTile)
797 for tileName, pTile in zip(pTileNames, pTiles)))
798
799        typeSafePandasAssignment(
800            diaObjects,
801 filterDiaSources.apply(_fluxPercentiles),
802 pTileNames,
803 )
804
805
806class SigmaDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
807    pass
808
809
810@register("ap_sigmaFlux")
812 """Compute scatter of diaSource fluxes.
813 """
814
815 ConfigClass = SigmaDiaPsfFluxConfig
816 # Output columns are created upon instantiation of the class.
817 outputCols = ["psfFluxSigma"]
818 plugType = "multi"
819 needsFilter = True
820
821 @classmethod
824
825 def calculate(self,
826 diaObjects,
827 diaSources,
828 filterDiaSources,
829 band,
830 **kwargs):
831 """Compute the sigma fluxes of the point source flux.
832
833 Parameters
834 ----------
835 diaObject : `dict`
836 Summary object to store values in.
837 diaSources : `pandas.DataFrame`
838 DataFrame representing all diaSources associated with this
839 diaObject.
840 filterDiaSources : `pandas.DataFrame`
841 DataFrame representing diaSources associated with this
842 diaObject that are observed in the band pass ``band``.
843 band : `str`
844 Simple, string name of the filter for the flux being calculated.
845 **kwargs
846 Any additional keyword arguments that may be passed to the plugin.
847 """
848 # Set "delta degrees of freedom (ddf)" to 1 to calculate the unbiased
849 # estimator of scatter (i.e. 'N - 1' instead of 'N').
850 column = "{}_psfFluxSigma".format(band)
851
852        typeSafePandasAssignment(
853            diaObjects,
854 filterDiaSources.psfFlux.std(),
855 [column],
856 )
857
858
859class Chi2DiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
860    pass
861
862
863@register("ap_chi2Flux")
865 """Compute chi2 of diaSource fluxes.
866 """
867
868 ConfigClass = Chi2DiaPsfFluxConfig
869
870 # Required input Cols
871 inputCols = ["psfFluxMean"]
872 # Output columns are created upon instantiation of the class.
873 outputCols = ["psfFluxChi2"]
874 plugType = "multi"
875 needsFilter = True
876
877 @classmethod
878    def getExecutionOrder(cls):
879        return cls.FLUX_MOMENTS_CALCULATED
880
881 @catchWarnings(warns=["All-NaN slice encountered"])
882 def calculate(self,
883 diaObjects,
884 diaSources,
885 filterDiaSources,
886 band,
887 **kwargs):
888 """Compute the chi2 of the point source fluxes.
889
890 Parameters
891 ----------
892 diaObject : `dict`
893 Summary object to store values in.
894 diaSources : `pandas.DataFrame`
895 DataFrame representing all diaSources associated with this
896 diaObject.
897 filterDiaSources : `pandas.DataFrame`
898 DataFrame representing diaSources associated with this
899 diaObject that are observed in the band pass ``band``.
900 band : `str`
901 Simple, string name of the filter for the flux being calculated.
902 **kwargs
903 Any additional keyword arguments that may be passed to the plugin.
904 """
905 meanName = "{}_psfFluxMean".format(band)
906 column = "{}_psfFluxChi2".format(band)
907
908 def _chi2(df):
909 delta = (df["psfFlux"]
910 - diaObjects.at[df.diaObjectId.iat[0], meanName])
911 return np.nansum((delta / df["psfFluxErr"]) ** 2)
912
913        typeSafePandasAssignment(
914            diaObjects,
915 filterDiaSources.apply(_chi2),
916 [column],
917 )
918
919
920class MadDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
921    pass
922
923
924@register("ap_madFlux")
926 """Compute median absolute deviation of diaSource fluxes.
927 """
928
929 ConfigClass = MadDiaPsfFluxConfig
930
931 # Required input Cols
932 # Output columns are created upon instantiation of the class.
933 outputCols = ["psfFluxMAD"]
934 plugType = "multi"
935 needsFilter = True
936
937 @classmethod
940
941 @catchWarnings(warns=["All-NaN slice encountered"])
942 def calculate(self,
943 diaObjects,
944 diaSources,
945 filterDiaSources,
946 band,
947 **kwargs):
948 """Compute the median absolute deviation of the point source fluxes.
949
950 Parameters
951 ----------
952 diaObject : `dict`
953 Summary object to store values in.
954 diaSources : `pandas.DataFrame`
955 DataFrame representing all diaSources associated with this
956 diaObject.
957 filterDiaSources : `pandas.DataFrame`
958 DataFrame representing diaSources associated with this
959 diaObject that are observed in the band pass ``band``.
960 band : `str`
961 Simple, string name of the filter for the flux being calculated.
962 **kwargs
963 Any additional keyword arguments that may be passed to the plugin.
964 """
965 column = "{}_psfFluxMAD".format(band)
966
967        typeSafePandasAssignment(
968            diaObjects,
969 filterDiaSources.psfFlux.apply(median_absolute_deviation, ignore_nan=True),
970 [column],
971 )
972
973
974class SkewDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
975    pass
976
977
978@register("ap_skewFlux")
980 """Compute the skew of diaSource fluxes.
981 """
982
983 ConfigClass = SkewDiaPsfFluxConfig
984
985 # Required input Cols
986 # Output columns are created upon instantiation of the class.
987 outputCols = ["psfFluxSkew"]
988 plugType = "multi"
989 needsFilter = True
990
991 @classmethod
994
995 def calculate(self,
996 diaObjects,
997 diaSources,
998 filterDiaSources,
999 band,
1000 **kwargs):
1001 """Compute the skew of the point source fluxes.
1002
1003 Parameters
1004 ----------
1005 diaObject : `dict`
1006 Summary object to store values in.
1007 diaSources : `pandas.DataFrame`
1008 DataFrame representing all diaSources associated with this
1009 diaObject.
1010 filterDiaSources : `pandas.DataFrame`
1011 DataFrame representing diaSources associated with this
1012 diaObject that are observed in the band pass ``band``.
1013 band : `str`
1014 Simple, string name of the filter for the flux being calculated.
1015 **kwargs
1016 Any additional keyword arguments that may be passed to the plugin.
1017 """
1018 column = "{}_psfFluxSkew".format(band)
1019
1020        typeSafePandasAssignment(
1021            diaObjects,
1022 filterDiaSources.psfFlux.skew(),
1023 [column],
1024 )
1025
1026
1027class MinMaxDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
1028    pass
1029
1030
1031@register("ap_minMaxFlux")
1033 """Compute min/max of diaSource fluxes.
1034 """
1035
1036 ConfigClass = MinMaxDiaPsfFluxConfig
1037
1038 # Required input Cols
1039 # Output columns are created upon instantiation of the class.
1040 outputCols = ["psfFluxMin", "psfFluxMax"]
1041 plugType = "multi"
1042 needsFilter = True
1043
1044 @classmethod
1047
1048 def calculate(self,
1049 diaObjects,
1050 diaSources,
1051 filterDiaSources,
1052 band,
1053 **kwargs):
1054 """Compute min/max of the point source fluxes.
1055
1056 Parameters
1057 ----------
1058 diaObject : `dict`
1059 Summary object to store values in.
1060 diaSources : `pandas.DataFrame`
1061 DataFrame representing all diaSources associated with this
1062 diaObject.
1063 filterDiaSources : `pandas.DataFrame`
1064 DataFrame representing diaSources associated with this
1065 diaObject that are observed in the band pass ``band``.
1066 band : `str`
1067 Simple, string name of the filter for the flux being calculated.
1068 **kwargs
1069 Any additional keyword arguments that may be passed to the plugin.
1070 """
1071 minName = "{}_psfFluxMin".format(band)
1072 if minName not in diaObjects.columns:
1073 diaObjects[minName] = np.nan
1074 maxName = "{}_psfFluxMax".format(band)
1075 if maxName not in diaObjects.columns:
1076 diaObjects[maxName] = np.nan
1077
1078        typeSafePandasAssignment(
1079            diaObjects,
1080 filterDiaSources.psfFlux.min(),
1081 [minName],
1082 )
1083        typeSafePandasAssignment(
1084            diaObjects,
1085 filterDiaSources.psfFlux.max(),
1086 [maxName],
1087 )
1088
1089
1090class MaxSlopeDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
1091    pass
1092
1093
1094@register("ap_maxSlopeFlux")
1096 """Compute the maximum ratio time ordered deltaFlux / deltaTime.
1097 """
1098
1099 ConfigClass = MinMaxDiaPsfFluxConfig
1100
1101 # Required input Cols
1102 # Output columns are created upon instantiation of the class.
1103 outputCols = ["psfFluxMaxSlope"]
1104 plugType = "multi"
1105 needsFilter = True
1106
1107 @classmethod
1110
1111 def calculate(self,
1112 diaObjects,
1113 diaSources,
1114 filterDiaSources,
1115 band,
1116 **kwargs):
1117 """Compute the maximum ratio time ordered deltaFlux / deltaTime.
1118
1119 Parameters
1120 ----------
1121 diaObject : `dict`
1122 Summary object to store values in.
1123 diaSources : `pandas.DataFrame`
1124 DataFrame representing all diaSources associated with this
1125 diaObject.
1126 filterDiaSources : `pandas.DataFrame`
1127 DataFrame representing diaSources associated with this
1128 diaObject that are observed in the band pass ``band``.
1129 band : `str`
1130 Simple, string name of the filter for the flux being calculated.
1131 **kwargs
1132 Any additional keyword arguments that may be passed to the plugin.
1133 """
1134
1135 def _maxSlope(df):
1136 tmpDf = df[~np.logical_or(np.isnan(df["psfFlux"]),
1137 np.isnan(df["midpointMjdTai"]))]
1138 if len(tmpDf) < 2:
1139 return np.nan
1140 times = tmpDf["midpointMjdTai"].to_numpy()
1141 timeArgs = times.argsort()
1142 times = times[timeArgs]
1143 fluxes = tmpDf["psfFlux"].to_numpy()[timeArgs]
1144 return (np.diff(fluxes) / np.diff(times)).max()
1145
1146 column = "{}_psfFluxMaxSlope".format(band)
1147
1148        typeSafePandasAssignment(
1149            diaObjects,
1150 filterDiaSources.apply(_maxSlope),
1151 [column],
1152 )
1153
1154
1155class ErrMeanDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
1156    pass
1157
1158
1159@register("ap_meanErrFlux")
1161 """Compute the mean of the dia source errors.
1162 """
1163
1164 ConfigClass = ErrMeanDiaPsfFluxConfig
1165
1166 # Required input Cols
1167 # Output columns are created upon instantiation of the class.
1168 outputCols = ["psfFluxErrMean"]
1169 plugType = "multi"
1170 needsFilter = True
1171
1172 @classmethod
1175
1176 def calculate(self,
1177 diaObjects,
1178 diaSources,
1179 filterDiaSources,
1180 band,
1181 **kwargs):
1182 """Compute the mean of the dia source errors.
1183
1184 Parameters
1185 ----------
1186 diaObject : `dict`
1187 Summary object to store values in.
1188 diaSources : `pandas.DataFrame`
1189 DataFrame representing all diaSources associated with this
1190 diaObject.
1191 filterDiaSources : `pandas.DataFrame`
1192 DataFrame representing diaSources associated with this
1193 diaObject that are observed in the band pass ``band``.
1194 band : `str`
1195 Simple, string name of the filter for the flux being calculated.
1196 **kwargs
1197 Any additional keyword arguments that may be passed to the plugin.
1198 """
1199 column = "{}_psfFluxErrMean".format(band)
1200
1201        typeSafePandasAssignment(
1202            diaObjects,
1203 filterDiaSources.psfFluxErr.mean(),
1204 [column],
1205 # Note that the schemas expect this to be single-precision.
1206 default_dtype=np.float32,
1207 )
1208
1209
1210class LinearFitDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
1211    pass
1212
1213
1214@register("ap_linearFit")
1216 """Compute fit a linear model to flux vs time.
1217 """
1218
1219 ConfigClass = LinearFitDiaPsfFluxConfig
1220
1221 # Required input Cols
1222 # Output columns are created upon instantiation of the class.
1223 outputCols = ["psfFluxLinearSlope", "psfFluxLinearIntercept"]
1224 plugType = "multi"
1225 needsFilter = True
1226
1227 @classmethod
1230
1231 def calculate(self,
1232 diaObjects,
1233 diaSources,
1234 filterDiaSources,
1235 band,
1236 **kwargs):
1237 """Compute fit a linear model to flux vs time.
1238
1239 Parameters
1240 ----------
1241 diaObject : `dict`
1242 Summary object to store values in.
1243 diaSources : `pandas.DataFrame`
1244 DataFrame representing all diaSources associated with this
1245 diaObject.
1246 filterDiaSources : `pandas.DataFrame`
1247 DataFrame representing diaSources associated with this
1248 diaObject that are observed in the band pass ``band``.
1249 band : `str`
1250 Simple, string name of the filter for the flux being calculated.
1251 **kwargs
1252 Any additional keyword arguments that may be passed to the plugin.
1253 """
1254
1255 mName = "{}_psfFluxLinearSlope".format(band)
1256 if mName not in diaObjects.columns:
1257 diaObjects[mName] = np.nan
1258 bName = "{}_psfFluxLinearIntercept".format(band)
1259 if bName not in diaObjects.columns:
1260 diaObjects[bName] = np.nan
1261 dtype = diaObjects[mName].dtype
1262
1263 def _linearFit(df):
1264 tmpDf = df[~np.logical_or(
1265 np.isnan(df["psfFlux"]),
1266 np.logical_or(np.isnan(df["psfFluxErr"]),
1267 np.isnan(df["midpointMjdTai"])))]
1268 if len(tmpDf) < 2:
1269 return pd.Series({mName: np.nan, bName: np.nan})
1270 fluxes = tmpDf["psfFlux"].to_numpy()
1271 errors = tmpDf["psfFluxErr"].to_numpy()
1272 times = tmpDf["midpointMjdTai"].to_numpy()
1273 A = np.array([times / errors, 1 / errors]).transpose()
1274 m, b = lsq_linear(A, fluxes / errors).x
1275 return pd.Series({mName: m, bName: b}, dtype=dtype)
1276
1277        typeSafePandasAssignment(
1278            diaObjects,
1279 filterDiaSources.apply(_linearFit),
1280 [mName, bName],
1281 )
1282
1283
1284class StetsonJDiaPsfFluxConfig(DiaObjectCalculationPluginConfig):
1285    pass
1286
1287
1288@register("ap_stetsonJ")
1290 """Compute the StetsonJ statistic on the DIA point source fluxes.
1291 """
1292
1293 ConfigClass = LinearFitDiaPsfFluxConfig
1294
1295 # Required input Cols
1296 inputCols = ["psfFluxMean"]
1297 # Output columns are created upon instantiation of the class.
1298 outputCols = ["psfFluxStetsonJ"]
1299 plugType = "multi"
1300 needsFilter = True
1301
1302 @classmethod
1303    def getExecutionOrder(cls):
1304        return cls.FLUX_MOMENTS_CALCULATED
1305
1306 def calculate(self,
1307 diaObjects,
1308 diaSources,
1309 filterDiaSources,
1310 band,
1311 **kwargs):
1312 """Compute the StetsonJ statistic on the DIA point source fluxes.
1313
1314 Parameters
1315 ----------
1316 diaObject : `dict`
1317 Summary object to store values in.
1318 diaSources : `pandas.DataFrame`
1319 DataFrame representing all diaSources associated with this
1320 diaObject.
1321 filterDiaSources : `pandas.DataFrame`
1322 DataFrame representing diaSources associated with this
1323 diaObject that are observed in the band pass ``band``.
1324 band : `str`
1325 Simple, string name of the filter for the flux being calculated.
1326 **kwargs
1327 Any additional keyword arguments that may be passed to the plugin.
1328 """
1329 meanName = "{}_psfFluxMean".format(band)
1330
1331 def _stetsonJ(df):
1332 tmpDf = df[~np.logical_or(np.isnan(df["psfFlux"]),
1333 np.isnan(df["psfFluxErr"]))]
1334 if len(tmpDf) < 2:
1335 return np.nan
1336 fluxes = tmpDf["psfFlux"].to_numpy()
1337 errors = tmpDf["psfFluxErr"].to_numpy()
1338
1339 return self._stetson_J(
1340 fluxes,
1341 errors,
1342 diaObjects.at[tmpDf.diaObjectId.iat[0], meanName])
1343
1344 column = "{}_psfFluxStetsonJ".format(band)
1345        typeSafePandasAssignment(
1346            diaObjects,
1347 filterDiaSources.apply(_stetsonJ),
1348 [column],
1349 )
1350
1351 def _stetson_J(self, fluxes, errors, mean=None):
1352 """Compute the single band stetsonJ statistic.
1353
1354 Parameters
1355 ----------
1356 fluxes : `numpy.ndarray` (N,)
1357 Calibrated lightcurve flux values.
1358 errors : `numpy.ndarray` (N,)
1359 Errors on the calibrated lightcurve fluxes.
1360 mean : `float`
1361 Starting mean from previous plugin.
1362
1363 Returns
1364 -------
1365 stetsonJ : `float`
1366 stetsonJ statistic for the input fluxes and errors.
1367
1368 References
1369 ----------
1370 .. [1] Stetson, P. B., "On the Automatic Determination of Light-Curve
1371 Parameters for Cepheid Variables", PASP, 108, 851S, 1996
1372 """
1373 n_points = len(fluxes)
1374 flux_mean = self._stetson_mean(fluxes, errors, mean)
1375 delta_val = (
1376 np.sqrt(n_points / (n_points - 1)) * (fluxes - flux_mean) / errors)
1377 p_k = delta_val ** 2 - 1
1378
1379 return np.mean(np.sign(p_k) * np.sqrt(np.fabs(p_k)))
1380
1381    def _stetson_mean(self,
1382                      values,
1383 errors,
1384 mean=None,
1385 alpha=2.,
1386 beta=2.,
1387 n_iter=20,
1388 tol=1e-6):
1389 """Compute the stetson mean of the fluxes which down-weights outliers.
1390
1391        Weights are based on an error-weighted difference scaled by a constant
1392        (1/``alpha``) and raised to the power ``beta``. Higher ``beta`` penalizes
1393        outliers more harshly, and ``alpha`` sets the number of sigma where a
1394        weighted difference of 1 occurs.
1395
1396 Parameters
1397 ----------
1398        values : `numpy.ndarray`, (N,)
1399 Input values to compute the mean of.
1400 errors : `numpy.ndarray`, (N,)
1401 Errors on the input values.
1402 mean : `float`
1403 Starting mean value or None.
1404 alpha : `float`
1405 Scalar down-weighting of the fractional difference. lower->more
1406 clipping. (Default value is 2.)
1407 beta : `float`
1408 Power law slope of the used to down-weight outliers. higher->more
1409 clipping. (Default value is 2.)
1410 n_iter : `int`
1411 Number of iterations of clipping.
1412 tol : `float`
1413 Fractional and absolute tolerance goal on the change in the mean
1414 before exiting early. (Default value is 1e-6)
1415
1416 Returns
1417 -------
1418 mean : `float`
1419 Weighted stetson mean result.
1420
1421 References
1422 ----------
1423 .. [1] Stetson, P. B., "On the Automatic Determination of Light-Curve
1424 Parameters for Cepheid Variables", PASP, 108, 851S, 1996
1425 """
1426 n_points = len(values)
1427 n_factor = np.sqrt(n_points / (n_points - 1))
1428 inv_var = 1 / errors ** 2
1429
1430 if mean is None:
1431 mean = np.average(values, weights=inv_var)
1432 for iter_idx in range(n_iter):
1433 chi = np.fabs(n_factor * (values - mean) / errors)
1434 tmp_mean = np.average(
1435 values,
1436 weights=inv_var / (1 + (chi / alpha) ** beta))
1437 diff = np.fabs(tmp_mean - mean)
1438 mean = tmp_mean
1439 if diff / mean < tol and diff < tol:
1440 break
1441 return mean
1442
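# For reference, the robust mean above re-weights each point as
#
#     w_i = (1 / err_i**2) / (1 + (|chi_i| / alpha)**beta),
#     chi_i = sqrt(n / (n - 1)) * (value_i - mean) / err_i,
#
# and iterates until the mean changes by less than ``tol`` both absolutely and
# fractionally, so points many sigma from the current mean contribute little
# to the next estimate.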
1443
1444class WeightedMeanDiaTotFluxConfig(DiaObjectCalculationPluginConfig):
1445    pass
1446
1447
1448@register("ap_meanTotFlux")
1450 """Compute the weighted mean and mean error on the point source fluxes
1451 forced photometered at the DiaSource location in the calibrated image.
1452
1453 Additionally store number of usable data points.
1454 """
1455
1456 ConfigClass = WeightedMeanDiaPsfFluxConfig
1457 outputCols = ["scienceFluxMean", "scienceFluxMeanErr"]
1458 plugType = "multi"
1459 needsFilter = True
1460
1461 @classmethod
1464
1465 @catchWarnings(warns=["invalid value encountered",
1466 "divide by zero"])
1467 def calculate(self,
1468 diaObjects,
1469 diaSources,
1470 filterDiaSources,
1471 band,
1472 **kwargs):
1473 """Compute the weighted mean and mean error of the point source flux.
1474
1475 Parameters
1476 ----------
1477 diaObject : `dict`
1478 Summary object to store values in.
1479 diaSources : `pandas.DataFrame`
1480 DataFrame representing all diaSources associated with this
1481 diaObject.
1482 filterDiaSources : `pandas.DataFrame`
1483 DataFrame representing diaSources associated with this
1484 diaObject that are observed in the band pass ``band``.
1485 band : `str`
1486 Simple, string name of the filter for the flux being calculated.
1487 **kwargs
1488 Any additional keyword arguments that may be passed to the plugin.
1489 """
1490 totMeanName = "{}_scienceFluxMean".format(band)
1491 if totMeanName not in diaObjects.columns:
1492 diaObjects[totMeanName] = np.nan
1493 totErrName = "{}_scienceFluxMeanErr".format(band)
1494 if totErrName not in diaObjects.columns:
1495 diaObjects[totErrName] = np.nan
1496
1497 def _meanFlux(df):
1498 tmpDf = df[~np.logical_or(np.isnan(df["scienceFlux"]),
1499 np.isnan(df["scienceFluxErr"]))]
1500 tot_weight = np.nansum(1 / tmpDf["scienceFluxErr"] ** 2)
1501 fluxMean = np.nansum(tmpDf["scienceFlux"]
1502 / tmpDf["scienceFluxErr"] ** 2)
1503 fluxMean /= tot_weight
1504 fluxMeanErr = np.sqrt(1 / tot_weight)
1505
1506 return pd.Series({totMeanName: fluxMean,
1507 totErrName: fluxMeanErr})
1508
1509 df = filterDiaSources.apply(_meanFlux).astype(diaObjects.dtypes[[totMeanName, totErrName]])
1510 typeSafePandasAssignment(diaObjects, df, [totMeanName, totErrName])
1511
1512
1513class SigmaDiaTotFluxConfig(DiaObjectCalculationPluginConfig):
1514    pass
1515
1516
1517@register("ap_sigmaTotFlux")
1519 """Compute scatter of diaSource fluxes.
1520 """
1521
1522 ConfigClass = SigmaDiaPsfFluxConfig
1523 # Output columns are created upon instantiation of the class.
1524 outputCols = ["scienceFluxSigma"]
1525 plugType = "multi"
1526 needsFilter = True
1527
1528 @classmethod
1531
1532 def calculate(self,
1533 diaObjects,
1534 diaSources,
1535 filterDiaSources,
1536 band,
1537 **kwargs):
1538 """Compute the sigma fluxes of the point source flux measured on the
1539 calibrated image.
1540
1541 Parameters
1542 ----------
1543 diaObject : `dict`
1544 Summary object to store values in.
1545 diaSources : `pandas.DataFrame`
1546 DataFrame representing all diaSources associated with this
1547 diaObject.
1548 filterDiaSources : `pandas.DataFrame`
1549 DataFrame representing diaSources associated with this
1550 diaObject that are observed in the band pass ``band``.
1551 band : `str`
1552 Simple, string name of the filter for the flux being calculated.
1553 **kwargs
1554 Any additional keyword arguments that may be passed to the plugin.
1555 """
1556 # Set "delta degrees of freedom (ddf)" to 1 to calculate the unbiased
1557 # estimator of scatter (i.e. 'N - 1' instead of 'N').
1558 column = "{}_scienceFluxSigma".format(band)
1559 typeSafePandasAssignment(diaObjects, filterDiaSources.scienceFlux.std(), [column])