LSST Applications g063fba187b+fee0456c91,g0f08755f38+ea96e5a5a3,g1653933729+a8ce1bb630,g168dd56ebc+a8ce1bb630,g1a2382251a+90257ff92a,g20f6ffc8e0+ea96e5a5a3,g217e2c1bcf+937a289c59,g28da252d5a+daa7da44eb,g2bbee38e9b+253935c60e,g2bc492864f+253935c60e,g3156d2b45e+6e55a43351,g32e5bea42b+31359a2a7a,g347aa1857d+253935c60e,g35bb328faa+a8ce1bb630,g3a166c0a6a+253935c60e,g3b1af351f3+a8ce1bb630,g3e281a1b8c+c5dd892a6c,g414038480c+416496e02f,g41af890bb2+afe91b1188,g599934f4f4+0db33f7991,g7af13505b9+e36de7bce6,g80478fca09+da231ba887,g82479be7b0+a4516e59e3,g858d7b2824+ea96e5a5a3,g89c8672015+f4add4ffd5,g9125e01d80+a8ce1bb630,ga5288a1d22+bc6ab8dfbd,gb58c049af0+d64f4d3760,gc28159a63d+253935c60e,gcab2d0539d+3f2b72788c,gcf0d15dbbd+4ea9c45075,gda6a2b7d83+4ea9c45075,gdaeeff99f8+1711a396fd,ge79ae78c31+253935c60e,gef2f8181fd+3031e3cf99,gf0baf85859+c1f95f4921,gfa517265be+ea96e5a5a3,gfa999e8aa5+17cd334064,w.2024.50
LSST Data Management Base Package
Loading...
Searching...
No Matches
functors.py
Go to the documentation of this file.
1# This file is part of pipe_tasks.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21
22__all__ = ["init_fromDict", "Functor", "CompositeFunctor", "mag_aware_eval",
23 "CustomFunctor", "Column", "Index", "CoordColumn", "RAColumn",
24 "DecColumn", "HtmIndex20", "fluxName", "fluxErrName", "Mag",
25 "MagErr", "MagDiff", "Color", "DeconvolvedMoments", "SdssTraceSize",
26 "PsfSdssTraceSizeDiff", "HsmTraceSize", "PsfHsmTraceSizeDiff",
27 "HsmFwhm", "E1", "E2", "RadiusFromQuadrupole", "LocalWcs",
28 "ComputePixelScale", "ConvertPixelToArcseconds",
29 "ConvertPixelSqToArcsecondsSq",
30 "ConvertDetectorAngleToPositionAngle",
31 "ReferenceBand", "Photometry",
32 "NanoJansky", "NanoJanskyErr", "LocalPhotometry", "LocalNanojansky",
33 "LocalNanojanskyErr", "LocalDipoleMeanFlux",
34 "LocalDipoleMeanFluxErr", "LocalDipoleDiffFlux",
35 "LocalDipoleDiffFluxErr", "Ebv",
36 ]
37
38import logging
39import os
40import os.path
41import re
42import warnings
43from contextlib import redirect_stdout
44from itertools import product
45
46import astropy.units as u
47import lsst.geom as geom
48import lsst.sphgeom as sphgeom
49import numpy as np
50import pandas as pd
51import yaml
52from astropy.coordinates import SkyCoord
53from lsst.daf.butler import DeferredDatasetHandle
54from lsst.pipe.base import InMemoryDatasetHandle
55from lsst.utils import doImport
56from lsst.utils.introspection import get_full_type_name
57
58
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary.

    The object needs to be importable as f'{basePath}.{initDict[typeKey]}'.
    The positional and keyword arguments (if any) are contained in "args" and
    "kwargs" entries in the dictionary, respectively.
    This is used in `~lsst.pipe.tasks.functors.CompositeFunctor.from_yaml` to
    initialize a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : dictionary
        Dictionary describing object's initialization.
        Must contain an entry keyed by ``typeKey`` that is the name of the
        object, relative to ``basePath``.
    basePath : str
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : str
        Key of ``initDict`` that is the name of the object (relative to
        ``basePath``).
    name : str, optional
        Name of the functor being constructed; used only to give a more
        informative error message on failure.

    Returns
    -------
    element
        The constructed object.

    Raises
    ------
    Exception
        An exception of the same type as the underlying construction failure,
        with an added message identifying the failing functor.
    """
    initDict = initDict.copy()
    # TO DO: DM-21956 We should be able to define functors outside this module
    pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = f'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
        # Chain the original exception so the root cause and its traceback
        # are preserved for debugging.
        raise type(e)(message, e.args) from e
    return element
95
96
class Functor(object):
    """Define and execute a calculation on a DataFrame or Handle holding a
    DataFrame.

    The `__call__` method accepts either a `~pandas.DataFrame` object or a
    `~lsst.daf.butler.DeferredDatasetHandle` or
    `~lsst.pipe.base.InMemoryDatasetHandle`, and returns the
    result of the calculation as a single column.
    Each functor defines what columns are needed for the calculation, and only
    these columns are read from the dataset handle.

    The action of `__call__` consists of two steps: first, loading the
    necessary columns from disk into memory as a `~pandas.DataFrame` object;
    and second, performing the computation on this DataFrame and returning the
    result.

    To define a new `Functor`, a subclass must define a `_func` method,
    that takes a `~pandas.DataFrame` and returns result in a `~pandas.Series`.
    In addition, it must define the following attributes:

    * `_columns`: The columns necessary to perform the calculation
    * `name`: A name appropriate for a figure axis label
    * `shortname`: A name appropriate for use as a dictionary key

    On initialization, a `Functor` should declare what band (``filt`` kwarg)
    and dataset (e.g. ``'ref'``, ``'meas'``, ``'forced_src'``) it is intended
    to be applied to.
    This enables the `_get_data` method to extract the proper columns from the
    underlying data.
    If not specified, the dataset will fall back on the `_defaultDataset`
    attribute.
    If band is not specified and ``dataset`` is anything other than ``'ref'``,
    then an error will be raised when trying to perform the calculation.

    Originally, `Functor` was set up to expect datasets formatted like the
    ``deepCoadd_obj`` dataset; that is, a DataFrame with a multi-level column
    index, with the levels of the column index being ``band``, ``dataset``, and
    ``column``.
    It has since been generalized to apply to DataFrames without multi-level
    indices and multi-level indices with just ``dataset`` and ``column``
    levels.
    In addition, the `_get_data` method that reads the columns from the
    underlying data will return a DataFrame with column index levels defined by
    the `_dfLevels` attribute; by default, this is ``column``.

    The `_dfLevels` attributes should generally not need to be changed, unless
    `_func` needs columns from multiple filters or datasets to do the
    calculation.
    An example of this is the `~lsst.pipe.tasks.functors.Color` functor, for
    which `_dfLevels = ('band', 'column')`, and `_func` expects the DataFrame
    it gets to have those levels in the column index.

    Parameters
    ----------
    filt : str
        Band upon which to do the calculation.

    dataset : str
        Dataset upon which to do the calculation (e.g., 'ref', 'meas',
        'forced_src').
    """

    _defaultDataset = 'ref'
    _dfLevels = ('column',)
    _defaultNoDup = False

    def __init__(self, filt=None, dataset=None, noDup=None):
        self.filt = filt
        self.dataset = dataset if dataset is not None else self._defaultDataset
        self._noDup = noDup
        self.log = logging.getLogger(type(self).__name__)

    @property
    def noDup(self):
        """Do not explode by band if used on object table."""
        if self._noDup is not None:
            return self._noDup
        else:
            return self._defaultNoDup

    @property
    def columns(self):
        """Columns required to perform calculation."""
        if not hasattr(self, '_columns'):
            raise NotImplementedError('Must define columns property or _columns attribute')
        return self._columns

    def _get_data_columnLevels(self, data, columnIndex=None):
        """Gets the names of the column index levels.

        This should only be called in the context of a multilevel table.

        Parameters
        ----------
        data : various
            The data to be read, can be a
            `~lsst.daf.butler.DeferredDatasetHandle` or
            `~lsst.pipe.base.InMemoryDatasetHandle`.
        columnIndex (optional): pandas `~pandas.Index` object
            If not passed, then it is read from the
            `~lsst.daf.butler.DeferredDatasetHandle`
            or `~lsst.pipe.base.InMemoryDatasetHandle`.
        """
        if columnIndex is None:
            columnIndex = data.get(component="columns")
        return columnIndex.names

    def _get_data_columnLevelNames(self, data, columnIndex=None):
        """Gets the content of each of the column levels for a multilevel
        table.
        """
        if columnIndex is None:
            columnIndex = data.get(component="columns")

        columnLevels = columnIndex.names
        columnLevelNames = {
            level: list(np.unique(np.array([c for c in columnIndex])[:, i]))
            for i, level in enumerate(columnLevels)
        }
        return columnLevelNames

    def _colsFromDict(self, colDict, columnIndex=None):
        """Converts dictionary column specficiation to a list of columns."""
        new_colDict = {}
        columnLevels = self._get_data_columnLevels(None, columnIndex=columnIndex)

        for i, lev in enumerate(columnLevels):
            if lev in colDict:
                if isinstance(colDict[lev], str):
                    new_colDict[lev] = [colDict[lev]]
                else:
                    new_colDict[lev] = colDict[lev]
            else:
                new_colDict[lev] = columnIndex.levels[i]

        levelCols = [new_colDict[lev] for lev in columnLevels]
        cols = list(product(*levelCols))
        colsAvailable = [col for col in cols if col in columnIndex]
        return colsAvailable

    def multilevelColumns(self, data, columnIndex=None, returnTuple=False):
        """Returns columns needed by functor from multilevel dataset.

        To access tables with multilevel column structure, the
        `~lsst.daf.butler.DeferredDatasetHandle` or
        `~lsst.pipe.base.InMemoryDatasetHandle` needs to be passed
        either a list of tuples or a dictionary.

        Parameters
        ----------
        data : various
            The data as either `~lsst.daf.butler.DeferredDatasetHandle`, or
            `~lsst.pipe.base.InMemoryDatasetHandle`.
        columnIndex (optional): pandas `~pandas.Index` object
            Either passed or read in from
            `~lsst.daf.butler.DeferredDatasetHandle`.
        returnTuple : `bool`
            If true, then return a list of tuples rather than the column
            dictionary specification.
            This is set to `True` by `CompositeFunctor` in order to be able to
            combine columns from the various component functors.

        """
        if not isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            raise RuntimeError(f"Unexpected data type. Got {get_full_type_name(data)}.")

        if columnIndex is None:
            columnIndex = data.get(component="columns")

        # Confirm that the dataset has the column levels the functor is
        # expecting it to have.
        columnLevels = self._get_data_columnLevels(data, columnIndex)

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            columnLevelNames = self._get_data_columnLevelNames(data, columnIndex)
            if "band" in columnLevels:
                if self.dataset == "ref":
                    columnDict["band"] = columnLevelNames["band"][0]
                else:
                    raise ValueError(f"'filt' not set for functor {self.name}"
                                     f"(dataset {self.dataset}) "
                                     "and DataFrame "
                                     "contains multiple filters in column index. "
                                     "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['band'] = self.filt

        if returnTuple:
            return self._colsFromDict(columnDict, columnIndex=columnIndex)
        else:
            return columnDict

    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on DataFrame')

    def _get_columnIndex(self, data):
        """Return columnIndex."""

        if isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            return data.get(component="columns")
        else:
            return None

    def _get_data(self, data):
        """Retrieve DataFrame necessary for calculation.

        The data argument can be a `~pandas.DataFrame`, a
        `~lsst.daf.butler.DeferredDatasetHandle`, or
        an `~lsst.pipe.base.InMemoryDatasetHandle`.

        Returns a DataFrame upon which `self._func` can act.
        """
        # We wrap a DataFrame in a handle here to take advantage of the
        # DataFrame delegate DataFrame column wrangling abilities.
        if isinstance(data, pd.DataFrame):
            _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
        elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            _data = data
        else:
            raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")

        # First thing to do: check to see if the data source has a multilevel
        # column index or not.
        columnIndex = self._get_columnIndex(_data)
        is_multiLevel = isinstance(columnIndex, pd.MultiIndex)

        # Get proper columns specification for this functor.
        if is_multiLevel:
            columns = self.multilevelColumns(_data, columnIndex=columnIndex)
        else:
            columns = self.columns

        # Load in-memory DataFrame with appropriate columns the gen3 way.
        df = _data.get(parameters={"columns": columns})

        # Drop unnecessary column levels.
        if is_multiLevel:
            df = self._setLevels(df)

        return df

    def _setLevels(self, df):
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df

    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, data, dropna=False):
        df = self._get_data(data)
        try:
            vals = self._func(df)
        except Exception as e:
            # Fixed scrape corruption: was `self.namename`.
            self.log.error("Exception in %s call: %s: %s", self.name, type(e).__name__, e)
            vals = self.fail(df)
        if dropna:
            vals = self._dropna(vals)

        return vals

    def difference(self, data1, data2, **kwargs):
        """Computes difference between functor called on two different
        DataFrame/Handle objects.
        """
        return self(data1, **kwargs) - self(data2, **kwargs)

    def fail(self, df):
        """Return a Series of NaNs with the same index as ``df``."""
        return pd.Series(np.full(len(df), np.nan), index=df.index)

    @property
    def name(self):
        """Full name of functor (suitable for figure labels)."""
        # NOTE: upstream returns (not raises) NotImplementedError so that the
        # error-logging path in __call__ can still format it with %s.
        return NotImplementedError

    @property
    def shortname(self):
        """Short name of functor (suitable for column name/dict key)."""
        # Fixed scrape corruption: was `self.namename`.
        return self.name
378
379
class CompositeFunctor(Functor):
    """Perform multiple calculations at once on a catalog.

    The role of a `CompositeFunctor` is to group together computations from
    multiple functors.
    Instead of returning `~pandas.Series` a `CompositeFunctor` returns a
    `~pandas.DataFrame`, with the column names being the keys of ``funcDict``.

    The `columns` attribute of a `CompositeFunctor` is the union of all columns
    in all the component functors.

    A `CompositeFunctor` does not use a `_func` method itself; rather, when a
    `CompositeFunctor` is called, all its columns are loaded at once, and the
    resulting DataFrame is passed to the `_func` method of each component
    functor.
    This has the advantage of only doing I/O (reading from parquet file) once,
    and works because each individual `_func` method of each component functor
    does not care if there are *extra* columns in the DataFrame being passed;
    only that it must contain *at least* the `columns` it expects.

    An important and useful class method is `from_yaml`, which takes as an
    argument the path to a YAML file specifying a collection of functors.

    Parameters
    ----------
    funcs : `dict` or `list`
        Dictionary or list of functors.
        If a list, then it will be converted into a dictonary according to the
        `.shortname` attribute of each functor.
    """
    dataset = None
    name = "CompositeFunctor"

    def __init__(self, funcs, **kwargs):

        if type(funcs) is dict:
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}

        self._filt = None

        super().__init__(**kwargs)

    @property
    def filt(self):
        return self._filt

    @filt.setter
    def filt(self, filt):
        # Propagate the band to every component functor.
        if filt is not None:
            for _, f in self.funcDict.items():
                f.filt = filt
        self._filt = filt

    def update(self, new):
        """Update the functor with new functors."""
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')

        # Make sure new functors have the same 'filt' set.
        # (Re-assigning triggers the property setter, which propagates.)
        if self.filt is not None:
            self.filt = self.filt

    @property
    def columns(self):
        return list(set([x for y in [f.columns for f in self.funcDict.values()] for x in y]))

    def multilevelColumns(self, data, **kwargs):
        # Get the union of columns for all component functors.
        # Note the need to have `returnTuple=True` here.
        return list(
            set(
                [
                    x
                    for y in [
                        f.multilevelColumns(data, returnTuple=True, **kwargs) for f in self.funcDict.values()
                    ]
                    for x in y
                ]
            )
        )

    def __call__(self, data, **kwargs):
        """Apply the functor to the data table.

        Parameters
        ----------
        data : various
            The data represented as `~lsst.daf.butler.DeferredDatasetHandle`,
            `~lsst.pipe.base.InMemoryDatasetHandle`, or `~pandas.DataFrame`.
            The table or a pointer to a table on disk from which columns can
            be accessed.
        """
        if isinstance(data, pd.DataFrame):
            _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
        elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            _data = data
        else:
            raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")

        columnIndex = self._get_columnIndex(_data)

        if isinstance(columnIndex, pd.MultiIndex):
            # Fixed scrape corruption: was `self.multilevelColumnsmultilevelColumns`.
            columns = self.multilevelColumns(_data, columnIndex=columnIndex)
            df = _data.get(parameters={"columns": columns})

            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(
                        df[f.multilevelColumns(_data, returnTuple=True, columnIndex=columnIndex)]
                    )
                    valDict[k] = f._func(subdf)
                except Exception as e:
                    self.log.exception(
                        "Exception in %s (funcs: %s) call: %s",
                        self.name,
                        str(list(self.funcDict.keys())),
                        type(e).__name__,
                    )
                    try:
                        valDict[k] = f.fail(subdf)
                    except NameError:
                        # subdf was never assigned; re-raise the original.
                        raise e

        else:
            # Fixed scrape corruption: was `self.columnscolumns`.
            df = _data.get(parameters={"columns": self.columns})

            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        # Check that output columns are actually columns.
        for name, colVal in valDict.items():
            if len(colVal.shape) != 1:
                raise RuntimeError("Transformed column '%s' is not the shape of a column. "
                                   "It is shaped %s and type %s." % (name, colVal.shape, type(colVal)))

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise

        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf

    @classmethod
    def renameCol(cls, col, renameRules):
        """Rename a column according to ``renameRules`` prefix replacements."""
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col

    @classmethod
    def from_file(cls, filename, **kwargs):
        """Build a `CompositeFunctor` from a YAML file on disk."""
        # Allow environment variables in the filename.
        filename = os.path.expandvars(filename)
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)

    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        """Build a `CompositeFunctor` from a parsed YAML specification."""
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'calexpFlags' in translationDefinition:
            for flag in translationDefinition['calexpFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='calexp')

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'forcedFlags' in translationDefinition:
            for flag in translationDefinition['forcedFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='forced_src')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)
578
579
def mag_aware_eval(df, expr, log):
    """Evaluate an expression on a DataFrame, knowing what the 'mag' function
    means.

    Builds on `pandas.DataFrame.eval`, which parses and executes math on
    DataFrames.

    Parameters
    ----------
    df : ~pandas.DataFrame
        DataFrame on which to evaluate expression.

    expr : str
        Expression.

    log : `logging.Logger`
        Logger used to report a failed first evaluation attempt.

    Returns
    -------
    val : `~pandas.Series`
        Result of the evaluated expression.
    """
    try:
        # mag(x) -> -2.5*log10(x); regex cleaned of invisible characters
        # introduced by the HTML scrape (zero-width joiners after backslashes).
        expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>)/log(10)', expr)
        val = df.eval(expr_new)
    except Exception as e:  # Should check what actually gets raised
        log.error("Exception in mag_aware_eval: %s: %s", type(e).__name__, e)
        # Retry assuming the column name needs the _instFlux suffix.
        expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
        val = df.eval(expr_new)
    return val
603
604
class CustomFunctor(Functor):
    """Arbitrary computation on a catalog.

    Column names (and thus the columns to be loaded from catalog) are found by
    finding all words and trying to ignore all "math-y" words.

    Parameters
    ----------
    expr : str
        Expression to evaluate, to be parsed and executed by
        `~lsst.pipe.tasks.functors.mag_aware_eval`.
    """
    # Words that look like identifiers but are functions, not columns.
    _ignore_words = ('mag', 'sin', 'cos', 'exp', 'log', 'sqrt')

    def __init__(self, expr, **kwargs):
        self.expr = expr
        super().__init__(**kwargs)

    @property
    def name(self):
        return self.expr

    @property
    def columns(self):
        """Columns referenced by the expression (with mag() arguments mapped
        to their _instFlux columns).
        """
        # Regex cleaned of invisible characters introduced by the HTML scrape.
        flux_cols = re.findall(r'mag\(\s*(\w+)\s*\)', self.expr)

        cols = [c for c in re.findall(r'[a-zA-Z_]+', self.expr) if c not in self._ignore_words]
        not_a_col = []
        for c in flux_cols:
            if not re.search('_instFlux$', c):
                cols.append(f'{c}_instFlux')
                not_a_col.append(c)
            else:
                cols.append(c)

        return list(set([c for c in cols if c not in not_a_col]))

    def _func(self, df):
        # Fixed scrape corruption: was `self.loglog`.
        return mag_aware_eval(df, self.expr, self.log)
644
645
class Column(Functor):
    """Get column with a specified name.

    Parameters
    ----------
    col : str
        Name of the column to return.
    """

    def __init__(self, col, **kwargs):
        self.col = col
        super().__init__(**kwargs)

    @property
    def name(self):
        return self.col

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        return df[self.col]
663
664
class Index(Column):
    """Return the value of the index for each object."""

    columns = ['coord_ra']  # Just a dummy; something has to be here.
    _defaultDataset = 'ref'
    _defaultNoDup = True

    def _func(self, df):
        return pd.Series(df.index, index=df.index)
674
675
class CoordColumn(Column):
    """Base class for coordinate column, in degrees."""
    # Underlying catalog stores coordinates in radians; convert on read.
    _radians = True

    def __init__(self, col, **kwargs):
        super().__init__(col, **kwargs)

    def _func(self, df):
        # Must not modify original column in case that column is used by
        # another functor.
        output = df[self.col] * 180 / np.pi if self._radians else df[self.col]
        return output
688
689
class RAColumn(CoordColumn):
    """Right Ascension, in degrees."""
    name = 'RA'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_ra', **kwargs)

    def __call__(self, catalog, **kwargs):
        return super().__call__(catalog, **kwargs)
700
701
class DecColumn(CoordColumn):
    """Declination, in degrees."""
    name = 'Dec'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_dec', **kwargs)

    def __call__(self, catalog, **kwargs):
        return super().__call__(catalog, **kwargs)
712
713
class RAErrColumn(CoordColumn):
    """Uncertainty in Right Ascension, in degrees."""
    name = 'RAErr'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_raErr', **kwargs)
721
722
class DecErrColumn(CoordColumn):
    """Uncertainty in declination, in degrees."""
    name = 'DecErr'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_decErr', **kwargs)
730
731
class RADecCovColumn(CoordColumn):
    """Coordinate covariance column, in degrees."""
    _radians = True
    name = 'RADecCov'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_ra_dec_Cov', **kwargs)

    def _func(self, df):
        # Must not modify original column in case that column is used by
        # another functor.
        # Covariance scales with the square of the rad->deg conversion factor.
        output = df[self.col]*(180/np.pi)**2 if self._radians else df[self.col]
        return output
746
747
class HtmIndex20(Functor):
    """Compute the level 20 HtmIndex for the catalog.

    Parameters
    ----------
    ra : str
        Name of the RA column.
    dec : str
        Name of the declination column.

    Notes
    -----
    This functor was implemented to satisfy requirements of old APDB interface
    which required the ``pixelId`` column in DiaObject with HTM20 index.
    The APDB interface had migrated to not need that information, but we keep
    this class in case it may be useful for something else.
    """
    name = "Htm20"
    htmLevel = 20
    _radians = True

    def __init__(self, ra, dec, **kwargs):
        # Restored line lost in the scrape: _func relies on self.pixelator.
        self.pixelator = sphgeom.HtmPixelization(self.htmLevel)
        self.ra = ra
        self.dec = dec
        self._columns = [self.ra, self.dec]
        super().__init__(**kwargs)

    def _func(self, df):

        def computePixel(row):
            if self._radians:
                sphPoint = geom.SpherePoint(row[self.ra],
                                            row[self.dec],
                                            geom.radians)
            else:
                sphPoint = geom.SpherePoint(row[self.ra],
                                            row[self.dec],
                                            geom.degrees)
            return self.pixelator.index(sphPoint.getVector())

        return df.apply(computePixel, axis=1, result_type='reduce').astype('int64')
783
784
def fluxName(col):
    """Append _instFlux to the column name if it doesn't have it already."""
    suffix = '_instFlux'
    return col if col.endswith(suffix) else col + suffix
790
791
def fluxErrName(col):
    """Append _instFluxErr to the column name if it doesn't have it already."""
    suffix = '_instFluxErr'
    return col if col.endswith(suffix) else col + suffix
797
798
class Mag(Functor):
    """Compute calibrated magnitude.

    Returns the flux at mag=0.
    The default ``fluxMag0`` is 63095734448.0194, which is default for HSC.
    TO DO: This default should be made configurable in DM-21955.

    This calculation hides warnings about invalid values and dividing by zero.

    As with all functors, a ``dataset`` and ``filt`` kwarg should be provided
    upon initialization.
    Unlike the default `Functor`, however, the default dataset for a `Mag` is
    ``'meas'``, rather than ``'ref'``.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute magnitude.
        Can be parseable by the `~lsst.pipe.tasks.functors.fluxName` function;
        that is, you can pass ``'modelfit_CModel'`` instead of
        ``'modelfit_CModel_instFlux'``, and it will understand.
    """
    _defaultDataset = 'meas'

    def __init__(self, col, **kwargs):
        self.col = fluxName(col)
        # TO DO: DM-21955 Replace hard coded photometic calibration values.
        self.fluxMag0 = 63095734448.0194

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col] / self.fluxMag0)

    @property
    def name(self):
        return f'mag_{self.col}'
843
844
class MagErr(Mag):
    """Compute calibrated magnitude uncertainty.

    Parameters
    ----------
    col : `str`
        Name of the flux column.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TO DO: DM-21955 Replace hard coded photometic calibration values.
        self.fluxMag0Err = 0.

    @property
    def columns(self):
        # Fixed scrape corruption: was `self.colcol`.
        return [self.col, self.col + 'Err']

    def _func(self, df):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            # Fixed scrape corruption: was `self.columnscolumnscolumns`.
            fluxCol, fluxErrCol = self.columns
            x = df[fluxErrCol] / df[fluxCol]
            y = self.fluxMag0Err / self.fluxMag0
            # Standard error propagation for -2.5*log10(flux/fluxMag0).
            magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
            return magErr

    @property
    def name(self):
        return super().name + '_err'
876
877
class MagDiff(Functor):
    """Functor to calculate magnitude difference.

    Parameters
    ----------
    col1 : str
        Name of the first flux column (minuend).
    col2 : str
        Name of the second flux column (subtrahend).
    """
    _defaultDataset = 'meas'

    def __init__(self, col1, col2, **kwargs):
        self.col1 = fluxName(col1)
        self.col2 = fluxName(col2)
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col1, self.col2]

    def _func(self, df):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col1]/df[self.col2])

    @property
    def name(self):
        return f'(mag_{self.col1} - mag_{self.col2})'

    @property
    def shortname(self):
        return f'magDiff_{self.col1}_{self.col2}'
904
905
class Color(Functor):
    """Compute the color between two filters.

    Computes color by initializing two different `Mag` functors based on the
    ``col`` and filters provided, and then returning the difference.

    This is enabled by the `_func` method expecting a DataFrame with a
    multilevel column index, with both ``'band'`` and ``'column'``, instead of
    just ``'column'``, which is the `Functor` default.
    This is controlled by the `_dfLevels` attribute.

    Also of note, the default dataset for `Color` is ``'forced_src'``, whereas
    for `Mag` it is ``'meas'``.

    Parameters
    ----------
    col : str
        Name of the flux column from which to compute; same as would be passed
        to `~lsst.pipe.tasks.functors.Mag`.

    filt2, filt1 : str
        Filters from which to compute magnitude difference.
        Color computed is ``Mag(filt2) - Mag(filt1)``.
    """
    _defaultDataset = 'forced_src'
    _dfLevels = ('band', 'column')
    _defaultNoDup = True

    def __init__(self, col, filt2, filt1, **kwargs):
        self.col = fluxName(col)
        if filt2 == filt1:
            raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
        self.filt2 = filt2
        self.filt1 = filt1

        self.mag2 = Mag(col, filt=filt2, **kwargs)
        self.mag1 = Mag(col, filt=filt1, **kwargs)

        super().__init__(**kwargs)

    @property
    def filt(self):
        # A Color spans two bands, so it has no single 'filt'.
        return None

    @filt.setter
    def filt(self, filt):
        # Deliberately ignore attempts to set a single band.
        pass

    def _func(self, df):
        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])
        return mag2 - mag1

    @property
    def columns(self):
        return [self.mag1.col, self.mag2.col]

    def multilevelColumns(self, parq, **kwargs):
        # Fixed scrape corruption: was `self.datasetdataset`.
        return [(self.dataset, self.filt1, self.col), (self.dataset, self.filt2, self.col)]

    @property
    def name(self):
        return f'{self.filt2} - {self.filt1} ({self.col})'

    @property
    def shortname(self):
        return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
973
974
976 """This functor subtracts the trace of the PSF second moments from the
977 trace of the second moments of the source.
978
979 If the HsmShapeAlgorithm measurement is valid, then these will be used for
980 the sources.
981 Otherwise, the SdssShapeAlgorithm measurements will be used.
982 """
983 name = 'Deconvolved Moments'
984 shortname = 'deconvolvedMoments'
985 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
986 "ext_shapeHSM_HsmSourceMoments_yy",
987 "base_SdssShape_xx", "base_SdssShape_yy",
988 "ext_shapeHSM_HsmPsfMoments_xx",
989 "ext_shapeHSM_HsmPsfMoments_yy")
990
991 def _func(self, df):
992 """Calculate deconvolved moments."""
993 if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns: # _xx added by tdm
994 hsm = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
995 else:
996 hsm = np.ones(len(df))*np.nan
997 sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
998 if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
999 psf = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]
1000 else:
1001 # LSST does not have shape.sdss.psf.
1002 # We could instead add base_PsfShape to the catalog using
1003 # exposure.getPsf().computeShape(s.getCentroid()).getIxx().
1004 raise RuntimeError('No psf shape parameter found in catalog')
1005
1006 return hsm.where(np.isfinite(hsm), sdss) - psf
1007
1008
1010 """Functor to calculate the SDSS trace radius size for sources.
1011
1012 The SDSS trace radius size is a measure of size equal to the square root of
1013 half of the trace of the second moments tensor measured with the
1014 SdssShapeAlgorithm plugin.
1015 This has units of pixels.
1016 """
1017 name = "SDSS Trace Size"
1018 shortname = 'sdssTrace'
1019 _columns = ("base_SdssShape_xx", "base_SdssShape_yy")
1020
1021 def _func(self, df):
1022 srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1023 return srcSize
1024
1025
1027 """Functor to calculate the SDSS trace radius size difference (%) between
1028 the object and the PSF model.
1029
1030 See Also
1031 --------
1032 SdssTraceSize
1033 """
1034 name = "PSF - SDSS Trace Size"
1035 shortname = 'psf_sdssTrace'
1036 _columns = ("base_SdssShape_xx", "base_SdssShape_yy",
1037 "base_SdssShape_psf_xx", "base_SdssShape_psf_yy")
1038
1039 def _func(self, df):
1040 srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1041 psfSize = np.sqrt(0.5*(df["base_SdssShape_psf_xx"] + df["base_SdssShape_psf_yy"]))
1042 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1043 return sizeDiff
1044
1045
1047 """Functor to calculate the HSM trace radius size for sources.
1048
1049 The HSM trace radius size is a measure of size equal to the square root of
1050 half of the trace of the second moments tensor measured with the
1051 HsmShapeAlgorithm plugin.
1052 This has units of pixels.
1053 """
1054 name = 'HSM Trace Size'
1055 shortname = 'hsmTrace'
1056 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1057 "ext_shapeHSM_HsmSourceMoments_yy")
1058
1059 def _func(self, df):
1060 srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1061 + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1062 return srcSize
1063
1064
1066 """Functor to calculate the HSM trace radius size difference (%) between
1067 the object and the PSF model.
1068
1069 See Also
1070 --------
1071 HsmTraceSize
1072 """
1073 name = 'PSF - HSM Trace Size'
1074 shortname = 'psf_HsmTrace'
1075 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1076 "ext_shapeHSM_HsmSourceMoments_yy",
1077 "ext_shapeHSM_HsmPsfMoments_xx",
1078 "ext_shapeHSM_HsmPsfMoments_yy")
1079
1080 def _func(self, df):
1081 srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1082 + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1083 psfSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmPsfMoments_xx"]
1084 + df["ext_shapeHSM_HsmPsfMoments_yy"]))
1085 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1086 return sizeDiff
1087
1088
1090 """Functor to calculate the PSF FWHM with second moments measured from the
1091 HsmShapeAlgorithm plugin.
1092
1093 This is in units of arcseconds, and assumes the hsc_rings_v1 skymap pixel
1094 scale of 0.168 arcseconds/pixel.
1095
1096 Notes
1097 -----
1098 This conversion assumes the PSF is Gaussian, which is not always the case.
1099 """
1100 name = 'HSM Psf FWHM'
1101 _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
1102 # TODO: DM-21403 pixel scale should be computed from the CD matrix or transform matrix
1103 pixelScale = 0.168
1104 SIGMA2FWHM = 2*np.sqrt(2*np.log(2))
1105
1106 def _func(self, df):
1107 return self.pixelScale*self.SIGMA2FWHM*np.sqrt(
1108 0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))
1109
1110
1112 r"""Calculate :math:`e_1` ellipticity component for sources, defined as:
1113
1114 .. math::
1115 e_1 &= (I_{xx}-I_{yy})/(I_{xx}+I_{yy})
1116
1117 See Also
1118 --------
1119 E2
1120 """
1121 name = "Distortion Ellipticity (e1)"
1122 shortname = "Distortion"
1123
1124 def __init__(self, colXX, colXY, colYY, **kwargs):
1125 self.colXX = colXX
1126 self.colXY = colXY
1127 self.colYY = colYY
1128 self._columns = [self.colXX, self.colXY, self.colYY]
1129 super().__init__(**kwargs)
1130
1131 @property
1132 def columns(self):
1133 return [self.colXX, self.colXY, self.colYY]
1134
1135 def _func(self, df):
1136 return df[self.colXX] - df[self.colYY] / (df[self.colXX] + df[self.colYY])
1137
1138
1140 r"""Calculate :math:`e_2` ellipticity component for sources, defined as:
1141
1142 .. math::
1143 e_2 &= 2I_{xy}/(I_{xx}+I_{yy})
1144
1145 See Also
1146 --------
1147 E1
1148 """
1149 name = "Ellipticity e2"
1150
1151 def __init__(self, colXX, colXY, colYY, **kwargs):
1152 self.colXX = colXX
1153 self.colXY = colXY
1154 self.colYY = colYY
1155 super().__init__(**kwargs)
1156
1157 @property
1158 def columns(self):
1159 return [self.colXX, self.colXY, self.colYY]
1160
1161 def _func(self, df):
1162 return 2*df[self.colXY] / (df[self.colXX] + df[self.colYY])
1163
1164
1166 """Calculate the radius from the quadrupole moments.
1167
1168 This returns the fourth root of the determinant of the second moments
1169 tensor, which has units of pixels.
1170
1171 See Also
1172 --------
1173 SdssTraceSize
1174 HsmTraceSize
1175 """
1176
1177 def __init__(self, colXX, colXY, colYY, **kwargs):
1178 self.colXX = colXX
1179 self.colXY = colXY
1180 self.colYY = colYY
1181 super().__init__(**kwargs)
1182
1183 @property
1184 def columns(self):
1185 return [self.colXX, self.colXY, self.colYY]
1186
1187 def _func(self, df):
1188 return (df[self.colXX]*df[self.colYY] - df[self.colXY]**2)**0.25
1189
1190
1192 """Computations using the stored localWcs."""
1193 name = "LocalWcsOperations"
1194
1195 def __init__(self,
1196 colCD_1_1,
1197 colCD_1_2,
1198 colCD_2_1,
1199 colCD_2_2,
1200 **kwargs):
1201 self.colCD_1_1 = colCD_1_1
1202 self.colCD_1_2 = colCD_1_2
1203 self.colCD_2_1 = colCD_2_1
1204 self.colCD_2_2 = colCD_2_2
1205 super().__init__(**kwargs)
1206
1207 def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22):
1208 """Compute the dRA, dDec from dx, dy.
1209
1210 Parameters
1211 ----------
1212 x : `~pandas.Series`
1213 X pixel coordinate.
1214 y : `~pandas.Series`
1215 Y pixel coordinate.
1216 cd11 : `~pandas.Series`
1217 [1, 1] element of the local Wcs affine transform.
1218 cd12 : `~pandas.Series`
1219 [1, 2] element of the local Wcs affine transform.
1220 cd21 : `~pandas.Series`
1221 [2, 1] element of the local Wcs affine transform.
1222 cd22 : `~pandas.Series`
1223 [2, 2] element of the local Wcs affine transform.
1224
1225 Returns
1226 -------
1227 raDecTuple : tuple
1228 RA and Dec conversion of x and y given the local Wcs.
1229 Returned units are in radians.
1230
1231 Notes
1232 -----
1233 If x and y are with respect to the CRVAL1, CRVAL2
1234 then this will return the RA, Dec for that WCS.
1235 """
1236 return (x * cd11 + y * cd12, x * cd21 + y * cd22)
1237
1238 def computeSkySeparation(self, ra1, dec1, ra2, dec2):
1239 """Compute the local pixel scale conversion.
1240
1241 Parameters
1242 ----------
1243 ra1 : `~pandas.Series`
1244 Ra of the first coordinate in radians.
1245 dec1 : `~pandas.Series`
1246 Dec of the first coordinate in radians.
1247 ra2 : `~pandas.Series`
1248 Ra of the second coordinate in radians.
1249 dec2 : `~pandas.Series`
1250 Dec of the second coordinate in radians.
1251
1252 Returns
1253 -------
1254 dist : `~pandas.Series`
1255 Distance on the sphere in radians.
1256 """
1257 deltaDec = dec2 - dec1
1258 deltaRa = ra2 - ra1
1259 return 2 * np.arcsin(
1260 np.sqrt(
1261 np.sin(deltaDec / 2) ** 2
1262 + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
1263
1264 def getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22):
1265 """Compute the distance on the sphere from x2, y1 to x1, y1.
1266
1267 Parameters
1268 ----------
1269 x1 : `~pandas.Series`
1270 X pixel coordinate.
1271 y1 : `~pandas.Series`
1272 Y pixel coordinate.
1273 x2 : `~pandas.Series`
1274 X pixel coordinate.
1275 y2 : `~pandas.Series`
1276 Y pixel coordinate.
1277 cd11 : `~pandas.Series`
1278 [1, 1] element of the local Wcs affine transform.
1279 cd12 : `~pandas.Series`
1280 [1, 2] element of the local Wcs affine transform.
1281 cd21 : `~pandas.Series`
1282 [2, 1] element of the local Wcs affine transform.
1283 cd22 : `~pandas.Series`
1284 [2, 2] element of the local Wcs affine transform.
1285
1286 Returns
1287 -------
1288 Distance : `~pandas.Series`
1289 Arcseconds per pixel at the location of the local WC.
1290 """
1291 ra1, dec1 = self.computeDeltaRaDec(x1, y1, cd11, cd12, cd21, cd22)
1292 ra2, dec2 = self.computeDeltaRaDec(x2, y2, cd11, cd12, cd21, cd22)
1293 # Great circle distance for small separations.
1294 return self.computeSkySeparation(ra1, dec1, ra2, dec2)
1295
    def computePositionAngle(self, ra1, dec1, ra2, dec2):
        """Compute position angle (E of N) from (ra1, dec1) to (ra2, dec2).

        Parameters
        ----------
        ra1 : iterable [`float`]
            RA of the first coordinate [radian].
        dec1 : iterable [`float`]
            Dec of the first coordinate [radian].
        ra2 : iterable [`float`]
            RA of the second coordinate [radian].
        dec2 : iterable [`float`]
            Dec of the second coordinate [radian].

        Returns
        -------
        Position Angle: `~pandas.Series`
            radians E of N

        Notes
        -----
        (ra1, dec1) -> (ra2, dec2) is interpreted as the shorter way around the sphere

        For a separation of 0.0001 rad, the position angle is good to 0.0009 rad
        all over the sphere.
        """
        # lsst.geom.SpherePoint has "bearingTo", which returns angle N of E
        # We instead want the astronomy convention of "Position Angle", which is angle E of N
        # NOTE(review): this is a per-row Python loop over SpherePoint objects,
        # not vectorized; acceptable for catalog-sized inputs.
        position_angle = np.zeros(len(ra1))
        for i, (r1, d1, r2, d2) in enumerate(zip(ra1, dec1, ra2, dec2)):
            point1 = geom.SpherePoint(r1, d1, geom.radians)
            point2 = geom.SpherePoint(r2, d2, geom.radians)
            bearing = point1.bearingTo(point2)
            pa_ref_angle = geom.Angle(np.pi/2, geom.radians)  # in bearing system
            # Convert bearing (N of E) to position angle (E of N).
            pa = pa_ref_angle - bearing
            # Wrap around to get Delta_RA from -pi to +pi
            pa = pa.wrapCtr()
            position_angle[i] = pa.asRadians()

        # NOTE(review): the returned Series gets a fresh RangeIndex, not the
        # caller's index — confirm downstream alignment is positional.
        return pd.Series(position_angle)
1336
1337 def getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22):
1338 """Compute position angle (E of N) from detector angle (+y of +x).
1339
1340 Parameters
1341 ----------
1342 theta : `float`
1343 detector angle [radian]
1344 cd11 : `float`
1345 [1, 1] element of the local Wcs affine transform.
1346 cd12 : `float`
1347 [1, 2] element of the local Wcs affine transform.
1348 cd21 : `float`
1349 [2, 1] element of the local Wcs affine transform.
1350 cd22 : `float`
1351 [2, 2] element of the local Wcs affine transform.
1352
1353 Returns
1354 -------
1355 Position Angle: `~pandas.Series`
1356 Degrees E of N.
1357 """
1358 # Create a unit vector in (x, y) along da
1359 dx = np.cos(theta)
1360 dy = np.sin(theta)
1361 ra1, dec1 = self.computeDeltaRaDec(0, 0, cd11, cd12, cd21, cd22)
1362 ra2, dec2 = self.computeDeltaRaDec(dx, dy, cd11, cd12, cd21, cd22)
1363 # Position angle of vector from (RA1, Dec1) to (RA2, Dec2)
1364 return np.rad2deg(self.computePositionAngle(ra1, dec1, ra2, dec2))
1365
1366
1368 """Compute the local pixel scale from the stored CDMatrix.
1369 """
1370 name = "PixelScale"
1371
1372 @property
1379 def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22):
1380 """Compute the local pixel to scale conversion in arcseconds.
1381
1382 Parameters
1383 ----------
1384 cd11 : `~pandas.Series`
1385 [1, 1] element of the local Wcs affine transform in radians.
1386 cd11 : `~pandas.Series`
1387 [1, 1] element of the local Wcs affine transform in radians.
1388 cd12 : `~pandas.Series`
1389 [1, 2] element of the local Wcs affine transform in radians.
1390 cd21 : `~pandas.Series`
1391 [2, 1] element of the local Wcs affine transform in radians.
1392 cd22 : `~pandas.Series`
1393 [2, 2] element of the local Wcs affine transform in radians.
1394
1395 Returns
1396 -------
1397 pixScale : `~pandas.Series`
1398 Arcseconds per pixel at the location of the local WC.
1399 """
1400 return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
1401
1402 def _func(self, df):
1403 return self.pixelScaleArcseconds(df[self.colCD_1_1colCD_1_1],
1404 df[self.colCD_1_2colCD_1_2],
1405 df[self.colCD_2_1colCD_2_1],
1406 df[self.colCD_2_2])
1407
1408
1410 """Convert a value in units of pixels to units of arcseconds."""
1411
1412 def __init__(self,
1413 col,
1414 colCD_1_1,
1415 colCD_1_2,
1416 colCD_2_1,
1417 colCD_2_2,
1418 **kwargs):
1419 self.col = col
1420 super().__init__(colCD_1_1,
1421 colCD_1_2,
1422 colCD_2_1,
1423 colCD_2_2,
1424 **kwargs)
1425
1426 @property
1427 def name(self):
1428 return f"{self.col}_asArcseconds"
1429
1430 @property
1438 def _func(self, df):
1439 return df[self.col] * self.pixelScaleArcseconds(df[self.colCD_1_1colCD_1_1colCD_1_1],
1442 df[self.colCD_2_2])
1443
1444
1446 """Convert a value in units of pixels squared to units of arcseconds
1447 squared.
1448 """
1449
1450 def __init__(self,
1451 col,
1452 colCD_1_1,
1453 colCD_1_2,
1454 colCD_2_1,
1455 colCD_2_2,
1456 **kwargs):
1457 self.col = col
1458 super().__init__(colCD_1_1,
1459 colCD_1_2,
1460 colCD_2_1,
1461 colCD_2_2,
1462 **kwargs)
1463
1464 @property
1465 def name(self):
1466 return f"{self.col}_asArcsecondsSq"
1467
1468 @property
1476 def _func(self, df):
1477 pixScale = self.pixelScaleArcseconds(df[self.colCD_1_1colCD_1_1colCD_1_1],
1480 df[self.colCD_2_2])
1481 return df[self.col] * pixScale * pixScale
1482
1483
1485 """Compute a position angle from a detector angle and the stored CDMatrix.
1486
1487 Returns
1488 -------
1489 position angle : degrees
1490 """
1491
1492 name = "PositionAngle"
1493
1495 self,
1496 theta_col,
1497 colCD_1_1,
1498 colCD_1_2,
1499 colCD_2_1,
1500 colCD_2_2,
1501 **kwargs
1502 ):
1503 self.theta_col = theta_col
1504 super().__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
1505
1506 @property
1507 def columns(self):
1508 return [
1509 self.theta_col,
1513 self.colCD_2_2
1514 ]
1515
1516 def _func(self, df):
1518 df[self.theta_col],
1519 df[self.colCD_1_1colCD_1_1],
1520 df[self.colCD_1_2colCD_1_2],
1521 df[self.colCD_2_1colCD_2_1],
1522 df[self.colCD_2_2]
1523 )
1524
1525
1527 """Return the band used to seed multiband forced photometry.
1528
1529 This functor is to be used on Object tables.
1530 It converts the boolean merge_measurements_{band} columns into a single
1531 string representing the first band for which merge_measurements_{band}
1532 is True.
1533
1534 Assumes the default priority order of i, r, z, y, g, u.
1535 """
1536 name = 'Reference Band'
1537 shortname = 'refBand'
1538
1539 @property
1540 def columns(self):
1541 return ["merge_measurement_i",
1542 "merge_measurement_r",
1543 "merge_measurement_z",
1544 "merge_measurement_y",
1545 "merge_measurement_g",
1546 "merge_measurement_u"]
1547
1548 def _func(self, df: pd.DataFrame) -> pd.Series:
1549 def getFilterAliasName(row):
1550 # Get column name with the max value (True > False).
1551 colName = row.idxmax()
1552 return colName.replace('merge_measurement_', '')
1553
1554 # Skip columns that are unavailable, because this functor requests the
1555 # superset of bands that could be included in the object table.
1556 columns = [col for col in self.columnscolumns if col in df.columns]
1557 # Makes a Series of dtype object if df is empty.
1558 return df[columns].apply(getFilterAliasName, axis=1,
1559 result_type='reduce').astype('object')
1560
1561
1563 """Base class for Object table calibrated fluxes and magnitudes."""
1564 # AB to NanoJansky (3631 Jansky).
1565 AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
1566 LOG_AB_FLUX_SCALE = 12.56
1567 FIVE_OVER_2LOG10 = 1.085736204758129569
1568 # TO DO: DM-21955 Replace hard coded photometic calibration values.
1569 COADD_ZP = 27
1570
1571 def __init__(self, colFlux, colFluxErr=None, **kwargs):
1572 self.vhypot = np.vectorize(self.hypot)
1573 self.col = colFlux
1574 self.colFluxErr = colFluxErr
1575
1576 self.fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZP)
1577 self.fluxMag0Err = 0.
1578
1579 super().__init__(**kwargs)
1580
1581 @property
1582 def columns(self):
1583 return [self.col]
1584
1585 @property
1586 def name(self):
1587 return f'mag_{self.col}'
1588
1589 @classmethod
1590 def hypot(cls, a, b):
1591 """Compute sqrt(a^2 + b^2) without under/overflow."""
1592 if np.abs(a) < np.abs(b):
1593 a, b = b, a
1594 if a == 0.:
1595 return 0.
1596 q = b/a
1597 return np.abs(a) * np.sqrt(1. + q*q)
1598
1599 def dn2flux(self, dn, fluxMag0):
1600 """Convert instrumental flux to nanojanskys."""
1601 return self.AB_FLUX_SCALE * dn / fluxMag0
1602
1603 def dn2mag(self, dn, fluxMag0):
1604 """Convert instrumental flux to AB magnitude."""
1605 with warnings.catch_warnings():
1606 warnings.filterwarnings('ignore', r'invalid value encountered')
1607 warnings.filterwarnings('ignore', r'divide by zero')
1608 return -2.5 * np.log10(dn/fluxMag0)
1609
1610 def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1611 """Convert instrumental flux error to nanojanskys."""
1612 retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
1613 retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
1614 return retVal
1615
1616 def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1617 """Convert instrumental flux error to AB magnitude error."""
1618 retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
1619 return self.FIVE_OVER_2LOG10 * retVal
1620
1621
1623 """Convert instrumental flux to nanojanskys."""
1624 def _func(self, df):
1625 return self.dn2flux(df[self.col], self.fluxMag0fluxMag0)
1626
1627
1629 """Convert instrumental flux error to nanojanskys."""
1630 @property
1631 def columns(self):
1632 return [self.colcol, self.colFluxErr]
1633
1634 def _func(self, df):
1635 retArr = self.dn2fluxErr(df[self.colcol], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
1636 return pd.Series(retArr, index=df.index)
1637
1638
1640 """Base class for calibrating the specified instrument flux column using
1641 the local photometric calibration.
1642
1643 Parameters
1644 ----------
1645 instFluxCol : `str`
1646 Name of the instrument flux column.
1647 instFluxErrCol : `str`
1648 Name of the assocated error columns for ``instFluxCol``.
1649 photoCalibCol : `str`
1650 Name of local calibration column.
1651 photoCalibErrCol : `str`
1652 Error associated with ``photoCalibCol``
1653
1654 See Also
1655 --------
1656 LocalNanojansky
1657 LocalNanojanskyErr
1658 """
1659 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1660
1661 def __init__(self,
1662 instFluxCol,
1663 instFluxErrCol,
1664 photoCalibCol,
1665 photoCalibErrCol,
1666 **kwargs):
1667 self.instFluxCol = instFluxCol
1668 self.instFluxErrCol = instFluxErrCol
1669 self.photoCalibCol = photoCalibCol
1670 self.photoCalibErrCol = photoCalibErrCol
1671 super().__init__(**kwargs)
1672
1673 def instFluxToNanojansky(self, instFlux, localCalib):
1674 """Convert instrument flux to nanojanskys.
1675
1676 Parameters
1677 ----------
1678 instFlux : `~numpy.ndarray` or `~pandas.Series`
1679 Array of instrument flux measurements.
1680 localCalib : `~numpy.ndarray` or `~pandas.Series`
1681 Array of local photometric calibration estimates.
1682
1683 Returns
1684 -------
1685 calibFlux : `~numpy.ndarray` or `~pandas.Series`
1686 Array of calibrated flux measurements.
1687 """
1688 return instFlux * localCalib
1689
1690 def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
1691 """Convert instrument flux to nanojanskys.
1692
1693 Parameters
1694 ----------
1695 instFlux : `~numpy.ndarray` or `~pandas.Series`
1696 Array of instrument flux measurements.
1697 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1698 Errors on associated ``instFlux`` values.
1699 localCalib : `~numpy.ndarray` or `~pandas.Series`
1700 Array of local photometric calibration estimates.
1701 localCalibErr : `~numpy.ndarray` or `~pandas.Series`
1702 Errors on associated ``localCalib`` values.
1703
1704 Returns
1705 -------
1706 calibFluxErr : `~numpy.ndarray` or `~pandas.Series`
1707 Errors on calibrated flux measurements.
1708 """
1709 return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
1710
1711 def instFluxToMagnitude(self, instFlux, localCalib):
1712 """Convert instrument flux to nanojanskys.
1713
1714 Parameters
1715 ----------
1716 instFlux : `~numpy.ndarray` or `~pandas.Series`
1717 Array of instrument flux measurements.
1718 localCalib : `~numpy.ndarray` or `~pandas.Series`
1719 Array of local photometric calibration estimates.
1720
1721 Returns
1722 -------
1723 calibMag : `~numpy.ndarray` or `~pandas.Series`
1724 Array of calibrated AB magnitudes.
1725 """
1726 return -2.5 * np.log10(self.instFluxToNanojansky(instFlux, localCalib)) + self.logNJanskyToAB
1727
1728 def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
1729 """Convert instrument flux err to nanojanskys.
1730
1731 Parameters
1732 ----------
1733 instFlux : `~numpy.ndarray` or `~pandas.Series`
1734 Array of instrument flux measurements.
1735 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1736 Errors on associated ``instFlux`` values.
1737 localCalib : `~numpy.ndarray` or `~pandas.Series`
1738 Array of local photometric calibration estimates.
1739 localCalibErr : `~numpy.ndarray` or `~pandas.Series`
1740 Errors on associated ``localCalib`` values.
1741
1742 Returns
1743 -------
1744 calibMagErr: `~numpy.ndarray` or `~pandas.Series`
1745 Error on calibrated AB magnitudes.
1746 """
1747 err = self.instFluxErrToNanojanskyErr(instFlux, instFluxErr, localCalib, localCalibErr)
1748 return 2.5 / np.log(10) * err / self.instFluxToNanojansky(instFlux, instFluxErr)
1749
1750
1752 """Compute calibrated fluxes using the local calibration value.
1753
1754 This returns units of nanojanskys.
1755 """
1756
1757 @property
1758 def columns(self):
1760
1761 @property
1762 def name(self):
1763 return f'flux_{self.instFluxCol}'
1764
1765 def _func(self, df):
1766 return self.instFluxToNanojansky(df[self.instFluxColinstFluxCol], df[self.photoCalibCol])
1767
1768
1770 """Compute calibrated flux errors using the local calibration value.
1771
1772 This returns units of nanojanskys.
1773 """
1774
1775 @property
1780 @property
1781 def name(self):
1782 return f'fluxErr_{self.instFluxCol}'
1783
1784 def _func(self, df):
1787
1788
1790 """Compute absolute mean of dipole fluxes.
1791
1792 See Also
1793 --------
1794 LocalNanojansky
1795 LocalNanojanskyErr
1796 LocalDipoleMeanFluxErr
1797 LocalDipoleDiffFlux
1798 LocalDipoleDiffFluxErr
1799 """
1800 def __init__(self,
1801 instFluxPosCol,
1802 instFluxNegCol,
1803 instFluxPosErrCol,
1804 instFluxNegErrCol,
1805 photoCalibCol,
1806 photoCalibErrCol,
1807 **kwargs):
1808 self.instFluxNegCol = instFluxNegCol
1809 self.instFluxPosCol = instFluxPosCol
1810 self.instFluxNegErrCol = instFluxNegErrCol
1811 self.instFluxPosErrCol = instFluxPosErrCol
1812 self.photoCalibColphotoCalibCol = photoCalibCol
1813 self.photoCalibErrColphotoCalibErrCol = photoCalibErrCol
1814 super().__init__(instFluxNegCol,
1815 instFluxNegErrCol,
1816 photoCalibCol,
1817 photoCalibErrCol,
1818 **kwargs)
1819
1820 @property
1821 def columns(self):
1822 return [self.instFluxPosCol,
1823 self.instFluxNegCol,
1825
1826 @property
1827 def name(self):
1828 return f'dipMeanFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1829
1830 def _func(self, df):
1831 return 0.5*(np.fabs(self.instFluxToNanojansky(df[self.instFluxNegCol], df[self.photoCalibColphotoCalibCol]))
1832 + np.fabs(self.instFluxToNanojansky(df[self.instFluxPosCol], df[self.photoCalibColphotoCalibCol])))
1833
1834
1836 """Compute the error on the absolute mean of dipole fluxes.
1837
1838 See Also
1839 --------
1840 LocalNanojansky
1841 LocalNanojanskyErr
1842 LocalDipoleMeanFlux
1843 LocalDipoleDiffFlux
1844 LocalDipoleDiffFluxErr
1845 """
1846
1847 @property
1856 @property
1857 def name(self):
1858 return f'dipMeanFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1859
1860 def _func(self, df):
1861 return 0.5*np.sqrt(
1862 (np.fabs(df[self.instFluxNegColinstFluxNegCol]) + np.fabs(df[self.instFluxPosColinstFluxPosCol])
1866
1867
1869 """Compute the absolute difference of dipole fluxes.
1870
1871 Calculated value is (abs(pos) - abs(neg)).
1872
1873 See Also
1874 --------
1875 LocalNanojansky
1876 LocalNanojanskyErr
1877 LocalDipoleMeanFlux
1878 LocalDipoleMeanFluxErr
1879 LocalDipoleDiffFluxErr
1880 """
1881
1882 @property
1888 @property
1889 def name(self):
1890 return f'dipDiffFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1891
1892 def _func(self, df):
1893 return (np.fabs(self.instFluxToNanojansky(df[self.instFluxPosColinstFluxPosCol], df[self.photoCalibColphotoCalibCol]))
1895
1896
1898 """Compute the error on the absolute difference of dipole fluxes.
1899
1900 See Also
1901 --------
1902 LocalNanojansky
1903 LocalNanojanskyErr
1904 LocalDipoleMeanFlux
1905 LocalDipoleMeanFluxErr
1906 LocalDipoleDiffFlux
1907 """
1908
1909 @property
1918 @property
1919 def name(self):
1920 return f'dipDiffFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1921
1922 def _func(self, df):
1923 return np.sqrt(
1924 ((np.fabs(df[self.instFluxPosColinstFluxPosCol]) - np.fabs(df[self.instFluxNegColinstFluxNegCol]))
1928
1929
1931 """Compute E(B-V) from dustmaps.sfd."""
1932 _defaultDataset = 'ref'
1933 name = "E(B-V)"
1934 shortname = "ebv"
1935
    def __init__(self, **kwargs):
        """Load the SFD dust map and declare the coordinate columns."""
        # Import is only needed for Ebv.
        # Suppress unnecessary .dustmapsrc log message on import.
        with open(os.devnull, "w") as devnull:
            with redirect_stdout(devnull):
                from dustmaps.sfd import SFDQuery
        # Sky-position columns queried against the dust map in _func.
        self._columns = ['coord_ra', 'coord_dec']
        self.sfd = SFDQuery()
        super().__init__(**kwargs)
1945
    def _func(self, df):
        """Query the SFD map at each row's (coord_ra, coord_dec), in radians."""
        coords = SkyCoord(df['coord_ra'].values * u.rad, df['coord_dec'].values * u.rad)
        ebv = self.sfd(coords)
        # Double precision unnecessary scientifically but currently needed for
        # ingest to qserv.
        return pd.Series(ebv, index=df.index).astype('float64')
std::vector< SchemaItem< Flag > > * items
A class representing an angle.
Definition Angle.h:128
Point in an unspecified spherical coordinate system.
Definition SpherePoint.h:57
__init__(self, col, filt2, filt1, **kwargs)
Definition functors.py:934
multilevelColumns(self, parq, **kwargs)
Definition functors.py:963
__init__(self, col, **kwargs)
Definition functors.py:649
multilevelColumns(self, data, **kwargs)
Definition functors.py:452
from_file(cls, filename, **kwargs)
Definition functors.py:542
from_yaml(cls, translationDefinition, **kwargs)
Definition functors.py:551
pixelScaleArcseconds(self, cd11, cd12, cd21, cd22)
Definition functors.py:1379
__init__(self, theta_col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1502
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1456
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1418
__init__(self, col, **kwargs)
Definition functors.py:680
__call__(self, catalog, **kwargs)
Definition functors.py:710
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1124
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1151
_func(self, df, dropna=True)
Definition functors.py:291
multilevelColumns(self, data, columnIndex=None, returnTuple=False)
Definition functors.py:237
__call__(self, data, dropna=False)
Definition functors.py:348
_get_data_columnLevels(self, data, columnIndex=None)
Definition functors.py:184
_colsFromDict(self, colDict, columnIndex=None)
Definition functors.py:218
difference(self, data1, data2, **kwargs)
Definition functors.py:360
_get_data_columnLevelNames(self, data, columnIndex=None)
Definition functors.py:204
__init__(self, filt=None, dataset=None, noDup=None)
Definition functors.py:163
__init__(self, ra, dec, **kwargs)
Definition functors.py:762
__init__(self, instFluxPosCol, instFluxNegCol, instFluxPosErrCol, instFluxNegErrCol, photoCalibCol, photoCalibErrCol, **kwargs)
Definition functors.py:1807
instFluxToNanojansky(self, instFlux, localCalib)
Definition functors.py:1673
instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr)
Definition functors.py:1690
instFluxToMagnitude(self, instFlux, localCalib)
Definition functors.py:1711
__init__(self, instFluxCol, instFluxErrCol, photoCalibCol, photoCalibErrCol, **kwargs)
Definition functors.py:1666
instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr)
Definition functors.py:1728
computeSkySeparation(self, ra1, dec1, ra2, dec2)
Definition functors.py:1238
__init__(self, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1200
computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22)
Definition functors.py:1207
getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22)
Definition functors.py:1264
computePositionAngle(self, ra1, dec1, ra2, dec2)
Definition functors.py:1296
getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22)
Definition functors.py:1337
__init__(self, col1, col2, **kwargs)
Definition functors.py:882
__init__(self, *args, **kwargs)
Definition functors.py:854
__init__(self, col, **kwargs)
Definition functors.py:823
__init__(self, colFlux, colFluxErr=None, **kwargs)
Definition functors.py:1571
dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition functors.py:1616
dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition functors.py:1610
__call__(self, catalog, **kwargs)
Definition functors.py:698
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1177
pd.Series _func(self, pd.DataFrame df)
Definition functors.py:1548
HtmPixelization provides HTM indexing of points and regions.
init_fromDict(initDict, basePath='lsst.pipe.tasks.functors', typeKey='functor', name=None)
Definition functors.py:60
mag_aware_eval(df, expr, log)
Definition functors.py:580