Loading [MathJax]/extensions/tex2jax.js
LSST Applications g0fba68d861+83433b07ee,g16d25e1f1b+23bc9e47ac,g1ec0fe41b4+3ea9d11450,g1fd858c14a+9be2b0f3b9,g2440f9efcc+8c5ae1fdc5,g35bb328faa+8c5ae1fdc5,g4a4af6cd76+d25431c27e,g4d2262a081+c74e83464e,g53246c7159+8c5ae1fdc5,g55585698de+1e04e59700,g56a49b3a55+92a7603e7a,g60b5630c4e+1e04e59700,g67b6fd64d1+3fc8cb0b9e,g78460c75b0+7e33a9eb6d,g786e29fd12+668abc6043,g8352419a5c+8c5ae1fdc5,g8852436030+60e38ee5ff,g89139ef638+3fc8cb0b9e,g94187f82dc+1e04e59700,g989de1cb63+3fc8cb0b9e,g9d31334357+1e04e59700,g9f33ca652e+0a83e03614,gabe3b4be73+8856018cbb,gabf8522325+977d9fabaf,gb1101e3267+8b4b9c8ed7,gb89ab40317+3fc8cb0b9e,gc0af124501+57ccba3ad1,gcf25f946ba+60e38ee5ff,gd6cbbdb0b4+1cc2750d2e,gd794735e4e+7be992507c,gdb1c4ca869+be65c9c1d7,gde0f65d7ad+c7f52e58fe,ge278dab8ac+6b863515ed,ge410e46f29+3fc8cb0b9e,gf35d7ec915+97dd712d81,gf5e32f922b+8c5ae1fdc5,gf618743f1b+747388abfa,gf67bdafdda+3fc8cb0b9e,w.2025.18
LSST Data Management Base Package
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
functors.py
Go to the documentation of this file.
1# This file is part of pipe_tasks.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21
22__all__ = ["init_fromDict", "Functor", "CompositeFunctor", "mag_aware_eval",
23 "CustomFunctor", "Column", "Index", "CoordColumn", "RAColumn",
24 "DecColumn", "SinglePrecisionFloatColumn", "HtmIndex20", "fluxName", "fluxErrName", "Mag",
25 "MagErr", "MagDiff", "Color", "DeconvolvedMoments", "SdssTraceSize",
26 "PsfSdssTraceSizeDiff", "HsmTraceSize", "PsfHsmTraceSizeDiff",
27 "HsmFwhm", "E1", "E2", "RadiusFromQuadrupole", "LocalWcs",
28 "ComputePixelScale", "ConvertPixelToArcseconds",
29 "ConvertPixelSqToArcsecondsSq",
30 "ConvertDetectorAngleToPositionAngle",
31 "ReferenceBand", "Photometry",
32 "NanoJansky", "NanoJanskyErr", "LocalPhotometry", "LocalNanojansky",
33 "LocalNanojanskyErr", "LocalDipoleMeanFlux",
34 "LocalDipoleMeanFluxErr", "LocalDipoleDiffFlux",
35 "LocalDipoleDiffFluxErr", "Ebv",
36 ]
37
38import logging
39import os
40import os.path
41import re
42import warnings
43from contextlib import redirect_stdout
44from itertools import product
45
46import astropy.units as u
47import lsst.geom as geom
48import lsst.sphgeom as sphgeom
49import numpy as np
50import pandas as pd
51import yaml
52from astropy.coordinates import SkyCoord
53from lsst.daf.butler import DeferredDatasetHandle
54from lsst.pipe.base import InMemoryDatasetHandle
55from lsst.utils import doImport
56from lsst.utils.introspection import get_full_type_name
57
58
59def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
60 typeKey='functor', name=None):
61 """Initialize an object defined in a dictionary.
62
63 The object needs to be importable as f'{basePath}.{initDict[typeKey]}'.
64 The positional and keyword arguments (if any) are contained in "args" and
65 "kwargs" entries in the dictionary, respectively.
66 This is used in `~lsst.pipe.tasks.functors.CompositeFunctor.from_yaml` to
67 initialize a composite functor from a specification in a YAML file.
68
69 Parameters
70 ----------
71 initDict : dictionary
72 Dictionary describing object's initialization.
73 Must contain an entry keyed by ``typeKey`` that is the name of the
74 object, relative to ``basePath``.
75 basePath : str
76 Path relative to module in which ``initDict[typeKey]`` is defined.
77 typeKey : str
78 Key of ``initDict`` that is the name of the object (relative to
79 ``basePath``).
80 """
81 initDict = initDict.copy()
82 # TO DO: DM-21956 We should be able to define functors outside this module
83 pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
84 args = []
85 if 'args' in initDict:
86 args = initDict.pop('args')
87 if isinstance(args, str):
88 args = [args]
89 try:
90 element = pythonType(*args, **initDict)
91 except Exception as e:
92 message = f'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
93 raise type(e)(message, e.args)
94 return element
95
96
97class Functor(object):
98 """Define and execute a calculation on a DataFrame or Handle holding a
99 DataFrame.
100
101 The `__call__` method accepts either a `~pandas.DataFrame` object or a
102 `~lsst.daf.butler.DeferredDatasetHandle` or
103 `~lsst.pipe.base.InMemoryDatasetHandle`, and returns the
104 result of the calculation as a single column.
105 Each functor defines what columns are needed for the calculation, and only
106 these columns are read from the dataset handle.
107
108 The action of `__call__` consists of two steps: first, loading the
109 necessary columns from disk into memory as a `~pandas.DataFrame` object;
110 and second, performing the computation on this DataFrame and returning the
111 result.
112
113 To define a new `Functor`, a subclass must define a `_func` method,
114 that takes a `~pandas.DataFrame` and returns result in a `~pandas.Series`.
115 In addition, it must define the following attributes:
116
117 * `_columns`: The columns necessary to perform the calculation
118 * `name`: A name appropriate for a figure axis label
119 * `shortname`: A name appropriate for use as a dictionary key
120
121 On initialization, a `Functor` should declare what band (``filt`` kwarg)
122 and dataset (e.g. ``'ref'``, ``'meas'``, ``'forced_src'``) it is intended
123 to be applied to.
124 This enables the `_get_data` method to extract the proper columns from the
125 underlying data.
126 If not specified, the dataset will fall back on the `_defaultDataset`
127 attribute.
128 If band is not specified and ``dataset`` is anything other than ``'ref'``,
129 then an error will be raised when trying to perform the calculation.
130
131 Originally, `Functor` was set up to expect datasets formatted like the
132 ``deepCoadd_obj`` dataset; that is, a DataFrame with a multi-level column
133 index, with the levels of the column index being ``band``, ``dataset``, and
134 ``column``.
135 It has since been generalized to apply to DataFrames without multi-level
136 indices and multi-level indices with just ``dataset`` and ``column``
137 levels.
138 In addition, the `_get_data` method that reads the columns from the
139 underlying data will return a DataFrame with column index levels defined by
140 the `_dfLevels` attribute; by default, this is ``column``.
141
142 The `_dfLevels` attributes should generally not need to be changed, unless
143 `_func` needs columns from multiple filters or datasets to do the
144 calculation.
145 An example of this is the `~lsst.pipe.tasks.functors.Color` functor, for
146 which `_dfLevels = ('band', 'column')`, and `_func` expects the DataFrame
147 it gets to have those levels in the column index.
148
149 Parameters
150 ----------
151 filt : str
152 Band upon which to do the calculation.
153
154 dataset : str
155 Dataset upon which to do the calculation (e.g., 'ref', 'meas',
156 'forced_src').
157 """
158
159 _defaultDataset = 'ref'
160 _dfLevels = ('column',)
161 _defaultNoDup = False
162
163 def __init__(self, filt=None, dataset=None, noDup=None):
164 self.filt = filt
165 self.dataset = dataset if dataset is not None else self._defaultDataset
166 self._noDup = noDup
167 self.log = logging.getLogger(type(self).__name__)
168
169 @property
170 def noDup(self):
171 """Do not explode by band if used on object table."""
172 if self._noDup is not None:
173 return self._noDup
174 else:
175 return self._defaultNoDup
176
177 @property
178 def columns(self):
179 """Columns required to perform calculation."""
180 if not hasattr(self, '_columns'):
181 raise NotImplementedError('Must define columns property or _columns attribute')
182 return self._columns
183
184 def _get_data_columnLevels(self, data, columnIndex=None):
185 """Gets the names of the column index levels.
186
187 This should only be called in the context of a multilevel table.
188
189 Parameters
190 ----------
191 data : various
192 The data to be read, can be a
193 `~lsst.daf.butler.DeferredDatasetHandle` or
194 `~lsst.pipe.base.InMemoryDatasetHandle`.
195 columnIndex (optional): pandas `~pandas.Index` object
196 If not passed, then it is read from the
197 `~lsst.daf.butler.DeferredDatasetHandle`
198 for `~lsst.pipe.base.InMemoryDatasetHandle`.
199 """
200 if columnIndex is None:
201 columnIndex = data.get(component="columns")
202 return columnIndex.names
203
204 def _get_data_columnLevelNames(self, data, columnIndex=None):
205 """Gets the content of each of the column levels for a multilevel
206 table.
207 """
208 if columnIndex is None:
209 columnIndex = data.get(component="columns")
210
211 columnLevels = columnIndex.names
212 columnLevelNames = {
213 level: list(np.unique(np.array([c for c in columnIndex])[:, i]))
214 for i, level in enumerate(columnLevels)
215 }
216 return columnLevelNames
217
218 def _colsFromDict(self, colDict, columnIndex=None):
219 """Converts dictionary column specficiation to a list of columns."""
220 new_colDict = {}
221 columnLevels = self._get_data_columnLevels(None, columnIndex=columnIndex)
222
223 for i, lev in enumerate(columnLevels):
224 if lev in colDict:
225 if isinstance(colDict[lev], str):
226 new_colDict[lev] = [colDict[lev]]
227 else:
228 new_colDict[lev] = colDict[lev]
229 else:
230 new_colDict[lev] = columnIndex.levels[i]
231
232 levelCols = [new_colDict[lev] for lev in columnLevels]
233 cols = list(product(*levelCols))
234 colsAvailable = [col for col in cols if col in columnIndex]
235 return colsAvailable
236
237 def multilevelColumns(self, data, columnIndex=None, returnTuple=False):
238 """Returns columns needed by functor from multilevel dataset.
239
240 To access tables with multilevel column structure, the
241 `~lsst.daf.butler.DeferredDatasetHandle` or
242 `~lsst.pipe.base.InMemoryDatasetHandle` needs to be passed
243 either a list of tuples or a dictionary.
244
245 Parameters
246 ----------
247 data : various
248 The data as either `~lsst.daf.butler.DeferredDatasetHandle`, or
249 `~lsst.pipe.base.InMemoryDatasetHandle`.
250 columnIndex (optional): pandas `~pandas.Index` object
251 Either passed or read in from
252 `~lsst.daf.butler.DeferredDatasetHandle`.
253 `returnTuple` : `bool`
254 If true, then return a list of tuples rather than the column
255 dictionary specification.
256 This is set to `True` by `CompositeFunctor` in order to be able to
257 combine columns from the various component functors.
258
259 """
260 if not isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
261 raise RuntimeError(f"Unexpected data type. Got {get_full_type_name(data)}.")
262
263 if columnIndex is None:
264 columnIndex = data.get(component="columns")
265
266 # Confirm that the dataset has the column levels the functor is
267 # expecting it to have.
268 columnLevels = self._get_data_columnLevels(data, columnIndex)
269
270 columnDict = {'column': self.columns,
271 'dataset': self.dataset}
272 if self.filt is None:
273 columnLevelNames = self._get_data_columnLevelNames(data, columnIndex)
274 if "band" in columnLevels:
275 if self.dataset == "ref":
276 columnDict["band"] = columnLevelNames["band"][0]
277 else:
278 raise ValueError(f"'filt' not set for functor {self.name}"
279 f"(dataset {self.dataset}) "
280 "and DataFrame "
281 "contains multiple filters in column index. "
282 "Set 'filt' or set 'dataset' to 'ref'.")
283 else:
284 columnDict['band'] = self.filt
285
286 if returnTuple:
287 return self._colsFromDict(columnDict, columnIndex=columnIndex)
288 else:
289 return columnDict
290
291 def _func(self, df, dropna=True):
292 raise NotImplementedError('Must define calculation on DataFrame')
293
294 def _get_columnIndex(self, data):
295 """Return columnIndex."""
296
297 if isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
298 return data.get(component="columns")
299 else:
300 return None
301
302 def _get_data(self, data):
303 """Retrieve DataFrame necessary for calculation.
304
305 The data argument can be a `~pandas.DataFrame`, a
306 `~lsst.daf.butler.DeferredDatasetHandle`, or
307 an `~lsst.pipe.base.InMemoryDatasetHandle`.
308
309 Returns a DataFrame upon which `self._func` can act.
310 """
311 # We wrap a DataFrame in a handle here to take advantage of the
312 # DataFrame delegate DataFrame column wrangling abilities.
313 if isinstance(data, pd.DataFrame):
314 _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
315 elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
316 _data = data
317 else:
318 raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")
319
320 # First thing to do: check to see if the data source has a multilevel
321 # column index or not.
322 columnIndex = self._get_columnIndex(_data)
323 is_multiLevel = isinstance(columnIndex, pd.MultiIndex)
324
325 # Get proper columns specification for this functor.
326 if is_multiLevel:
327 columns = self.multilevelColumns(_data, columnIndex=columnIndex)
328 else:
329 columns = self.columns
330
331 # Load in-memory DataFrame with appropriate columns the gen3 way.
332 df = _data.get(parameters={"columns": columns})
333
334 # Drop unnecessary column levels.
335 if is_multiLevel:
336 df = self._setLevels(df)
337
338 return df
339
340 def _setLevels(self, df):
341 levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
342 df.columns = df.columns.droplevel(levelsToDrop)
343 return df
344
345 def _dropna(self, vals):
346 return vals.dropna()
347
348 def __call__(self, data, dropna=False):
349 df = self._get_data(data)
350 try:
351 vals = self._func(df)
352 except Exception as e:
353 self.log.error("Exception in %s call: %s: %s", self.name, type(e).__name__, e)
354 vals = self.fail(df)
355 if dropna:
356 vals = self._dropna(vals)
357
358 return vals
359
360 def difference(self, data1, data2, **kwargs):
361 """Computes difference between functor called on two different
362 DataFrame/Handle objects.
363 """
364 return self(data1, **kwargs) - self(data2, **kwargs)
365
366 def fail(self, df):
367 return pd.Series(np.full(len(df), np.nan), index=df.index)
368
369 @property
370 def name(self):
371 """Full name of functor (suitable for figure labels)."""
372 return NotImplementedError
373
374 @property
375 def shortname(self):
376 """Short name of functor (suitable for column name/dict key)."""
377 return self.name
378
379
381 """Perform multiple calculations at once on a catalog.
382
383 The role of a `CompositeFunctor` is to group together computations from
384 multiple functors.
385 Instead of returning `~pandas.Series` a `CompositeFunctor` returns a
386 `~pandas.DataFrame`, with the column names being the keys of ``funcDict``.
387
388 The `columns` attribute of a `CompositeFunctor` is the union of all columns
389 in all the component functors.
390
391 A `CompositeFunctor` does not use a `_func` method itself; rather, when a
392 `CompositeFunctor` is called, all its columns are loaded at once, and the
393 resulting DataFrame is passed to the `_func` method of each component
394 functor.
395 This has the advantage of only doing I/O (reading from parquet file) once,
396 and works because each individual `_func` method of each component functor
397 does not care if there are *extra* columns in the DataFrame being passed;
398 only that it must contain *at least* the `columns` it expects.
399
400 An important and useful class method is `from_yaml`, which takes as an
401 argument the path to a YAML file specifying a collection of functors.
402
403 Parameters
404 ----------
405 funcs : `dict` or `list`
406 Dictionary or list of functors.
407 If a list, then it will be converted into a dictonary according to the
408 `.shortname` attribute of each functor.
409 """
410 dataset = None
411 name = "CompositeFunctor"
412
413 def __init__(self, funcs, **kwargs):
414
415 if type(funcs) is dict:
416 self.funcDict = funcs
417 else:
418 self.funcDict = {f.shortname: f for f in funcs}
419
420 self._filt = None
421
422 super().__init__(**kwargs)
423
424 @property
425 def filt(self):
426 return self._filt
427
428 @filt.setter
429 def filt(self, filt):
430 if filt is not None:
431 for _, f in self.funcDict.items():
432 f.filt = filt
433 self._filt = filt
434
435 def update(self, new):
436 """Update the functor with new functors."""
437 if isinstance(new, dict):
438 self.funcDict.update(new)
439 elif isinstance(new, CompositeFunctor):
440 self.funcDict.update(new.funcDict)
441 else:
442 raise TypeError('Can only update with dictionary or CompositeFunctor.')
443
444 # Make sure new functors have the same 'filt' set.
445 if self.filt is not None:
446 self.filt = self.filt
447
448 @property
449 def columns(self):
450 return list(set([x for y in [f.columns for f in self.funcDict.values()] for x in y]))
451
452 def multilevelColumns(self, data, **kwargs):
453 # Get the union of columns for all component functors.
454 # Note the need to have `returnTuple=True` here.
455 return list(
456 set(
457 [
458 x
459 for y in [
460 f.multilevelColumns(data, returnTuple=True, **kwargs) for f in self.funcDict.values()
461 ]
462 for x in y
463 ]
464 )
465 )
466
467 def __call__(self, data, **kwargs):
468 """Apply the functor to the data table.
469
470 Parameters
471 ----------
472 data : various
473 The data represented as `~lsst.daf.butler.DeferredDatasetHandle`,
474 `~lsst.pipe.base.InMemoryDatasetHandle`, or `~pandas.DataFrame`.
475 The table or a pointer to a table on disk from which columns can
476 be accessed.
477 """
478 if isinstance(data, pd.DataFrame):
479 _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
480 elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
481 _data = data
482 else:
483 raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")
484
485 columnIndex = self._get_columnIndex(_data)
486
487 if isinstance(columnIndex, pd.MultiIndex):
488 columns = self.multilevelColumns(_data, columnIndex=columnIndex)
489 df = _data.get(parameters={"columns": columns})
490
491 valDict = {}
492 for k, f in self.funcDict.items():
493 try:
494 subdf = f._setLevels(
495 df[f.multilevelColumns(_data, returnTuple=True, columnIndex=columnIndex)]
496 )
497 valDict[k] = f._func(subdf)
498 except Exception as e:
499 self.log.exception(
500 "Exception in %s (funcs: %s) call: %s",
501 self.name,
502 str(list(self.funcDict.keys())),
503 type(e).__name__,
504 )
505 try:
506 valDict[k] = f.fail(subdf)
507 except NameError:
508 raise e
509
510 else:
511 df = _data.get(parameters={"columns": self.columns})
512
513 valDict = {k: f._func(df) for k, f in self.funcDict.items()}
514
515 # Check that output columns are actually columns.
516 for name, colVal in valDict.items():
517 if len(colVal.shape) != 1:
518 raise RuntimeError("Transformed column '%s' is not the shape of a column. "
519 "It is shaped %s and type %s." % (name, colVal.shape, type(colVal)))
520
521 try:
522 valDf = pd.concat(valDict, axis=1)
523 except TypeError:
524 print([(k, type(v)) for k, v in valDict.items()])
525 raise
526
527 if kwargs.get('dropna', False):
528 valDf = valDf.dropna(how='any')
529
530 return valDf
531
532 @classmethod
533 def renameCol(cls, col, renameRules):
534 if renameRules is None:
535 return col
536 for old, new in renameRules:
537 if col.startswith(old):
538 col = col.replace(old, new)
539 return col
540
541 @classmethod
542 def from_file(cls, filename, **kwargs):
543 # Allow environment variables in the filename.
544 filename = os.path.expandvars(filename)
545 with open(filename) as f:
546 translationDefinition = yaml.safe_load(f)
547
548 return cls.from_yaml(translationDefinition, **kwargs)
549
550 @classmethod
551 def from_yaml(cls, translationDefinition, **kwargs):
552 funcs = {}
553 for func, val in translationDefinition['funcs'].items():
554 funcs[func] = init_fromDict(val, name=func)
555
556 if 'flag_rename_rules' in translationDefinition:
557 renameRules = translationDefinition['flag_rename_rules']
558 else:
559 renameRules = None
560
561 if 'calexpFlags' in translationDefinition:
562 for flag in translationDefinition['calexpFlags']:
563 funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='calexp')
564
565 if 'refFlags' in translationDefinition:
566 for flag in translationDefinition['refFlags']:
567 funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')
568
569 if 'forcedFlags' in translationDefinition:
570 for flag in translationDefinition['forcedFlags']:
571 funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='forced_src')
572
573 if 'flags' in translationDefinition:
574 for flag in translationDefinition['flags']:
575 funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')
576
577 return cls(funcs, **kwargs)
578
579
580def mag_aware_eval(df, expr, log):
581 """Evaluate an expression on a DataFrame, knowing what the 'mag' function
582 means.
583
584 Builds on `pandas.DataFrame.eval`, which parses and executes math on
585 DataFrames.
586
587 Parameters
588 ----------
589 df : ~pandas.DataFrame
590 DataFrame on which to evaluate expression.
591
592 expr : str
593 Expression.
594 """
595 try:
596 expr_new = re.sub(r'mag\‍((\w+)\‍)', r'-2.5*log(\g<1>)/log(10)', expr)
597 val = df.eval(expr_new)
598 except Exception as e: # Should check what actually gets raised
599 log.error("Exception in mag_aware_eval: %s: %s", type(e).__name__, e)
600 expr_new = re.sub(r'mag\‍((\w+)\‍)', r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
601 val = df.eval(expr_new)
602 return val
603
604
606 """Arbitrary computation on a catalog.
607
608 Column names (and thus the columns to be loaded from catalog) are found by
609 finding all words and trying to ignore all "math-y" words.
610
611 Parameters
612 ----------
613 expr : str
614 Expression to evaluate, to be parsed and executed by
615 `~lsst.pipe.tasks.functors.mag_aware_eval`.
616 """
617 _ignore_words = ('mag', 'sin', 'cos', 'exp', 'log', 'sqrt')
618
619 def __init__(self, expr, **kwargs):
620 self.expr = expr
621 super().__init__(**kwargs)
622
623 @property
624 def name(self):
625 return self.expr
626
627 @property
628 def columns(self):
629 flux_cols = re.findall(r'mag\‍(\s*(\w+)\s*\‍)', self.expr)
630
631 cols = [c for c in re.findall(r'[a-zA-Z_]+', self.expr) if c not in self._ignore_words]
632 not_a_col = []
633 for c in flux_cols:
634 if not re.search('_instFlux$', c):
635 cols.append(f'{c}_instFlux')
636 not_a_col.append(c)
637 else:
638 cols.append(c)
639
640 return list(set([c for c in cols if c not in not_a_col]))
641
642 def _func(self, df):
643 return mag_aware_eval(df, self.expr, self.log)
644
645
647 """Get column with a specified name."""
648
649 def __init__(self, col, **kwargs):
650 self.col = col
651 super().__init__(**kwargs)
652
653 @property
654 def name(self):
655 return self.col
656
657 @property
658 def columns(self):
659 return [self.col]
660
661 def _func(self, df):
662 return df[self.col]
663
664
666 """Return the value of the index for each object."""
667
668 columns = ['coord_ra'] # Just a dummy; something has to be here.
669 _defaultDataset = 'ref'
670 _defaultNoDup = True
671
672 def _func(self, df):
673 return pd.Series(df.index, index=df.index)
674
675
677 """Base class for coordinate column, in degrees."""
678 _radians = True
679
680 def __init__(self, col, **kwargs):
681 super().__init__(col, **kwargs)
682
683 def _func(self, df):
684 # Must not modify original column in case that column is used by
685 # another functor.
686 output = df[self.col] * 180 / np.pi if self._radians else df[self.col]
687 return output
688
689
691 """Right Ascension, in degrees."""
692 name = 'RA'
693 _defaultNoDup = True
694
695 def __init__(self, **kwargs):
696 super().__init__('coord_ra', **kwargs)
697
698 def __call__(self, catalog, **kwargs):
699 return super().__call__(catalog, **kwargs)
700
701
703 """Declination, in degrees."""
704 name = 'Dec'
705 _defaultNoDup = True
706
707 def __init__(self, **kwargs):
708 super().__init__('coord_dec', **kwargs)
709
710 def __call__(self, catalog, **kwargs):
711 return super().__call__(catalog, **kwargs)
712
713
715 """Uncertainty in Right Ascension, in degrees."""
716 name = 'RAErr'
717 _defaultNoDup = True
718
719 def __init__(self, **kwargs):
720 super().__init__('coord_raErr', **kwargs)
721
722
724 """Uncertainty in declination, in degrees."""
725 name = 'DecErr'
726 _defaultNoDup = True
727
728 def __init__(self, **kwargs):
729 super().__init__('coord_decErr', **kwargs)
730
731
733 """Coordinate covariance column, in degrees."""
734 _radians = True
735 name = 'RADecCov'
736 _defaultNoDup = True
737
738 def __init__(self, **kwargs):
739 super().__init__('coord_ra_dec_Cov', **kwargs)
740
741 def _func(self, df):
742 # Must not modify original column in case that column is used by
743 # another functor.
744 output = df[self.col]*(180/np.pi)**2 if self._radians else df[self.col]
745 return output
746
747
749 """A column with a band in a multiband table."""
750 def __init__(self, col, band_to_check, **kwargs):
751 self._band_to_check = band_to_check
752 super().__init__(col=col, **kwargs)
753
754 @property
755 def band_to_check(self):
756 return self._band_to_check
757
758
760 """A float32 MultibandColumn"""
761 def _func(self, df):
762 return super()._func(df).astype(np.float32)
763
764
766 """Return a column cast to a single-precision float."""
767
768 def _func(self, df):
769 return df[self.col].astype(np.float32)
770
771
773 """Compute the level 20 HtmIndex for the catalog.
774
775 Notes
776 -----
777 This functor was implemented to satisfy requirements of old APDB interface
778 which required the ``pixelId`` column in DiaObject with HTM20 index.
779 The APDB interface had migrated to not need that information, but we keep
780 this class in case it may be useful for something else.
781 """
782 name = "Htm20"
783 htmLevel = 20
784 _radians = True
785
786 def __init__(self, ra, dec, **kwargs):
788 self.ra = ra
789 self.dec = dec
790 self._columns = [self.ra, self.dec]
791 super().__init__(**kwargs)
792
793 def _func(self, df):
794
795 def computePixel(row):
796 if self._radians:
797 sphPoint = geom.SpherePoint(row[self.ra],
798 row[self.dec],
799 geom.radians)
800 else:
801 sphPoint = geom.SpherePoint(row[self.ra],
802 row[self.dec],
803 geom.degrees)
804 return self.pixelator.index(sphPoint.getVector())
805
806 return df.apply(computePixel, axis=1, result_type='reduce').astype('int64')
807
808
809def fluxName(col):
810 """Append _instFlux to the column name if it doesn't have it already."""
811 if not col.endswith('_instFlux'):
812 col += '_instFlux'
813 return col
814
815
816def fluxErrName(col):
817 """Append _instFluxErr to the column name if it doesn't have it already."""
818 if not col.endswith('_instFluxErr'):
819 col += '_instFluxErr'
820 return col
821
822
824 """Compute calibrated magnitude.
825
826 Returns the flux at mag=0.
827 The default ``fluxMag0`` is 63095734448.0194, which is default for HSC.
828 TO DO: This default should be made configurable in DM-21955.
829
830 This calculation hides warnings about invalid values and dividing by zero.
831
832 As with all functors, a ``dataset`` and ``filt`` kwarg should be provided
833 upon initialization.
834 Unlike the default `Functor`, however, the default dataset for a `Mag` is
835 ``'meas'``, rather than ``'ref'``.
836
837 Parameters
838 ----------
839 col : `str`
840 Name of flux column from which to compute magnitude.
841 Can be parseable by the `~lsst.pipe.tasks.functors.fluxName` function;
842 that is, you can pass ``'modelfit_CModel'`` instead of
843 ``'modelfit_CModel_instFlux'``, and it will understand.
844 """
845 _defaultDataset = 'meas'
846
847 def __init__(self, col, **kwargs):
848 self.col = fluxName(col)
849 # TO DO: DM-21955 Replace hard coded photometic calibration values.
850 self.fluxMag0 = 63095734448.0194
851
852 super().__init__(**kwargs)
853
854 @property
855 def columns(self):
856 return [self.col]
857
858 def _func(self, df):
859 with warnings.catch_warnings():
860 warnings.filterwarnings('ignore', r'invalid value encountered')
861 warnings.filterwarnings('ignore', r'divide by zero')
862 return -2.5*np.log10(df[self.col] / self.fluxMag0)
863
864 @property
865 def name(self):
866 return f'mag_{self.col}'
867
868
869class MagErr(Mag):
870 """Compute calibrated magnitude uncertainty.
871
872 Parameters
873 ----------
874 col : `str`
875 Name of the flux column.
876 """
877
878 def __init__(self, *args, **kwargs):
879 super().__init__(*args, **kwargs)
880 # TO DO: DM-21955 Replace hard coded photometic calibration values.
881 self.fluxMag0Err = 0.
882
883 @property
884 def columns(self):
885 return [self.col, self.col + 'Err']
886
887 def _func(self, df):
888 with warnings.catch_warnings():
889 warnings.filterwarnings('ignore', r'invalid value encountered')
890 warnings.filterwarnings('ignore', r'divide by zero')
891 fluxCol, fluxErrCol = self.columns
892 x = df[fluxErrCol] / df[fluxCol]
893 y = self.fluxMag0Err / self.fluxMag0
894 magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
895 return magErr
896
897 @property
898 def name(self):
899 return super().name + '_err'
900
901
903 """Functor to calculate magnitude difference."""
904 _defaultDataset = 'meas'
905
906 def __init__(self, col1, col2, **kwargs):
907 self.col1 = fluxName(col1)
908 self.col2 = fluxName(col2)
909 super().__init__(**kwargs)
910
911 @property
912 def columns(self):
913 return [self.col1, self.col2]
914
915 def _func(self, df):
916 with warnings.catch_warnings():
917 warnings.filterwarnings('ignore', r'invalid value encountered')
918 warnings.filterwarnings('ignore', r'divide by zero')
919 return -2.5*np.log10(df[self.col1]/df[self.col2])
920
921 @property
922 def name(self):
923 return f'(mag_{self.col1} - mag_{self.col2})'
924
925 @property
926 def shortname(self):
927 return f'magDiff_{self.col1}_{self.col2}'
928
929
931 """Compute the color between two filters.
932
933 Computes color by initializing two different `Mag` functors based on the
934 ``col`` and filters provided, and then returning the difference.
935
936 This is enabled by the `_func` method expecting a DataFrame with a
937 multilevel column index, with both ``'band'`` and ``'column'``, instead of
938 just ``'column'``, which is the `Functor` default.
939 This is controlled by the `_dfLevels` attribute.
940
941 Also of note, the default dataset for `Color` is ``forced_src'``, whereas
942 for `Mag` it is ``'meas'``.
943
944 Parameters
945 ----------
946 col : str
947 Name of the flux column from which to compute; same as would be passed
948 to `~lsst.pipe.tasks.functors.Mag`.
949
950 filt2, filt1 : str
951 Filters from which to compute magnitude difference.
952 Color computed is ``Mag(filt2) - Mag(filt1)``.
953 """
954 _defaultDataset = 'forced_src'
955 _dfLevels = ('band', 'column')
956 _defaultNoDup = True
957
958 def __init__(self, col, filt2, filt1, **kwargs):
959 self.col = fluxName(col)
960 if filt2 == filt1:
961 raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
962 self.filt2 = filt2
963 self.filt1 = filt1
964
965 self.mag2 = Mag(col, filt=filt2, **kwargs)
966 self.mag1 = Mag(col, filt=filt1, **kwargs)
967
968 super().__init__(**kwargs)
969
970 @property
971 def filt(self):
972 return None
973
974 @filt.setter
975 def filt(self, filt):
976 pass
977
978 def _func(self, df):
979 mag2 = self.mag2._func(df[self.filt2])
980 mag1 = self.mag1._func(df[self.filt1])
981 return mag2 - mag1
982
983 @property
984 def columns(self):
985 return [self.mag1.col, self.mag2.col]
986
987 def multilevelColumns(self, parq, **kwargs):
988 return [(self.dataset, self.filt1, self.col), (self.dataset, self.filt2, self.col)]
989
990 @property
991 def name(self):
992 return f'{self.filt2} - {self.filt1} ({self.col})'
993
994 @property
995 def shortname(self):
996 return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
997
998
1000 """This functor subtracts the trace of the PSF second moments from the
1001 trace of the second moments of the source.
1002
1003 If the HsmShapeAlgorithm measurement is valid, then these will be used for
1004 the sources.
1005 Otherwise, the SdssShapeAlgorithm measurements will be used.
1006 """
1007 name = 'Deconvolved Moments'
1008 shortname = 'deconvolvedMoments'
1009 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1010 "ext_shapeHSM_HsmSourceMoments_yy",
1011 "base_SdssShape_xx", "base_SdssShape_yy",
1012 "ext_shapeHSM_HsmPsfMoments_xx",
1013 "ext_shapeHSM_HsmPsfMoments_yy")
1014
1015 def _func(self, df):
1016 """Calculate deconvolved moments."""
1017 if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns: # _xx added by tdm
1018 hsm = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
1019 else:
1020 hsm = np.ones(len(df))*np.nan
1021 sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
1022 if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
1023 psf = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]
1024 else:
1025 # LSST does not have shape.sdss.psf.
1026 # We could instead add base_PsfShape to the catalog using
1027 # exposure.getPsf().computeShape(s.getCentroid()).getIxx().
1028 raise RuntimeError('No psf shape parameter found in catalog')
1029
1030 return hsm.where(np.isfinite(hsm), sdss) - psf
1031
1032
1034 """Functor to calculate the SDSS trace radius size for sources.
1035
1036 The SDSS trace radius size is a measure of size equal to the square root of
1037 half of the trace of the second moments tensor measured with the
1038 SdssShapeAlgorithm plugin.
1039 This has units of pixels.
1040 """
1041 name = "SDSS Trace Size"
1042 shortname = 'sdssTrace'
1043 _columns = ("base_SdssShape_xx", "base_SdssShape_yy")
1044
1045 def _func(self, df):
1046 srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1047 return srcSize
1048
1049
1051 """Functor to calculate the SDSS trace radius size difference (%) between
1052 the object and the PSF model.
1053
1054 See Also
1055 --------
1056 SdssTraceSize
1057 """
1058 name = "PSF - SDSS Trace Size"
1059 shortname = 'psf_sdssTrace'
1060 _columns = ("base_SdssShape_xx", "base_SdssShape_yy",
1061 "base_SdssShape_psf_xx", "base_SdssShape_psf_yy")
1062
1063 def _func(self, df):
1064 srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1065 psfSize = np.sqrt(0.5*(df["base_SdssShape_psf_xx"] + df["base_SdssShape_psf_yy"]))
1066 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1067 return sizeDiff
1068
1069
1071 """Functor to calculate the HSM trace radius size for sources.
1072
1073 The HSM trace radius size is a measure of size equal to the square root of
1074 half of the trace of the second moments tensor measured with the
1075 HsmShapeAlgorithm plugin.
1076 This has units of pixels.
1077 """
1078 name = 'HSM Trace Size'
1079 shortname = 'hsmTrace'
1080 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1081 "ext_shapeHSM_HsmSourceMoments_yy")
1082
1083 def _func(self, df):
1084 srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1085 + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1086 return srcSize
1087
1088
1090 """Functor to calculate the HSM trace radius size difference (%) between
1091 the object and the PSF model.
1092
1093 See Also
1094 --------
1095 HsmTraceSize
1096 """
1097 name = 'PSF - HSM Trace Size'
1098 shortname = 'psf_HsmTrace'
1099 _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1100 "ext_shapeHSM_HsmSourceMoments_yy",
1101 "ext_shapeHSM_HsmPsfMoments_xx",
1102 "ext_shapeHSM_HsmPsfMoments_yy")
1103
1104 def _func(self, df):
1105 srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1106 + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1107 psfSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmPsfMoments_xx"]
1108 + df["ext_shapeHSM_HsmPsfMoments_yy"]))
1109 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1110 return sizeDiff
1111
1112
1114 """Functor to calculate the PSF FWHM with second moments measured from the
1115 HsmShapeAlgorithm plugin.
1116
1117 This is in units of arcseconds, and assumes the hsc_rings_v1 skymap pixel
1118 scale of 0.168 arcseconds/pixel.
1119
1120 Notes
1121 -----
1122 This conversion assumes the PSF is Gaussian, which is not always the case.
1123 """
1124 name = 'HSM Psf FWHM'
1125 _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
1126 # TODO: DM-21403 pixel scale should be computed from the CD matrix or transform matrix
1127 pixelScale = 0.168
1128 SIGMA2FWHM = 2*np.sqrt(2*np.log(2))
1129
1130 def _func(self, df):
1131 return (self.pixelScale*self.SIGMA2FWHM*np.sqrt(
1132 0.5*(df['ext_shapeHSM_HsmPsfMoments_xx']
1133 + df['ext_shapeHSM_HsmPsfMoments_yy']))).astype(np.float32)
1134
1135
1137 r"""Calculate :math:`e_1` ellipticity component for sources, defined as:
1138
1139 .. math::
1140 e_1 &= (I_{xx}-I_{yy})/(I_{xx}+I_{yy})
1141
1142 See Also
1143 --------
1144 E2
1145 """
1146 name = "Distortion Ellipticity (e1)"
1147 shortname = "Distortion"
1148
1149 def __init__(self, colXX, colXY, colYY, **kwargs):
1150 self.colXX = colXX
1151 self.colXY = colXY
1152 self.colYY = colYY
1153 self._columns = [self.colXX, self.colXY, self.colYY]
1154 super().__init__(**kwargs)
1155
1156 @property
1157 def columns(self):
1158 return [self.colXX, self.colXY, self.colYY]
1159
1160 def _func(self, df):
1161 return (df[self.colXX] - df[self.colYY] / (df[self.colXX]
1162 + df[self.colYY])).astype(np.float32)
1163
1164
1166 r"""Calculate :math:`e_2` ellipticity component for sources, defined as:
1167
1168 .. math::
1169 e_2 &= 2I_{xy}/(I_{xx}+I_{yy})
1170
1171 See Also
1172 --------
1173 E1
1174 """
1175 name = "Ellipticity e2"
1176
1177 def __init__(self, colXX, colXY, colYY, **kwargs):
1178 self.colXX = colXX
1179 self.colXY = colXY
1180 self.colYY = colYY
1181 super().__init__(**kwargs)
1182
1183 @property
1184 def columns(self):
1185 return [self.colXX, self.colXY, self.colYY]
1186
1187 def _func(self, df):
1188 return (2*df[self.colXY] / (df[self.colXX] + df[self.colYY])).astype(np.float32)
1189
1190
1192 """Calculate the radius from the quadrupole moments.
1193
1194 This returns the fourth root of the determinant of the second moments
1195 tensor, which has units of pixels.
1196
1197 See Also
1198 --------
1199 SdssTraceSize
1200 HsmTraceSize
1201 """
1202
1203 def __init__(self, colXX, colXY, colYY, **kwargs):
1204 self.colXX = colXX
1205 self.colXY = colXY
1206 self.colYY = colYY
1207 super().__init__(**kwargs)
1208
1209 @property
1210 def columns(self):
1211 return [self.colXX, self.colXY, self.colYY]
1212
1213 def _func(self, df):
1214 return ((df[self.colXX]*df[self.colYY] - df[self.colXY]**2)**0.25).astype(np.float32)
1215
1216
1218 """Computations using the stored localWcs."""
1219 name = "LocalWcsOperations"
1220
1221 def __init__(self,
1222 colCD_1_1,
1223 colCD_1_2,
1224 colCD_2_1,
1225 colCD_2_2,
1226 **kwargs):
1227 self.colCD_1_1 = colCD_1_1
1228 self.colCD_1_2 = colCD_1_2
1229 self.colCD_2_1 = colCD_2_1
1230 self.colCD_2_2 = colCD_2_2
1231 super().__init__(**kwargs)
1232
1233 def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22):
1234 """Compute the dRA, dDec from dx, dy.
1235
1236 Parameters
1237 ----------
1238 x : `~pandas.Series`
1239 X pixel coordinate.
1240 y : `~pandas.Series`
1241 Y pixel coordinate.
1242 cd11 : `~pandas.Series`
1243 [1, 1] element of the local Wcs affine transform.
1244 cd12 : `~pandas.Series`
1245 [1, 2] element of the local Wcs affine transform.
1246 cd21 : `~pandas.Series`
1247 [2, 1] element of the local Wcs affine transform.
1248 cd22 : `~pandas.Series`
1249 [2, 2] element of the local Wcs affine transform.
1250
1251 Returns
1252 -------
1253 raDecTuple : tuple
1254 RA and Dec conversion of x and y given the local Wcs.
1255 Returned units are in radians.
1256
1257 Notes
1258 -----
1259 If x and y are with respect to the CRVAL1, CRVAL2
1260 then this will return the RA, Dec for that WCS.
1261 """
1262 return (x * cd11 + y * cd12, x * cd21 + y * cd22)
1263
1264 def computeSkySeparation(self, ra1, dec1, ra2, dec2):
1265 """Compute the local pixel scale conversion.
1266
1267 Parameters
1268 ----------
1269 ra1 : `~pandas.Series`
1270 Ra of the first coordinate in radians.
1271 dec1 : `~pandas.Series`
1272 Dec of the first coordinate in radians.
1273 ra2 : `~pandas.Series`
1274 Ra of the second coordinate in radians.
1275 dec2 : `~pandas.Series`
1276 Dec of the second coordinate in radians.
1277
1278 Returns
1279 -------
1280 dist : `~pandas.Series`
1281 Distance on the sphere in radians.
1282 """
1283 deltaDec = dec2 - dec1
1284 deltaRa = ra2 - ra1
1285 return 2 * np.arcsin(
1286 np.sqrt(
1287 np.sin(deltaDec / 2) ** 2
1288 + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
1289
1290 def getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22):
1291 """Compute the distance on the sphere from x2, y1 to x1, y1.
1292
1293 Parameters
1294 ----------
1295 x1 : `~pandas.Series`
1296 X pixel coordinate.
1297 y1 : `~pandas.Series`
1298 Y pixel coordinate.
1299 x2 : `~pandas.Series`
1300 X pixel coordinate.
1301 y2 : `~pandas.Series`
1302 Y pixel coordinate.
1303 cd11 : `~pandas.Series`
1304 [1, 1] element of the local Wcs affine transform.
1305 cd12 : `~pandas.Series`
1306 [1, 2] element of the local Wcs affine transform.
1307 cd21 : `~pandas.Series`
1308 [2, 1] element of the local Wcs affine transform.
1309 cd22 : `~pandas.Series`
1310 [2, 2] element of the local Wcs affine transform.
1311
1312 Returns
1313 -------
1314 Distance : `~pandas.Series`
1315 Arcseconds per pixel at the location of the local WC.
1316 """
1317 ra1, dec1 = self.computeDeltaRaDec(x1, y1, cd11, cd12, cd21, cd22)
1318 ra2, dec2 = self.computeDeltaRaDec(x2, y2, cd11, cd12, cd21, cd22)
1319 # Great circle distance for small separations.
1320 return self.computeSkySeparation(ra1, dec1, ra2, dec2)
1321
1322 def computePositionAngle(self, ra1, dec1, ra2, dec2):
1323 """Compute position angle (E of N) from (ra1, dec1) to (ra2, dec2).
1324
1325 Parameters
1326 ----------
1327 ra1 : iterable [`float`]
1328 RA of the first coordinate [radian].
1329 dec1 : iterable [`float`]
1330 Dec of the first coordinate [radian].
1331 ra2 : iterable [`float`]
1332 RA of the second coordinate [radian].
1333 dec2 : iterable [`float`]
1334 Dec of the second coordinate [radian].
1335
1336 Returns
1337 -------
1338 Position Angle: `~pandas.Series`
1339 radians E of N
1340
1341 Notes
1342 -----
1343 (ra1, dec1) -> (ra2, dec2) is interpreted as the shorter way around the sphere
1344
1345 For a separation of 0.0001 rad, the position angle is good to 0.0009 rad
1346 all over the sphere.
1347 """
1348 # lsst.geom.SpherePoint has "bearingTo", which returns angle N of E
1349 # We instead want the astronomy convention of "Position Angle", which is angle E of N
1350 position_angle = np.zeros(len(ra1))
1351 for i, (r1, d1, r2, d2) in enumerate(zip(ra1, dec1, ra2, dec2)):
1352 point1 = geom.SpherePoint(r1, d1, geom.radians)
1353 point2 = geom.SpherePoint(r2, d2, geom.radians)
1354 bearing = point1.bearingTo(point2)
1355 pa_ref_angle = geom.Angle(np.pi/2, geom.radians) # in bearing system
1356 pa = pa_ref_angle - bearing
1357 # Wrap around to get Delta_RA from -pi to +pi
1358 pa = pa.wrapCtr()
1359 position_angle[i] = pa.asRadians()
1360
1361 return pd.Series(position_angle)
1362
1363 def getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22):
1364 """Compute position angle (E of N) from detector angle (+y of +x).
1365
1366 Parameters
1367 ----------
1368 theta : `float`
1369 detector angle [radian]
1370 cd11 : `float`
1371 [1, 1] element of the local Wcs affine transform.
1372 cd12 : `float`
1373 [1, 2] element of the local Wcs affine transform.
1374 cd21 : `float`
1375 [2, 1] element of the local Wcs affine transform.
1376 cd22 : `float`
1377 [2, 2] element of the local Wcs affine transform.
1378
1379 Returns
1380 -------
1381 Position Angle: `~pandas.Series`
1382 Degrees E of N.
1383 """
1384 # Create a unit vector in (x, y) along da
1385 dx = np.cos(theta)
1386 dy = np.sin(theta)
1387 ra1, dec1 = self.computeDeltaRaDec(0, 0, cd11, cd12, cd21, cd22)
1388 ra2, dec2 = self.computeDeltaRaDec(dx, dy, cd11, cd12, cd21, cd22)
1389 # Position angle of vector from (RA1, Dec1) to (RA2, Dec2)
1390 return np.rad2deg(self.computePositionAngle(ra1, dec1, ra2, dec2))
1391
1392
1394 """Compute the local pixel scale from the stored CDMatrix.
1395 """
1396 name = "PixelScale"
1397
1398 @property
1399 def columns(self):
1400 return [self.colCD_1_1,
1403 self.colCD_2_2]
1404
1405 def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22):
1406 """Compute the local pixel to scale conversion in arcseconds.
1407
1408 Parameters
1409 ----------
1410 cd11 : `~pandas.Series`
1411 [1, 1] element of the local Wcs affine transform in radians.
1412 cd11 : `~pandas.Series`
1413 [1, 1] element of the local Wcs affine transform in radians.
1414 cd12 : `~pandas.Series`
1415 [1, 2] element of the local Wcs affine transform in radians.
1416 cd21 : `~pandas.Series`
1417 [2, 1] element of the local Wcs affine transform in radians.
1418 cd22 : `~pandas.Series`
1419 [2, 2] element of the local Wcs affine transform in radians.
1420
1421 Returns
1422 -------
1423 pixScale : `~pandas.Series`
1424 Arcseconds per pixel at the location of the local WC.
1425 """
1426 return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
1427
1428 def _func(self, df):
1429 return self.pixelScaleArcseconds(df[self.colCD_1_1],
1430 df[self.colCD_1_2],
1431 df[self.colCD_2_1],
1432 df[self.colCD_2_2])
1433
1434
1436 """Convert a value in units of pixels to units of arcseconds."""
1437
1438 def __init__(self,
1439 col,
1440 colCD_1_1,
1441 colCD_1_2,
1442 colCD_2_1,
1443 colCD_2_2,
1444 **kwargs):
1445 self.col = col
1446 super().__init__(colCD_1_1,
1447 colCD_1_2,
1448 colCD_2_1,
1449 colCD_2_2,
1450 **kwargs)
1451
1452 @property
1453 def name(self):
1454 return f"{self.col}_asArcseconds"
1455
1456 @property
1457 def columns(self):
1458 return [self.col,
1462 self.colCD_2_2]
1463
1464 def _func(self, df):
1465 return df[self.col] * self.pixelScaleArcseconds(df[self.colCD_1_1],
1466 df[self.colCD_1_2],
1467 df[self.colCD_2_1],
1468 df[self.colCD_2_2])
1469
1470
1472 """Convert a value in units of pixels squared to units of arcseconds
1473 squared.
1474 """
1475
1476 def __init__(self,
1477 col,
1478 colCD_1_1,
1479 colCD_1_2,
1480 colCD_2_1,
1481 colCD_2_2,
1482 **kwargs):
1483 self.col = col
1484 super().__init__(colCD_1_1,
1485 colCD_1_2,
1486 colCD_2_1,
1487 colCD_2_2,
1488 **kwargs)
1489
1490 @property
1491 def name(self):
1492 return f"{self.col}_asArcsecondsSq"
1493
1494 @property
1495 def columns(self):
1496 return [self.col,
1500 self.colCD_2_2]
1501
1502 def _func(self, df):
1503 pixScale = self.pixelScaleArcseconds(df[self.colCD_1_1],
1504 df[self.colCD_1_2],
1505 df[self.colCD_2_1],
1506 df[self.colCD_2_2])
1507 return df[self.col] * pixScale * pixScale
1508
1509
1511 """Compute a position angle from a detector angle and the stored CDMatrix.
1512
1513 Returns
1514 -------
1515 position angle : degrees
1516 """
1517
1518 name = "PositionAngle"
1519
1521 self,
1522 theta_col,
1523 colCD_1_1,
1524 colCD_1_2,
1525 colCD_2_1,
1526 colCD_2_2,
1527 **kwargs
1528 ):
1529 self.theta_col = theta_col
1530 super().__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
1531
1532 @property
1533 def columns(self):
1534 return [
1535 self.theta_col,
1539 self.colCD_2_2
1540 ]
1541
1542 def _func(self, df):
1544 df[self.theta_col],
1545 df[self.colCD_1_1],
1546 df[self.colCD_1_2],
1547 df[self.colCD_2_1],
1548 df[self.colCD_2_2]
1549 )
1550
1551
1553 """Return the band used to seed multiband forced photometry.
1554
1555 This functor is to be used on Object tables.
1556 It converts the boolean merge_measurements_{band} columns into a single
1557 string representing the first band for which merge_measurements_{band}
1558 is True.
1559
1560 Assumes the default priority order of i, r, z, y, g, u.
1561 """
1562 name = 'Reference Band'
1563 shortname = 'refBand'
1564
1565 band_order = ("i", "r", "z", "y", "g", "u")
1566
1567 @property
1568 def columns(self):
1569 # Build the actual input column list, not hardcoded ugrizy
1570 bands = [band for band in self.band_order if band in self.bands]
1571 # In the unlikely scenario that users attempt to add non-ugrizy bands
1572 bands += [band for band in self.bands if band not in self.band_order]
1573 return [f"merge_measurement_{band}" for band in bands]
1574
1575 def _func(self, df: pd.DataFrame) -> pd.Series:
1576 def getFilterAliasName(row):
1577 # Get column name with the max value (True > False).
1578 colName = row.idxmax()
1579 return colName.replace('merge_measurement_', '')
1580
1581 # Skip columns that are unavailable, because this functor requests the
1582 # superset of bands that could be included in the object table.
1583 columns = [col for col in self.columns if col in df.columns]
1584 # Makes a Series of dtype object if df is empty.
1585 return df[columns].apply(getFilterAliasName, axis=1,
1586 result_type='reduce').astype('object')
1587
1588 def __init__(self, bands: tuple[str] | list[str] | None = None, **kwargs):
1589 super().__init__(**kwargs)
1590 self.bands = self.band_order if bands is None else tuple(bands)
1591
1592
1594 """Base class for Object table calibrated fluxes and magnitudes."""
1595 # AB to NanoJansky (3631 Jansky).
1596 AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
1597 LOG_AB_FLUX_SCALE = 12.56
1598 FIVE_OVER_2LOG10 = 1.085736204758129569
1599 # TO DO: DM-21955 Replace hard coded photometic calibration values.
1600 COADD_ZP = 27
1601
1602 def __init__(self, colFlux, colFluxErr=None, **kwargs):
1603 self.vhypot = np.vectorize(self.hypot)
1604 self.col = colFlux
1605 self.colFluxErr = colFluxErr
1606
1607 self.fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZP)
1608 self.fluxMag0Err = 0.
1609
1610 super().__init__(**kwargs)
1611
1612 @property
1613 def columns(self):
1614 return [self.col]
1615
1616 @property
1617 def name(self):
1618 return f'mag_{self.col}'
1619
1620 @classmethod
1621 def hypot(cls, a, b):
1622 """Compute sqrt(a^2 + b^2) without under/overflow."""
1623 if np.abs(a) < np.abs(b):
1624 a, b = b, a
1625 if a == 0.:
1626 return 0.
1627 q = b/a
1628 return np.abs(a) * np.sqrt(1. + q*q)
1629
1630 def dn2flux(self, dn, fluxMag0):
1631 """Convert instrumental flux to nanojanskys."""
1632 return (self.AB_FLUX_SCALE * dn / fluxMag0).astype(np.float32)
1633
1634 def dn2mag(self, dn, fluxMag0):
1635 """Convert instrumental flux to AB magnitude."""
1636 with warnings.catch_warnings():
1637 warnings.filterwarnings('ignore', r'invalid value encountered')
1638 warnings.filterwarnings('ignore', r'divide by zero')
1639 return (-2.5 * np.log10(dn/fluxMag0)).astype(np.float32)
1640
1641 def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1642 """Convert instrumental flux error to nanojanskys."""
1643 retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
1644 retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
1645 return retVal.astype(np.float32)
1646
1647 def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1648 """Convert instrumental flux error to AB magnitude error."""
1649 retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
1650 return (self.FIVE_OVER_2LOG10 * retVal).astype(np.float32)
1651
1652
1654 """Convert instrumental flux to nanojanskys."""
1655 def _func(self, df):
1656 return self.dn2flux(df[self.col], self.fluxMag0)
1657
1658
1660 """Convert instrumental flux error to nanojanskys."""
1661 @property
1662 def columns(self):
1663 return [self.col, self.colFluxErr]
1664
1665 def _func(self, df):
1666 retArr = self.dn2fluxErr(df[self.col], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
1667 return pd.Series(retArr, index=df.index)
1668
1669
1671 """Base class for calibrating the specified instrument flux column using
1672 the local photometric calibration.
1673
1674 Parameters
1675 ----------
1676 instFluxCol : `str`
1677 Name of the instrument flux column.
1678 instFluxErrCol : `str`
1679 Name of the assocated error columns for ``instFluxCol``.
1680 photoCalibCol : `str`
1681 Name of local calibration column.
1682 photoCalibErrCol : `str`, optional
1683 Error associated with ``photoCalibCol``. Ignored and deprecated; will
1684 be removed after v29.
1685
1686 See Also
1687 --------
1688 LocalNanojansky
1689 LocalNanojanskyErr
1690 """
1691 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1692
1693 def __init__(self,
1694 instFluxCol,
1695 instFluxErrCol,
1696 photoCalibCol,
1697 photoCalibErrCol=None,
1698 **kwargs):
1699 self.instFluxCol = instFluxCol
1700 self.instFluxErrCol = instFluxErrCol
1701 self.photoCalibCol = photoCalibCol
1702 # TODO[DM-49400]: remove this check and the argument it corresponds to.
1703 if photoCalibErrCol is not None:
1704 warnings.warn("The photoCalibErrCol argument is deprecated and will be removed after v29.",
1705 category=FutureWarning)
1706 super().__init__(**kwargs)
1707
1708 def instFluxToNanojansky(self, instFlux, localCalib):
1709 """Convert instrument flux to nanojanskys.
1710
1711 Parameters
1712 ----------
1713 instFlux : `~numpy.ndarray` or `~pandas.Series`
1714 Array of instrument flux measurements.
1715 localCalib : `~numpy.ndarray` or `~pandas.Series`
1716 Array of local photometric calibration estimates.
1717
1718 Returns
1719 -------
1720 calibFlux : `~numpy.ndarray` or `~pandas.Series`
1721 Array of calibrated flux measurements.
1722 """
1723 return instFlux * localCalib
1724
1725 def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None):
1726 """Convert instrument flux to nanojanskys.
1727
1728 Parameters
1729 ----------
1730 instFlux : `~numpy.ndarray` or `~pandas.Series`
1731 Array of instrument flux measurements. Ignored (accepted for
1732 backwards compatibility and consistency with magnitude-error
1733 calculation methods).
1734 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1735 Errors on associated ``instFlux`` values.
1736 localCalib : `~numpy.ndarray` or `~pandas.Series`
1737 Array of local photometric calibration estimates.
1738 localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
1739 Errors on associated ``localCalib`` values. Ignored and deprecated;
1740 will be removed after v29.
1741
1742 Returns
1743 -------
1744 calibFluxErr : `~numpy.ndarray` or `~pandas.Series`
1745 Errors on calibrated flux measurements.
1746 """
1747 # TODO[DM-49400]: remove this check and the argument it corresponds to.
1748 if localCalibErr is not None:
1749 warnings.warn("The localCalibErr argument is deprecated and will be removed after v29.",
1750 category=FutureWarning)
1751 return instFluxErr * localCalib
1752
1753 def instFluxToMagnitude(self, instFlux, localCalib):
1754 """Convert instrument flux to nanojanskys.
1755
1756 Parameters
1757 ----------
1758 instFlux : `~numpy.ndarray` or `~pandas.Series`
1759 Array of instrument flux measurements.
1760 localCalib : `~numpy.ndarray` or `~pandas.Series`
1761 Array of local photometric calibration estimates.
1762
1763 Returns
1764 -------
1765 calibMag : `~numpy.ndarray` or `~pandas.Series`
1766 Array of calibrated AB magnitudes.
1767 """
1768 return -2.5 * np.log10(self.instFluxToNanojansky(instFlux, localCalib)) + self.logNJanskyToAB
1769
1770 def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None):
1771 """Convert instrument flux err to nanojanskys.
1772
1773 Parameters
1774 ----------
1775 instFlux : `~numpy.ndarray` or `~pandas.Series`
1776 Array of instrument flux measurements.
1777 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1778 Errors on associated ``instFlux`` values.
1779 localCalib : `~numpy.ndarray` or `~pandas.Series`
1780 Array of local photometric calibration estimates.
1781 localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
1782 Errors on associated ``localCalib`` values. Ignored and deprecated;
1783 will be removed after v29.
1784
1785 Returns
1786 -------
1787 calibMagErr: `~numpy.ndarray` or `~pandas.Series`
1788 Error on calibrated AB magnitudes.
1789 """
1790 # TODO[DM-49400]: remove this check and the argument it corresponds to.
1791 if localCalibErr is not None:
1792 warnings.warn("The localCalibErr argument is deprecated and will be removed after v29.",
1793 category=FutureWarning)
1794 err = self.instFluxErrToNanojanskyErr(instFlux, instFluxErr, localCalib)
1795 return 2.5 / np.log(10) * err / self.instFluxToNanojansky(instFlux, instFluxErr)
1796
1797
1799 """Compute calibrated fluxes using the local calibration value.
1800
1801 This returns units of nanojanskys.
1802 """
1803
1804 @property
1805 def columns(self):
1806 return [self.instFluxCol, self.photoCalibCol]
1807
1808 @property
1809 def name(self):
1810 return f'flux_{self.instFluxCol}'
1811
1812 def _func(self, df):
1813 return self.instFluxToNanojansky(df[self.instFluxCol],
1814 df[self.photoCalibCol]).astype(np.float32)
1815
1816
1818 """Compute calibrated flux errors using the local calibration value.
1819
1820 This returns units of nanojanskys.
1821 """
1822
1823 @property
1824 def columns(self):
1825 return [self.instFluxCol, self.instFluxErrCol, self.photoCalibCol]
1826
1827 @property
1828 def name(self):
1829 return f'fluxErr_{self.instFluxCol}'
1830
1831 def _func(self, df):
1832 return self.instFluxErrToNanojanskyErr(df[self.instFluxCol], df[self.instFluxErrCol],
1833 df[self.photoCalibCol]).astype(np.float32)
1834
1835
1837 """Compute absolute mean of dipole fluxes.
1838
1839 See Also
1840 --------
1841 LocalNanojansky
1842 LocalNanojanskyErr
1843 LocalDipoleMeanFluxErr
1844 LocalDipoleDiffFlux
1845 LocalDipoleDiffFluxErr
1846 """
1847 def __init__(self,
1848 instFluxPosCol,
1849 instFluxNegCol,
1850 instFluxPosErrCol,
1851 instFluxNegErrCol,
1852 photoCalibCol,
1853 # TODO[DM-49400]: remove this option; it's already deprecated (in super).
1854 photoCalibErrCol=None,
1855 **kwargs):
1856 self.instFluxNegCol = instFluxNegCol
1857 self.instFluxPosCol = instFluxPosCol
1858 self.instFluxNegErrCol = instFluxNegErrCol
1859 self.instFluxPosErrCol = instFluxPosErrCol
1860 self.photoCalibCol = photoCalibCol
1861 super().__init__(instFluxNegCol,
1862 instFluxNegErrCol,
1863 photoCalibCol,
1864 photoCalibErrCol,
1865 **kwargs)
1866
1867 @property
1868 def columns(self):
1869 return [self.instFluxPosCol,
1870 self.instFluxNegCol,
1871 self.photoCalibCol]
1872
1873 @property
1874 def name(self):
1875 return f'dipMeanFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1876
1877 def _func(self, df):
1878 return 0.5*(np.fabs(self.instFluxToNanojansky(df[self.instFluxNegCol], df[self.photoCalibCol]))
1879 + np.fabs(self.instFluxToNanojansky(df[self.instFluxPosCol], df[self.photoCalibCol])))
1880
1881
1883 """Compute the error on the absolute mean of dipole fluxes.
1884
1885 See Also
1886 --------
1887 LocalNanojansky
1888 LocalNanojanskyErr
1889 LocalDipoleMeanFlux
1890 LocalDipoleDiffFlux
1891 LocalDipoleDiffFluxErr
1892 """
1893
1894 @property
1902 @property
1903 def name(self):
1904 return f'dipMeanFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1905
1906 def _func(self, df):
1907 return 0.5*np.hypot(df[self.instFluxNegErrCol], df[self.instFluxPosErrCol]) * df[self.photoCalibCol]
1908
1909
1911 """Compute the absolute difference of dipole fluxes.
1912
1913 Calculated value is (abs(pos) - abs(neg)).
1914
1915 See Also
1916 --------
1917 LocalNanojansky
1918 LocalNanojanskyErr
1919 LocalDipoleMeanFlux
1920 LocalDipoleMeanFluxErr
1921 LocalDipoleDiffFluxErr
1922 """
1923
1924 @property
1925 def columns(self):
1926 return [self.instFluxPosCol,
1928 self.photoCalibCol]
1929
1930 @property
1931 def name(self):
1932 return f'dipDiffFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1933
1934 def _func(self, df):
1935 return (np.fabs(self.instFluxToNanojansky(df[self.instFluxPosCol], df[self.photoCalibCol]))
1936 - np.fabs(self.instFluxToNanojansky(df[self.instFluxNegCol], df[self.photoCalibCol])))
1937
1938
1940 """Compute the error on the absolute difference of dipole fluxes.
1941
1942 See Also
1943 --------
1944 LocalNanojansky
1945 LocalNanojanskyErr
1946 LocalDipoleMeanFlux
1947 LocalDipoleMeanFluxErr
1948 LocalDipoleDiffFlux
1949 """
1950
1951 @property
1959 @property
1960 def name(self):
1961 return f'dipDiffFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1962
1963 def _func(self, df):
1964 return np.hypot(df[self.instFluxPosErrCol], df[self.instFluxNegErrCol]) * df[self.photoCalibCol]
1965
1966
1968 """Compute E(B-V) from dustmaps.sfd."""
1969 _defaultDataset = 'ref'
1970 name = "E(B-V)"
1971 shortname = "ebv"
1972
1973 def __init__(self, **kwargs):
1974 # Import is only needed for Ebv.
1975 # Suppress unnecessary .dustmapsrc log message on import.
1976 with open(os.devnull, "w") as devnull:
1977 with redirect_stdout(devnull):
1978 from dustmaps.sfd import SFDQuery
1979 self._columns = ['coord_ra', 'coord_dec']
1980 self.sfd = SFDQuery()
1981 super().__init__(**kwargs)
1982
1983 def _func(self, df):
1984 coords = SkyCoord(df['coord_ra'].values * u.rad, df['coord_dec'].values * u.rad)
1985 ebv = self.sfd(coords)
1986 return pd.Series(ebv, index=df.index).astype('float32')
1987
1988
1990 """Base class for functors that use shape moments and localWCS"""
1991
1992 def __init__(self,
1993 shape_xx,
1994 shape_yy,
1995 shape_xy,
1996 colCD_1_1,
1997 colCD_1_2,
1998 colCD_2_1,
1999 colCD_2_2,
2000 **kwargs):
2001 self.shape_xx = shape_xx
2002 self.shape_yy = shape_yy
2003 self.shape_xy = shape_xy
2004 self.colCD_1_1 = colCD_1_1
2005 self.colCD_1_2 = colCD_1_2
2006 self.colCD_2_1 = colCD_2_1
2007 self.colCD_2_2 = colCD_2_2
2008 super().__init__(**kwargs)
2009
2010 @property
2011 def columns(self):
2012 return [
2013 self.shape_xx,
2014 self.shape_yy,
2015 self.shape_xy,
2016 self.colCD_1_1,
2017 self.colCD_1_2,
2018 self.colCD_2_1,
2019 self.colCD_2_2]
2020
2021 # Each of sky_uu, sky_vv, sky_uv evalutes one element of
2022 # CD_matrix * moments_matrix * CD_matrix.T
2023 def sky_uu(self, df):
2024 """Return the component of the moments tensor aligned with the RA axis, in radians."""
2025 i_xx = df[self.shape_xx]
2026 i_yy = df[self.shape_yy]
2027 i_xy = df[self.shape_xy]
2028 CD_1_1 = df[self.colCD_1_1]
2029 CD_1_2 = df[self.colCD_1_2]
2030 CD_2_1 = df[self.colCD_2_1]
2031 return (CD_1_1*(i_xx*CD_1_1 + i_xy*CD_2_1)
2032 + CD_1_2*(i_xy*CD_1_1 + i_yy*CD_2_1))
2033
2034 def sky_vv(self, df):
2035 """Return the component of the moments tensor aligned with the dec axis, in radians."""
2036 i_xx = df[self.shape_xx]
2037 i_yy = df[self.shape_yy]
2038 i_xy = df[self.shape_xy]
2039 CD_1_2 = df[self.colCD_1_2]
2040 CD_2_1 = df[self.colCD_2_1]
2041 CD_2_2 = df[self.colCD_2_2]
2042 return (CD_2_1*(i_xx*CD_1_2 + i_xy*CD_2_2)
2043 + CD_2_2*(i_xy*CD_1_2 + i_yy*CD_2_2))
2044
2045 def sky_uv(self, df):
2046 """Return the covariance of the moments tensor in ra, dec coordinates, in radians."""
2047 i_xx = df[self.shape_xx]
2048 i_yy = df[self.shape_yy]
2049 i_xy = df[self.shape_xy]
2050 CD_1_1 = df[self.colCD_1_1]
2051 CD_1_2 = df[self.colCD_1_2]
2052 CD_2_1 = df[self.colCD_2_1]
2053 CD_2_2 = df[self.colCD_2_2]
2054 return ((CD_1_1 * i_xx + CD_1_2 * i_xy) * CD_2_1
2055 + (CD_1_1 * i_xy + CD_1_2 * i_yy) * CD_2_2)
2056
2057
2059 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2060 _defaultDataset = 'meas'
2061 name = "moments_uu"
2062 shortname = "moments_uu"
2063
2064 def _func(self, df):
2065 sky_uu_radians = self.sky_uu(df)
2066
2067 return pd.Series(sky_uu_radians*((180/np.pi)*3600)**2, index=df.index).astype('float32')
2068
2069
2071 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2072 _defaultDataset = 'meas'
2073 name = "moments_vv"
2074 shortname = "moments_vv"
2075
2076 def _func(self, df):
2077 sky_vv_radians = self.sky_vv(df)
2078
2079 return pd.Series(sky_vv_radians*((180/np.pi)*3600)**2, index=df.index).astype('float32')
2080
2081
2083 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2084 _defaultDataset = 'meas'
2085 name = "moments_uv"
2086 shortname = "moments_uv"
2087
2088 def _func(self, df):
2089 sky_uv_radians = self.sky_uv(df)
2090
2091 return pd.Series(sky_uv_radians*((180/np.pi)*3600)**2, index=df.index).astype('float32')
2092
2093
2095 """Compute position angle relative to ra,dec frame, in degrees."""
2096 _defaultDataset = 'meas'
2097 name = "moments_theta"
2098 shortname = "moments_theta"
2099
2100 def _func(self, df):
2101
2102 sky_uu = self.sky_uu(df)
2103 sky_vv = self.sky_vv(df)
2104 sky_uv = self.sky_uv(df)
2105 theta = 0.5*np.arctan2(2*sky_uv, sky_uu - sky_vv)
2106
2107 return pd.Series(np.degrees(np.array(theta)), index=df.index).astype('float32')
2108
2109
2111 """Compute the semimajor axis length in arcseconds"""
2112 _defaultDataset = 'meas'
2113 name = "moments_a"
2114 shortname = "moments_a"
2115
2116 def _func(self, df):
2117
2118 sky_uu = self.sky_uu(df)
2119 sky_vv = self.sky_vv(df)
2120 sky_uv = self.sky_uv(df)
2121
2122 # This copies what is done (unvectorized) in afw.geom.
2123 xx_p_yy = sky_uu + sky_vv
2124 xx_m_yy = sky_uu - sky_vv
2125 t = np.sqrt(xx_m_yy * xx_m_yy + 4 * sky_uv * sky_uv)
2126 a_radians = np.sqrt(0.5 * (xx_p_yy + t))
2127
2128 return pd.Series(np.degrees(a_radians)*3600, index=df.index).astype('float32')
2129
2130
2132 """Compute the semiminor axis length in arcseconds"""
2133 _defaultDataset = 'meas'
2134 name = "moments_b"
2135 shortname = "moments_b"
2136
2137 def _func(self, df):
2138
2139 sky_uu = self.sky_uu(df)
2140 sky_vv = self.sky_vv(df)
2141 sky_uv = self.sky_uv(df)
2142
2143 # This copies what is done (unvectorized) in afw.geom.
2144 xx_p_yy = sky_uu + sky_vv
2145 xx_m_yy = sky_uu - sky_vv
2146 t = np.sqrt(xx_m_yy * xx_m_yy + 4 * sky_uv * sky_uv)
2147 b_radians = np.sqrt(0.5 * (xx_p_yy - t))
2148
2149 return pd.Series(np.degrees(b_radians)*3600, index=df.index).astype('float32')
A class representing an angle.
Definition Angle.h:128
Point in an unspecified spherical coordinate system.
Definition SpherePoint.h:57
__init__(self, col, filt2, filt1, **kwargs)
Definition functors.py:958
multilevelColumns(self, parq, **kwargs)
Definition functors.py:987
__init__(self, col, **kwargs)
Definition functors.py:649
multilevelColumns(self, data, **kwargs)
Definition functors.py:452
from_file(cls, filename, **kwargs)
Definition functors.py:542
from_yaml(cls, translationDefinition, **kwargs)
Definition functors.py:551
pixelScaleArcseconds(self, cd11, cd12, cd21, cd22)
Definition functors.py:1405
__init__(self, theta_col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1528
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1482
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1444
__init__(self, col, **kwargs)
Definition functors.py:680
__call__(self, catalog, **kwargs)
Definition functors.py:710
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1149
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1177
_func(self, df, dropna=True)
Definition functors.py:291
multilevelColumns(self, data, columnIndex=None, returnTuple=False)
Definition functors.py:237
__call__(self, data, dropna=False)
Definition functors.py:348
_get_data_columnLevels(self, data, columnIndex=None)
Definition functors.py:184
_colsFromDict(self, colDict, columnIndex=None)
Definition functors.py:218
difference(self, data1, data2, **kwargs)
Definition functors.py:360
_get_data_columnLevelNames(self, data, columnIndex=None)
Definition functors.py:204
__init__(self, filt=None, dataset=None, noDup=None)
Definition functors.py:163
__init__(self, ra, dec, **kwargs)
Definition functors.py:786
__init__(self, instFluxPosCol, instFluxNegCol, instFluxPosErrCol, instFluxNegErrCol, photoCalibCol, photoCalibErrCol=None, **kwargs)
Definition functors.py:1855
instFluxToNanojansky(self, instFlux, localCalib)
Definition functors.py:1708
instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None)
Definition functors.py:1770
instFluxToMagnitude(self, instFlux, localCalib)
Definition functors.py:1753
__init__(self, instFluxCol, instFluxErrCol, photoCalibCol, photoCalibErrCol=None, **kwargs)
Definition functors.py:1698
instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None)
Definition functors.py:1725
computeSkySeparation(self, ra1, dec1, ra2, dec2)
Definition functors.py:1264
__init__(self, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:1226
computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22)
Definition functors.py:1233
getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22)
Definition functors.py:1290
computePositionAngle(self, ra1, dec1, ra2, dec2)
Definition functors.py:1322
getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22)
Definition functors.py:1363
__init__(self, col1, col2, **kwargs)
Definition functors.py:906
__init__(self, *args, **kwargs)
Definition functors.py:878
__init__(self, col, **kwargs)
Definition functors.py:847
__init__(self, shape_xx, shape_yy, shape_xy, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition functors.py:2000
__init__(self, col, band_to_check, **kwargs)
Definition functors.py:750
__init__(self, colFlux, colFluxErr=None, **kwargs)
Definition functors.py:1602
dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition functors.py:1647
dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition functors.py:1641
__call__(self, catalog, **kwargs)
Definition functors.py:698
__init__(self, colXX, colXY, colYY, **kwargs)
Definition functors.py:1203
pd.Series _func(self, pd.DataFrame df)
Definition functors.py:1575
__init__(self, tuple[str]|list[str]|None bands=None, **kwargs)
Definition functors.py:1588
HtmPixelization provides HTM indexing of points and regions.
init_fromDict(initDict, basePath='lsst.pipe.tasks.functors', typeKey='functor', name=None)
Definition functors.py:60
mag_aware_eval(df, expr, log)
Definition functors.py:580