22__all__ = [
"init_fromDict",
"Functor",
"CompositeFunctor",
"mag_aware_eval",
23 "CustomFunctor",
"Column",
"Index",
"CoordColumn",
"RAColumn",
24 "DecColumn",
"SinglePrecisionFloatColumn",
"HtmIndex20",
"fluxName",
"fluxErrName",
"Mag",
25 "MagErr",
"MagDiff",
"Color",
"DeconvolvedMoments",
"SdssTraceSize",
26 "PsfSdssTraceSizeDiff",
"HsmTraceSize",
"PsfHsmTraceSizeDiff",
27 "HsmFwhm",
"E1",
"E2",
"RadiusFromQuadrupole",
"LocalWcs",
28 "ComputePixelScale",
"ConvertPixelToArcseconds",
29 "ConvertPixelSqToArcsecondsSq",
30 "ConvertDetectorAngleToPositionAngle",
31 "ReferenceBand",
"Photometry",
32 "NanoJansky",
"NanoJanskyErr",
"LocalPhotometry",
"LocalNanojansky",
33 "LocalNanojanskyErr",
"LocalDipoleMeanFlux",
34 "LocalDipoleMeanFluxErr",
"LocalDipoleDiffFlux",
35 "LocalDipoleDiffFluxErr",
"Ebv",
import logging
import os
import re
import warnings
from contextlib import redirect_stdout
from itertools import product

import astropy.units as u
import numpy as np
import pandas as pd
from astropy.coordinates import SkyCoord

from lsst.daf.butler import DeferredDatasetHandle
from lsst.utils.introspection import get_full_type_name
60 typeKey='functor', name=None):
61 """Initialize an object defined in a dictionary.
63 The object needs to be importable as f'{basePath}.{initDict[typeKey]}'.
64 The positional and keyword arguments (if any) are contained in "args" and
65 "kwargs" entries in the dictionary, respectively.
66 This is used in `~lsst.pipe.tasks.functors.CompositeFunctor.from_yaml` to
67 initialize a composite functor from a specification in a YAML file.
72 Dictionary describing object's initialization.
73 Must contain an entry keyed by ``typeKey`` that is the name of the
74 object, relative to ``basePath``.
76 Path relative to module in which ``initDict[typeKey]`` is defined.
78 Key of ``initDict`` that is the name of the object (relative to
81 initDict = initDict.copy()
83 pythonType = doImport(f
'{basePath}.{initDict.pop(typeKey)}')
85 if 'args' in initDict:
86 args = initDict.pop(
'args')
87 if isinstance(args, str):
90 element = pythonType(*args, **initDict)
91 except Exception
as e:
92 message = f
'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
93 raise type(e)(message, e.args)
98 """Define and execute a calculation on a DataFrame or Handle holding a
101 The `__call__` method accepts either a `~pandas.DataFrame` object or a
102 `~lsst.daf.butler.DeferredDatasetHandle` or
103 `~lsst.pipe.base.InMemoryDatasetHandle`, and returns the
104 result of the calculation as a single column.
105 Each functor defines what columns are needed for the calculation, and only
106 these columns are read from the dataset handle.
108 The action of `__call__` consists of two steps: first, loading the
109 necessary columns from disk into memory as a `~pandas.DataFrame` object;
110 and second, performing the computation on this DataFrame and returning the
113 To define a new `Functor`, a subclass must define a `_func` method,
114 that takes a `~pandas.DataFrame` and returns result in a `~pandas.Series`.
115 In addition, it must define the following attributes:
117 * `_columns`: The columns necessary to perform the calculation
118 * `name`: A name appropriate for a figure axis label
119 * `shortname`: A name appropriate for use as a dictionary key
121 On initialization, a `Functor` should declare what band (``filt`` kwarg)
122 and dataset (e.g. ``'ref'``, ``'meas'``, ``'forced_src'``) it is intended
124 This enables the `_get_data` method to extract the proper columns from the
126 If not specified, the dataset will fall back on the `_defaultDataset`
128 If band is not specified and ``dataset`` is anything other than ``'ref'``,
129 then an error will be raised when trying to perform the calculation.
131 Originally, `Functor` was set up to expect datasets formatted like the
132 ``deepCoadd_obj`` dataset; that is, a DataFrame with a multi-level column
133 index, with the levels of the column index being ``band``, ``dataset``, and
135 It has since been generalized to apply to DataFrames without multi-level
136 indices and multi-level indices with just ``dataset`` and ``column``
138 In addition, the `_get_data` method that reads the columns from the
139 underlying data will return a DataFrame with column index levels defined by
140 the `_dfLevels` attribute; by default, this is ``column``.
142 The `_dfLevels` attributes should generally not need to be changed, unless
143 `_func` needs columns from multiple filters or datasets to do the
145 An example of this is the `~lsst.pipe.tasks.functors.Color` functor, for
146 which `_dfLevels = ('band', 'column')`, and `_func` expects the DataFrame
147 it gets to have those levels in the column index.
152 Band upon which to do the calculation.
155 Dataset upon which to do the calculation (e.g., 'ref', 'meas',
159 _defaultDataset =
'ref'
160 _dfLevels = (
'column',)
161 _defaultNoDup =
False
163 def __init__(self, filt=None, dataset=None, noDup=None):
167 self.
log = logging.getLogger(type(self).__name__)
163 def __init__(self, filt=None, dataset=None, noDup=None):
…
171 """Do not explode by band if used on object table."""
172 if self.
_noDup is not None:
179 """Columns required to perform calculation."""
180 if not hasattr(self,
'_columns'):
181 raise NotImplementedError(
'Must define columns property or _columns attribute')
185 """Gets the names of the column index levels.
187 This should only be called in the context of a multilevel table.
192 The data to be read, can be a
193 `~lsst.daf.butler.DeferredDatasetHandle` or
194 `~lsst.pipe.base.InMemoryDatasetHandle`.
195 columnIndex (optional): pandas `~pandas.Index` object
196 If not passed, then it is read from the
197 `~lsst.daf.butler.DeferredDatasetHandle`
198 for `~lsst.pipe.base.InMemoryDatasetHandle`.
200 if columnIndex
is None:
201 columnIndex = data.get(component=
"columns")
202 return columnIndex.names
205 """Gets the content of each of the column levels for a multilevel
208 if columnIndex
is None:
209 columnIndex = data.get(component=
"columns")
211 columnLevels = columnIndex.names
213 level: list(np.unique(np.array([c
for c
in columnIndex])[:, i]))
214 for i, level
in enumerate(columnLevels)
216 return columnLevelNames
219 """Converts dictionary column specficiation to a list of columns."""
223 for i, lev
in enumerate(columnLevels):
225 if isinstance(colDict[lev], str):
226 new_colDict[lev] = [colDict[lev]]
228 new_colDict[lev] = colDict[lev]
230 new_colDict[lev] = columnIndex.levels[i]
232 levelCols = [new_colDict[lev]
for lev
in columnLevels]
233 cols = list(product(*levelCols))
234 colsAvailable = [col
for col
in cols
if col
in columnIndex]
238 """Returns columns needed by functor from multilevel dataset.
240 To access tables with multilevel column structure, the
241 `~lsst.daf.butler.DeferredDatasetHandle` or
242 `~lsst.pipe.base.InMemoryDatasetHandle` needs to be passed
243 either a list of tuples or a dictionary.
248 The data as either `~lsst.daf.butler.DeferredDatasetHandle`, or
249 `~lsst.pipe.base.InMemoryDatasetHandle`.
250 columnIndex (optional): pandas `~pandas.Index` object
251 Either passed or read in from
252 `~lsst.daf.butler.DeferredDatasetHandle`.
253 `returnTuple` : `bool`
254 If true, then return a list of tuples rather than the column
255 dictionary specification.
256 This is set to `True` by `CompositeFunctor` in order to be able to
257 combine columns from the various component functors.
260 if not isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
261 raise RuntimeError(f
"Unexpected data type. Got {get_full_type_name(data)}.")
263 if columnIndex
is None:
264 columnIndex = data.get(component=
"columns")
270 columnDict = {
'column': self.
columns,
272 if self.
filt is None:
274 if "band" in columnLevels:
276 columnDict[
"band"] = columnLevelNames[
"band"][0]
278 raise ValueError(f
"'filt' not set for functor {self.name}"
279 f
"(dataset {self.dataset}) "
281 "contains multiple filters in column index. "
282 "Set 'filt' or set 'dataset' to 'ref'.")
284 columnDict[
'band'] = self.
filt
287 return self.
_colsFromDict(columnDict, columnIndex=columnIndex)
292 raise NotImplementedError(
'Must define calculation on DataFrame')
295 """Return columnIndex."""
297 if isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
298 return data.get(component=
"columns")
303 """Retrieve DataFrame necessary for calculation.
305 The data argument can be a `~pandas.DataFrame`, a
306 `~lsst.daf.butler.DeferredDatasetHandle`, or
307 an `~lsst.pipe.base.InMemoryDatasetHandle`.
309 Returns a DataFrame upon which `self._func` can act.
313 if isinstance(data, pd.DataFrame):
314 _data = InMemoryDatasetHandle(data, storageClass=
"DataFrame")
315 elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
318 raise RuntimeError(f
"Unexpected type provided for data. Got {get_full_type_name(data)}.")
323 is_multiLevel = isinstance(columnIndex, pd.MultiIndex)
332 df = _data.get(parameters={
"columns": columns})
341 levelsToDrop = [n
for n
in df.columns.names
if n
not in self.
_dfLevels]
342 df.columns = df.columns.droplevel(levelsToDrop)
351 vals = self.
_func(df)
352 except Exception
as e:
353 self.
log.error(
"Exception in %s call: %s: %s", self.
name, type(e).__name__, e)
361 """Computes difference between functor called on two different
362 DataFrame/Handle objects.
364 return self(data1, **kwargs) - self(data2, **kwargs)
367 return pd.Series(np.full(len(df), np.nan), index=df.index)
371 """Full name of functor (suitable for figure labels)."""
372 return NotImplementedError
376 """Short name of functor (suitable for column name/dict key)."""
381 """Perform multiple calculations at once on a catalog.
383 The role of a `CompositeFunctor` is to group together computations from
385 Instead of returning `~pandas.Series` a `CompositeFunctor` returns a
386 `~pandas.DataFrame`, with the column names being the keys of ``funcDict``.
388 The `columns` attribute of a `CompositeFunctor` is the union of all columns
389 in all the component functors.
391 A `CompositeFunctor` does not use a `_func` method itself; rather, when a
392 `CompositeFunctor` is called, all its columns are loaded at once, and the
393 resulting DataFrame is passed to the `_func` method of each component
395 This has the advantage of only doing I/O (reading from parquet file) once,
396 and works because each individual `_func` method of each component functor
397 does not care if there are *extra* columns in the DataFrame being passed;
398 only that it must contain *at least* the `columns` it expects.
400 An important and useful class method is `from_yaml`, which takes as an
401 argument the path to a YAML file specifying a collection of functors.
405 funcs : `dict` or `list`
406 Dictionary or list of functors.
407 If a list, then it will be converted into a dictonary according to the
408 `.shortname` attribute of each functor.
411 name =
"CompositeFunctor"
415 if type(funcs)
is dict:
418 self.
funcDict = {f.shortname: f
for f
in funcs}
436 """Update the functor with new functors."""
437 if isinstance(new, dict):
439 elif isinstance(new, CompositeFunctor):
442 raise TypeError(
'Can only update with dictionary or CompositeFunctor.')
445 if self.
filt is not None:
450 return list(set([x
for y
in [f.columns
for f
in self.
funcDict.values()]
for x
in y]))
460 f.multilevelColumns(data, returnTuple=
True, **kwargs)
for f
in self.
funcDict.values()
468 """Apply the functor to the data table.
473 The data represented as `~lsst.daf.butler.DeferredDatasetHandle`,
474 `~lsst.pipe.base.InMemoryDatasetHandle`, or `~pandas.DataFrame`.
475 The table or a pointer to a table on disk from which columns can
478 if isinstance(data, pd.DataFrame):
479 _data = InMemoryDatasetHandle(data, storageClass=
"DataFrame")
480 elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
483 raise RuntimeError(f
"Unexpected type provided for data. Got {get_full_type_name(data)}.")
487 if isinstance(columnIndex, pd.MultiIndex):
489 df = _data.get(parameters={
"columns": columns})
494 subdf = f._setLevels(
495 df[f.multilevelColumns(_data, returnTuple=
True, columnIndex=columnIndex)]
497 valDict[k] = f._func(subdf)
498 except Exception
as e:
500 "Exception in %s (funcs: %s) call: %s",
506 valDict[k] = f.fail(subdf)
511 df = _data.get(parameters={
"columns": self.
columns})
513 valDict = {k: f._func(df)
for k, f
in self.
funcDict.items()}
516 for name, colVal
in valDict.items():
517 if len(colVal.shape) != 1:
518 raise RuntimeError(
"Transformed column '%s' is not the shape of a column. "
519 "It is shaped %s and type %s." % (name, colVal.shape, type(colVal)))
522 valDf = pd.concat(valDict, axis=1)
524 print([(k, type(v))
for k, v
in valDict.items()])
527 if kwargs.get(
'dropna',
False):
528 valDf = valDf.dropna(how=
'any')
534 if renameRules
is None:
536 for old, new
in renameRules:
537 if col.startswith(old):
538 col = col.replace(old, new)
544 filename = os.path.expandvars(filename)
545 with open(filename)
as f:
546 translationDefinition = yaml.safe_load(f)
548 return cls.
from_yaml(translationDefinition, **kwargs)
553 for func, val
in translationDefinition[
'funcs'].items():
556 if 'flag_rename_rules' in translationDefinition:
557 renameRules = translationDefinition[
'flag_rename_rules']
561 if 'calexpFlags' in translationDefinition:
562 for flag
in translationDefinition[
'calexpFlags']:
563 funcs[cls.
renameCol(flag, renameRules)] =
Column(flag, dataset=
'calexp')
565 if 'refFlags' in translationDefinition:
566 for flag
in translationDefinition[
'refFlags']:
569 if 'forcedFlags' in translationDefinition:
570 for flag
in translationDefinition[
'forcedFlags']:
571 funcs[cls.
renameCol(flag, renameRules)] =
Column(flag, dataset=
'forced_src')
573 if 'flags' in translationDefinition:
574 for flag
in translationDefinition[
'flags']:
577 return cls(funcs, **kwargs)
581 """Evaluate an expression on a DataFrame, knowing what the 'mag' function
584 Builds on `pandas.DataFrame.eval`, which parses and executes math on
589 df : ~pandas.DataFrame
590 DataFrame on which to evaluate expression.
596 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>)/log(10)', expr)
597 val = df.eval(expr_new)
598 except Exception
as e:
599 log.error(
"Exception in mag_aware_eval: %s: %s", type(e).__name__, e)
600 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
601 val = df.eval(expr_new)
606 """Arbitrary computation on a catalog.
608 Column names (and thus the columns to be loaded from catalog) are found by
609 finding all words and trying to ignore all "math-y" words.
614 Expression to evaluate, to be parsed and executed by
615 `~lsst.pipe.tasks.functors.mag_aware_eval`.
617 _ignore_words = (
'mag',
'sin',
'cos',
'exp',
'log',
'sqrt')
629 flux_cols = re.findall(
r'mag\(\s*(\w+)\s*\)', self.
expr)
631 cols = [c
for c
in re.findall(
r'[a-zA-Z_]+', self.
expr)
if c
not in self.
_ignore_words]
634 if not re.search(
'_instFlux$', c):
635 cols.append(f
'{c}_instFlux')
640 return list(set([c
for c
in cols
if c
not in not_a_col]))
647 """Get column with a specified name."""
666 """Return the value of the index for each object."""
668 columns = [
'coord_ra']
669 _defaultDataset =
'ref'
673 return pd.Series(df.index, index=df.index)
677 """Base class for coordinate column, in degrees."""
686 output = df[self.
col] * 180 / np.pi
if self.
_radians else df[self.
col]
691 """Right Ascension, in degrees."""
696 super().
__init__(
'coord_ra', **kwargs)
699 return super().
__call__(catalog, **kwargs)
703 """Declination, in degrees."""
708 super().
__init__(
'coord_dec', **kwargs)
711 return super().
__call__(catalog, **kwargs)
715 """Uncertainty in Right Ascension, in degrees."""
720 super().
__init__(
'coord_raErr', **kwargs)
724 """Uncertainty in declination, in degrees."""
729 super().
__init__(
'coord_decErr', **kwargs)
733 """Coordinate covariance column, in degrees."""
739 super().
__init__(
'coord_ra_dec_Cov', **kwargs)
744 output = df[self.
col]*(180/np.pi)**2
if self.
_radians else df[self.
col]
749 """A column with a band in a multiband table."""
760 """A float32 MultibandColumn"""
762 return super().
_func(df).astype(np.float32)
766 """Return a column cast to a single-precision float."""
769 return df[self.
col].astype(np.float32)
773 """Compute the level 20 HtmIndex for the catalog.
777 This functor was implemented to satisfy requirements of old APDB interface
778 which required the ``pixelId`` column in DiaObject with HTM20 index.
779 The APDB interface had migrated to not need that information, but we keep
780 this class in case it may be useful for something else.
795 def computePixel(row):
804 return self.
pixelator.index(sphPoint.getVector())
806 return df.apply(computePixel, axis=1, result_type=
'reduce').astype(
'int64')
810 """Append _instFlux to the column name if it doesn't have it already."""
811 if not col.endswith(
'_instFlux'):
817 """Append _instFluxErr to the column name if it doesn't have it already."""
818 if not col.endswith(
'_instFluxErr'):
819 col +=
'_instFluxErr'
824 """Compute calibrated magnitude.
826 Returns the flux at mag=0.
827 The default ``fluxMag0`` is 63095734448.0194, which is default for HSC.
828 TO DO: This default should be made configurable in DM-21955.
830 This calculation hides warnings about invalid values and dividing by zero.
832 As with all functors, a ``dataset`` and ``filt`` kwarg should be provided
834 Unlike the default `Functor`, however, the default dataset for a `Mag` is
835 ``'meas'``, rather than ``'ref'``.
840 Name of flux column from which to compute magnitude.
841 Can be parseable by the `~lsst.pipe.tasks.functors.fluxName` function;
842 that is, you can pass ``'modelfit_CModel'`` instead of
843 ``'modelfit_CModel_instFlux'``, and it will understand.
845 _defaultDataset =
'meas'
859 with warnings.catch_warnings():
860 warnings.filterwarnings(
'ignore',
r'invalid value encountered')
861 warnings.filterwarnings(
'ignore',
r'divide by zero')
866 return f
'mag_{self.col}'
870 """Compute calibrated magnitude uncertainty.
875 Name of the flux column.
888 with warnings.catch_warnings():
889 warnings.filterwarnings(
'ignore',
r'invalid value encountered')
890 warnings.filterwarnings(
'ignore',
r'divide by zero')
891 fluxCol, fluxErrCol = self.
columns
892 x = df[fluxErrCol] / df[fluxCol]
894 magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
899 return super().name +
'_err'
903 """Functor to calculate magnitude difference."""
904 _defaultDataset =
'meas'
916 with warnings.catch_warnings():
917 warnings.filterwarnings(
'ignore',
r'invalid value encountered')
918 warnings.filterwarnings(
'ignore',
r'divide by zero')
919 return -2.5*np.log10(df[self.
col1]/df[self.
col2])
923 return f
'(mag_{self.col1} - mag_{self.col2})'
927 return f
'magDiff_{self.col1}_{self.col2}'
931 """Compute the color between two filters.
933 Computes color by initializing two different `Mag` functors based on the
934 ``col`` and filters provided, and then returning the difference.
936 This is enabled by the `_func` method expecting a DataFrame with a
937 multilevel column index, with both ``'band'`` and ``'column'``, instead of
938 just ``'column'``, which is the `Functor` default.
939 This is controlled by the `_dfLevels` attribute.
941 Also of note, the default dataset for `Color` is ``forced_src'``, whereas
942 for `Mag` it is ``'meas'``.
947 Name of the flux column from which to compute; same as would be passed
948 to `~lsst.pipe.tasks.functors.Mag`.
951 Filters from which to compute magnitude difference.
952 Color computed is ``Mag(filt2) - Mag(filt1)``.
954 _defaultDataset =
'forced_src'
955 _dfLevels = (
'band',
'column')
961 raise RuntimeError(
"Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
979 mag2 = self.mag2.
_func(df[self.filt2])
980 mag1 = self.mag1.
_func(df[self.filt1])
985 return [self.
mag1.col, self.
mag2.col]
992 return f
'{self.filt2} - {self.filt1} ({self.col})'
996 return f
"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
1000 """This functor subtracts the trace of the PSF second moments from the
1001 trace of the second moments of the source.
1003 If the HsmShapeAlgorithm measurement is valid, then these will be used for
1005 Otherwise, the SdssShapeAlgorithm measurements will be used.
1007 name =
'Deconvolved Moments'
1008 shortname =
'deconvolvedMoments'
1009 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
1010 "ext_shapeHSM_HsmSourceMoments_yy",
1011 "base_SdssShape_xx",
"base_SdssShape_yy",
1012 "ext_shapeHSM_HsmPsfMoments_xx",
1013 "ext_shapeHSM_HsmPsfMoments_yy")
1016 """Calculate deconvolved moments."""
1017 if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
1018 hsm = df[
"ext_shapeHSM_HsmSourceMoments_xx"] + df[
"ext_shapeHSM_HsmSourceMoments_yy"]
1020 hsm = np.ones(len(df))*np.nan
1021 sdss = df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]
1022 if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
1023 psf = df[
"ext_shapeHSM_HsmPsfMoments_xx"] + df[
"ext_shapeHSM_HsmPsfMoments_yy"]
1028 raise RuntimeError(
'No psf shape parameter found in catalog')
1030 return hsm.where(np.isfinite(hsm), sdss) - psf
1034 """Functor to calculate the SDSS trace radius size for sources.
1036 The SDSS trace radius size is a measure of size equal to the square root of
1037 half of the trace of the second moments tensor measured with the
1038 SdssShapeAlgorithm plugin.
1039 This has units of pixels.
1041 name =
"SDSS Trace Size"
1042 shortname =
'sdssTrace'
1043 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy")
1046 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
1051 """Functor to calculate the SDSS trace radius size difference (%) between
1052 the object and the PSF model.
1058 name =
"PSF - SDSS Trace Size"
1059 shortname =
'psf_sdssTrace'
1060 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy",
1061 "base_SdssShape_psf_xx",
"base_SdssShape_psf_yy")
1064 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
1065 psfSize = np.sqrt(0.5*(df[
"base_SdssShape_psf_xx"] + df[
"base_SdssShape_psf_yy"]))
1066 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1071 """Functor to calculate the HSM trace radius size for sources.
1073 The HSM trace radius size is a measure of size equal to the square root of
1074 half of the trace of the second moments tensor measured with the
1075 HsmShapeAlgorithm plugin.
1076 This has units of pixels.
1078 name =
'HSM Trace Size'
1079 shortname =
'hsmTrace'
1080 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
1081 "ext_shapeHSM_HsmSourceMoments_yy")
1084 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
1085 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
1090 """Functor to calculate the HSM trace radius size difference (%) between
1091 the object and the PSF model.
1097 name =
'PSF - HSM Trace Size'
1098 shortname =
'psf_HsmTrace'
1099 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
1100 "ext_shapeHSM_HsmSourceMoments_yy",
1101 "ext_shapeHSM_HsmPsfMoments_xx",
1102 "ext_shapeHSM_HsmPsfMoments_yy")
1105 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
1106 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
1107 psfSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmPsfMoments_xx"]
1108 + df[
"ext_shapeHSM_HsmPsfMoments_yy"]))
1109 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1114 """Functor to calculate the PSF FWHM with second moments measured from the
1115 HsmShapeAlgorithm plugin.
1117 This is in units of arcseconds, and assumes the hsc_rings_v1 skymap pixel
1118 scale of 0.168 arcseconds/pixel.
1122 This conversion assumes the PSF is Gaussian, which is not always the case.
1124 name =
'HSM Psf FWHM'
1125 _columns = (
'ext_shapeHSM_HsmPsfMoments_xx',
'ext_shapeHSM_HsmPsfMoments_yy')
1128 SIGMA2FWHM = 2*np.sqrt(2*np.log(2))
1132 0.5*(df[
'ext_shapeHSM_HsmPsfMoments_xx']
1133 + df[
'ext_shapeHSM_HsmPsfMoments_yy']))).astype(np.float32)
1137 r"""Calculate :math:`e_1` ellipticity component for sources, defined as:
1140 e_1 &= (I_{xx}-I_{yy})/(I_{xx}+I_{yy})
1146 name =
"Distortion Ellipticity (e1)"
1147 shortname =
"Distortion"
1162 + df[self.
colYY])).astype(np.float32)
1166 r"""Calculate :math:`e_2` ellipticity component for sources, defined as:
1169 e_2 &= 2I_{xy}/(I_{xx}+I_{yy})
1175 name =
"Ellipticity e2"
1188 return (2*df[self.
colXY] / (df[self.
colXX] + df[self.
colYY])).astype(np.float32)
1192 """Calculate the radius from the quadrupole moments.
1194 This returns the fourth root of the determinant of the second moments
1195 tensor, which has units of pixels.
1214 return ((df[self.
colXX]*df[self.
colYY] - df[self.
colXY]**2)**0.25).astype(np.float32)
1218 """Computations using the stored localWcs."""
1219 name =
"LocalWcsOperations"
1234 """Compute the dRA, dDec from dx, dy.
1238 x : `~pandas.Series`
1240 y : `~pandas.Series`
1242 cd11 : `~pandas.Series`
1243 [1, 1] element of the local Wcs affine transform.
1244 cd12 : `~pandas.Series`
1245 [1, 2] element of the local Wcs affine transform.
1246 cd21 : `~pandas.Series`
1247 [2, 1] element of the local Wcs affine transform.
1248 cd22 : `~pandas.Series`
1249 [2, 2] element of the local Wcs affine transform.
1254 RA and Dec conversion of x and y given the local Wcs.
1255 Returned units are in radians.
1259 If x and y are with respect to the CRVAL1, CRVAL2
1260 then this will return the RA, Dec for that WCS.
1262 return (x * cd11 + y * cd12, x * cd21 + y * cd22)
1265 """Compute the local pixel scale conversion.
1269 ra1 : `~pandas.Series`
1270 Ra of the first coordinate in radians.
1271 dec1 : `~pandas.Series`
1272 Dec of the first coordinate in radians.
1273 ra2 : `~pandas.Series`
1274 Ra of the second coordinate in radians.
1275 dec2 : `~pandas.Series`
1276 Dec of the second coordinate in radians.
1280 dist : `~pandas.Series`
1281 Distance on the sphere in radians.
1283 deltaDec = dec2 - dec1
1285 return 2 * np.arcsin(
1287 np.sin(deltaDec / 2) ** 2
1288 + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
1291 """Compute the distance on the sphere from x2, y1 to x1, y1.
1295 x1 : `~pandas.Series`
1297 y1 : `~pandas.Series`
1299 x2 : `~pandas.Series`
1301 y2 : `~pandas.Series`
1303 cd11 : `~pandas.Series`
1304 [1, 1] element of the local Wcs affine transform.
1305 cd12 : `~pandas.Series`
1306 [1, 2] element of the local Wcs affine transform.
1307 cd21 : `~pandas.Series`
1308 [2, 1] element of the local Wcs affine transform.
1309 cd22 : `~pandas.Series`
1310 [2, 2] element of the local Wcs affine transform.
1314 Distance : `~pandas.Series`
1315 Arcseconds per pixel at the location of the local WC.
1323 """Compute position angle (E of N) from (ra1, dec1) to (ra2, dec2).
1327 ra1 : iterable [`float`]
1328 RA of the first coordinate [radian].
1329 dec1 : iterable [`float`]
1330 Dec of the first coordinate [radian].
1331 ra2 : iterable [`float`]
1332 RA of the second coordinate [radian].
1333 dec2 : iterable [`float`]
1334 Dec of the second coordinate [radian].
1338 Position Angle: `~pandas.Series`
1343 (ra1, dec1) -> (ra2, dec2) is interpreted as the shorter way around the sphere
1345 For a separation of 0.0001 rad, the position angle is good to 0.0009 rad
1346 all over the sphere.
1350 position_angle = np.zeros(len(ra1))
1351 for i, (r1, d1, r2, d2)
in enumerate(zip(ra1, dec1, ra2, dec2)):
1354 bearing = point1.bearingTo(point2)
1355 pa_ref_angle =
geom.Angle(np.pi/2, geom.radians)
1356 pa = pa_ref_angle - bearing
1359 position_angle[i] = pa.asRadians()
1361 return pd.Series(position_angle)
1364 """Compute position angle (E of N) from detector angle (+y of +x).
1369 detector angle [radian]
1371 [1, 1] element of the local Wcs affine transform.
1373 [1, 2] element of the local Wcs affine transform.
1375 [2, 1] element of the local Wcs affine transform.
1377 [2, 2] element of the local Wcs affine transform.
1381 Position Angle: `~pandas.Series`
1394 """Compute the local pixel scale from the stored CDMatrix.
1406 """Compute the local pixel to scale conversion in arcseconds.
1410 cd11 : `~pandas.Series`
1411 [1, 1] element of the local Wcs affine transform in radians.
1412 cd11 : `~pandas.Series`
1413 [1, 1] element of the local Wcs affine transform in radians.
1414 cd12 : `~pandas.Series`
1415 [1, 2] element of the local Wcs affine transform in radians.
1416 cd21 : `~pandas.Series`
1417 [2, 1] element of the local Wcs affine transform in radians.
1418 cd22 : `~pandas.Series`
1419 [2, 2] element of the local Wcs affine transform in radians.
1423 pixScale : `~pandas.Series`
1424 Arcseconds per pixel at the location of the local WC.
1426 return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
1436 """Convert a value in units of pixels to units of arcseconds."""
1454 return f
"{self.col}_asArcseconds"
1472 """Convert a value in units of pixels squared to units of arcseconds
1492 return f
"{self.col}_asArcsecondsSq"
1507 return df[self.
col] * pixScale * pixScale
1511 """Compute a position angle from a detector angle and the stored CDMatrix.
1515 position angle : degrees
1518 name =
"PositionAngle"
1530 super().
__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
1553 """Return the band used to seed multiband forced photometry.
1555 This functor is to be used on Object tables.
1556 It converts the boolean merge_measurements_{band} columns into a single
1557 string representing the first band for which merge_measurements_{band}
1560 Assumes the default priority order of i, r, z, y, g, u.
1562 name =
'Reference Band'
1563 shortname =
'refBand'
1565 band_order = (
"i",
"r",
"z",
"y",
"g",
"u")
1573 return [f
"merge_measurement_{band}" for band
in bands]
1575 def _func(self, df: pd.DataFrame) -> pd.Series:
1576 def getFilterAliasName(row):
1578 colName = row.idxmax()
1579 return colName.replace(
'merge_measurement_',
'')
1583 columns = [col
for col
in self.
columns if col
in df.columns]
1585 return df[columns].apply(getFilterAliasName, axis=1,
1586 result_type=
'reduce').astype(
'object')
1575 def _func(self, df: pd.DataFrame) -> pd.Series:
…
1588 def __init__(self, bands: tuple[str] | list[str] |
None =
None, **kwargs):
1588 def __init__(self, bands: tuple[str] | list[str] |
None =
None, **kwargs):
…
1594 """Base class for Object table calibrated fluxes and magnitudes."""
1596 AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
1597 LOG_AB_FLUX_SCALE = 12.56
1598 FIVE_OVER_2LOG10 = 1.085736204758129569
1602 def __init__(self, colFlux, colFluxErr=None, **kwargs):
1618 return f
'mag_{self.col}'
1622 """Compute sqrt(a^2 + b^2) without under/overflow."""
1623 if np.abs(a) < np.abs(b):
1628 return np.abs(a) * np.sqrt(1. + q*q)
1631 """Convert instrumental flux to nanojanskys."""
1632 return (self.
AB_FLUX_SCALE * dn / fluxMag0).astype(np.float32)
1635 """Convert instrumental flux to AB magnitude."""
1636 with warnings.catch_warnings():
1637 warnings.filterwarnings(
'ignore',
r'invalid value encountered')
1638 warnings.filterwarnings(
'ignore',
r'divide by zero')
1639 return (-2.5 * np.log10(dn/fluxMag0)).astype(np.float32)
1642 """Convert instrumental flux error to nanojanskys."""
1643 retVal = self.
vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
1645 return retVal.astype(np.float32)
1648 """Convert instrumental flux error to AB magnitude error."""
1649 retVal = self.
dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.
dn2flux(dn, fluxMag0)
1654 """Convert instrumental flux to nanojanskys."""
1660 """Convert instrumental flux error to nanojanskys."""
1667 return pd.Series(retArr, index=df.index)
1671 """Base class for calibrating the specified instrument flux column using
1672 the local photometric calibration.
1677 Name of the instrument flux column.
1678 instFluxErrCol : `str`
1679 Name of the assocated error columns for ``instFluxCol``.
1680 photoCalibCol : `str`
1681 Name of local calibration column.
1682 photoCalibErrCol : `str`, optional
1683 Error associated with ``photoCalibCol``. Ignored and deprecated; will
1684 be removed after v29.
1691 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1697 photoCalibErrCol=None,
1703 if photoCalibErrCol
is not None:
1704 warnings.warn(
"The photoCalibErrCol argument is deprecated and will be removed after v29.",
1705 category=FutureWarning)
1709 """Convert instrument flux to nanojanskys.
1713 instFlux : `~numpy.ndarray` or `~pandas.Series`
1714 Array of instrument flux measurements.
1715 localCalib : `~numpy.ndarray` or `~pandas.Series`
1716 Array of local photometric calibration estimates.
1720 calibFlux : `~numpy.ndarray` or `~pandas.Series`
1721 Array of calibrated flux measurements.
1723 return instFlux * localCalib
1726 """Convert instrument flux to nanojanskys.
1730 instFlux : `~numpy.ndarray` or `~pandas.Series`
1731 Array of instrument flux measurements. Ignored (accepted for
1732 backwards compatibility and consistency with magnitude-error
1733 calculation methods).
1734 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1735 Errors on associated ``instFlux`` values.
1736 localCalib : `~numpy.ndarray` or `~pandas.Series`
1737 Array of local photometric calibration estimates.
1738 localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
1739 Errors on associated ``localCalib`` values. Ignored and deprecated;
1740 will be removed after v29.
1744 calibFluxErr : `~numpy.ndarray` or `~pandas.Series`
1745 Errors on calibrated flux measurements.
1748 if localCalibErr
is not None:
1749 warnings.warn(
"The localCalibErr argument is deprecated and will be removed after v29.",
1750 category=FutureWarning)
1751 return instFluxErr * localCalib
1754 """Convert instrument flux to nanojanskys.
1758 instFlux : `~numpy.ndarray` or `~pandas.Series`
1759 Array of instrument flux measurements.
1760 localCalib : `~numpy.ndarray` or `~pandas.Series`
1761 Array of local photometric calibration estimates.
1765 calibMag : `~numpy.ndarray` or `~pandas.Series`
1766 Array of calibrated AB magnitudes.
1771 """Convert instrument flux err to nanojanskys.
1775 instFlux : `~numpy.ndarray` or `~pandas.Series`
1776 Array of instrument flux measurements.
1777 instFluxErr : `~numpy.ndarray` or `~pandas.Series`
1778 Errors on associated ``instFlux`` values.
1779 localCalib : `~numpy.ndarray` or `~pandas.Series`
1780 Array of local photometric calibration estimates.
1781 localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
1782 Errors on associated ``localCalib`` values. Ignored and deprecated;
1783 will be removed after v29.
1787 calibMagErr: `~numpy.ndarray` or `~pandas.Series`
1788 Error on calibrated AB magnitudes.
1791 if localCalibErr
is not None:
1792 warnings.warn(
"The localCalibErr argument is deprecated and will be removed after v29.",
1793 category=FutureWarning)
1799 """Compute calibrated fluxes using the local calibration value.
1801 This returns units of nanojanskys.
1810 return f
'flux_{self.instFluxCol}'
1818 """Compute calibrated flux errors using the local calibration value.
1820 This returns units of nanojanskys.
1829 return f
'fluxErr_{self.instFluxCol}'
1837 """Compute absolute mean of dipole fluxes.
1843 LocalDipoleMeanFluxErr
1845 LocalDipoleDiffFluxErr
1854 photoCalibErrCol=None,
1875 return f
'dipMeanFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1883 """Compute the error on the absolute mean of dipole fluxes.
1891 LocalDipoleDiffFluxErr
1904 return f
'dipMeanFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1911 """Compute the absolute difference of dipole fluxes.
1913 Calculated value is (abs(pos) - abs(neg)).
1920 LocalDipoleMeanFluxErr
1921 LocalDipoleDiffFluxErr
1932 return f
'dipDiffFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1940 """Compute the error on the absolute difference of dipole fluxes.
1947 LocalDipoleMeanFluxErr
1961 return f
'dipDiffFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1968 """Compute E(B-V) from dustmaps.sfd."""
1969 _defaultDataset =
'ref'
1976 with open(os.devnull,
"w")
as devnull:
1977 with redirect_stdout(devnull):
1978 from dustmaps.sfd
import SFDQuery
1984 coords = SkyCoord(df[
'coord_ra'].values * u.rad, df[
'coord_dec'].values * u.rad)
1985 ebv = self.
sfd(coords)
1986 return pd.Series(ebv, index=df.index).astype(
'float32')
1990 """Base class for functors that use shape moments and localWCS"""
2024 """Return the component of the moments tensor aligned with the RA axis, in radians."""
2031 return (CD_1_1*(i_xx*CD_1_1 + i_xy*CD_2_1)
2032 + CD_1_2*(i_xy*CD_1_1 + i_yy*CD_2_1))
2035 """Return the component of the moments tensor aligned with the dec axis, in radians."""
2042 return (CD_2_1*(i_xx*CD_1_2 + i_xy*CD_2_2)
2043 + CD_2_2*(i_xy*CD_1_2 + i_yy*CD_2_2))
2046 """Return the covariance of the moments tensor in ra, dec coordinates, in radians."""
2054 return ((CD_1_1 * i_xx + CD_1_2 * i_xy) * CD_2_1
2055 + (CD_1_1 * i_xy + CD_1_2 * i_yy) * CD_2_2)
2059 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2060 _defaultDataset =
'meas'
2062 shortname =
"moments_uu"
2065 sky_uu_radians = self.
sky_uu(df)
2067 return pd.Series(sky_uu_radians*((180/np.pi)*3600)**2, index=df.index).astype(
'float32')
2071 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2072 _defaultDataset =
'meas'
2074 shortname =
"moments_vv"
2077 sky_vv_radians = self.
sky_vv(df)
2079 return pd.Series(sky_vv_radians*((180/np.pi)*3600)**2, index=df.index).astype(
'float32')
2083 """Rotate pixel moments Ixx,Iyy,Iyy into ra,dec frame and arcseconds"""
2084 _defaultDataset =
'meas'
2086 shortname =
"moments_uv"
2089 sky_uv_radians = self.
sky_uv(df)
2091 return pd.Series(sky_uv_radians*((180/np.pi)*3600)**2, index=df.index).astype(
'float32')
2095 """Compute position angle relative to ra,dec frame, in degrees."""
2096 _defaultDataset =
'meas'
2097 name =
"moments_theta"
2098 shortname =
"moments_theta"
2105 theta = 0.5*np.arctan2(2*sky_uv, sky_uu - sky_vv)
2107 return pd.Series(np.degrees(np.array(theta)), index=df.index).astype(
'float32')
2111 """Compute the semimajor axis length in arcseconds"""
2112 _defaultDataset =
'meas'
2114 shortname =
"moments_a"
2123 xx_p_yy = sky_uu + sky_vv
2124 xx_m_yy = sky_uu - sky_vv
2125 t = np.sqrt(xx_m_yy * xx_m_yy + 4 * sky_uv * sky_uv)
2126 a_radians = np.sqrt(0.5 * (xx_p_yy + t))
2128 return pd.Series(np.degrees(a_radians)*3600, index=df.index).astype(
'float32')
2132 """Compute the semiminor axis length in arcseconds"""
2133 _defaultDataset =
'meas'
2135 shortname =
"moments_b"
2144 xx_p_yy = sky_uu + sky_vv
2145 xx_m_yy = sky_uu - sky_vv
2146 t = np.sqrt(xx_m_yy * xx_m_yy + 4 * sky_uv * sky_uv)
2147 b_radians = np.sqrt(0.5 * (xx_p_yy - t))
2149 return pd.Series(np.degrees(b_radians)*3600, index=df.index).astype(
'float32')
A class representing an angle.
Point in an unspecified spherical coordinate system.
__init__(self, col, filt2, filt1, **kwargs)
multilevelColumns(self, parq, **kwargs)
__init__(self, col, **kwargs)
multilevelColumns(self, data, **kwargs)
__call__(self, data, **kwargs)
from_file(cls, filename, **kwargs)
renameCol(cls, col, renameRules)
from_yaml(cls, translationDefinition, **kwargs)
__init__(self, funcs, **kwargs)
pixelScaleArcseconds(self, cd11, cd12, cd21, cd22)
__init__(self, theta_col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
__init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
__init__(self, col, **kwargs)
__init__(self, expr, **kwargs)
__call__(self, catalog, **kwargs)
__init__(self, colXX, colXY, colYY, **kwargs)
__init__(self, colXX, colXY, colYY, **kwargs)
_func(self, df, dropna=True)
_get_columnIndex(self, data)
multilevelColumns(self, data, columnIndex=None, returnTuple=False)
__call__(self, data, dropna=False)
_get_data_columnLevels(self, data, columnIndex=None)
_colsFromDict(self, colDict, columnIndex=None)
difference(self, data1, data2, **kwargs)
_get_data_columnLevelNames(self, data, columnIndex=None)
__init__(self, filt=None, dataset=None, noDup=None)
__init__(self, ra, dec, **kwargs)
__init__(self, instFluxPosCol, instFluxNegCol, instFluxPosErrCol, instFluxNegErrCol, photoCalibCol, photoCalibErrCol=None, **kwargs)
instFluxToNanojansky(self, instFlux, localCalib)
instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None)
instFluxToMagnitude(self, instFlux, localCalib)
__init__(self, instFluxCol, instFluxErrCol, photoCalibCol, photoCalibErrCol=None, **kwargs)
instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None)
computeSkySeparation(self, ra1, dec1, ra2, dec2)
__init__(self, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22)
getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22)
computePositionAngle(self, ra1, dec1, ra2, dec2)
getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22)
__init__(self, col1, col2, **kwargs)
__init__(self, *args, **kwargs)
__init__(self, col, **kwargs)
__init__(self, shape_xx, shape_yy, shape_xy, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
__init__(self, col, band_to_check, **kwargs)
dn2flux(self, dn, fluxMag0)
__init__(self, colFlux, colFluxErr=None, **kwargs)
dn2mag(self, dn, fluxMag0)
dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
__call__(self, catalog, **kwargs)
__init__(self, colXX, colXY, colYY, **kwargs)
pd.Series _func(self, pd.DataFrame df)
__init__(self, tuple[str]|list[str]|None bands=None, **kwargs)
HtmPixelization provides HTM indexing of points and regions.
init_fromDict(initDict, basePath='lsst.pipe.tasks.functors', typeKey='functor', name=None)
mag_aware_eval(df, expr, log)