import astropy.units as u
from .parquetTable import MultilevelParquetTable
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary

    The object needs to be importable as
        f'{basePath}.{initDict[typeKey]}'
    The positional and keyword arguments (if any) are contained in
    "args" and "kwargs" entries in the dictionary, respectively.
    This is used in `functors.CompositeFunctor.from_yaml` to initialize
    a composite functor from a specification in a YAML file.

        Dictionary describing object's initialization. Must contain
        an entry keyed by ``typeKey`` that is the name of the object,
        relative to ``basePath``.
        Path relative to module in which ``initDict[typeKey]`` is defined.
        Key of ``initDict`` that is the name of the object
        (relative to ``basePath``).
    initDict = initDict.copy()
    pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = (f'Error in constructing functor "{name}" of type '
                   f'{pythonType.__name__} with args: {args}')
        raise type(e)(message, e.args)
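
# Illustrative sketch (not part of the original module; functor and column
# names assumed): a dictionary such as
#
#     {'functor': 'Mag', 'args': 'base_PsfFlux', 'filt': 'HSC-G'}
#
# passed to `init_fromDict` resolves the `Mag` class under `basePath`,
# pops 'args' for the positional arguments, and passes the remaining
# entries as keyword arguments, i.e. `Mag('base_PsfFlux', filt='HSC-G')`.
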
52 """Define and execute a calculation on a ParquetTable
54 The `__call__` method accepts a `ParquetTable` object, and returns the
55 result of the calculation as a single column. Each functor defines what
56 columns are needed for the calculation, and only these columns are read
57 from the `ParquetTable`.
59 The action of `__call__` consists of two steps: first, loading the
60 necessary columns from disk into memory as a `pandas.DataFrame` object;
61 and second, performing the computation on this dataframe and returning the
65 To define a new `Functor`, a subclass must define a `_func` method,
66 that takes a `pandas.DataFrame` and returns result in a `pandas.Series`.
67 In addition, it must define the following attributes
69 * `_columns`: The columns necessary to perform the calculation
70 * `name`: A name appropriate for a figure axis label
71 * `shortname`: A name appropriate for use as a dictionary key
    On initialization, a `Functor` should declare what filter (`filt` kwarg)
    and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be
    applied to. This enables the `_get_cols` method to extract the proper
    columns from the parquet file. If not specified, the dataset will fall
    back on the `_defaultDataset` attribute. If filter is not specified and
    `dataset` is anything other than `'ref'`, then an error will be raised
    when trying to perform the calculation.

    As currently implemented, `Functor` is only set up to expect a
    `ParquetTable` of the format of the `deepCoadd_obj` dataset; that is, a
    `MultilevelParquetTable` with the levels of the column index being
    `filter`, `dataset`, and `column`. This is defined in the `_columnLevels`
    attribute, as well as being implicit in the role of the `filt` and
    `dataset` attributes defined at initialization. In addition, the
    `_get_cols` method that reads the dataframe from the `ParquetTable` will
    return a dataframe with column index levels defined by the `_dfLevels`
    attribute; by default, this is just `column`.

    The `_columnLevels` and `_dfLevels` attributes should generally not need
    to be changed, unless `_func` needs columns from multiple filters or
    datasets to do the calculation. An example of this is the
    `lsst.pipe.tasks.functors.Color` functor, for which
    `_dfLevels = ('filter', 'column')`, and `_func` expects the dataframe it
    gets to have those levels in the column index.
        Filter upon which to do the calculation
        Dataset upon which to do the calculation
        (e.g., 'ref', 'meas', 'forced_src').

    _defaultDataset = 'ref'
    _columnLevels = ('filter', 'dataset', 'column')
    _dfLevels = ('column',)
    _defaultNoDup = False

    def __init__(self, filt=None, dataset=None, noDup=None):
        if self._noDup is not None:
128 """Columns required to perform calculation
130 if not hasattr(self,
'_columns'):
131 raise NotImplementedError(
'Must define columns property or _columns attribute')
136 raise ValueError(
'ParquetTable does not have the expected column levels. '
137 f
'Got {parq.columnLevels}; expected {self._columnLevels}.')
139 columnDict = {
'column': self.
columns,
        if self.filt is None:
            if 'filter' in parq.columnLevels:
                if self.dataset == 'ref':
                    columnDict['filter'] = parq.columnLevelNames['filter'][0]
                else:
                    raise ValueError(
                        f"'filt' not set for functor {self.name} "
                        f"(dataset {self.dataset}) and ParquetTable "
                        "contains multiple filters in column index. "
                        "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['filter'] = self.filt

        return parq._colsFromDict(columnDict)
    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on dataframe')

    def _get_cols(self, parq):
        """Retrieve dataframe necessary for calculation.

        Returns dataframe upon which `self._func` can act.
        """
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)
    def _setLevels(self, df):
        levelsToDrop = [n for n in df.columns.names
                        if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)

    def _dropna(self, vals):

        vals = self._func(df)
194 """Computes difference between functor called on two different ParquetTable objects
196 return self(parq1, **kwargs) - self(parq2, **kwargs)
199 return pd.Series(np.full(len(df), np.nan), index=df.index)
203 """Full name of functor (suitable for figure labels)
205 return NotImplementedError
209 """Short name of functor (suitable for column name/dict key)
215 """Perform multiple calculations at once on a catalog
217 The role of a `CompositeFunctor` is to group together computations from
218 multiple functors. Instead of returning `pandas.Series` a
219 `CompositeFunctor` returns a `pandas.Dataframe`, with the column names
220 being the keys of `funcDict`.
222 The `columns` attribute of a `CompositeFunctor` is the union of all columns
223 in all the component functors.
225 A `CompositeFunctor` does not use a `_func` method itself; rather,
226 when a `CompositeFunctor` is called, all its columns are loaded
227 at once, and the resulting dataframe is passed to the `_func` method of each component
228 functor. This has the advantage of only doing I/O (reading from parquet file) once,
229 and works because each individual `_func` method of each component functor does not
230 care if there are *extra* columns in the dataframe being passed; only that it must contain
231 *at least* the `columns` it expects.
233 An important and useful class method is `from_yaml`, which takes as argument the path to a YAML
234 file specifying a collection of functors.
238 funcs : `dict` or `list`
239 Dictionary or list of functors. If a list, then it will be converted
240 into a dictonary according to the `.shortname` attribute of each functor.
        if type(funcs) == dict:
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}

        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or '
                            'CompositeFunctor.')

        if self.filt is not None:
        return list(set([x for y in [f.columns
                                     for f in self.funcDict.values()]
                         for x in y]))

        return list(set([x for y in [f.multilevelColumns(parq)
                                     for f in self.funcDict.values()]
                         for x in y]))
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(df[f.multilevelColumns(parq)])
                    valDict[k] = f._func(subdf)
                except Exception:
                    valDict[k] = f.fail(subdf)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise
        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)

        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:

        return cls(funcs, **kwargs)
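
# Illustrative YAML specification consumed by `CompositeFunctor.from_file`
# (parsed and handed to `from_yaml`); functor, column, and rename-rule
# values below are assumed, following the keys handled above:
#
#     funcs:
#         psfMag:
#             functor: Mag
#             args: base_PsfFlux
#             filt: HSC-G
#     flag_rename_rules:
#         - [base_PixelFlags_flag, pixelFlags_flag]
#     refFlags:
#         - detect_isPrimary
#     flags:
#         - base_PixelFlags_flag_saturated
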
353 """Evaluate an expression on a DataFrame, knowing what the 'mag' function means
355 Builds on `pandas.DataFrame.eval`, which parses and executes math on dataframes.
359 df : pandas.DataFrame
360 Dataframe on which to evaluate expression.
366 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>)/log(10)', expr)
367 val = df.eval(expr_new, truediv=
True)
369 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
370 val = df.eval(expr_new, truediv=
True)
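
# Example of the rewrite performed above (column names assumed): the
# expression
#
#     'mag(modelfit_CModel) - mag(base_PsfFlux)'
#
# is first evaluated as
#
#     '-2.5*log(modelfit_CModel)/log(10) - -2.5*log(base_PsfFlux)/log(10)'
#
# and, if that raises because the bare column names are absent, '_instFlux'
# is appended to each `mag()` argument before re-evaluating.
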
375 """Arbitrary computation on a catalog
377 Column names (and thus the columns to be loaded from catalog) are found
378 by finding all words and trying to ignore all "math-y" words.
383 Expression to evaluate, to be parsed and executed by `mag_aware_eval`.
385 _ignore_words = (
'mag',
'sin',
'cos',
'exp',
'log',
'sqrt')
        flux_cols = re.findall(r'mag\(\s*(\w+)\s*\)', self.expr)

        cols = [c for c in re.findall(r'[a-zA-Z_]+', self.expr)
                if c not in self._ignore_words]

            if not re.search('_instFlux$', c):
                cols.append(f'{c}_instFlux')

        return list(set([c for c in cols if c not in not_a_col]))
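
# Illustrative usage (column names assumed; the order of `columns` is not
# guaranteed, since the list passes through a set):
#
#     >>> func = CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)')
#     >>> sorted(func.columns)
#     ['base_PsfFlux_instFlux', 'modelfit_CModel_instFlux']
#     >>> color = func(parq)   # evaluated via mag_aware_eval
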
415 """Get column with specified name
435 """Return the value of the index for each object
438 columns = [
'coord_ra']
439 _defaultDataset =
'ref'
443 return pd.Series(df.index, index=df.index)
448 _allow_difference =
False
452 return pd.Series(df.index, index=df.index)
456 col =
'base_Footprint_nPix'
460 """Base class for coordinate column, in degrees
469 output = df[self.
col] * 180 / np.pi
if self.
_radians else df[self.
col]
474 """Right Ascension, in degrees
480 super().
__init__(
'coord_ra', **kwargs)
483 return super().
__call__(catalog, **kwargs)
487 """Declination, in degrees
493 super().
__init__(
'coord_dec', **kwargs)
496 return super().
__call__(catalog, **kwargs)
500 if not col.endswith(
'_instFlux'):
506 if not col.endswith(
'_instFluxErr'):
507 col +=
'_instFluxErr'
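
# Behavior sketch of the two helpers above:
#
#     >>> fluxName('modelfit_CModel')
#     'modelfit_CModel_instFlux'
#     >>> fluxName('modelfit_CModel_instFlux')   # already qualified
#     'modelfit_CModel_instFlux'
#     >>> fluxErrName('modelfit_CModel')
#     'modelfit_CModel_instFluxErr'
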
512 """Compute calibrated magnitude
514 Takes a `calib` argument, which returns the flux at mag=0
515 as `calib.getFluxMag0()`. If not provided, then the default
516 `fluxMag0` is 63095734448.0194, which is default for HSC.
517 This default should be removed in DM-21955
519 This calculation hides warnings about invalid values and dividing by zero.
521 As for all functors, a `dataset` and `filt` kwarg should be provided upon
522 initialization. Unlike the default `Functor`, however, the default dataset
523 for a `Mag` is `'meas'`, rather than `'ref'`.
528 Name of flux column from which to compute magnitude. Can be parseable
529 by `lsst.pipe.tasks.functors.fluxName` function---that is, you can pass
530 `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'`) and it will
532 calib : `lsst.afw.image.calib.Calib` (optional)
533 Object that knows zero point.
535 _defaultDataset =
'meas'
        if calib is not None:

        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')

        return f'mag_{self.col}'
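
# The underlying calculation is the usual instrumental-magnitude formula
# (sketch, consistent with the warning-suppressed computation above;
# fluxMag0 defaults to the HSC value quoted in the docstring):
#
#     mag = -2.5 * np.log10(df[fluxName(col)] / fluxMag0)
#
# e.g. `Mag('base_PsfFlux', filt='HSC-G')(parq)` reads
# 'base_PsfFlux_instFlux' from the 'meas' dataset and returns magnitudes
# as a `pandas.Series`.
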
564 """Compute calibrated magnitude uncertainty
566 Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`.
571 calib : `lsst.afw.image.calib.Calib` (optional)
572 Object that knows zero point.
577 if self.
calib is not None:
584 return [self.
col, self.
col +
'Err']
587 with np.warnings.catch_warnings():
588 np.warnings.filterwarnings(
'ignore',
r'invalid value encountered')
589 np.warnings.filterwarnings(
'ignore',
r'divide by zero')
590 fluxCol, fluxErrCol = self.
columns
591 x = df[fluxErrCol] / df[fluxCol]
593 magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
598 return super().name +
'_err'
    """Functor to calculate magnitude difference"""
    _defaultDataset = 'meas'

        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col1]/df[self.col2])

        return f'(mag_{self.col1} - mag_{self.col2})'

        return f'magDiff_{self.col1}_{self.col2}'
639 """Compute the color between two filters
641 Computes color by initializing two different `Mag`
642 functors based on the `col` and filters provided, and
643 then returning the difference.
645 This is enabled by the `_func` expecting a dataframe with a
646 multilevel column index, with both `'filter'` and `'column'`,
647 instead of just `'column'`, which is the `Functor` default.
648 This is controlled by the `_dfLevels` attribute.
650 Also of note, the default dataset for `Color` is `forced_src'`,
651 whereas for `Mag` it is `'meas'`.
656 Name of flux column from which to compute; same as would be passed to
657 `lsst.pipe.tasks.functors.Mag`.
660 Filters from which to compute magnitude difference.
661 Color computed is `Mag(filt2) - Mag(filt1)`.
663 _defaultDataset =
'forced_src'
664 _dfLevels = (
'filter',
'column')
            raise RuntimeError("Cannot compute Color for %s: %s - %s "
                               % (col, filt2, filt1))

        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])

        return [self.mag1.col, self.mag2.col]

        return f'{self.filt2} - {self.filt1} ({self.col})'

        return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
710 """Main function of this subclass is to override the dropna=True
713 _allow_difference =
False
718 return super().
__call__(parq, dropna=
False, **kwargs)
722 _columns = [
"base_ClassificationExtendedness_value"]
723 _column =
"base_ClassificationExtendedness_value"
728 test = (x < 0.5).astype(int)
729 test = test.mask(mask, 2)
734 label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
735 index=x.index, name=
'label')
737 label = label.astype(str)
    _columns = ['numStarFlags']
    labels = {"star": 0, "maybe": 1, "notStar": 2}

        n = len(x.unique()) - 1

        labels = ['noStar', 'maybe', 'star']
        label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
                          index=x.index, name='label')

        label = label.astype(str)
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")

        """Calculate deconvolved moments"""
        if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
            hsm = (df["ext_shapeHSM_HsmSourceMoments_xx"]
                   + df["ext_shapeHSM_HsmSourceMoments_yy"])
        else:
            hsm = np.ones(len(df))*np.nan
        sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
        if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
            psf = (df["ext_shapeHSM_HsmPsfMoments_xx"]
                   + df["ext_shapeHSM_HsmPsfMoments_yy"])
        else:
            raise RuntimeError('No psf shape parameter found in catalog')

        return hsm.where(np.isfinite(hsm), sdss) - psf
789 """Functor to calculate SDSS trace radius size for sources"""
790 name =
"SDSS Trace Size"
791 shortname =
'sdssTrace'
792 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy")
795 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
800 """Functor to calculate SDSS trace radius size difference (%) between object and psf model"""
801 name =
"PSF - SDSS Trace Size"
802 shortname =
'psf_sdssTrace'
803 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy",
804 "base_SdssShape_psf_xx",
"base_SdssShape_psf_yy")
807 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
808 psfSize = np.sqrt(0.5*(df[
"base_SdssShape_psf_xx"] + df[
"base_SdssShape_psf_yy"]))
809 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
814 """Functor to calculate HSM trace radius size for sources"""
815 name =
'HSM Trace Size'
816 shortname =
'hsmTrace'
817 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
818 "ext_shapeHSM_HsmSourceMoments_yy")
821 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
822 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
827 """Functor to calculate HSM trace radius size difference (%) between object and psf model"""
828 name =
'PSF - HSM Trace Size'
829 shortname =
'psf_HsmTrace'
830 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
831 "ext_shapeHSM_HsmSourceMoments_yy",
832 "ext_shapeHSM_HsmPsfMoments_xx",
833 "ext_shapeHSM_HsmPsfMoments_yy")
836 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
837 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
838 psfSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmPsfMoments_xx"]
839 + df[
"ext_shapeHSM_HsmPsfMoments_yy"]))
840 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx',
                'ext_shapeHSM_HsmPsfMoments_yy')
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))

            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx']
                 + df['ext_shapeHSM_HsmPsfMoments_yy']))

    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"

    name = "Ellipticity e2"
909 """Computations using the stored localWcs.
911 name =
"LocalWcsOperations"
926 """Compute the distance on the sphere from x2, y1 to x1, y1.
934 cd11 : `pandas.Series`
935 [1, 1] element of the local Wcs affine transform.
936 cd11 : `pandas.Series`
937 [1, 1] element of the local Wcs affine transform.
938 cd12 : `pandas.Series`
939 [1, 2] element of the local Wcs affine transform.
940 cd21 : `pandas.Series`
941 [2, 1] element of the local Wcs affine transform.
942 cd22 : `pandas.Series`
943 [2, 2] element of the local Wcs affine transform.
948 RA and dec conversion of x and y given the local Wcs. Returned
949 units are in radians.
952 return (x * cd11 + y * cd12, x * cd21 + y * cd22)
955 """Compute the local pixel scale conversion.
959 ra1 : `pandas.Series`
960 Ra of the first coordinate in radians.
961 dec1 : `pandas.Series`
962 Dec of the first coordinate in radians.
963 ra2 : `pandas.Series`
964 Ra of the second coordinate in radians.
965 dec2 : `pandas.Series`
966 Dec of the second coordinate in radians.
970 dist : `pandas.Series`
971 Distance on the sphere in radians.
973 deltaDec = dec2 - dec1
975 return 2 * np.arcsin(
977 np.sin(deltaDec / 2) ** 2
978 + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
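
# Sanity check of the haversine formula above (input values assumed): two
# points on the equator separated by 1 arcsecond of RA.
#
#     >>> ra1 = dec1 = dec2 = 0.0
#     >>> ra2 = np.radians(1.0 / 3600.0)
#     >>> 2 * np.arcsin(np.sqrt(np.sin(0.0 / 2) ** 2
#     ...                       + np.cos(dec2) * np.cos(dec1)
#     ...                       * np.sin((ra2 - ra1) / 2) ** 2))
#     4.8481...e-06   # radians, i.e. 1 arcsecond
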
981 """Compute the distance on the sphere from x2, y1 to x1, y1.
993 cd11 : `pandas.Series`
994 [1, 1] element of the local Wcs affine transform.
995 cd11 : `pandas.Series`
996 [1, 1] element of the local Wcs affine transform.
997 cd12 : `pandas.Series`
998 [1, 2] element of the local Wcs affine transform.
999 cd21 : `pandas.Series`
1000 [2, 1] element of the local Wcs affine transform.
1001 cd22 : `pandas.Series`
1002 [2, 2] element of the local Wcs affine transform.
1006 Distance : `pandas.Series`
1007 Arcseconds per pixel at the location of the local WC
1016 """Compute the local pixel scale from the stored CDMatrix.
1028 """Compute the local pixel to scale conversion in arcseconds.
1032 cd11 : `pandas.Series`
1033 [1, 1] element of the local Wcs affine transform in radians.
1034 cd11 : `pandas.Series`
1035 [1, 1] element of the local Wcs affine transform in radians.
1036 cd12 : `pandas.Series`
1037 [1, 2] element of the local Wcs affine transform in radians.
1038 cd21 : `pandas.Series`
1039 [2, 1] element of the local Wcs affine transform in radians.
1040 cd22 : `pandas.Series`
1041 [2, 2] element of the local Wcs affine transform in radians.
1045 pixScale : `pandas.Series`
1046 Arcseconds per pixel at the location of the local WC
1048 return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
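
# Worked example of the determinant-based scale above (input values
# assumed): a diagonal CD matrix with cd11 = cd22 = 9.70e-7 radians per
# pixel and cd12 = cd21 = 0 gives
#
#     >>> 3600 * np.degrees(np.sqrt(np.fabs(9.70e-7 * 9.70e-7 - 0.0 * 0.0)))
#     0.20007...   # arcseconds per pixel
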
    def _func(self, df):

    """Convert a value from units of pixels to units of arcseconds.
    """

        return f"{self.col}_asArcseconds"

    def _func(self, df):

    name = 'Reference Band'
    shortname = 'refBand'

        return ["merge_measurement_i",
                "merge_measurement_r",
                "merge_measurement_z",
                "merge_measurement_y",
                "merge_measurement_g"]
    def _func(self, df):
        def getFilterAliasName(row):
            # get the name of the filter band with the max value
            colName = row.idxmax()
            return colName.replace('merge_measurement_', '')

        return df[self.columns].apply(getFilterAliasName, axis=1)
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    FIVE_OVER_2LOG10 = 1.085736204758129569

    def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
        if calib is not None:

        return f'mag_{self.col}'

        if np.abs(a) < np.abs(b):
            a, b = b, a
        q = b / a
        return np.abs(a) * np.sqrt(1. + q*q)
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5 * np.log10(dn/fluxMag0)

        retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)

        retVal = (self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err)
                  / self.dn2flux(dn, fluxMag0))

    def _func(self, df):

    def _func(self, df):
        return pd.Series(retArr, index=df.index)

    def _func(self, df):

    def _func(self, df):
        return pd.Series(retArr, index=df.index)
1204 """Base class for calibrating the specified instrument flux column using
1205 the local photometric calibration.
1210 Name of the instrument flux column.
1211 instFluxErrCol : `str`
1212 Name of the assocated error columns for ``instFluxCol``.
1213 photoCalibCol : `str`
1214 Name of local calibration column.
1215 photoCalibErrCol : `str`
1216 Error associated with ``photoCalibCol``
1226 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1241 """Convert instrument flux to nanojanskys.
1245 instFlux : `numpy.ndarray` or `pandas.Series`
1246 Array of instrument flux measurements
1247 localCalib : `numpy.ndarray` or `pandas.Series`
1248 Array of local photometric calibration estimates.
1252 calibFlux : `numpy.ndarray` or `pandas.Series`
1253 Array of calibrated flux measurements.
1255 return instFlux * localCalib
1258 """Convert instrument flux to nanojanskys.
1262 instFlux : `numpy.ndarray` or `pandas.Series`
1263 Array of instrument flux measurements
1264 instFluxErr : `numpy.ndarray` or `pandas.Series`
1265 Errors on associated ``instFlux`` values
1266 localCalib : `numpy.ndarray` or `pandas.Series`
1267 Array of local photometric calibration estimates.
1268 localCalibErr : `numpy.ndarray` or `pandas.Series`
1269 Errors on associated ``localCalib`` values
1273 calibFluxErr : `numpy.ndarray` or `pandas.Series`
1274 Errors on calibrated flux measurements.
1276 return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
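
# Sketch of the two conversions above (input values assumed):
#
#     >>> instFlux, instFluxErr = 1000.0, 10.0
#     >>> localCalib, localCalibErr = 0.5, 0.01
#     >>> instFlux * localCalib                    # calibrated flux in nJy
#     500.0
#     >>> np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
#     11.18033...
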
1279 """Convert instrument flux to nanojanskys.
1283 instFlux : `numpy.ndarray` or `pandas.Series`
1284 Array of instrument flux measurements
1285 localCalib : `numpy.ndarray` or `pandas.Series`
1286 Array of local photometric calibration estimates.
1290 calibMag : `numpy.ndarray` or `pandas.Series`
1291 Array of calibrated AB magnitudes.
1296 """Convert instrument flux err to nanojanskys.
1300 instFlux : `numpy.ndarray` or `pandas.Series`
1301 Array of instrument flux measurements
1302 instFluxErr : `numpy.ndarray` or `pandas.Series`
1303 Errors on associated ``instFlux`` values
1304 localCalib : `numpy.ndarray` or `pandas.Series`
1305 Array of local photometric calibration estimates.
1306 localCalibErr : `numpy.ndarray` or `pandas.Series`
1307 Errors on associated ``localCalib`` values
1311 calibMagErr: `numpy.ndarray` or `pandas.Series`
1312 Error on calibrated AB magnitudes.
1319 """Compute calibrated fluxes using the local calibration value.
1335 return f
'flux_{self.instFluxCol}'
1337 def _func(self, df):
1342 """Compute calibrated flux errors using the local calibration value.
1359 return f
'fluxErr_{self.instFluxCol}'
1361 def _func(self, df):
1367 """Compute calibrated AB magnitudes using the local calibration value.
1383 return f
'mag_{self.instFluxCol}'
1385 def _func(self, df):
1391 """Compute calibrated AB magnitude errors using the local calibration value.
1408 return f
'magErr_{self.instFluxCol}'
1410 def _func(self, df):