import yaml
import re

import pandas as pd
import numpy as np
import astropy.units as u

from lsst.daf.persistence import doImport
from .parquetTable import MultilevelParquetTable
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary

    The object needs to be importable as
        f'{basePath}.{initDict[typeKey]}'
    The positional and keyword arguments (if any) are contained in
    "args" and "kwargs" entries in the dictionary, respectively.
    This is used in `functors.CompositeFunctor.from_yaml` to initialize
    a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : dictionary
        Dictionary describing the object's initialization. Must contain
        an entry keyed by ``typeKey`` that is the name of the object,
        relative to ``basePath``.
    basePath : str
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : str
        Key of ``initDict`` that is the name of the object
        (relative to ``basePath``).
    """
    initDict = initDict.copy()
    pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = (f'Error in constructing functor "{name}" of type '
                   f'{pythonType.__name__} with args: {args}')
        raise type(e)(message, e.args)
    return element
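# A hedged usage sketch (assumes the `Mag` functor defined later in this
# module resolves via `doImport('lsst.pipe.tasks.functors.Mag')`):
#
#     >>> initDict = {'functor': 'Mag', 'args': 'base_PsfFlux', 'filt': 'HSC-G'}
#     >>> psfMag = init_fromDict(initDict, name='psfMag')
#     >>> psfMag.columns
#     ['base_PsfFlux_instFlux']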
52 """Define and execute a calculation on a ParquetTable
54 The `__call__` method accepts a `ParquetTable` object, and returns the
55 result of the calculation as a single column. Each functor defines what
56 columns are needed for the calculation, and only these columns are read
57 from the `ParquetTable`.
59 The action of `__call__` consists of two steps: first, loading the
60 necessary columns from disk into memory as a `pandas.DataFrame` object;
61 and second, performing the computation on this dataframe and returning the
65 To define a new `Functor`, a subclass must define a `_func` method,
66 that takes a `pandas.DataFrame` and returns result in a `pandas.Series`.
67 In addition, it must define the following attributes
69 * `_columns`: The columns necessary to perform the calculation
70 * `name`: A name appropriate for a figure axis label
71 * `shortname`: A name appropriate for use as a dictionary key
73 On initialization, a `Functor` should declare what filter (`filt` kwarg)
74 and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be
75 applied to. This enables the `_get_cols` method to extract the proper
76 columns from the parquet file. If not specified, the dataset will fall back
77 on the `_defaultDataset`attribute. If filter is not specified and `dataset`
78 is anything other than `'ref'`, then an error will be raised when trying to
79 perform the calculation.
81 As currently implemented, `Functor` is only set up to expect a
82 `ParquetTable` of the format of the `deepCoadd_obj` dataset; that is, a
83 `MultilevelParquetTable` with the levels of the column index being `filter`,
84 `dataset`, and `column`. This is defined in the `_columnLevels` attribute,
85 as well as being implicit in the role of the `filt` and `dataset` attributes
86 defined at initialization. In addition, the `_get_cols` method that reads
87 the dataframe from the `ParquetTable` will return a dataframe with column
88 index levels defined by the `_dfLevels` attribute; by default, this is
91 The `_columnLevels` and `_dfLevels` attributes should generally not need to
92 be changed, unless `_func` needs columns from multiple filters or datasets
93 to do the calculation.
94 An example of this is the `lsst.pipe.tasks.functors.Color` functor, for
95 which `_dfLevels = ('filter', 'column')`, and `_func` expects the dataframe
96 it gets to have those levels in the column index.
101 Filter upon which to do the calculation
104 Dataset upon which to do the calculation
105 (e.g., 'ref', 'meas', 'forced_src').
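# A minimal sketch of a new functor (hedged; mirrors the trace-size functors
# defined later in this module):
#
#     class TraceSize(Functor):
#         name = 'Trace radius'
#         shortname = 'traceSize'
#         _columns = ('base_SdssShape_xx', 'base_SdssShape_yy')
#
#         def _func(self, df):
#             return np.sqrt(0.5*(df[self._columns[0]] + df[self._columns[1]]))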
    _defaultDataset = 'ref'
    _columnLevels = ('filter', 'dataset', 'column')
    _dfLevels = ('column',)
    _defaultNoDup = False

    def __init__(self, filt=None, dataset=None, noDup=None):
        self.filt = filt
        self.dataset = dataset if dataset is not None else self._defaultDataset
        self._noDup = noDup

    @property
    def noDup(self):
        if self._noDup is not None:
            return self._noDup
        else:
            return self._defaultNoDup
128 """Columns required to perform calculation
130 if not hasattr(self,
'_columns'):
131 raise NotImplementedError(
'Must define columns property or _columns attribute')
    def multilevelColumns(self, parq):
        if not set(parq.columnLevels) == set(self._columnLevels):
            raise ValueError('ParquetTable does not have the expected column levels. '
                             f'Got {parq.columnLevels}; expected {self._columnLevels}.')

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            if 'filter' in parq.columnLevels:
                if self.dataset == 'ref':
                    columnDict['filter'] = parq.columnLevelNames['filter'][0]
                else:
                    raise ValueError(f"'filt' not set for functor {self.name} "
                                     f"(dataset {self.dataset}) and ParquetTable "
                                     "contains multiple filters in column index. "
                                     "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['filter'] = self.filt

        return parq._colsFromDict(columnDict)
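# The columnDict above maps column-index levels to selections; a hedged
# sketch of what parq._colsFromDict is expected to expand it into:
#
#     >>> columnDict = {'filter': 'HSC-G', 'dataset': 'meas',
#     ...               'column': ['base_PsfFlux_instFlux']}
#     # -> [('HSC-G', 'meas', 'base_PsfFlux_instFlux')] for a column index
#     #    with levels ('filter', 'dataset', 'column')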
    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on dataframe')
    def _get_cols(self, parq):
        """Retrieve dataframe necessary for calculation.

        Returns dataframe upon which `self._func` can act.
        """
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            df = self._setLevels(df)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)
        return df
    def _setLevels(self, df):
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df
    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, parq, dropna=False):
        df = self._get_cols(parq)
        try:
            vals = self._func(df)
        except Exception:
            vals = self.fail(df)
        if dropna:
            vals = self._dropna(vals)
        return vals

    def fail(self, df):
        return pd.Series(np.full(len(df), np.nan), index=df.index)
198 """Full name of functor (suitable for figure labels)
200 return NotImplementedError
204 """Short name of functor (suitable for column name/dict key)
210 """Perform multiple calculations at once on a catalog
212 The role of a `CompositeFunctor` is to group together computations from
213 multiple functors. Instead of returning `pandas.Series` a
214 `CompositeFunctor` returns a `pandas.Dataframe`, with the column names
215 being the keys of `funcDict`.
217 The `columns` attribute of a `CompositeFunctor` is the union of all columns
218 in all the component functors.
220 A `CompositeFunctor` does not use a `_func` method itself; rather,
221 when a `CompositeFunctor` is called, all its columns are loaded
222 at once, and the resulting dataframe is passed to the `_func` method of each component
223 functor. This has the advantage of only doing I/O (reading from parquet file) once,
224 and works because each individual `_func` method of each component functor does not
225 care if there are *extra* columns in the dataframe being passed; only that it must contain
226 *at least* the `columns` it expects.
228 An important and useful class method is `from_yaml`, which takes as argument the path to a YAML
229 file specifying a collection of functors.
233 funcs : `dict` or `list`
234 Dictionary or list of functors. If a list, then it will be converted
235 into a dictonary according to the `.shortname` attribute of each functor.
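# A hedged usage sketch (`parq` is a MultilevelParquetTable for a
# deepCoadd_obj-like dataset; the functors used here are defined below):
#
#     >>> funcs = {'ra': RAColumn(), 'dec': DecColumn(),
#     ...          'psfMag': Mag('base_PsfFlux', filt='HSC-G')}
#     >>> composite = CompositeFunctor(funcs)
#     >>> df = composite(parq)   # DataFrame with columns 'ra', 'dec', 'psfMag'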
    def __init__(self, funcs, **kwargs):
        if isinstance(funcs, dict):
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}
        super().__init__(**kwargs)
    def update(self, new):
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')
        # Make sure new functors have the same 'filt' set.
        if self.filt is not None:
            self.filt = self.filt
    @property
    def columns(self):
        return list(set([x for y in [f.columns for f in self.funcDict.values()]
                         for x in y]))

    def multilevelColumns(self, parq):
        return list(set([x for y in [f.multilevelColumns(parq)
                                     for f in self.funcDict.values()]
                         for x in y]))
    def __call__(self, parq, **kwargs):
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(df[f.multilevelColumns(parq)])
                    valDict[k] = f._func(subdf)
                except Exception:
                    valDict[k] = f.fail(subdf)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)
            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise

        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf
    @classmethod
    def renameCol(cls, col, renameRules):
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col
    @classmethod
    def from_file(cls, filename, **kwargs):
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)
        return cls.from_yaml(translationDefinition, **kwargs)
    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)
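# A hedged sketch of the YAML this consumes (functor names must resolve via
# init_fromDict; the flag column names here are hypothetical):
#
#     funcs:
#         psfMag:
#             functor: Mag
#             args: base_PsfFlux
#             filt: HSC-G
#     refFlags:
#         - calib_psfUsed
#     flags:
#         - base_PixelFlags_flag_interpolated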
348 """Evaluate an expression on a DataFrame, knowing what the 'mag' function means
350 Builds on `pandas.DataFrame.eval`, which parses and executes math on dataframes.
354 df : pandas.DataFrame
355 Dataframe on which to evaluate expression.
361 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>)/log(10)', expr)
362 val = df.eval(expr_new, truediv=
True)
364 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
365 val = df.eval(expr_new, truediv=
True)
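# Hedged example: 'mag(...)' is rewritten to a log expression on the named
# column; if evaluation fails, the '_instFlux' suffix is tried instead:
#
#     >>> df = pd.DataFrame({'a': [100.], 'b': [10.]})
#     >>> mag_aware_eval(df, 'mag(a) - mag(b)')
#     0   -2.5
#     dtype: float64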
370 """Arbitrary computation on a catalog
372 Column names (and thus the columns to be loaded from catalog) are found
373 by finding all words and trying to ignore all "math-y" words.
378 Expression to evaluate, to be parsed and executed by `mag_aware_eval`.
380 _ignore_words = (
'mag',
'sin',
'cos',
'exp',
'log',
'sqrt')
392 flux_cols = re.findall(
r'mag\(\s*(\w+)\s*\)', self.
expr)
394 cols = [c
for c
in re.findall(
r'[a-zA-Z_]+', self.
expr)
if c
not in self.
_ignore_words]
397 if not re.search(
'_instFlux$', c):
398 cols.append(f
'{c}_instFlux')
403 return list(
set([c
for c
in cols
if c
not in not_a_col]))
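# Hedged example of the column parsing above:
#
#     >>> f = CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)')
#     >>> sorted(f.columns)
#     ['base_PsfFlux_instFlux', 'modelfit_CModel_instFlux']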
410 """Get column with specified name
430 """Return the value of the index for each object
433 columns = [
'coord_ra']
434 _defaultDataset =
'ref'
438 return pd.Series(df.index, index=df.index)
class IDColumn(Column):
    col = 'id'
    _allow_difference = False

    def _func(self, df):
        return pd.Series(df.index, index=df.index)


class FootprintNPix(Column):
    col = 'base_Footprint_nPix'
455 """Base class for coordinate column, in degrees
464 output = df[self.
col] * 180 / np.pi
if self.
_radians else df[self.
col]
469 """Right Ascension, in degrees
475 super().
__init__(
'coord_ra', **kwargs)
478 return super().
__call__(catalog, **kwargs)
482 """Declination, in degrees
488 super().
__init__(
'coord_dec', **kwargs)
491 return super().
__call__(catalog, **kwargs)
def fluxName(col):
    if not col.endswith('_instFlux'):
        col += '_instFlux'
    return col


def fluxErrName(col):
    if not col.endswith('_instFluxErr'):
        col += '_instFluxErr'
    return col
507 """Compute calibrated magnitude
509 Takes a `calib` argument, which returns the flux at mag=0
510 as `calib.getFluxMag0()`. If not provided, then the default
511 `fluxMag0` is 63095734448.0194, which is default for HSC.
512 This default should be removed in DM-21955
514 This calculation hides warnings about invalid values and dividing by zero.
516 As for all functors, a `dataset` and `filt` kwarg should be provided upon
517 initialization. Unlike the default `Functor`, however, the default dataset
518 for a `Mag` is `'meas'`, rather than `'ref'`.
523 Name of flux column from which to compute magnitude. Can be parseable
524 by `lsst.pipe.tasks.functors.fluxName` function---that is, you can pass
525 `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'`) and it will
527 calib : `lsst.afw.image.calib.Calib` (optional)
528 Object that knows zero point.
530 _defaultDataset =
'meas'
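# A hedged usage sketch (no `calib`, so the HSC default fluxMag0 applies):
#
#     >>> psfMag = Mag('base_PsfFlux', filt='HSC-G')
#     >>> psfMag.columns
#     ['base_PsfFlux_instFlux']
#     >>> psfMag.name
#     'mag_base_PsfFlux_instFlux'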
    def __init__(self, col, calib=None, **kwargs):
        self.col = fluxName(col)
        self.calib = calib
        if calib is not None:
            self.fluxMag0 = calib.getFluxMag0()[0]
        else:
            # TO DO: DM-21955 Replace hard-coded photometric calibration values.
            self.fluxMag0 = 63095734448.0194
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col] / self.fluxMag0)

    @property
    def name(self):
        return f'mag_{self.col}'
559 """Compute calibrated magnitude uncertainty
561 Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`.
566 calib : `lsst.afw.image.calib.Calib` (optional)
567 Object that knows zero point.
572 if self.
calib is not None:
579 return [self.
col, self.
col +
'Err']
582 with np.warnings.catch_warnings():
583 np.warnings.filterwarnings(
'ignore',
r'invalid value encountered')
584 np.warnings.filterwarnings(
'ignore',
r'divide by zero')
585 fluxCol, fluxErrCol = self.
columns
586 x = df[fluxErrCol] / df[fluxCol]
588 magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
593 return super().name +
'_err'
class MagDiff(Functor):
    """Functor to calculate magnitude difference"""
    _defaultDataset = 'meas'
    def __init__(self, col1, col2, **kwargs):
        self.col1 = fluxName(col1)
        self.col2 = fluxName(col2)
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col1, self.col2]

    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col1]/df[self.col2])

    @property
    def name(self):
        return f'(mag_{self.col1} - mag_{self.col2})'

    @property
    def shortname(self):
        return f'magDiff_{self.col1}_{self.col2}'
634 """Compute the color between two filters
636 Computes color by initializing two different `Mag`
637 functors based on the `col` and filters provided, and
638 then returning the difference.
640 This is enabled by the `_func` expecting a dataframe with a
641 multilevel column index, with both `'filter'` and `'column'`,
642 instead of just `'column'`, which is the `Functor` default.
643 This is controlled by the `_dfLevels` attribute.
645 Also of note, the default dataset for `Color` is `forced_src'`,
646 whereas for `Mag` it is `'meas'`.
651 Name of flux column from which to compute; same as would be passed to
652 `lsst.pipe.tasks.functors.Mag`.
655 Filters from which to compute magnitude difference.
656 Color computed is `Mag(filt2) - Mag(filt1)`.
658 _defaultDataset =
'forced_src'
659 _dfLevels = (
'filter',
'column')
    def __init__(self, col, filt2, filt1, **kwargs):
        self.col = fluxName(col)
        if filt2 == filt1:
            raise RuntimeError("Cannot compute Color for %s: %s - %s" % (col, filt2, filt1))
        self.filt2 = filt2
        self.filt1 = filt1
        self.mag2 = Mag(col, filt=filt2, **kwargs)
        self.mag1 = Mag(col, filt=filt1, **kwargs)
        super().__init__(filt=None, **kwargs)

    def _func(self, df):
        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])
        return mag2 - mag1

    @property
    def columns(self):
        return [self.mag1.col, self.mag2.col]
    @property
    def name(self):
        return f'{self.filt2} - {self.filt1} ({self.col})'

    @property
    def shortname(self):
        return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
705 """Main function of this subclass is to override the dropna=True
708 _allow_difference =
False
713 return super().
__call__(parq, dropna=
False, **kwargs)
class StarGalaxyLabeller(Labeller):
    _columns = ["base_ClassificationExtendedness_value"]
    _column = "base_ClassificationExtendedness_value"

    def _func(self, df):
        x = df[self._columns][self._column]
        mask = x.isnull()
        test = (x < 0.5).astype(int)
        test = test.mask(mask, 2)
        categories = ['galaxy', 'star', self._null_label]
        label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
                          index=x.index, name='label')
        if self._force_str:
            label = label.astype(str)
        return label
class NumStarLabeller(Labeller):
    _columns = ['numStarFlags']
    labels = {"star": 0, "maybe": 1, "notStar": 2}

    def _func(self, df):
        x = df[self._columns][self._columns[0]]

        # Number of filters
        n = len(x.unique()) - 1

        labels = ['noStar', 'maybe', 'star']
        label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
                          index=x.index, name='label')

        if self._force_str:
            label = label.astype(str)

        return label
class DeconvolvedMoments(Functor):
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")
766 """Calculate deconvolved moments"""
767 if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
768 hsm = df[
"ext_shapeHSM_HsmSourceMoments_xx"] + df[
"ext_shapeHSM_HsmSourceMoments_yy"]
770 hsm = np.ones(len(df))*np.nan
771 sdss = df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]
772 if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
773 psf = df[
"ext_shapeHSM_HsmPsfMoments_xx"] + df[
"ext_shapeHSM_HsmPsfMoments_yy"]
778 raise RuntimeError(
'No psf shape parameter found in catalog')
780 return hsm.where(np.isfinite(hsm), sdss) - psf
784 """Functor to calculate SDSS trace radius size for sources"""
785 name =
"SDSS Trace Size"
786 shortname =
'sdssTrace'
787 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy")
790 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
795 """Functor to calculate SDSS trace radius size difference (%) between object and psf model"""
796 name =
"PSF - SDSS Trace Size"
797 shortname =
'psf_sdssTrace'
798 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy",
799 "base_SdssShape_psf_xx",
"base_SdssShape_psf_yy")
802 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
803 psfSize = np.sqrt(0.5*(df[
"base_SdssShape_psf_xx"] + df[
"base_SdssShape_psf_yy"]))
804 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
809 """Functor to calculate HSM trace radius size for sources"""
810 name =
'HSM Trace Size'
811 shortname =
'hsmTrace'
812 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
813 "ext_shapeHSM_HsmSourceMoments_yy")
816 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
817 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
822 """Functor to calculate HSM trace radius size difference (%) between object and psf model"""
823 name =
'PSF - HSM Trace Size'
824 shortname =
'psf_HsmTrace'
825 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
826 "ext_shapeHSM_HsmSourceMoments_yy",
827 "ext_shapeHSM_HsmPsfMoments_xx",
828 "ext_shapeHSM_HsmPsfMoments_yy")
831 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"]
832 + df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
833 psfSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmPsfMoments_xx"]
834 + df[
"ext_shapeHSM_HsmPsfMoments_yy"]))
835 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
class HsmFwhm(Functor):
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
    pixelScale = 0.168
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))

    def _func(self, df):
        return self.pixelScale*self.SIGMA2FWHM*np.sqrt(
            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))
class E1(Functor):
    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"
class E2(Functor):
    name = "Ellipticity e2"
904 """Computations using the stored localWcs.
906 name =
"LocalWcsOperations"
921 """Compute the distance on the sphere from x2, y1 to x1, y1.
929 cd11 : `pandas.Series`
930 [1, 1] element of the local Wcs affine transform.
931 cd11 : `pandas.Series`
932 [1, 1] element of the local Wcs affine transform.
933 cd12 : `pandas.Series`
934 [1, 2] element of the local Wcs affine transform.
935 cd21 : `pandas.Series`
936 [2, 1] element of the local Wcs affine transform.
937 cd22 : `pandas.Series`
938 [2, 2] element of the local Wcs affine transform.
943 RA and dec conversion of x and y given the local Wcs. Returned
944 units are in radians.
947 return (x * cd11 + y * cd12, x * cd21 + y * cd22)
950 """Compute the local pixel scale conversion.
954 ra1 : `pandas.Series`
955 Ra of the first coordinate in radians.
956 dec1 : `pandas.Series`
957 Dec of the first coordinate in radians.
958 ra2 : `pandas.Series`
959 Ra of the second coordinate in radians.
960 dec2 : `pandas.Series`
961 Dec of the second coordinate in radians.
965 dist : `pandas.Series`
966 Distance on the sphere in radians.
968 deltaDec = dec2 - dec1
970 return 2 * np.arcsin(
972 np.sin(deltaDec / 2) ** 2
973 + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
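# Sanity check of the haversine formula above (hedged): for a pure-Dec
# offset (deltaRa = 0) it reduces to 2*arcsin(sin(deltaDec/2)) = deltaDec:
#
#     >>> dDec = np.radians(1.0/3600.0)            # one arcsecond
#     >>> sep = 2 * np.arcsin(np.sin(dDec / 2))    # deltaRa term vanishes
#     >>> np.isclose(sep, dDec)
#     True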
976 """Compute the distance on the sphere from x2, y1 to x1, y1.
988 cd11 : `pandas.Series`
989 [1, 1] element of the local Wcs affine transform.
990 cd11 : `pandas.Series`
991 [1, 1] element of the local Wcs affine transform.
992 cd12 : `pandas.Series`
993 [1, 2] element of the local Wcs affine transform.
994 cd21 : `pandas.Series`
995 [2, 1] element of the local Wcs affine transform.
996 cd22 : `pandas.Series`
997 [2, 2] element of the local Wcs affine transform.
1001 Distance : `pandas.Series`
1002 Arcseconds per pixel at the location of the local WC
1011 """Compute the local pixel scale from the stored CDMatrix.
1023 """Compute the local pixel to scale conversion in arcseconds.
1027 cd11 : `pandas.Series`
1028 [1, 1] element of the local Wcs affine transform in radians.
1029 cd11 : `pandas.Series`
1030 [1, 1] element of the local Wcs affine transform in radians.
1031 cd12 : `pandas.Series`
1032 [1, 2] element of the local Wcs affine transform in radians.
1033 cd21 : `pandas.Series`
1034 [2, 1] element of the local Wcs affine transform in radians.
1035 cd22 : `pandas.Series`
1036 [2, 2] element of the local Wcs affine transform in radians.
1040 pixScale : `pandas.Series`
1041 Arcseconds per pixel at the location of the local WC
1043 return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
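# Worked check of the determinant-based scale (hedged): a diagonal CD matrix
# for a 0.2"/pixel camera gives back exactly 0.2 arcseconds per pixel:
#
#     >>> cd11 = cd22 = np.radians(0.2 / 3600)    # 0.2" expressed in radians
#     >>> pixScale = 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - 0.0 * 0.0)))
#     >>> np.isclose(pixScale, 0.2)
#     True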
    def _func(self, df):
        return self.pixelScaleArcseconds(df[self.colCD_1_1],
                                         df[self.colCD_1_2],
                                         df[self.colCD_2_1],
                                         df[self.colCD_2_2])
1053 """Convert a value in units pixels to units arcseconds.
1072 return f
"{self.col}_asArcseconds"
    def _func(self, df):
        return df[self.col] * self.pixelScaleArcseconds(df[self.colCD_1_1],
                                                        df[self.colCD_1_2],
                                                        df[self.colCD_2_1],
                                                        df[self.colCD_2_2])
class ReferenceBand(Functor):
    name = 'Reference Band'
    shortname = 'refBand'

    @property
    def columns(self):
        return ["merge_measurement_i",
                "merge_measurement_r",
                "merge_measurement_z",
                "merge_measurement_y",
                "merge_measurement_g"]

    def _func(self, df):
        def getFilterAliasName(row):
            # Get the column name with the maximum value (True > False).
            colName = row.idxmax()
            return colName.replace('merge_measurement_', '')

        return df[self.columns].apply(getFilterAliasName, axis=1)
class Photometry(Functor):
    # AB magnitude zero-point flux (3631 Jy) expressed in nanojanskys.
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    FIVE_OVER_2LOG10 = 1.085736204758129569
    def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
        self.vhypot = np.vectorize(self.hypot)
        self.col = fluxName(colFlux)
        self.colFluxErr = fluxErrName(colFluxErr) if colFluxErr is not None else None
        self.calib = calib
        if calib is not None:
            self.fluxMag0, self.fluxMag0Err = calib.getFluxMag0()
        super().__init__(**kwargs)

    @property
    def name(self):
        return f'mag_{self.col}'
    @classmethod
    def hypot(cls, a, b):
        """Overflow-safe hypotenuse: factor out the larger of |a|, |b|."""
        if np.abs(a) < np.abs(b):
            a, b = b, a
        if a == 0.:
            return 0.
        q = b / a
        return np.abs(a) * np.sqrt(1. + q*q)
    def dn2mag(self, dn, fluxMag0):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5 * np.log10(dn/fluxMag0)
    def dn2flux(self, dn, fluxMag0):
        return self.AB_FLUX_SCALE * dn / fluxMag0

    def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
        retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
        return retVal

    def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
        return self.FIVE_OVER_2LOG10 * retVal
class NanoJansky(Photometry):
    def _func(self, df):
        return self.dn2flux(df[self.col], self.fluxMag0)


class NanoJanskyErr(Photometry):
    def _func(self, df):
        retArr = self.dn2fluxErr(df[self.col], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)


class Magnitude(Photometry):
    def _func(self, df):
        return self.dn2mag(df[self.col], self.fluxMag0)


class MagnitudeErr(Photometry):
    def _func(self, df):
        retArr = self.dn2MagErr(df[self.col], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)
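# Hedged numeric check of the conversions above: a measurement with
# dn == fluxMag0 sits at magnitude zero, i.e. at AB_FLUX_SCALE nanojanskys:
#
#     >>> phot = NanoJansky('base_PsfFlux', calib=None)   # hypothetical column
#     >>> fluxMag0 = 63095734448.0194                     # HSC default, per Mag
#     >>> phot.dn2mag(fluxMag0, fluxMag0)
#     -0.0
#     >>> phot.dn2flux(fluxMag0, fluxMag0) == phot.AB_FLUX_SCALE
#     True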
1199 """Base class for calibrating the specified instrument flux column using
1200 the local photometric calibration.
1205 Name of the instrument flux column.
1206 instFluxErrCol : `str`
1207 Name of the assocated error columns for ``instFluxCol``.
1208 photoCalibCol : `str`
1209 Name of local calibration column.
1210 photoCalibErrCol : `str`
1211 Error associated with ``photoCalibCol``
1221 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1236 """Convert instrument flux to nanojanskys.
1240 instFlux : `numpy.ndarray` or `pandas.Series`
1241 Array of instrument flux measurements
1242 localCalib : `numpy.ndarray` or `pandas.Series`
1243 Array of local photometric calibration estimates.
1247 calibFlux : `numpy.ndarray` or `pandas.Series`
1248 Array of calibrated flux measurements.
1250 return instFlux * localCalib
1253 """Convert instrument flux to nanojanskys.
1257 instFlux : `numpy.ndarray` or `pandas.Series`
1258 Array of instrument flux measurements
1259 instFluxErr : `numpy.ndarray` or `pandas.Series`
1260 Errors on associated ``instFlux`` values
1261 localCalib : `numpy.ndarray` or `pandas.Series`
1262 Array of local photometric calibration estimates.
1263 localCalibErr : `numpy.ndarray` or `pandas.Series`
1264 Errors on associated ``localCalib`` values
1268 calibFluxErr : `numpy.ndarray` or `pandas.Series`
1269 Errors on calibrated flux measurements.
1271 return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
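# The propagation above is the standard first-order result for a product
# f = a*b: sigma_f = hypot(sigma_a*b, a*sigma_b). Hedged quick check with an
# exact calibration (localCalibErr = 0), where the error simply scales:
#
#     >>> np.hypot(0.1 * 2.0, 1.0 * 0.0)
#     0.2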
1274 """Convert instrument flux to nanojanskys.
1278 instFlux : `numpy.ndarray` or `pandas.Series`
1279 Array of instrument flux measurements
1280 localCalib : `numpy.ndarray` or `pandas.Series`
1281 Array of local photometric calibration estimates.
1285 calibMag : `numpy.ndarray` or `pandas.Series`
1286 Array of calibrated AB magnitudes.
1291 """Convert instrument flux err to nanojanskys.
1295 instFlux : `numpy.ndarray` or `pandas.Series`
1296 Array of instrument flux measurements
1297 instFluxErr : `numpy.ndarray` or `pandas.Series`
1298 Errors on associated ``instFlux`` values
1299 localCalib : `numpy.ndarray` or `pandas.Series`
1300 Array of local photometric calibration estimates.
1301 localCalibErr : `numpy.ndarray` or `pandas.Series`
1302 Errors on associated ``localCalib`` values
1306 calibMagErr: `numpy.ndarray` or `pandas.Series`
1307 Error on calibrated AB magnitudes.
1314 """Compute calibrated fluxes using the local calibration value.
1330 return f
'flux_{self.instFluxCol}'
1332 def _func(self, df):
1337 """Compute calibrated flux errors using the local calibration value.
1354 return f
'fluxErr_{self.instFluxCol}'
1356 def _func(self, df):
1362 """Compute calibrated AB magnitudes using the local calibration value.
1378 return f
'mag_{self.instFluxCol}'
1380 def _func(self, df):
1386 """Compute calibrated AB magnitude errors using the local calibration value.
1403 return f
'magErr_{self.instFluxCol}'
1405 def _func(self, df):