LSST Applications g063fba187b+fee0456c91,g0f08755f38+ea96e5a5a3,g1653933729+a8ce1bb630,g168dd56ebc+a8ce1bb630,g1a2382251a+90257ff92a,g20f6ffc8e0+ea96e5a5a3,g217e2c1bcf+937a289c59,g28da252d5a+daa7da44eb,g2bbee38e9b+253935c60e,g2bc492864f+253935c60e,g3156d2b45e+6e55a43351,g32e5bea42b+31359a2a7a,g347aa1857d+253935c60e,g35bb328faa+a8ce1bb630,g3a166c0a6a+253935c60e,g3b1af351f3+a8ce1bb630,g3e281a1b8c+c5dd892a6c,g414038480c+416496e02f,g41af890bb2+afe91b1188,g599934f4f4+0db33f7991,g7af13505b9+e36de7bce6,g80478fca09+da231ba887,g82479be7b0+a4516e59e3,g858d7b2824+ea96e5a5a3,g89c8672015+f4add4ffd5,g9125e01d80+a8ce1bb630,ga5288a1d22+bc6ab8dfbd,gb58c049af0+d64f4d3760,gc28159a63d+253935c60e,gcab2d0539d+3f2b72788c,gcf0d15dbbd+4ea9c45075,gda6a2b7d83+4ea9c45075,gdaeeff99f8+1711a396fd,ge79ae78c31+253935c60e,gef2f8181fd+3031e3cf99,gf0baf85859+c1f95f4921,gfa517265be+ea96e5a5a3,gfa999e8aa5+17cd334064,w.2024.50
LSST Data Management Base Package
Loading...
Searching...
No Matches
Classes | Functions | Variables
lsst.meas.algorithms.convertReferenceCatalog Namespace Reference

Classes

class  ConvertReferenceCatalogConfig
 
class  ConvertReferenceCatalogTask
 
class  DatasetConfig
 

Functions

 addRefCatMetadata (catalog)
 
 _makeSchema (filterNameList, *addCentroid=False, addIsPhotometric=False, addIsResolved=False, addIsVariable=False, fullPositionInformation=False)
 
 _addExtraColumnsToSchema (schema, dtype, extra_col_names, key_map)
 
 build_argparser ()
 
 run_convert (outputDir, configFile, fileglob)
 
 main ()
 

Variables

int LATEST_FORMAT_VERSION = 2
 

Detailed Description

Convert an external reference catalog into the hierarchical triangular mesh
(HTM) sharded LSST-style format, to be ingested into the butler.

Function Documentation

◆ _addExtraColumnsToSchema()

lsst.meas.algorithms.convertReferenceCatalog._addExtraColumnsToSchema ( schema,
dtype,
extra_col_names,
key_map )
protected
Add extra columns to a schema from a numpy dtype.

Note that schema and key_map will be modified in place.

Parameters
----------
schema : `lsst.afw.table.Schema`
    Schema to which the extra columns will be appended.
dtype : `numpy.dtype`
    Numpy record array dtype.
extra_col_names : `list` [`str`]
    Names of the extra columns to convert from the dtype into the schema.
key_map : `dict` [`str`, `lsst.afw.table.Key`]
    Mapping from column name to table key.

Definition at line 220 of file convertReferenceCatalog.py.

220def _addExtraColumnsToSchema(schema, dtype, extra_col_names, key_map):
221 """Add extra columns to a schema from a numpy dtype.
222
223 Note that schema and key_map will be modified in place.
224
225 Parameters
226 ----------
227 schema : `lsst.afw.table.Schema`
228 Schema to append extra columns.
229 dtype : `numpy.dtype`
230 Numpy record array dtype.
231 extra_col_names : `list` [`str`]
232 Extra column names to convert from dtype into schema.
233 key_map : `dict` [`str`, `lsst.afw.table.Key`]
234 Mapping from column name to table key.
235 """
236 def addField(name):
237 if dtype[name].kind == 'U':
238 # dealing with a string like thing. Need to get type and size.
239 at_size = dtype[name].itemsize
240 return schema.addField(name, type=str, size=at_size)
241 elif dtype[name].kind == 'b':
242 # Dealing with a boolean, which needs to be a flag.
243 return schema.addField(name, type="Flag")
244 else:
245 at_type = dtype[name].type
246 return schema.addField(name, at_type)
247
248 for col in extra_col_names:
249 key_map[col] = addField(col)
250
251

◆ _makeSchema()

lsst.meas.algorithms.convertReferenceCatalog._makeSchema ( filterNameList,
* addCentroid = False,
addIsPhotometric = False,
addIsResolved = False,
addIsVariable = False,
fullPositionInformation = False )
protected
Make a standard schema for reference object catalogs.

Parameters
----------
filterNameList : `list` of `str`
    List of filter names. Used to create <filterName>_flux fields.
addCentroid : `bool`
    If True then add fields "centroid" and "hasCentroid".
addIsPhotometric : `bool`
    If True then add field "photometric".
addIsResolved : `bool`
    If True then add field "resolved".
addIsVariable : `bool`
    If True then add field "variable".
fullPositionInformation : `bool`
    If True then add epoch, proper motion, and parallax, along with the
    full five-dimensional covariance between ra and dec coordinates,
    proper motion in ra and dec, and parallax.

Returns
-------
schema : `lsst.afw.table.Schema`
    Schema for reference catalog, an
    `lsst.afw.table.SimpleCatalog`.

Definition at line 69 of file convertReferenceCatalog.py.

71 addIsVariable=False, fullPositionInformation=False):
72 """Make a standard schema for reference object catalogs.
73
74 Parameters
75 ----------
76 filterNameList : `list` of `str`
77 List of filter names. Used to create <filterName>_flux fields.
78 addCentroid : `bool`
79 If True then add fields "centroid" and "hasCentroid".
80 addIsPhotometric : `bool`
81 If True then add field "photometric".
82 addIsResolved : `bool`
83 If True then add field "resolved".
84 addIsVariable : `bool`
85 If True then add field "variable".
86 fullPositionInformation : `bool`
87 If True then add epoch, proper motion, and parallax, along with the
88 full five-dimensional covariance between ra and dec coordinates,
89 proper motion in ra and dec, and parallax.
90
91 Returns
92 -------
93 schema : `lsst.afw.table.Schema`
94 Schema for reference catalog, an
95 `lsst.afw.table.SimpleCatalog`.
96 """
98 if addCentroid:
99 lsst.afw.table.Point2DKey.addFields(
100 schema,
101 "centroid",
102 "centroid on an exposure, if relevant",
103 "pixel",
104 )
105 schema.addField(
106 field="hasCentroid",
107 type="Flag",
108 doc="is position known?",
109 )
110 for filterName in filterNameList:
111 schema.addField(
112 field="%s_flux" % (filterName,),
113 type=numpy.float64,
114 doc="flux in filter %s" % (filterName,),
115 units="nJy",
116 )
117 for filterName in filterNameList:
118 schema.addField(
119 field="%s_fluxErr" % (filterName,),
120 type=numpy.float64,
121 doc="flux uncertainty in filter %s" % (filterName,),
122 units="nJy",
123 )
124 if addIsPhotometric:
125 schema.addField(
126 field="photometric",
127 type="Flag",
128 doc="set if the object can be used for photometric calibration",
129 )
130 if addIsResolved:
131 schema.addField(
132 field="resolved",
133 type="Flag",
134 doc="set if the object is spatially resolved",
135 )
136 if addIsVariable:
137 schema.addField(
138 field="variable",
139 type="Flag",
140 doc="set if the object has variable brightness",
141 )
142 lsst.afw.table.CovarianceMatrix2fKey.addFields(
143 schema=schema,
144 prefix="coord",
145 names=["ra", "dec"],
146 units=["rad", "rad"],
147 diagonalOnly=True,
148 )
149
150 if fullPositionInformation:
151 schema.addField(
152 field="epoch",
153 type=numpy.float64,
154 doc="date of observation (TAI, MJD)",
155 units="day",
156 )
157 schema.addField(
158 field="pm_ra",
159 type="Angle",
160 doc="proper motion in the right ascension direction = dra/dt * cos(dec)",
161 units="rad/year",
162 )
163 schema.addField(
164 field="pm_dec",
165 type="Angle",
166 doc="proper motion in the declination direction",
167 units="rad/year",
168 )
169 lsst.afw.table.CovarianceMatrix2fKey.addFields(
170 schema=schema,
171 prefix="pm",
172 names=["ra", "dec"],
173 units=["rad/year", "rad/year"],
174 diagonalOnly=True,
175 )
176 schema.addField(
177 field="pm_flag",
178 type="Flag",
179 doc="Set if proper motion or proper motion error is bad",
180 )
181 schema.addField(
182 field="parallax",
183 type="Angle",
184 doc="parallax",
185 units="rad",
186 )
187 schema.addField(
188 field="parallaxErr",
189 type="Angle",
190 doc="uncertainty in parallax",
191 units="rad",
192 )
193 schema.addField(
194 field="parallax_flag",
195 type="Flag",
196 doc="Set if parallax or parallax error is bad",
197 )
198 # Add all the off-diagonal covariance terms
199 fields = ["coord_ra", "coord_dec", "pm_ra", "pm_dec", "parallax"]
200 units = ["rad", "rad", "rad/year", "rad/year", "rad"]
201 for field, unit in zip(itertools.combinations(fields, r=2), itertools.combinations(units, r=2)):
202 i_field = field[0]
203 i_unit = unit[0]
204 j_field = field[1]
205 j_unit = unit[1]
206 formatted_unit = "rad^2"
207 if ("year" in i_unit) and ("year" in j_unit):
208 formatted_unit += "/year^2"
209 elif ("year" in i_unit) or ("year" in j_unit):
210 formatted_unit += "/year"
211 schema.addField(
212 field=f"{i_field}_{j_field}_Cov",
213 type="F",
214 doc=f"Covariance between {i_field} and {j_field}",
215 units=formatted_unit
216 )
217 return schema
218
219
static Schema makeMinimalSchema()
Return a minimal schema for Simple tables and records.
Definition Simple.h:140

◆ addRefCatMetadata()

lsst.meas.algorithms.convertReferenceCatalog.addRefCatMetadata ( catalog)
Add metadata to a new (not yet populated) reference catalog.

Parameters
----------
catalog : `lsst.afw.table.SimpleCatalog`
    Catalog to which metadata should be attached. Will be modified
    in-place.

Definition at line 53 of file convertReferenceCatalog.py.

53def addRefCatMetadata(catalog):
54 """Add metadata to a new (not yet populated) reference catalog.
55
56 Parameters
57 ----------
58 catalog : `lsst.afw.table.SimpleCatalog`
59 Catalog to which metadata should be attached. Will be modified
60 in-place.
61 """
62 md = catalog.getMetadata()
63 if md is None:
64 md = PropertyList()
65 md.set("REFCAT_FORMAT_VERSION", LATEST_FORMAT_VERSION)
66 catalog.setMetadata(md)
67
68

◆ build_argparser()

lsst.meas.algorithms.convertReferenceCatalog.build_argparser ( )
Construct an argument parser for the ``convertReferenceCatalog`` script.

Returns
-------
argparser : `argparse.ArgumentParser`
    The argument parser that defines the ``convertReferenceCatalog``
    command-line interface.

Definition at line 638 of file convertReferenceCatalog.py.

638def build_argparser():
639 """Construct an argument parser for the ``convertReferenceCatalog`` script.
640
641 Returns
642 -------
643 argparser : `argparse.ArgumentParser`
644 The argument parser that defines the ``convertReferenceCatalog``
645 command-line interface.
646 """
647 parser = argparse.ArgumentParser(
648 description=__doc__,
649 formatter_class=argparse.RawDescriptionHelpFormatter,
650 epilog='More information is available at https://pipelines.lsst.io.'
651 )
652 parser.add_argument("outputDir",
653 help="Path to write the output shard files, configs, and `ingest-files` table to.")
654 parser.add_argument("configFile",
655 help="File containing the ConvertReferenceCatalogConfig fields.")
656 # Use a "+"-list here, so we can produce a more useful error if the user
657 # uses an unquoted glob that gets shell expanded.
658 parser.add_argument("fileglob", nargs="+",
659 help="Quoted glob for the files to be read in and converted."
660 " Example (note required quotes to prevent shell expansion):"
661 ' "gaia_source/csv/GaiaSource*"')
662 return parser
663
664

◆ main()

lsst.meas.algorithms.convertReferenceCatalog.main ( )

Definition at line 699 of file convertReferenceCatalog.py.

699def main():
700 args = build_argparser().parse_args()
701 if len(args.fileglob) > 1:
702 raise RuntimeError("Final argument must be a quoted file glob, not a shell-expanded list of files.")
703 # Fileglob comes out as a length=1 list, so we can test it above.
704 run_convert(args.outputDir, args.configFile, args.fileglob[0])
int main(void)

◆ run_convert()

lsst.meas.algorithms.convertReferenceCatalog.run_convert ( outputDir,
configFile,
fileglob )
Run `ConvertReferenceCatalogTask` on the input arguments.

Parameters
----------
outputDir : `str`
    Path to write the output files to.
configFile : `str`
    File specifying the ``ConvertReferenceCatalogConfig`` fields.
fileglob : `str`
    Quoted glob for the files to be read in and converted.

Definition at line 665 of file convertReferenceCatalog.py.

665def run_convert(outputDir, configFile, fileglob):
666 """Run `ConvertReferenceCatalogTask` on the input arguments.
667
668 Parameters
669 ----------
670 outputDir : `str`
671 Path to write the output files to.
672 configFile : `str`
673 File specifying the ``ConvertReferenceCatalogConfig`` fields.
674 fileglob : `str`
675 Quoted glob for the files to be read in and converted.
676 """
677 # We have to initialize the logger manually when running from the commandline.
678 logging.basicConfig(level=logging.INFO, format="{name} {levelname}: {message}", style="{")
679
680 config = ConvertReferenceCatalogTask.ConfigClass()
681 config.load(configFile)
682 converter = ConvertReferenceCatalogTask(output_dir=outputDir, config=config)
683 files = glob.glob(fileglob)
684 converter.run(files)
685 with open(os.path.join(outputDir, "convertReferenceCatalogConfig.py"), "w") as outfile:
686 converter.config.saveToStream(outfile)
687 msg = ("Completed refcat conversion.\n\n"
688 "Ingest the resulting files with the following commands, substituting the path\n"
689 "to your butler repo for `REPO`, and the ticket number you are tracking this\n"
690 "ingest on for `DM-NNNNN`:\n"
691 f"\n butler register-dataset-type REPO {config.dataset_config.ref_dataset_name} "
692 "SimpleCatalog htm7"
693 "\n butler ingest-files -t direct REPO gaia_dr2 refcats/DM-NNNNN "
694 f"{converter.ingest_table_file}"
695 "\n butler collection-chain REPO --mode extend refcats refcats/DM-NNNNN")
696 print(msg)
697
698

Variable Documentation

◆ LATEST_FORMAT_VERSION

int lsst.meas.algorithms.convertReferenceCatalog.LATEST_FORMAT_VERSION = 2

Definition at line 50 of file convertReferenceCatalog.py.