LSSTApplications  17.0+11,17.0+34,17.0+56,17.0+57,17.0+59,17.0+7,17.0-1-g377950a+33,17.0.1-1-g114240f+2,17.0.1-1-g4d4fbc4+28,17.0.1-1-g55520dc+49,17.0.1-1-g5f4ed7e+52,17.0.1-1-g6dd7d69+17,17.0.1-1-g8de6c91+11,17.0.1-1-gb9095d2+7,17.0.1-1-ge9fec5e+5,17.0.1-1-gf4e0155+55,17.0.1-1-gfc65f5f+50,17.0.1-1-gfc6fb1f+20,17.0.1-10-g87f9f3f+1,17.0.1-11-ge9de802+16,17.0.1-16-ga14f7d5c+4,17.0.1-17-gc79d625+1,17.0.1-17-gdae4c4a+8,17.0.1-2-g26618f5+29,17.0.1-2-g54f2ebc+9,17.0.1-2-gf403422+1,17.0.1-20-g2ca2f74+6,17.0.1-23-gf3eadeb7+1,17.0.1-3-g7e86b59+39,17.0.1-3-gb5ca14a,17.0.1-3-gd08d533+40,17.0.1-30-g596af8797,17.0.1-4-g59d126d+4,17.0.1-4-gc69c472+5,17.0.1-6-g5afd9b9+4,17.0.1-7-g35889ee+1,17.0.1-7-gc7c8782+18,17.0.1-9-gc4bbfb2+3,w.2019.22
LSSTDataManagementBasePackage
loadIndexedReferenceObjects.py
Go to the documentation of this file.
1 #
2 # LSST Data Management System
3 #
4 # Copyright 2008-2017 AURA/LSST.
5 #
6 # This product includes software developed by the
7 # LSST Project (http://www.lsst.org/).
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the LSST License Statement and
20 # the GNU General Public License along with this program. If not,
21 # see <https://www.lsstcorp.org/LegalNotices/>.
22 #
23 
# Public API: the config/task pair for loading indexed reference catalogs.
__all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"]
25 
26 from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat
27 from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig
28 import lsst.afw.table as afwTable
29 import lsst.geom
30 import lsst.pex.config as pexConfig
31 import lsst.pipe.base as pipeBase
32 from .indexerRegistry import IndexerRegistry
33 
34 
    # Identifies which ingested reference catalog to load: passed as the
    # ``name`` key when fetching ``ref_cat_config`` and used in every
    # shard dataId (see LoadIndexedReferenceObjectsTask).
    ref_dataset_name = pexConfig.Field(
        dtype=str,
        default='cal_ref_cat',
        doc='Name of the ingested reference dataset'
    )
41 
42 
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs
    """
    # Standard lsst.pipe.base.Task configuration hooks.
    ConfigClass = LoadIndexedReferenceObjectsConfig
    _DefaultName = 'LoadIndexedReferenceObjectsTask'
54 
55  def __init__(self, butler, *args, **kwargs):
56  LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
57  self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
58  self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
59  # This needs to come from the loader config, not the dataset_config since directory aliases can
60  # change the path where the shards are found.
61  self.ref_dataset_name = self.config.ref_dataset_name
62  self.butler = butler
63 
64  @pipeBase.timeMethod
65  def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None):
66  shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
67  shards = self.getShards(shardIdList)
68  refCat = self.butler.get('ref_cat',
69  dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
70  immediate=True)
71 
72  # load the catalog, one shard at a time
73  for shard, isOnBoundary in zip(shards, isOnBoundaryList):
74  if shard is None:
75  continue
76  if isOnBoundary:
77  refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
78  else:
79  refCat.extend(shard)
80 
81  # apply proper motion corrections
82  if epoch is not None and "pm_ra" in refCat.schema:
83  # check for a catalog in a non-standard format
84  if isinstance(refCat.schema["pm_ra"].asKey(), lsst.afw.table.KeyAngle):
85  self.applyProperMotions(refCat, epoch)
86  else:
87  self.log.warn("Catalog pm_ra field is not an Angle; not applying proper motion")
88 
89  # update version=0 style refcats to have nJy fluxes
90  if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
91  self.log.warn("Found version 0 reference catalog with old style units in schema.")
92  self.log.warn("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
93  self.log.warn("See RFC-575 for more details.")
94  refCat = convertToNanojansky(refCat, self.log)
95  else:
96  # For version >= 1, the version should be in the catalog header,
97  # too, and should be consistent with the version in the config.
98  catVersion = getFormatVersionFromRefCat(refCat)
99  if catVersion != self.dataset_config.format_version:
100  raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
101  f" format_version field in config ({self.dataset_config.format_version})")
102 
103  self._addFluxAliases(refCat.schema)
104  fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)
105 
106  # add and initialize centroid and hasCentroid fields (these are
107  # added after loading to avoid wasting space in the saved catalogs)
108  # the new fields are automatically initialized to (nan, nan) and
109  # False so no need to set them explicitly
110  mapper = afwTable.SchemaMapper(refCat.schema, True)
111  mapper.addMinimalSchema(refCat.schema, True)
112  mapper.editOutputSchema().addField("centroid_x", type=float)
113  mapper.editOutputSchema().addField("centroid_y", type=float)
114  mapper.editOutputSchema().addField("hasCentroid", type="Flag")
115  expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
116  expandedCat.extend(refCat, mapper=mapper)
117  del refCat # avoid accidentally returning the unexpanded ref cat
118 
119  # make sure catalog is contiguous
120  if not expandedCat.isContiguous():
121  expandedCat = expandedCat.copy(True)
122 
123  # return reference catalog
124  return pipeBase.Struct(
125  refCat=expandedCat,
126  fluxField=fluxField,
127  )
128 
129  def getShards(self, shardIdList):
130  """Get shards by ID.
131 
132  Parameters
133  ----------
134  shardIdList : `list` of `int`
135  A list of integer shard ids.
136 
137  Returns
138  -------
139  catalogs : `list` of `lsst.afw.table.SimpleCatalog`
140  A list of reference catalogs, one for each entry in shardIdList.
141  """
142  shards = []
143  for shardId in shardIdList:
144  if self.butler.datasetExists('ref_cat',
145  dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name)):
146  shards.append(self.butler.get('ref_cat',
147  dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name),
148  immediate=True))
149  return shards
150 
151  def _trimToCircle(self, refCat, ctrCoord, radius):
152  """Trim a reference catalog to a circular aperture.
153 
154  Parameters
155  ----------
156  refCat : `lsst.afw.table.SimpleCatalog`
157  Reference catalog to be trimmed.
158  ctrCoord : `lsst.geom.SpherePoint`
159  ICRS center of search region.
160  radius : `lsst.geom.Angle`
161  Radius of search region.
162 
163  Returns
164  -------
165  catalog : `lsst.afw.table.SimpleCatalog`
166  Catalog containing objects that fall in the circular aperture.
167  """
168  tempCat = type(refCat)(refCat.schema)
169  for record in refCat:
170  if record.getCoord().separation(ctrCoord) < radius:
171  tempCat.append(record)
172  return tempCat
A mapping between the keys of two Schemas, used to copy data between them.
Definition: SchemaMapper.h:21
Fit spatial kernel using approximate fluxes for candidates, and solving a linear system of equations...
def convertToNanojansky(catalog, log, doConvert=True)
table::Key< int > type
Definition: Detector.cc:167
Custom catalog class for record/table subclasses that are guaranteed to have an ID, and should generally be sorted by that ID.
Definition: fwd.h:63
Abstract base class to load objects from reference catalogs.