LSSTApplications  18.0.0+106,18.0.0+50,19.0.0,19.0.0+1,19.0.0+10,19.0.0+11,19.0.0+13,19.0.0+17,19.0.0+2,19.0.0-1-g20d9b18+6,19.0.0-1-g425ff20,19.0.0-1-g5549ca4,19.0.0-1-g580fafe+6,19.0.0-1-g6fe20d0+1,19.0.0-1-g7011481+9,19.0.0-1-g8c57eb9+6,19.0.0-1-gb5175dc+11,19.0.0-1-gdc0e4a7+9,19.0.0-1-ge272bc4+6,19.0.0-1-ge3aa853,19.0.0-10-g448f008b,19.0.0-12-g6990b2c,19.0.0-2-g0d9f9cd+11,19.0.0-2-g3d9e4fb2+11,19.0.0-2-g5037de4,19.0.0-2-gb96a1c4+3,19.0.0-2-gd955cfd+15,19.0.0-3-g2d13df8,19.0.0-3-g6f3c7dc,19.0.0-4-g725f80e+11,19.0.0-4-ga671dab3b+1,19.0.0-4-gad373c5+3,19.0.0-5-ga2acb9c+2,19.0.0-5-gfe96e6c+2,w.2020.01
LSSTDataManagementBasePackage
loadIndexedReferenceObjects.py
Go to the documentation of this file.
1 #
2 # LSST Data Management System
3 #
4 # Copyright 2008-2017 AURA/LSST.
5 #
6 # This product includes software developed by the
7 # LSST Project (http://www.lsst.org/).
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the LSST License Statement and
20 # the GNU General Public License along with this program. If not,
21 # see <https://www.lsstcorp.org/LegalNotices/>.
22 #
23 
24 __all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"]
25 
26 from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat
27 from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig
28 import lsst.afw.table as afwTable
29 import lsst.geom
30 import lsst.pex.config as pexConfig
31 import lsst.pipe.base as pipeBase
32 from .indexerRegistry import IndexerRegistry
33 
34 
    # Dataset name of the ingested, indexed reference catalog to load.
    # Must match the name the catalog was ingested under by
    # IngestIndexReferenceTask (used as the ``name`` key when reading
    # "ref_cat_config" and "ref_cat" datasets from the butler).
    ref_dataset_name = pexConfig.Field(
        dtype=str,
        default='cal_ref_cat',
        doc='Name of the ingested reference dataset'
    )
41 
42 
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs
    """
    # Config class instantiated for this task (provides ref_dataset_name).
    ConfigClass = LoadIndexedReferenceObjectsConfig
    # Default task name; presumably used for config/logging registration
    # per the usual pipe_base Task convention — confirm against pipe_base docs.
    _DefaultName = 'LoadIndexedReferenceObjectsTask'
54 
    def __init__(self, butler, *args, **kwargs):
        """Construct the loader and read the ingested catalog's persisted config.

        Parameters
        ----------
        butler : `lsst.daf.persistence.Butler`
            Data butler used to read the "ref_cat_config" dataset now and
            "ref_cat" shards later; kept as ``self.butler``.
        *args, **kwargs
            Forwarded unchanged to `LoadReferenceObjectsTask.__init__`.
        """
        LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
        # Configuration persisted at ingest time; carries the indexer choice
        # and the catalog format_version checked in loadSkyCircle.
        self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
        # Instantiate the same spatial indexer the catalog was ingested with,
        # so shard ids computed here match the ingested shards.
        self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
        # This needs to come from the loader config, not the dataset_config since directory aliases can
        # change the path where the shards are found.
        self.ref_dataset_name = self.config.ref_dataset_name
        self.butler = butler
63 
    @pipeBase.timeMethod
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False):
        """Load reference objects that lie within a circular sky region.

        Parameters
        ----------
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of the search region.
        radius : `lsst.geom.Angle`
            Radius of the search region.
        filterName : `str`, optional
            Filter name used to select the flux field reported as
            ``fluxField`` (passed to `getRefFluxField`).
        epoch : optional
            Epoch for proper-motion correction; skipped when `None`.
            NOTE(review): exact expected type is whatever
            `applyProperMotions` accepts — confirm against its docs.
        centroids : `bool`, optional
            If `True`, add ``centroid_x``, ``centroid_y`` and ``hasCentroid``
            fields to the output schema.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            Struct with ``refCat`` (the loaded `lsst.afw.table.SimpleCatalog`)
            and ``fluxField`` (`str`, flux field name for ``filterName``).

        Raises
        ------
        RuntimeError
            If the catalog header's format version disagrees with
            ``format_version`` in the persisted dataset config.
        """
        # Shard ids overlapping the circle, plus per-shard flags telling
        # whether the shard crosses the circle's boundary.
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
        shards = self.getShards(shardIdList)
        # Start from the empty master-schema catalog so the result has the
        # full catalog schema even if no shard contributes rows.
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
                                 immediate=True)

        # load the catalog, one shard at a time; None entries mark shards
        # that could not be loaded and are skipped
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                # boundary shards may contain objects outside the circle:
                # keep only records actually inside it
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                # interior shards are entirely inside the circle
                refCat.extend(shard)

        # apply proper motion corrections
        if epoch is not None and "pm_ra" in refCat.schema:
            # check for a catalog in a non-standard format
            if isinstance(refCat.schema["pm_ra"].asKey(), lsst.afw.table.KeyAngle):
                self.applyProperMotions(refCat, epoch)
            else:
                self.log.warn("Catalog pm_ra field is not an Angle; not applying proper motion")

        # update version=0 style refcats to have nJy fluxes
        if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
            self.log.warn("Found version 0 reference catalog with old style units in schema.")
            self.log.warn("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
            self.log.warn("See RFC-575 for more details.")
            refCat = convertToNanojansky(refCat, self.log)
        else:
            # For version >= 1, the version should be in the catalog header,
            # too, and should be consistent with the version in the config.
            catVersion = getFormatVersionFromRefCat(refCat)
            if catVersion != self.dataset_config.format_version:
                raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
                                   f" format_version field in config ({self.dataset_config.format_version})")

        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)

        if centroids:
            # add and initialize centroid and hasCentroid fields (these are
            # added after loading to avoid wasting space in the saved catalogs)
            # the new fields are automatically initialized to (nan, nan) and
            # False so no need to set them explicitly
            mapper = afwTable.SchemaMapper(refCat.schema, True)
            mapper.addMinimalSchema(refCat.schema, True)
            mapper.editOutputSchema().addField("centroid_x", type=float)
            mapper.editOutputSchema().addField("centroid_y", type=float)
            mapper.editOutputSchema().addField("hasCentroid", type="Flag")
            expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
            expandedCat.extend(refCat, mapper=mapper)
            refCat = expandedCat

        # make sure catalog is contiguous
        if not refCat.isContiguous():
            refCat = refCat.copy(True)

        # return reference catalog
        return pipeBase.Struct(
            refCat=refCat,
            fluxField=fluxField,
        )
129 
130  def getShards(self, shardIdList):
131  """Get shards by ID.
132 
133  Parameters
134  ----------
135  shardIdList : `list` of `int`
136  A list of integer shard ids.
137 
138  Returns
139  -------
140  catalogs : `list` of `lsst.afw.table.SimpleCatalog`
141  A list of reference catalogs, one for each entry in shardIdList.
142  """
143  shards = []
144  for shardId in shardIdList:
145  if self.butler.datasetExists('ref_cat',
146  dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name)):
147  shards.append(self.butler.get('ref_cat',
148  dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name),
149  immediate=True))
150  return shards
151 
152  def _trimToCircle(self, refCat, ctrCoord, radius):
153  """Trim a reference catalog to a circular aperture.
154 
155  Parameters
156  ----------
157  refCat : `lsst.afw.table.SimpleCatalog`
158  Reference catalog to be trimmed.
159  ctrCoord : `lsst.geom.SpherePoint`
160  ICRS center of search region.
161  radius : `lsst.geom.Angle`
162  Radius of search region.
163 
164  Returns
165  -------
166  catalog : `lsst.afw.table.SimpleCatalog`
167  Catalog containing objects that fall in the circular aperture.
168  """
169  tempCat = type(refCat)(refCat.schema)
170  for record in refCat:
171  if record.getCoord().separation(ctrCoord) < radius:
172  tempCat.append(record)
173  return tempCat
A mapping between the keys of two Schemas, used to copy data between them.
Definition: SchemaMapper.h:21
def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False)
Fit spatial kernel using approximate fluxes for candidates, and solving a linear system of equations...
def convertToNanojansky(catalog, log, doConvert=True)
table::Key< int > type
Definition: Detector.cc:163
Custom catalog class for record/table subclasses that are guaranteed to have an ID, and should generally be sorted by that ID.
Definition: fwd.h:63
Abstract base class to load objects from reference catalogs.