LSST Data Management Base Package
loadIndexedReferenceObjects.py
#
# LSST Data Management System
#
# Copyright 2008-2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#

__all__ = ["LoadIndexedReferenceObjectsConfig", "LoadIndexedReferenceObjectsTask"]

from .loadReferenceObjects import hasNanojanskyFluxUnits, convertToNanojansky, getFormatVersionFromRefCat
from lsst.meas.algorithms import getRefFluxField, LoadReferenceObjectsTask, LoadReferenceObjectsConfig
import lsst.afw.table as afwTable
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .indexerRegistry import IndexerRegistry

class LoadIndexedReferenceObjectsConfig(LoadReferenceObjectsConfig):
    ref_dataset_name = pexConfig.Field(
        dtype=str,
        default='cal_ref_cat',
        doc='Name of the ingested reference dataset'
    )

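# A minimal sketch of overriding ref_dataset_name from a config override file.
# The dataset name "gaia_dr2_20200414" and the "refObjLoader" field name are
# illustrative assumptions (the field name is merely the common convention in
# downstream tasks); kept as a comment so importing this module has no side effects.
#
#     # config override file loaded by the parent task
#     config.refObjLoader.retarget(LoadIndexedReferenceObjectsTask)
#     config.refObjLoader.ref_dataset_name = "gaia_dr2_20200414"
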
class LoadIndexedReferenceObjectsTask(LoadReferenceObjectsTask):
    """Load reference objects from an indexed catalog ingested by
    IngestIndexReferenceTask.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler for reading catalogs.
    """
    ConfigClass = LoadIndexedReferenceObjectsConfig
    _DefaultName = 'LoadIndexedReferenceObjectsTask'

    def __init__(self, butler, *args, **kwargs):
        LoadReferenceObjectsTask.__init__(self, *args, **kwargs)
        self.dataset_config = butler.get("ref_cat_config", name=self.config.ref_dataset_name, immediate=True)
        self.indexer = IndexerRegistry[self.dataset_config.indexer.name](self.dataset_config.indexer.active)
        # This needs to come from the loader config, not the dataset_config since directory aliases can
        # change the path where the shards are found.
        self.ref_dataset_name = self.config.ref_dataset_name
        self.butler = butler

    @pipeBase.timeMethod
    def loadSkyCircle(self, ctrCoord, radius, filterName=None, epoch=None, centroids=False):
        shardIdList, isOnBoundaryList = self.indexer.getShardIds(ctrCoord, radius)
        shards = self.getShards(shardIdList)
        refCat = self.butler.get('ref_cat',
                                 dataId=self.indexer.makeDataId('master_schema', self.ref_dataset_name),
                                 immediate=True)

        # load the catalog, one shard at a time
        for shard, isOnBoundary in zip(shards, isOnBoundaryList):
            if shard is None:
                continue
            if isOnBoundary:
                refCat.extend(self._trimToCircle(shard, ctrCoord, radius))
            else:
                refCat.extend(shard)

        # make sure catalog is contiguous: must do this before PM calculations
        if not refCat.isContiguous():
            refCat = refCat.copy(True)

        # apply proper motion corrections
        self.applyProperMotions(refCat, epoch)

        # update version=0 style refcats to have nJy fluxes
        if self.dataset_config.format_version == 0 or not hasNanojanskyFluxUnits(refCat.schema):
            self.log.warning("Found version 0 reference catalog with old style units in schema.")
            self.log.warning("run `meas_algorithms/bin/convert_refcat_to_nJy.py` to convert fluxes to nJy.")
            self.log.warning("See RFC-575 for more details.")
            refCat = convertToNanojansky(refCat, self.log)
        else:
            # For version >= 1, the version should be in the catalog header,
            # too, and should be consistent with the version in the config.
            catVersion = getFormatVersionFromRefCat(refCat)
            if catVersion != self.dataset_config.format_version:
                raise RuntimeError(f"Format version in reference catalog ({catVersion}) does not match"
                                   f" format_version field in config ({self.dataset_config.format_version})")

        self._addFluxAliases(refCat.schema)
        fluxField = getRefFluxField(schema=refCat.schema, filterName=filterName)

        if centroids:
            # add and initialize centroid and hasCentroid fields (these are
            # added after loading to avoid wasting space in the saved catalogs)
            # the new fields are automatically initialized to (nan, nan) and
            # False so no need to set them explicitly
            mapper = afwTable.SchemaMapper(refCat.schema, True)
            mapper.addMinimalSchema(refCat.schema, True)
            mapper.editOutputSchema().addField("centroid_x", type=float)
            mapper.editOutputSchema().addField("centroid_y", type=float)
            mapper.editOutputSchema().addField("hasCentroid", type="Flag")
            expandedCat = afwTable.SimpleCatalog(mapper.getOutputSchema())
            expandedCat.extend(refCat, mapper=mapper)
            refCat = expandedCat

        # return reference catalog
        return pipeBase.Struct(
            refCat=refCat,
            fluxField=fluxField,
        )

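    # Usage note (illustrative sketch, not part of the original source): when
    # loadSkyCircle is called with centroids=True, the returned refCat gains
    # empty "centroid_x", "centroid_y" and "hasCentroid" fields that callers
    # are expected to fill from an exposure WCS afterwards, for example:
    #
    #     result = loader.loadSkyCircle(ctrCoord, radius, filterName="r", centroids=True)
    #     afwTable.updateRefCentroids(exposure.getWcs(), result.refCat)
    #
    # Here "loader" and "exposure" are assumed to exist in the calling code.
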
    def getShards(self, shardIdList):
        """Get shards by ID.

        Parameters
        ----------
        shardIdList : `list` of `int`
            A list of integer shard ids.

        Returns
        -------
        catalogs : `list` of `lsst.afw.table.SimpleCatalog`
            A list of reference catalogs, one for each entry in shardIdList.
        """
        shards = []
        for shardId in shardIdList:
            if self.butler.datasetExists('ref_cat',
                                         dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name)):
                shards.append(self.butler.get('ref_cat',
                                              dataId=self.indexer.makeDataId(shardId, self.ref_dataset_name),
                                              immediate=True))
        return shards

    def _trimToCircle(self, refCat, ctrCoord, radius):
        """Trim a reference catalog to a circular aperture.

        Parameters
        ----------
        refCat : `lsst.afw.table.SimpleCatalog`
            Reference catalog to be trimmed.
        ctrCoord : `lsst.geom.SpherePoint`
            ICRS center of search region.
        radius : `lsst.geom.Angle`
            Radius of search region.

        Returns
        -------
        catalog : `lsst.afw.table.SimpleCatalog`
            Catalog containing objects that fall in the circular aperture.
        """
        tempCat = type(refCat)(refCat.schema)
        for record in refCat:
            if record.getCoord().separation(ctrCoord) < radius:
                tempCat.append(record)
        return tempCat
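

# A minimal end-to-end sketch of using this task with a Gen2 butler repository.
# The repository path, dataset name and search region are illustrative
# assumptions; the block is kept commented out so that importing this module
# has no side effects.
#
#     import lsst.geom
#     from lsst.daf.persistence import Butler
#     from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask
#
#     butler = Butler("/path/to/gen2/repo")        # repo path is an assumption
#     config = LoadIndexedReferenceObjectsTask.ConfigClass()
#     config.ref_dataset_name = "cal_ref_cat"      # must match the ingested catalog
#     loader = LoadIndexedReferenceObjectsTask(butler=butler, config=config)
#
#     ctrCoord = lsst.geom.SpherePoint(215.5, 53.0, lsst.geom.degrees)
#     radius = 0.5*lsst.geom.degrees
#     result = loader.loadSkyCircle(ctrCoord, radius, filterName="r")
#     print(len(result.refCat), result.fluxField)
#
# An optional astropy.time.Time can be passed as ``epoch`` to apply proper
# motion corrections to the loaded positions.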