LSSTApplications  17.0+1,17.0+10,17.0+16,17.0+17,17.0+2,17.0+21,17.0+3,17.0+4,17.0-1-g377950a+9,17.0.1-1-g444bd44+9,17.0.1-1-g46e6382+10,17.0.1-1-g4d4fbc4+4,17.0.1-1-g703d48b+6,17.0.1-1-g8de6c91,17.0.1-1-g9deacb5+9,17.0.1-1-gf4e0155+10,17.0.1-1-gfc65f5f+9,17.0.1-1-gfc6fb1f+5,17.0.1-2-g3bdf598,17.0.1-2-g3e5d191+1,17.0.1-2-ga5d6a7c+4,17.0.1-2-gd73ec07+10,17.0.1-3-gcbbb95d+5,17.0.1-3-geaa4c8a+4,17.0.1-4-g088434c+4,17.0.1-4-ga7077188,17.0.1-4-gf25f8e6,17.0.1-5-g5a10bbc+1,17.0.1-5-gf0ac6446+12,17.0.1-6-g7bb9714,17.0.1-7-g69836a1+10,17.0.1-7-gf7766dbc3,w.2019.13
LSSTDataManagementBasePackage
Public Member Functions | Public Attributes | Static Public Attributes | List of all members
lsst.meas.algorithms.debugger.MeasurementDebuggerTask Class Reference
Inheritance diagram for lsst.meas.algorithms.debugger.MeasurementDebuggerTask:
lsst.pipe.base.cmdLineTask.CmdLineTask lsst.pipe.base.task.Task

Public Member Functions

def __init__ (self, schema=None, kwargs)
 
def runDataRef (self, dataRef, image, catalog)
 
def readImage (self, image)
 
def readSources (self, catalog)
 
def mapSchemas (self, sources)
 
def subsetSources (self, sources)
 
def writeSources (self, sources)
 
def writeConfig (self, args, kwargs)
 
def writeMetadata (self, args, kwargs)
 
def writeSchemas (self, args, kwargs)
 
def applyOverrides (cls, config)
 
def parseAndRun (cls, args=None, config=None, log=None, doReturnResults=False)
 
def writeConfig (self, butler, clobber=False, doBackup=True)
 
def writeSchemas (self, butler, clobber=False, doBackup=True)
 
def writeMetadata (self, dataRef)
 
def writePackageVersions (self, butler, clobber=False, doBackup=True, dataset="packages")
 
def emptyMetadata (self)
 
def getSchemaCatalogs (self)
 
def getAllSchemaCatalogs (self)
 
def getFullMetadata (self)
 
def getFullName (self)
 
def getName (self)
 
def getTaskDict (self)
 
def makeSubtask (self, name, keyArgs)
 
def timer (self, name, logLevel=Log.DEBUG)
 
def makeField (cls, doc)
 
def __reduce__ (self)
 

Public Attributes

 schema
 
 metadata
 
 log
 
 config
 

Static Public Attributes

 ConfigClass = MeasurementDebuggerConfig
 
 RunnerClass = MeasurementDebuggerRunner
 
bool canMultiprocess = True
 

Detailed Description

Definition at line 96 of file debugger.py.

Constructor & Destructor Documentation

◆ __init__()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.__init__ (   self,
  schema = None,
  kwargs 
)

Definition at line 101 of file debugger.py.

101  def __init__(self, schema=None, **kwargs):
102  super(MeasurementDebuggerTask, self).__init__(**kwargs)
103  if schema is None:
104  schema = afwTable.SourceTable.makeMinimalSchema()
105  self.schema = schema
106  self.makeSubtask("measurement", schema=schema)
107 
def __init__(self, minimum, dataRange, Q)

Member Function Documentation

◆ __reduce__()

def lsst.pipe.base.task.Task.__reduce__ (   self)
inherited
Pickler.

Definition at line 373 of file task.py.

373  def __reduce__(self):
374  """Pickler.
375  """
376  return self.__class__, (self.config, self._name, self._parentTask, None)
377 

◆ applyOverrides()

def lsst.pipe.base.cmdLineTask.CmdLineTask.applyOverrides (   cls,
  config 
)
inherited
A hook to allow a task to change the values of its config *after* the camera-specific
overrides are loaded but before any command-line overrides are applied.

Parameters
----------
config : instance of task's ``ConfigClass``
    Task configuration.

Notes
-----
This is necessary in some cases because the camera-specific overrides may retarget subtasks,
wiping out changes made in ConfigClass.setDefaults. See LSST Trac ticket #2282 for more discussion.

.. warning::

   This is called by CmdLineTask.parseAndRun; other ways of constructing a config will not apply
   these overrides.

Definition at line 527 of file cmdLineTask.py.

527  def applyOverrides(cls, config):
528  """A hook to allow a task to change the values of its config *after* the camera-specific
529  overrides are loaded but before any command-line overrides are applied.
530 
531  Parameters
532  ----------
533  config : instance of task's ``ConfigClass``
534  Task configuration.
535 
536  Notes
537  -----
538  This is necessary in some cases because the camera-specific overrides may retarget subtasks,
539  wiping out changes made in ConfigClass.setDefaults. See LSST Trac ticket #2282 for more discussion.
540 
541  .. warning::
542 
543  This is called by CmdLineTask.parseAndRun; other ways of constructing a config will not apply
544  these overrides.
545  """
546  pass
547 

◆ emptyMetadata()

def lsst.pipe.base.task.Task.emptyMetadata (   self)
inherited
Empty (clear) the metadata for this Task and all sub-Tasks.

Definition at line 153 of file task.py.

153  def emptyMetadata(self):
154  """Empty (clear) the metadata for this Task and all sub-Tasks.
155  """
156  for subtask in self._taskDict.values():
157  subtask.metadata = dafBase.PropertyList()
158 
Class for storing ordered metadata with comments.
Definition: PropertyList.h:68

◆ getAllSchemaCatalogs()

def lsst.pipe.base.task.Task.getAllSchemaCatalogs (   self)
inherited
Get schema catalogs for all tasks in the hierarchy, combining the results into a single dict.

Returns
-------
schemacatalogs : `dict`
    Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
    lsst.afw.table Catalog type) for all tasks in the hierarchy, from the top-level task down
    through all subtasks.

Notes
-----
This method may be called on any task in the hierarchy; it will return the same answer, regardless.

The default implementation should always suffice. If your subtask uses schemas, then override
`Task.getSchemaCatalogs`, not this method.

Definition at line 188 of file task.py.

188  def getAllSchemaCatalogs(self):
189  """Get schema catalogs for all tasks in the hierarchy, combining the results into a single dict.
190 
191  Returns
192  -------
193  schemacatalogs : `dict`
194  Keys are butler dataset type, values are a empty catalog (an instance of the appropriate
195  lsst.afw.table Catalog type) for all tasks in the hierarchy, from the top-level task down
196  through all subtasks.
197 
198  Notes
199  -----
200  This method may be called on any task in the hierarchy; it will return the same answer, regardless.
201 
202  The default implementation should always suffice. If your subtask uses schemas the override
203  `Task.getSchemaCatalogs`, not this method.
204  """
205  schemaDict = self.getSchemaCatalogs()
206  for subtask in self._taskDict.values():
207  schemaDict.update(subtask.getSchemaCatalogs())
208  return schemaDict
209 

◆ getFullMetadata()

def lsst.pipe.base.task.Task.getFullMetadata (   self)
inherited
Get metadata for all tasks.

Returns
-------
metadata : `lsst.daf.base.PropertySet`
    The `~lsst.daf.base.PropertySet` keys are the full task name. Values are metadata
    for the top-level task and all subtasks, sub-subtasks, etc..

Notes
-----
The returned metadata includes timing information (if ``@timer.timeMethod`` is used)
and any metadata set by the task. The name of each item consists of the full task name
with ``.`` replaced by ``:``, followed by ``.`` and the name of the item, e.g.::

    topLevelTaskName:subtaskName:subsubtaskName.itemName

Using ``:`` in the full task name disambiguates the rare situation in which a task has a subtask
and a metadata item with the same name.

Definition at line 210 of file task.py.

210  def getFullMetadata(self):
211  """Get metadata for all tasks.
212 
213  Returns
214  -------
215  metadata : `lsst.daf.base.PropertySet`
216  The `~lsst.daf.base.PropertySet` keys are the full task name. Values are metadata
217  for the top-level task and all subtasks, sub-subtasks, etc..
218 
219  Notes
220  -----
221  The returned metadata includes timing information (if ``@timer.timeMethod`` is used)
222  and any metadata set by the task. The name of each item consists of the full task name
223  with ``.`` replaced by ``:``, followed by ``.`` and the name of the item, e.g.::
224 
225  topLevelTaskName:subtaskName:subsubtaskName.itemName
226 
227  using ``:`` in the full task name disambiguates the rare situation that a task has a subtask
228  and a metadata item with the same name.
229  """
230  fullMetadata = dafBase.PropertySet()
231  for fullName, task in self.getTaskDict().items():
232  fullMetadata.set(fullName.replace(".", ":"), task.metadata)
233  return fullMetadata
234 
Class for storing generic metadata.
Definition: PropertySet.h:68
std::vector< SchemaItem< Flag > > * items

◆ getFullName()

def lsst.pipe.base.task.Task.getFullName (   self)
inherited
Get the task name as a hierarchical name including parent task names.

Returns
-------
fullName : `str`
    The full name consists of the name of the parent task and each subtask separated by periods.
    For example:

    - The full name of top-level task "top" is simply "top".
    - The full name of subtask "sub" of top-level task "top" is "top.sub".
    - The full name of subtask "sub2" of subtask "sub" of top-level task "top" is "top.sub.sub2".

Definition at line 235 of file task.py.

235  def getFullName(self):
236  """Get the task name as a hierarchical name including parent task names.
237 
238  Returns
239  -------
240  fullName : `str`
241  The full name consists of the name of the parent task and each subtask separated by periods.
242  For example:
243 
244  - The full name of top-level task "top" is simply "top".
245  - The full name of subtask "sub" of top-level task "top" is "top.sub".
246  - The full name of subtask "sub2" of subtask "sub" of top-level task "top" is "top.sub.sub2".
247  """
248  return self._fullName
249 

◆ getName()

def lsst.pipe.base.task.Task.getName (   self)
inherited
Get the name of the task.

Returns
-------
taskName : `str`
    Name of the task.

See also
--------
getFullName

Definition at line 250 of file task.py.

250  def getName(self):
251  """Get the name of the task.
252 
253  Returns
254  -------
255  taskName : `str`
256  Name of the task.
257 
258  See also
259  --------
260  getFullName
261  """
262  return self._name
263 

◆ getSchemaCatalogs()

def lsst.pipe.base.task.Task.getSchemaCatalogs (   self)
inherited
Get the schemas generated by this task.

Returns
-------
schemaCatalogs : `dict`
    Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
    `lsst.afw.table` Catalog type) for this task.

Notes
-----

.. warning::

   Subclasses that use schemas must override this method. The default implementation returns
   an empty dict.

This method may be called at any time after the Task is constructed, which means that all task
schemas should be computed at construction time, *not* when data is actually processed. This
reflects the philosophy that the schema should not depend on the data.

Returning catalogs rather than just schemas allows us to save e.g. slots for SourceCatalog as well.

See also
--------
Task.getAllSchemaCatalogs

Definition at line 159 of file task.py.

159  def getSchemaCatalogs(self):
160  """Get the schemas generated by this task.
161 
162  Returns
163  -------
164  schemaCatalogs : `dict`
165  Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
166  `lsst.afw.table` Catalog type) for this task.
167 
168  Notes
169  -----
170 
171  .. warning::
172 
173  Subclasses that use schemas must override this method. The default implemenation returns
174  an empty dict.
175 
176  This method may be called at any time after the Task is constructed, which means that all task
177  schemas should be computed at construction time, *not* when data is actually processed. This
178  reflects the philosophy that the schema should not depend on the data.
179 
180  Returning catalogs rather than just schemas allows us to save e.g. slots for SourceCatalog as well.
181 
182  See also
183  --------
184  Task.getAllSchemaCatalogs
185  """
186  return {}
187 

◆ getTaskDict()

def lsst.pipe.base.task.Task.getTaskDict (   self)
inherited
Get a dictionary of all tasks as a shallow copy.

Returns
-------
taskDict : `dict`
    Dictionary containing full task name: task object for the top-level task and all subtasks,
    sub-subtasks, etc..

Definition at line 264 of file task.py.

264  def getTaskDict(self):
265  """Get a dictionary of all tasks as a shallow copy.
266 
267  Returns
268  -------
269  taskDict : `dict`
270  Dictionary containing full task name: task object for the top-level task and all subtasks,
271  sub-subtasks, etc..
272  """
273  return self._taskDict.copy()
274 
def getTaskDict(config, taskDict=None, baseName="")

◆ makeField()

def lsst.pipe.base.task.Task.makeField (   cls,
  doc 
)
inherited
Make a `lsst.pex.config.ConfigurableField` for this task.

Parameters
----------
doc : `str`
    Help text for the field.

Returns
-------
configurableField : `lsst.pex.config.ConfigurableField`
    A `~ConfigurableField` for this task.

Examples
--------
Provides a convenient way to specify this task is a subtask of another task.

Here is an example of use::

    class OtherTaskConfig(lsst.pex.config.Config)
aSubtask = ATaskClass.makeField("a brief description of what this task does")

Definition at line 329 of file task.py.

329  def makeField(cls, doc):
330  """Make a `lsst.pex.config.ConfigurableField` for this task.
331 
332  Parameters
333  ----------
334  doc : `str`
335  Help text for the field.
336 
337  Returns
338  -------
339  configurableField : `lsst.pex.config.ConfigurableField`
340  A `~ConfigurableField` for this task.
341 
342  Examples
343  --------
344  Provides a convenient way to specify this task is a subtask of another task.
345 
346  Here is an example of use::
347 
348  class OtherTaskConfig(lsst.pex.config.Config)
349  aSubtask = ATaskClass.makeField("a brief description of what this task does")
350  """
351  return ConfigurableField(doc=doc, target=cls)
352 

◆ makeSubtask()

def lsst.pipe.base.task.Task.makeSubtask (   self,
  name,
  keyArgs 
)
inherited
Create a subtask as a new instance, assigned as the ``name`` attribute of this task.

Parameters
----------
name : `str`
    Brief name of the subtask.
keyArgs
    Extra keyword arguments used to construct the task. The following arguments are automatically
    provided and cannot be overridden:

    - "config".
    - "parentTask".

Notes
-----
The subtask must be defined by ``Task.config.name``, an instance of pex_config ConfigurableField
or RegistryField.

Definition at line 275 of file task.py.

275  def makeSubtask(self, name, **keyArgs):
276  """Create a subtask as a new instance as the ``name`` attribute of this task.
277 
278  Parameters
279  ----------
280  name : `str`
281  Brief name of the subtask.
282  keyArgs
283  Extra keyword arguments used to construct the task. The following arguments are automatically
284  provided and cannot be overridden:
285 
286  - "config".
287  - "parentTask".
288 
289  Notes
290  -----
291  The subtask must be defined by ``Task.config.name``, an instance of pex_config ConfigurableField
292  or RegistryField.
293  """
294  taskField = getattr(self.config, name, None)
295  if taskField is None:
296  raise KeyError("%s's config does not have field %r" % (self.getFullName(), name))
297  subtask = taskField.apply(name=name, parentTask=self, **keyArgs)
298  setattr(self, name, subtask)
299 

◆ mapSchemas()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.mapSchemas (   self,
  sources 
)

Definition at line 131 of file debugger.py.

131  def mapSchemas(self, sources):
132  catalog = afwTable.SourceCatalog(self.schema)
133  for ss in sources:
134  new = catalog.addNew()
135  new.setFootprint(ss.getFootprint())
136  for name in self.schema.getNames():
137  if name in ss.schema:
138  new.set(name, ss.get(name))
139  return catalog
140 

◆ parseAndRun()

def lsst.pipe.base.cmdLineTask.CmdLineTask.parseAndRun (   cls,
  args = None,
  config = None,
  log = None,
  doReturnResults = False 
)
inherited
Parse an argument list and run the command.

Parameters
----------
args : `list`, optional
    List of command-line arguments; if `None` use `sys.argv`.
config : `lsst.pex.config.Config`-type, optional
    Config for task. If `None` use `Task.ConfigClass`.
log : `lsst.log.Log`-type, optional
    Log. If `None` use the default log.
doReturnResults : `bool`, optional
    If `True`, return the results of this task. Default is `False`. This is only intended for
    unit tests and similar use. It can easily exhaust memory (if the task returns enough data and you
    call it enough times) and it will fail when using multiprocessing if the returned data cannot be
    pickled.

Returns
-------
struct : `lsst.pipe.base.Struct`
    Fields are:

    - ``argumentParser``: the argument parser.
    - ``parsedCmd``: the parsed command returned by the argument parser's
      `lsst.pipe.base.ArgumentParser.parse_args` method.
    - ``taskRunner``: the task runner used to run the task (an instance of `Task.RunnerClass`).
    - ``resultList``: results returned by the task runner's ``run`` method, one entry per invocation.
This will typically be a list of `None` unless ``doReturnResults`` is `True`;
see `Task.RunnerClass` (`TaskRunner` by default) for more information.

Notes
-----
Calling this method with no arguments specified is the standard way to run a command-line task
from the command-line. For an example see ``pipe_tasks`` ``bin/makeSkyMap.py`` or almost any other
file in that directory.

If one or more of the dataIds fails then this routine will exit (with a status giving the
number of failed dataIds) rather than returning this struct; this behaviour can be
overridden by specifying the ``--noExit`` command-line option.

Definition at line 549 of file cmdLineTask.py.

549  def parseAndRun(cls, args=None, config=None, log=None, doReturnResults=False):
550  """Parse an argument list and run the command.
551 
552  Parameters
553  ----------
554  args : `list`, optional
555  List of command-line arguments; if `None` use `sys.argv`.
556  config : `lsst.pex.config.Config`-type, optional
557  Config for task. If `None` use `Task.ConfigClass`.
558  log : `lsst.log.Log`-type, optional
559  Log. If `None` use the default log.
560  doReturnResults : `bool`, optional
561  If `True`, return the results of this task. Default is `False`. This is only intended for
562  unit tests and similar use. It can easily exhaust memory (if the task returns enough data and you
563  call it enough times) and it will fail when using multiprocessing if the returned data cannot be
564  pickled.
565 
566  Returns
567  -------
568  struct : `lsst.pipe.base.Struct`
569  Fields are:
570 
571  - ``argumentParser``: the argument parser.
572  - ``parsedCmd``: the parsed command returned by the argument parser's
573  `lsst.pipe.base.ArgumentParser.parse_args` method.
574  - ``taskRunner``: the task runner used to run the task (an instance of `Task.RunnerClass`).
575  - ``resultList``: results returned by the task runner's ``run`` method, one entry per invocation.
576  This will typically be a list of `None` unless ``doReturnResults`` is `True`;
577  see `Task.RunnerClass` (`TaskRunner` by default) for more information.
578 
579  Notes
580  -----
581  Calling this method with no arguments specified is the standard way to run a command-line task
582  from the command-line. For an example see ``pipe_tasks`` ``bin/makeSkyMap.py`` or almost any other
583  file in that directory.
584 
585  If one or more of the dataIds fails then this routine will exit (with a status giving the
586  number of failed dataIds) rather than returning this struct; this behaviour can be
587  overridden by specifying the ``--noExit`` command-line option.
588  """
589  if args is None:
590  commandAsStr = " ".join(sys.argv)
591  args = sys.argv[1:]
592  else:
593  commandAsStr = "{}{}".format(lsst.utils.get_caller_name(skip=1), tuple(args))
594 
595  argumentParser = cls._makeArgumentParser()
596  if config is None:
597  config = cls.ConfigClass()
598  parsedCmd = argumentParser.parse_args(config=config, args=args, log=log, override=cls.applyOverrides)
599  # print this message after parsing the command so the log is fully configured
600  parsedCmd.log.info("Running: %s", commandAsStr)
601 
602  taskRunner = cls.RunnerClass(TaskClass=cls, parsedCmd=parsedCmd, doReturnResults=doReturnResults)
603  resultList = taskRunner.run(parsedCmd)
604 
605  try:
606  nFailed = sum(((res.exitStatus != 0) for res in resultList))
607  except (TypeError, AttributeError) as e:
608  # NOTE: TypeError if resultList is None, AttributeError if it doesn't have exitStatus.
609  parsedCmd.log.warn("Unable to retrieve exit status (%s); assuming success", e)
610  nFailed = 0
611 
612  if nFailed > 0:
613  if parsedCmd.noExit:
614  parsedCmd.log.error("%d dataRefs failed; not exiting as --noExit was set", nFailed)
615  else:
616  sys.exit(nFailed)
617 
618  return Struct(
619  argumentParser=argumentParser,
620  parsedCmd=parsedCmd,
621  taskRunner=taskRunner,
622  resultList=resultList,
623  )
624 
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
Definition: history.py:168

◆ readImage()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.readImage (   self,
  image 
)

Definition at line 121 of file debugger.py.

121  def readImage(self, image):
122  exp = afwImage.ExposureF(image)
123  self.log.info("Read %dx%d image", exp.getWidth(), exp.getHeight())
124  return exp
125 

◆ readSources()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.readSources (   self,
  catalog 
)

Definition at line 126 of file debugger.py.

126  def readSources(self, catalog):
127  sources = afwTable.SourceCatalog.readFits(catalog)
128  self.log.info("Read %d sources", len(sources))
129  return sources
130 

◆ runDataRef()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.runDataRef (   self,
  dataRef,
  image,
  catalog 
)

Definition at line 112 of file debugger.py.

112  def runDataRef(self, dataRef, image, catalog):
113  exp = self.readImage(image)
114  sources = self.readSources(catalog)
115  sources = self.subsetSources(sources)
116  sources = self.mapSchemas(sources)
117  self.measurement.measure(exp, sources)
118  self.writeSources(sources)
119  return Struct(exp=exp, sources=sources)
120 
def measure(mi, x, y, size, statistic, stats)
Definition: fringe.py:357

◆ subsetSources()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.subsetSources (   self,
  sources 
)
Return a subset of the input catalog

The full catalog is used if the 'sourceId' list is empty.

Parent sources (in the deblending sense) are also added to the
subset so that they can be removed (via replaceWithNoise).

Definition at line 141 of file debugger.py.

141  def subsetSources(self, sources):
142  """Return a subset of the input catalog
143 
144  The full catalog is used if the 'sourceId' list is empty.
145 
146  Parent sources (in the deblending sense) are also added to the
147  subset so that they can be removed (via replaceWithNoise).
148  """
149  if not self.config.sourceId:
150  return sources
151 
152  identifiers = set(self.config.sourceId)
153  subset = afwTable.SourceCatalog(sources.table)
154  while len(identifiers) > 0:
155  ident = identifiers.pop()
156  ss = sources.find(ident)
157  if ss is None:
158  raise RuntimeError("Unable to find id=%d in catalog" % ident)
159  subset.append(ss)
160  parent = ss.getParent()
161  if parent:
162  identifiers.add(parent)
163  self.log.info("Subset to %d sources", len(subset))
164  return subset
165 
daf::base::PropertySet * set
Definition: fits.cc:884

◆ timer()

def lsst.pipe.base.task.Task.timer (   self,
  name,
  logLevel = Log.DEBUG 
)
inherited
Context manager to log performance data for an arbitrary block of code.

Parameters
----------
name : `str`
    Name of code being timed; data will be logged using item name: ``Start`` and ``End``.
logLevel
    A `lsst.log` level constant.

Examples
--------
Creating a timer context::

    with self.timer("someCodeToTime"):
pass  # code to time

See also
--------
timer.logInfo

Definition at line 301 of file task.py.

301  def timer(self, name, logLevel=Log.DEBUG):
302  """Context manager to log performance data for an arbitrary block of code.
303 
304  Parameters
305  ----------
306  name : `str`
307  Name of code being timed; data will be logged using item name: ``Start`` and ``End``.
308  logLevel
309  A `lsst.log` level constant.
310 
311  Examples
312  --------
313  Creating a timer context::
314 
315  with self.timer("someCodeToTime"):
316  pass # code to time
317 
318  See also
319  --------
320  timer.logInfo
321  """
322  logInfo(obj=self, prefix=name + "Start", logLevel=logLevel)
323  try:
324  yield
325  finally:
326  logInfo(obj=self, prefix=name + "End", logLevel=logLevel)
327 
def logInfo(obj, prefix, logLevel=Log.DEBUG)
Definition: timer.py:62

◆ writeConfig() [1/2]

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.writeConfig (   self,
  args,
  kwargs 
)

Definition at line 170 of file debugger.py.

170  def writeConfig(self, *args, **kwargs):
171  pass
172 

◆ writeConfig() [2/2]

def lsst.pipe.base.cmdLineTask.CmdLineTask.writeConfig (   self,
  butler,
  clobber = False,
  doBackup = True 
)
inherited
Write the configuration used for processing the data, or check that an existing
one is equal to the new one if present.

Parameters
----------
butler : `lsst.daf.persistence.Butler`
    Data butler used to write the config. The config is written to dataset type
    `CmdLineTask._getConfigName`.
clobber : `bool`, optional
    A boolean flag that controls what happens if a config already has been saved:
    - `True`: overwrite or rename the existing config, depending on ``doBackup``.
    - `False`: raise `TaskError` if this config does not match the existing config.
doBackup : bool, optional
    Set to `True` to backup the config files if clobbering.

Definition at line 649 of file cmdLineTask.py.

649  def writeConfig(self, butler, clobber=False, doBackup=True):
650  """Write the configuration used for processing the data, or check that an existing
651  one is equal to the new one if present.
652 
653  Parameters
654  ----------
655  butler : `lsst.daf.persistence.Butler`
656  Data butler used to write the config. The config is written to dataset type
657  `CmdLineTask._getConfigName`.
658  clobber : `bool`, optional
659  A boolean flag that controls what happens if a config already has been saved:
660  - `True`: overwrite or rename the existing config, depending on ``doBackup``.
661  - `False`: raise `TaskError` if this config does not match the existing config.
662  doBackup : bool, optional
663  Set to `True` to backup the config files if clobbering.
664  """
665  configName = self._getConfigName()
666  if configName is None:
667  return
668  if clobber:
669  butler.put(self.config, configName, doBackup=doBackup)
670  elif butler.datasetExists(configName, write=True):
671  # this may be subject to a race condition; see #2789
672  try:
673  oldConfig = butler.get(configName, immediate=True)
674  except Exception as exc:
675  raise type(exc)("Unable to read stored config file %s (%s); consider using --clobber-config" %
676  (configName, exc))
677 
678  def logConfigMismatch(msg):
679  self.log.fatal("Comparing configuration: %s", msg)
680 
681  if not self.config.compare(oldConfig, shortcut=False, output=logConfigMismatch):
682  raise TaskError(
683  ("Config does not match existing task config %r on disk; tasks configurations " +
684  "must be consistent within the same output repo (override with --clobber-config)") %
685  (configName,))
686  else:
687  butler.put(self.config, configName)
688 
table::Key< int > type
Definition: Detector.cc:164

◆ writeMetadata() [1/2]

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.writeMetadata (   self,
  args,
  kwargs 
)

Definition at line 173 of file debugger.py.

173  def writeMetadata(self, *args, **kwargs):
174  pass
175 

◆ writeMetadata() [2/2]

def lsst.pipe.base.cmdLineTask.CmdLineTask.writeMetadata (   self,
  dataRef 
)
inherited
Write the metadata produced from processing the data.

Parameters
----------
dataRef
    Butler data reference used to write the metadata.
    The metadata is written to dataset type `CmdLineTask._getMetadataName`.

Definition at line 724 of file cmdLineTask.py.

724  def writeMetadata(self, dataRef):
725  """Write the metadata produced from processing the data.
726 
727  Parameters
728  ----------
729  dataRef
730  Butler data reference used to write the metadata.
731  The metadata is written to dataset type `CmdLineTask._getMetadataName`.
732  """
733  try:
734  metadataName = self._getMetadataName()
735  if metadataName is not None:
736  dataRef.put(self.getFullMetadata(), metadataName)
737  except Exception as e:
738  self.log.warn("Could not persist metadata for dataId=%s: %s", dataRef.dataId, e)
739 

◆ writePackageVersions()

def lsst.pipe.base.cmdLineTask.CmdLineTask.writePackageVersions (   self,
  butler,
  clobber = False,
  doBackup = True,
  dataset = "packages" 
)
inherited
Compare and write package versions.

Parameters
----------
butler : `lsst.daf.persistence.Butler`
    Data butler used to read/write the package versions.
clobber : `bool`, optional
    A boolean flag that controls what happens if versions already have been saved:
    - `True`: overwrite or rename the existing version info, depending on ``doBackup``.
    - `False`: raise `TaskError` if this version info does not match the existing.
doBackup : `bool`, optional
    If `True` and clobbering, old package version files are backed up.
dataset : `str`, optional
    Name of dataset to read/write.

Raises
------
TaskError
    Raised if there is a version mismatch with current and persisted lists of package versions.

Notes
-----
Note that this operation is subject to a race condition.

Definition at line 740 of file cmdLineTask.py.

740  def writePackageVersions(self, butler, clobber=False, doBackup=True, dataset="packages"):
741  """Compare and write package versions.
742 
743  Parameters
744  ----------
745  butler : `lsst.daf.persistence.Butler`
746  Data butler used to read/write the package versions.
747  clobber : `bool`, optional
748  A boolean flag that controls what happens if versions already have been saved:
749  - `True`: overwrite or rename the existing version info, depending on ``doBackup``.
750  - `False`: raise `TaskError` if this version info does not match the existing.
751  doBackup : `bool`, optional
752  If `True` and clobbering, old package version files are backed up.
753  dataset : `str`, optional
754  Name of dataset to read/write.
755 
756  Raises
757  ------
758  TaskError
759  Raised if there is a version mismatch with current and persisted lists of package versions.
760 
761  Notes
762  -----
763  Note that this operation is subject to a race condition.
764  """
765  packages = Packages.fromSystem()
766 
767  if clobber:
768  return butler.put(packages, dataset, doBackup=doBackup)
769  if not butler.datasetExists(dataset, write=True):
770  return butler.put(packages, dataset)
771 
772  try:
773  old = butler.get(dataset, immediate=True)
774  except Exception as exc:
775  raise type(exc)("Unable to read stored version dataset %s (%s); "
776  "consider using --clobber-versions or --no-versions" %
777  (dataset, exc))
778  # Note that because we can only detect python modules that have been imported, the stored
779  # list of products may be more or less complete than what we have now. What's important is
780  # that the products that are in common have the same version.
781  diff = packages.difference(old)
782  if diff:
783  raise TaskError(
784  "Version mismatch (" +
785  "; ".join("%s: %s vs %s" % (pkg, diff[pkg][1], diff[pkg][0]) for pkg in diff) +
786  "); consider using --clobber-versions or --no-versions")
787  # Update the old set of packages in case we have more packages that haven't been persisted.
788  extra = packages.extra(old)
789  if extra:
790  old.update(packages)
791  butler.put(old, dataset, doBackup=doBackup)
792 
table::Key< int > type
Definition: Detector.cc:164

◆ writeSchemas() [1/2]

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.writeSchemas (   self,
  args,
  kwargs 
)

Definition at line 176 of file debugger.py.

def writeSchemas(self, *args, **kwargs):
    # Deliberate no-op: overrides `CmdLineTask.writeSchemas` so that this
    # task persists no schema datasets. Accepts and ignores any arguments
    # the command-line framework passes (presumably a butler plus
    # clobber/backup flags, matching the inherited signature -- confirm
    # against the framework caller).
    pass

◆ writeSchemas() [2/2]

def lsst.pipe.base.cmdLineTask.CmdLineTask.writeSchemas (   self,
  butler,
  clobber = False,
  doBackup = True 
)
inherited
Write the schemas returned by `lsst.pipe.base.Task.getAllSchemaCatalogs`.

Parameters
----------
butler : `lsst.daf.persistence.Butler`
    Data butler used to write the schema. Each schema is written to the dataset type specified as the
    key in the dict returned by `~lsst.pipe.base.Task.getAllSchemaCatalogs`.
clobber : `bool`, optional
    A boolean flag that controls what happens if a schema already has been saved:
    - `True`: overwrite or rename the existing schema, depending on ``doBackup``.
    - `False`: raise `TaskError` if this schema does not match the existing schema.
doBackup : `bool`, optional
    Set to `True` to backup the schema files if clobbering.

Notes
-----
If ``clobber`` is `False` and an existing schema does not match a current schema,
then some schemas may have been saved successfully and others may not, and there is no easy way to
tell which is which.

Definition at line 689 of file cmdLineTask.py.

def writeSchemas(self, butler, clobber=False, doBackup=True):
    """Write the schemas returned by `lsst.pipe.base.Task.getAllSchemaCatalogs`.

    Parameters
    ----------
    butler : `lsst.daf.persistence.Butler`
        Data butler used to write the schema. Each schema is written to the
        dataset type specified as the key in the dict returned by
        `~lsst.pipe.base.Task.getAllSchemaCatalogs`.
    clobber : `bool`, optional
        A boolean flag that controls what happens if a schema already has been saved:
        - `True`: overwrite or rename the existing schema, depending on ``doBackup``.
        - `False`: raise `TaskError` if this schema does not match the existing schema.
    doBackup : `bool`, optional
        Set to `True` to backup the schema files if clobbering.

    Raises
    ------
    TaskError
        Raised if ``clobber`` is `False` and a persisted schema does not
        match the corresponding current schema.

    Notes
    -----
    If ``clobber`` is `False` and an existing schema does not match a current schema,
    then some schemas may have been saved successfully and others may not, and there
    is no easy way to tell which is which.
    """
    for dataset, catalog in self.getAllSchemaCatalogs().items():
        schemaDataset = dataset + "_schema"
        if clobber:
            # Unconditionally replace the stored schema, optionally backing
            # up the old file first.
            butler.put(catalog, schemaDataset, doBackup=doBackup)
        elif butler.datasetExists(schemaDataset, write=True):
            # A schema is already persisted; require an exact match so the
            # output repo stays internally consistent.
            oldSchema = butler.get(schemaDataset, immediate=True).getSchema()
            if not oldSchema.compare(catalog.getSchema(), afwTable.Schema.IDENTICAL):
                # Bug fix: the original built this message from two
                # concatenated literals ("... must be " + " consistent ...")
                # and emitted a double space in the user-facing error.
                raise TaskError(
                    "New schema does not match schema %r on disk; schemas must be "
                    "consistent within the same output repo (override with "
                    "--clobber-config)" % (dataset,))
        else:
            # No schema stored yet: write the current one.
            butler.put(catalog, schemaDataset)
std::vector< SchemaItem< Flag > > * items

◆ writeSources()

def lsst.meas.algorithms.debugger.MeasurementDebuggerTask.writeSources (   self,
  sources 
)

Definition at line 166 of file debugger.py.

def writeSources(self, sources):
    """Persist the source catalog to the configured output file.

    Parameters
    ----------
    sources
        Catalog to persist; written via its ``writeFits`` method to the
        path given by ``self.config.outputName``.
    """
    outputName = self.config.outputName
    sources.writeFits(outputName)
    self.log.info("Wrote %s", outputName)

Member Data Documentation

◆ canMultiprocess

bool lsst.pipe.base.cmdLineTask.CmdLineTask.canMultiprocess = True
staticinherited

Definition at line 524 of file cmdLineTask.py.

◆ config

lsst.pipe.base.task.Task.config
inherited

Definition at line 149 of file task.py.

◆ ConfigClass

lsst.meas.algorithms.debugger.MeasurementDebuggerTask.ConfigClass = MeasurementDebuggerConfig
static

Definition at line 98 of file debugger.py.

◆ log

lsst.pipe.base.task.Task.log
inherited

Definition at line 148 of file task.py.

◆ metadata

lsst.pipe.base.task.Task.metadata
inherited

Definition at line 121 of file task.py.

◆ RunnerClass

lsst.meas.algorithms.debugger.MeasurementDebuggerTask.RunnerClass = MeasurementDebuggerRunner
static

Definition at line 99 of file debugger.py.

◆ schema

lsst.meas.algorithms.debugger.MeasurementDebuggerTask.schema

Definition at line 105 of file debugger.py.


The documentation for this class was generated from the following file:
  • /j/snowflake/release/lsstsw/stack/Linux64/meas_algorithms/17.0.1-4-ga7077188/python/lsst/meas/algorithms/debugger.py