|
def | parseAndRun (cls, *args, **kwargs) |
|
def | parseAndSubmit (cls, args=None, **kwargs) |
|
def | batchWallTime (cls, time, parsedCmd, numCores) |
| Return walltime request for batch job. More...
|
|
def | batchCommand (cls, args) |
| Return command to run CmdLineTask. More...
|
|
def | logOperation (self, operation, catch=False, trace=True) |
| Provide a context manager for logging an operation. More...
|
|
def | applyOverrides (cls, config) |
|
def | parseAndRun (cls, args=None, config=None, log=None, doReturnResults=False) |
|
def | writeConfig (self, butler, clobber=False, doBackup=True) |
|
def | writeSchemas (self, butler, clobber=False, doBackup=True) |
|
def | writeMetadata (self, dataRef) |
|
def | writePackageVersions (self, butler, clobber=False, doBackup=True, dataset="packages") |
|
def | emptyMetadata (self) |
|
def | getSchemaCatalogs (self) |
|
def | getAllSchemaCatalogs (self) |
|
def | getFullMetadata (self) |
|
def | getFullName (self) |
|
def | getName (self) |
|
def | getTaskDict (self) |
|
def | makeSubtask (self, name, **keyArgs) |
|
def | timer (self, name, logLevel=Log.DEBUG) |
|
def | makeField (cls, doc) |
|
def | __reduce__ (self) |
|
Starts a BatchCmdLineTask with an MPI process pool
Use this subclass of BatchCmdLineTask if you want to use the Pool directly.
Definition at line 527 of file parallel.py.
◆ __reduce__()
def lsst.pipe.base.task.Task.__reduce__ |
( |
|
self | ) |
|
|
inherited |
◆ applyOverrides()
def lsst.pipe.base.cmdLineTask.CmdLineTask.applyOverrides |
( |
|
cls, |
|
|
|
config |
|
) |
| |
|
inherited |
A hook to allow a task to change the values of its config *after* the camera-specific
overrides are loaded but before any command-line overrides are applied.
Parameters
----------
config : instance of task's ``ConfigClass``
Task configuration.
Notes
-----
This is necessary in some cases because the camera-specific overrides may retarget subtasks,
wiping out changes made in ConfigClass.setDefaults. See LSST Trac ticket #2282 for more discussion.
.. warning::
This is called by CmdLineTask.parseAndRun; other ways of constructing a config will not apply
these overrides.
Reimplemented in lsst.pipe.drivers.constructCalibs.FringeTask, lsst.pipe.drivers.constructCalibs.FlatTask, lsst.pipe.drivers.constructCalibs.DarkTask, and lsst.pipe.drivers.constructCalibs.BiasTask.
Definition at line 527 of file cmdLineTask.py.
527 def applyOverrides(cls, config):
528 """A hook to allow a task to change the values of its config *after* the camera-specific
529 overrides are loaded but before any command-line overrides are applied.
533 config : instance of task's ``ConfigClass``
538 This is necessary in some cases because the camera-specific overrides may retarget subtasks,
539 wiping out changes made in ConfigClass.setDefaults. See LSST Trac ticket #2282 for more discussion.
543 This is called by CmdLineTask.parseAndRun; other ways of constructing a config will not apply
◆ batchCommand()
def lsst.ctrl.pool.parallel.BatchCmdLineTask.batchCommand |
( |
|
cls, |
|
|
|
args |
|
) |
| |
|
inherited |
Return command to run CmdLineTask.
- Parameters
-
Definition at line 476 of file parallel.py.
476 def batchCommand(cls, args):
477 """!Return command to run CmdLineTask
480 @param args: Parsed batch job arguments (from BatchArgumentParser)
482 job = args.job
if args.job
is not None else "job"
483 module = cls.__module__
484 script = (
"import os; os.umask(%#05o); " +
485 "import lsst.base; lsst.base.disableImplicitThreading(); " +
486 "import lsst.ctrl.pool.log; lsst.ctrl.pool.log.jobLog(\"%s\"); ") % (UMASK, job)
489 script += (
"import lsst.ctrl.pool.parallel; import atexit; " +
490 "atexit.register(lsst.ctrl.pool.parallel.printProcessStats); ")
492 script +=
"import %s; %s.%s.parseAndRun();" % (module, module, cls.__name__)
494 profilePre =
"import cProfile; import os; cProfile.run(\"\"\""
495 profilePost =
"\"\"\", filename=\"profile-" + job +
"-%s-%d.dat\" % (os.uname()[1], os.getpid()))"
497 return (
"python -c '" + (profilePre
if args.batchProfile
else "") + script +
498 (profilePost
if args.batchProfile
else "") +
"' " +
shCommandFromArgs(args.leftover) +
◆ batchWallTime()
def lsst.ctrl.pool.parallel.BatchCmdLineTask.batchWallTime |
( |
|
cls, |
|
|
|
time, |
|
|
|
parsedCmd, |
|
|
|
numCores |
|
) |
| |
|
inherited |
Return walltime request for batch job.
Subclasses should override if the walltime should be calculated differently (e.g., addition of some serial time).
- Parameters
-
cls | Class |
time | Requested time per iteration |
parsedCmd | Results of argument parsing |
numCores | Number of cores |
Reimplemented in lsst.pipe.drivers.multiBandDriver.MultiBandDriverTask, lsst.pipe.drivers.constructCalibs.CalibTask, lsst.pipe.drivers.skyCorrection.SkyCorrectionTask, lsst.pipe.drivers.coaddDriver.CoaddDriverTask, lsst.pipe.drivers.visualizeVisit.VisualizeVisitTask, lsst.ctrl.pool.test.demoTask.DemoTask, and lsst.pipe.drivers.ingestDriver.PoolIngestTask.
Definition at line 461 of file parallel.py.
461 def batchWallTime(cls, time, parsedCmd, numCores):
462 """!Return walltime request for batch job
464 Subclasses should override if the walltime should be calculated
465 differently (e.g., addition of some serial time).
468 @param time: Requested time per iteration
469 @param parsedCmd: Results of argument parsing
470 @param numCores: Number of cores
472 numTargets = len(cls.RunnerClass.getTargetList(parsedCmd))
473 return time*numTargets/float(numCores)
◆ emptyMetadata()
def lsst.pipe.base.task.Task.emptyMetadata |
( |
|
self | ) |
|
|
inherited |
Empty (clear) the metadata for this Task and all sub-Tasks.
Definition at line 153 of file task.py.
153 def emptyMetadata(self):
154 """Empty (clear) the metadata for this Task and all sub-Tasks.
156 for subtask
in self._taskDict.values():
◆ getAllSchemaCatalogs()
def lsst.pipe.base.task.Task.getAllSchemaCatalogs |
( |
|
self | ) |
|
|
inherited |
Get schema catalogs for all tasks in the hierarchy, combining the results into a single dict.
Returns
-------
schemacatalogs : `dict`
Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
lsst.afw.table Catalog type) for all tasks in the hierarchy, from the top-level task down
through all subtasks.
Notes
-----
This method may be called on any task in the hierarchy; it will return the same answer, regardless.
The default implementation should always suffice. If your subtask uses schemas then override
`Task.getSchemaCatalogs`, not this method.
Definition at line 188 of file task.py.
188 def getAllSchemaCatalogs(self):
189 """Get schema catalogs for all tasks in the hierarchy, combining the results into a single dict.
193 schemacatalogs : `dict`
194 Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
195 lsst.afw.table Catalog type) for all tasks in the hierarchy, from the top-level task down
196 through all subtasks.
200 This method may be called on any task in the hierarchy; it will return the same answer, regardless.
202 The default implementation should always suffice. If your subtask uses schemas then override
203 `Task.getSchemaCatalogs`, not this method.
205 schemaDict = self.getSchemaCatalogs()
206 for subtask
in self._taskDict.values():
207 schemaDict.update(subtask.getSchemaCatalogs())
◆ getFullMetadata()
def lsst.pipe.base.task.Task.getFullMetadata |
( |
|
self | ) |
|
|
inherited |
Get metadata for all tasks.
Returns
-------
metadata : `lsst.daf.base.PropertySet`
The `~lsst.daf.base.PropertySet` keys are the full task name. Values are metadata
for the top-level task and all subtasks, sub-subtasks, etc..
Notes
-----
The returned metadata includes timing information (if ``@timer.timeMethod`` is used)
and any metadata set by the task. The name of each item consists of the full task name
with ``.`` replaced by ``:``, followed by ``.`` and the name of the item, e.g.::
topLevelTaskName:subtaskName:subsubtaskName.itemName
using ``:`` in the full task name disambiguates the rare situation that a task has a subtask
and a metadata item with the same name.
Definition at line 210 of file task.py.
210 def getFullMetadata(self):
211 """Get metadata for all tasks.
215 metadata : `lsst.daf.base.PropertySet`
216 The `~lsst.daf.base.PropertySet` keys are the full task name. Values are metadata
217 for the top-level task and all subtasks, sub-subtasks, etc..
221 The returned metadata includes timing information (if ``@timer.timeMethod`` is used)
222 and any metadata set by the task. The name of each item consists of the full task name
223 with ``.`` replaced by ``:``, followed by ``.`` and the name of the item, e.g.::
225 topLevelTaskName:subtaskName:subsubtaskName.itemName
227 using ``:`` in the full task name disambiguates the rare situation that a task has a subtask
228 and a metadata item with the same name.
231 for fullName, task
in self.getTaskDict().
items():
232 fullMetadata.set(fullName.replace(
".",
":"), task.metadata)
◆ getFullName()
def lsst.pipe.base.task.Task.getFullName |
( |
|
self | ) |
|
|
inherited |
Get the task name as a hierarchical name including parent task names.
Returns
-------
fullName : `str`
The full name consists of the name of the parent task and each subtask separated by periods.
For example:
- The full name of top-level task "top" is simply "top".
- The full name of subtask "sub" of top-level task "top" is "top.sub".
- The full name of subtask "sub2" of subtask "sub" of top-level task "top" is "top.sub.sub2".
Definition at line 235 of file task.py.
235 def getFullName(self):
236 """Get the task name as a hierarchical name including parent task names.
241 The full name consists of the name of the parent task and each subtask separated by periods.
244 - The full name of top-level task "top" is simply "top".
245 - The full name of subtask "sub" of top-level task "top" is "top.sub".
246 - The full name of subtask "sub2" of subtask "sub" of top-level task "top" is "top.sub.sub2".
248 return self._fullName
◆ getName()
def lsst.pipe.base.task.Task.getName |
( |
|
self | ) |
|
|
inherited |
Get the name of the task.
Returns
-------
taskName : `str`
Name of the task.
See also
--------
getFullName
Definition at line 250 of file task.py.
251 """Get the name of the task.
◆ getSchemaCatalogs()
def lsst.pipe.base.task.Task.getSchemaCatalogs |
( |
|
self | ) |
|
|
inherited |
Get the schemas generated by this task.
Returns
-------
schemaCatalogs : `dict`
Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
`lsst.afw.table` Catalog type) for this task.
Notes
-----
.. warning::
Subclasses that use schemas must override this method. The default implementation returns
an empty dict.
This method may be called at any time after the Task is constructed, which means that all task
schemas should be computed at construction time, *not* when data is actually processed. This
reflects the philosophy that the schema should not depend on the data.
Returning catalogs rather than just schemas allows us to save e.g. slots for SourceCatalog as well.
See also
--------
Task.getAllSchemaCatalogs
Definition at line 159 of file task.py.
159 def getSchemaCatalogs(self):
160 """Get the schemas generated by this task.
164 schemaCatalogs : `dict`
165 Keys are butler dataset type, values are an empty catalog (an instance of the appropriate
166 `lsst.afw.table` Catalog type) for this task.
173 Subclasses that use schemas must override this method. The default implementation returns
176 This method may be called at any time after the Task is constructed, which means that all task
177 schemas should be computed at construction time, *not* when data is actually processed. This
178 reflects the philosophy that the schema should not depend on the data.
180 Returning catalogs rather than just schemas allows us to save e.g. slots for SourceCatalog as well.
184 Task.getAllSchemaCatalogs
◆ getTaskDict()
def lsst.pipe.base.task.Task.getTaskDict |
( |
|
self | ) |
|
|
inherited |
Get a dictionary of all tasks as a shallow copy.
Returns
-------
taskDict : `dict`
Dictionary containing full task name: task object for the top-level task and all subtasks,
sub-subtasks, etc..
Definition at line 264 of file task.py.
265 """Get a dictionary of all tasks as a shallow copy.
270 Dictionary containing full task name: task object for the top-level task and all subtasks,
273 return self._taskDict.copy()
◆ logOperation()
def lsst.ctrl.pool.parallel.BatchCmdLineTask.logOperation |
( |
|
self, |
|
|
|
operation, |
|
|
|
catch = False , |
|
|
|
trace = True |
|
) |
| |
|
inherited |
Provide a context manager for logging an operation.
- Parameters
-
operation | description of operation (string) |
catch | Catch all exceptions? |
trace | Log a traceback of caught exception? |
Note that if 'catch' is True, all exceptions are swallowed, but there may be other side-effects such as undefined variables.
Definition at line 502 of file parallel.py.
502 def logOperation(self, operation, catch=False, trace=True):
503 """!Provide a context manager for logging an operation
505 @param operation: description of operation (string)
506 @param catch: Catch all exceptions?
507 @param trace: Log a traceback of caught exception?
509 Note that if 'catch' is True, all exceptions are swallowed, but there may
510 be other side-effects such as undefined variables.
512 self.log.
info(
"%s: Start %s" % (NODE, operation))
517 cls, e, _ = sys.exc_info()
518 self.log.
warn(
"%s: Caught %s while %s: %s" % (NODE, cls.__name__, operation, e))
520 self.log.
info(
"%s: Traceback:\n%s" % (NODE, traceback.format_exc()))
524 self.log.
info(
"%s: Finished %s" % (NODE, operation))
◆ makeField()
def lsst.pipe.base.task.Task.makeField |
( |
|
cls, |
|
|
|
doc |
|
) |
| |
|
inherited |
Make a `lsst.pex.config.ConfigurableField` for this task.
Parameters
----------
doc : `str`
Help text for the field.
Returns
-------
configurableField : `lsst.pex.config.ConfigurableField`
A `~ConfigurableField` for this task.
Examples
--------
Provides a convenient way to specify this task is a subtask of another task.
Here is an example of use::
class OtherTaskConfig(lsst.pex.config.Config):
aSubtask = ATaskClass.makeField("a brief description of what this task does")
Definition at line 329 of file task.py.
329 def makeField(cls, doc):
330 """Make a `lsst.pex.config.ConfigurableField` for this task.
335 Help text for the field.
339 configurableField : `lsst.pex.config.ConfigurableField`
340 A `~ConfigurableField` for this task.
344 Provides a convenient way to specify this task is a subtask of another task.
346 Here is an example of use::
348 class OtherTaskConfig(lsst.pex.config.Config):
349 aSubtask = ATaskClass.makeField("a brief description of what this task does")
351 return ConfigurableField(doc=doc, target=cls)
◆ makeSubtask()
def lsst.pipe.base.task.Task.makeSubtask |
( |
|
self, |
|
|
|
name, |
|
|
** |
keyArgs |
|
) |
| |
|
inherited |
Create a subtask as a new instance as the ``name`` attribute of this task.
Parameters
----------
name : `str`
Brief name of the subtask.
keyArgs
Extra keyword arguments used to construct the task. The following arguments are automatically
provided and cannot be overridden:
- "config".
- "parentTask".
Notes
-----
The subtask must be defined by ``Task.config.name``, an instance of pex_config ConfigurableField
or RegistryField.
Definition at line 275 of file task.py.
275 def makeSubtask(self, name, **keyArgs):
276 """Create a subtask as a new instance as the ``name`` attribute of this task.
281 Brief name of the subtask.
283 Extra keyword arguments used to construct the task. The following arguments are automatically
284 provided and cannot be overridden:
291 The subtask must be defined by ``Task.config.name``, an instance of pex_config ConfigurableField
294 taskField = getattr(self.config, name,
None)
295 if taskField
is None:
296 raise KeyError(f
"{self.getFullName()}'s config does not have field {name!r}")
297 subtask = taskField.apply(name=name, parentTask=self, **keyArgs)
298 setattr(self, name, subtask)
◆ parseAndRun() [1/2]
def lsst.ctrl.pool.parallel.BatchPoolTask.parseAndRun |
( |
|
cls, |
|
|
* |
args, |
|
|
** |
kwargs |
|
) |
| |
Run with a MPI process pool
Definition at line 534 of file parallel.py.
534 def parseAndRun(cls, *args, **kwargs):
535 """Run with a MPI process pool"""
537 super(BatchPoolTask, cls).parseAndRun(*args, **kwargs)
◆ parseAndRun() [2/2]
def lsst.pipe.base.cmdLineTask.CmdLineTask.parseAndRun |
( |
|
cls, |
|
|
|
args = None , |
|
|
|
config = None , |
|
|
|
log = None , |
|
|
|
doReturnResults = False |
|
) |
| |
|
inherited |
Parse an argument list and run the command.
Parameters
----------
args : `list`, optional
List of command-line arguments; if `None` use `sys.argv`.
config : `lsst.pex.config.Config`-type, optional
Config for task. If `None` use `Task.ConfigClass`.
log : `lsst.log.Log`-type, optional
Log. If `None` use the default log.
doReturnResults : `bool`, optional
If `True`, return the results of this task. Default is `False`. This is only intended for
unit tests and similar use. It can easily exhaust memory (if the task returns enough data and you
call it enough times) and it will fail when using multiprocessing if the returned data cannot be
pickled.
Returns
-------
struct : `lsst.pipe.base.Struct`
Fields are:
``argumentParser``
the argument parser (`lsst.pipe.base.ArgumentParser`).
``parsedCmd``
the parsed command returned by the argument parser's
`~lsst.pipe.base.ArgumentParser.parse_args` method
(`argparse.Namespace`).
``taskRunner``
the task runner used to run the task (an instance of `Task.RunnerClass`).
``resultList``
results returned by the task runner's ``run`` method, one entry
per invocation (`list`). This will typically be a list of
`Struct`, each containing at least an ``exitStatus`` integer
(0 or 1); see `Task.RunnerClass` (`TaskRunner` by default) for
more details.
Notes
-----
Calling this method with no arguments specified is the standard way to run a command-line task
from the command-line. For an example see ``pipe_tasks`` ``bin/makeSkyMap.py`` or almost any other
file in that directory.
If one or more of the dataIds fails then this routine will exit (with a status giving the
number of failed dataIds) rather than returning this struct; this behaviour can be
overridden by specifying the ``--noExit`` command-line option.
Definition at line 549 of file cmdLineTask.py.
549 def parseAndRun(cls, args=None, config=None, log=None, doReturnResults=False):
550 """Parse an argument list and run the command.
554 args : `list`, optional
555 List of command-line arguments; if `None` use `sys.argv`.
556 config : `lsst.pex.config.Config`-type, optional
557 Config for task. If `None` use `Task.ConfigClass`.
558 log : `lsst.log.Log`-type, optional
559 Log. If `None` use the default log.
560 doReturnResults : `bool`, optional
561 If `True`, return the results of this task. Default is `False`. This is only intended for
562 unit tests and similar use. It can easily exhaust memory (if the task returns enough data and you
563 call it enough times) and it will fail when using multiprocessing if the returned data cannot be
568 struct : `lsst.pipe.base.Struct`
572 the argument parser (`lsst.pipe.base.ArgumentParser`).
574 the parsed command returned by the argument parser's
575 `~lsst.pipe.base.ArgumentParser.parse_args` method
576 (`argparse.Namespace`).
578 the task runner used to run the task (an instance of `Task.RunnerClass`).
580 results returned by the task runner's ``run`` method, one entry
581 per invocation (`list`). This will typically be a list of
582 `Struct`, each containing at least an ``exitStatus`` integer
583 (0 or 1); see `Task.RunnerClass` (`TaskRunner` by default) for
588 Calling this method with no arguments specified is the standard way to run a command-line task
589 from the command-line. For an example see ``pipe_tasks`` ``bin/makeSkyMap.py`` or almost any other
590 file in that directory.
592 If one or more of the dataIds fails then this routine will exit (with a status giving the
593 number of failed dataIds) rather than returning this struct; this behaviour can be
594 overridden by specifying the ``--noExit`` command-line option.
597 commandAsStr =
" ".join(sys.argv)
602 argumentParser = cls._makeArgumentParser()
604 config = cls.ConfigClass()
605 parsedCmd = argumentParser.parse_args(config=config, args=args, log=log, override=cls.applyOverrides)
607 parsedCmd.log.info(
"Running: %s", commandAsStr)
609 taskRunner = cls.RunnerClass(TaskClass=cls, parsedCmd=parsedCmd, doReturnResults=doReturnResults)
610 resultList = taskRunner.run(parsedCmd)
613 nFailed = sum(((res.exitStatus != 0)
for res
in resultList))
614 except (TypeError, AttributeError)
as e:
616 parsedCmd.log.warn(
"Unable to retrieve exit status (%s); assuming success", e)
621 parsedCmd.log.error(
"%d dataRefs failed; not exiting as --noExit was set", nFailed)
626 argumentParser=argumentParser,
628 taskRunner=taskRunner,
629 resultList=resultList,
◆ parseAndSubmit()
def lsst.ctrl.pool.parallel.BatchCmdLineTask.parseAndSubmit |
( |
|
cls, |
|
|
|
args = None , |
|
|
** |
kwargs |
|
) |
| |
|
inherited |
Definition at line 435 of file parallel.py.
435 def parseAndSubmit(cls, args=None, **kwargs):
436 taskParser = cls._makeArgumentParser(doBatch=
True, add_help=
False)
437 batchParser = BatchArgumentParser(parent=taskParser)
438 batchArgs = batchParser.parse_args(config=cls.ConfigClass(), args=args, override=cls.applyOverrides,
441 if not cls.RunnerClass(cls, batchArgs.parent).precall(batchArgs.parent):
442 taskParser.error(
"Error in task preparation")
446 if batchArgs.batch
is None:
447 sys.argv = [sys.argv[0]] + batchArgs.leftover
449 return cls.parseAndRun()
451 if batchArgs.walltime > 0:
452 walltime = batchArgs.walltime
454 numCores = batchArgs.cores
if batchArgs.cores > 0
else batchArgs.nodes*batchArgs.procs
455 walltime = cls.batchWallTime(batchArgs.time, batchArgs.parent, numCores)
457 command = cls.batchCommand(batchArgs)
458 batchArgs.batch.run(command, walltime=walltime)
◆ timer()
def lsst.pipe.base.task.Task.timer |
( |
|
self, |
|
|
|
name, |
|
|
|
logLevel = Log.DEBUG |
|
) |
| |
|
inherited |
Context manager to log performance data for an arbitrary block of code.
Parameters
----------
name : `str`
Name of code being timed; data will be logged using item name: ``Start`` and ``End``.
logLevel
A `lsst.log` level constant.
Examples
--------
Creating a timer context::
with self.timer("someCodeToTime"):
pass # code to time
See also
--------
timer.logInfo
Definition at line 301 of file task.py.
301 def timer(self, name, logLevel=Log.DEBUG):
302 """Context manager to log performance data for an arbitrary block of code.
307 Name of code being timed; data will be logged using item name: ``Start`` and ``End``.
309 A `lsst.log` level constant.
313 Creating a timer context::
315 with self.timer("someCodeToTime"):
322 logInfo(obj=self, prefix=name +
"Start", logLevel=logLevel)
326 logInfo(obj=self, prefix=name +
"End", logLevel=logLevel)
◆ writeConfig()
def lsst.pipe.base.cmdLineTask.CmdLineTask.writeConfig |
( |
|
self, |
|
|
|
butler, |
|
|
|
clobber = False , |
|
|
|
doBackup = True |
|
) |
| |
|
inherited |
Write the configuration used for processing the data, or check that an existing
one is equal to the new one if present.
Parameters
----------
butler : `lsst.daf.persistence.Butler`
Data butler used to write the config. The config is written to dataset type
`CmdLineTask._getConfigName`.
clobber : `bool`, optional
A boolean flag that controls what happens if a config already has been saved:
- `True`: overwrite or rename the existing config, depending on ``doBackup``.
- `False`: raise `TaskError` if this config does not match the existing config.
doBackup : bool, optional
Set to `True` to backup the config files if clobbering.
Reimplemented in lsst.pipe.tasks.postprocess.ConsolidateSourceTableTask.
Definition at line 656 of file cmdLineTask.py.
656 def writeConfig(self, butler, clobber=False, doBackup=True):
657 """Write the configuration used for processing the data, or check that an existing
658 one is equal to the new one if present.
662 butler : `lsst.daf.persistence.Butler`
663 Data butler used to write the config. The config is written to dataset type
664 `CmdLineTask._getConfigName`.
665 clobber : `bool`, optional
666 A boolean flag that controls what happens if a config already has been saved:
667 - `True`: overwrite or rename the existing config, depending on ``doBackup``.
668 - `False`: raise `TaskError` if this config does not match the existing config.
669 doBackup : bool, optional
670 Set to `True` to backup the config files if clobbering.
672 configName = self._getConfigName()
673 if configName
is None:
676 butler.put(self.config, configName, doBackup=doBackup)
677 elif butler.datasetExists(configName, write=
True):
680 oldConfig = butler.get(configName, immediate=
True)
681 except Exception
as exc:
682 raise type(exc)(f
"Unable to read stored config file {configName} (exc); "
683 "consider using --clobber-config")
685 def logConfigMismatch(msg):
686 self.log.
fatal(
"Comparing configuration: %s", msg)
688 if not self.config.compare(oldConfig, shortcut=
False, output=logConfigMismatch):
690 f
"Config does not match existing task config {configName!r} on disk; "
691 "tasks configurations must be consistent within the same output repo "
692 "(override with --clobber-config)")
694 butler.put(self.config, configName)
◆ writeMetadata()
def lsst.pipe.base.cmdLineTask.CmdLineTask.writeMetadata |
( |
|
self, |
|
|
|
dataRef |
|
) |
| |
|
inherited |
Write the metadata produced from processing the data.
Parameters
----------
dataRef
Butler data reference used to write the metadata.
The metadata is written to dataset type `CmdLineTask._getMetadataName`.
Reimplemented in lsst.pipe.tasks.postprocess.WriteObjectTableTask, lsst.pipe.tasks.postprocess.ConsolidateSourceTableTask, lsst.pipe.tasks.postprocess.TransformSourceTableTask, lsst.pipe.tasks.postprocess.ConsolidateObjectTableTask, lsst.pipe.tasks.postprocess.TransformCatalogBaseTask, lsst.pipe.drivers.multiBandDriver.MultiBandDriverTask, lsst.pipe.drivers.coaddDriver.CoaddDriverTask, and lsst.pipe.tasks.postprocess.WriteSourceTableTask.
Definition at line 731 of file cmdLineTask.py.
732 """Write the metadata produced from processing the data.
737 Butler data reference used to write the metadata.
738 The metadata is written to dataset type `CmdLineTask._getMetadataName`.
741 metadataName = self._getMetadataName()
742 if metadataName
is not None:
743 dataRef.put(self.getFullMetadata(), metadataName)
744 except Exception
as e:
745 self.log.
warn(
"Could not persist metadata for dataId=%s: %s", dataRef.dataId, e)
◆ writePackageVersions()
def lsst.pipe.base.cmdLineTask.CmdLineTask.writePackageVersions |
( |
|
self, |
|
|
|
butler, |
|
|
|
clobber = False , |
|
|
|
doBackup = True , |
|
|
|
dataset = "packages" |
|
) |
| |
|
inherited |
Compare and write package versions.
Parameters
----------
butler : `lsst.daf.persistence.Butler`
Data butler used to read/write the package versions.
clobber : `bool`, optional
A boolean flag that controls what happens if versions already have been saved:
- `True`: overwrite or rename the existing version info, depending on ``doBackup``.
- `False`: raise `TaskError` if this version info does not match the existing.
doBackup : `bool`, optional
If `True` and clobbering, old package version files are backed up.
dataset : `str`, optional
Name of dataset to read/write.
Raises
------
TaskError
Raised if there is a version mismatch with current and persisted lists of package versions.
Notes
-----
Note that this operation is subject to a race condition.
Definition at line 747 of file cmdLineTask.py.
747 def writePackageVersions(self, butler, clobber=False, doBackup=True, dataset="packages"):
748 """Compare and write package versions.
752 butler : `lsst.daf.persistence.Butler`
753 Data butler used to read/write the package versions.
754 clobber : `bool`, optional
755 A boolean flag that controls what happens if versions already have been saved:
756 - `True`: overwrite or rename the existing version info, depending on ``doBackup``.
757 - `False`: raise `TaskError` if this version info does not match the existing.
758 doBackup : `bool`, optional
759 If `True` and clobbering, old package version files are backed up.
760 dataset : `str`, optional
761 Name of dataset to read/write.
766 Raised if there is a version mismatch with current and persisted lists of package versions.
770 Note that this operation is subject to a race condition.
772 packages = Packages.fromSystem()
775 return butler.put(packages, dataset, doBackup=doBackup)
776 if not butler.datasetExists(dataset, write=
True):
777 return butler.put(packages, dataset)
780 old = butler.get(dataset, immediate=
True)
781 except Exception
as exc:
782 raise type(exc)(f
"Unable to read stored version dataset {dataset} ({exc}); "
783 "consider using --clobber-versions or --no-versions")
787 diff = packages.difference(old)
789 versions_str =
"; ".join(f
"{pkg}: {diff[pkg][1]} vs {diff[pkg][0]}" for pkg
in diff)
791 f
"Version mismatch ({versions_str}); consider using --clobber-versions or --no-versions")
793 extra = packages.extra(old)
796 butler.put(old, dataset, doBackup=doBackup)
◆ writeSchemas()
def lsst.pipe.base.cmdLineTask.CmdLineTask.writeSchemas |
( |
|
self, |
|
|
|
butler, |
|
|
|
clobber = False , |
|
|
|
doBackup = True |
|
) |
| |
|
inherited |
Write the schemas returned by `lsst.pipe.base.Task.getAllSchemaCatalogs`.
Parameters
----------
butler : `lsst.daf.persistence.Butler`
Data butler used to write the schema. Each schema is written to the dataset type specified as the
key in the dict returned by `~lsst.pipe.base.Task.getAllSchemaCatalogs`.
clobber : `bool`, optional
A boolean flag that controls what happens if a schema already has been saved:
- `True`: overwrite or rename the existing schema, depending on ``doBackup``.
- `False`: raise `TaskError` if this schema does not match the existing schema.
doBackup : `bool`, optional
Set to `True` to backup the schema files if clobbering.
Notes
-----
If ``clobber`` is `False` and an existing schema does not match a current schema,
then some schemas may have been saved successfully and others may not, and there is no easy way to
tell which is which.
Definition at line 696 of file cmdLineTask.py.
696 def writeSchemas(self, butler, clobber=False, doBackup=True):
697 """Write the schemas returned by `lsst.pipe.base.Task.getAllSchemaCatalogs`.
701 butler : `lsst.daf.persistence.Butler`
702 Data butler used to write the schema. Each schema is written to the dataset type specified as the
703 key in the dict returned by `~lsst.pipe.base.Task.getAllSchemaCatalogs`.
704 clobber : `bool`, optional
705 A boolean flag that controls what happens if a schema already has been saved:
706 - `True`: overwrite or rename the existing schema, depending on ``doBackup``.
707 - `False`: raise `TaskError` if this schema does not match the existing schema.
708 doBackup : `bool`, optional
709 Set to `True` to backup the schema files if clobbering.
713 If ``clobber`` is `False` and an existing schema does not match a current schema,
714 then some schemas may have been saved successfully and others may not, and there is no easy way to
717 for dataset, catalog
in self.getAllSchemaCatalogs().
items():
718 schemaDataset = dataset +
"_schema"
720 butler.put(catalog, schemaDataset, doBackup=doBackup)
721 elif butler.datasetExists(schemaDataset, write=
True):
722 oldSchema = butler.get(schemaDataset, immediate=
True).getSchema()
723 if not oldSchema.compare(catalog.getSchema(), afwTable.Schema.IDENTICAL):
725 f
"New schema does not match schema {dataset!r} on disk; "
726 "schemas must be consistent within the same output repo "
727 "(override with --clobber-config)")
729 butler.put(catalog, schemaDataset)
◆ canMultiprocess
bool lsst.pipe.base.cmdLineTask.CmdLineTask.canMultiprocess = True |
|
staticinherited |
◆ config
lsst.pipe.base.task.Task.config |
|
inherited |
◆ log
lsst.pipe.base.task.Task.log |
|
inherited |
◆ metadata
lsst.pipe.base.task.Task.metadata |
|
inherited |
◆ RunnerClass
lsst.pipe.base.cmdLineTask.CmdLineTask.RunnerClass = TaskRunner |
|
staticinherited |
The documentation for this class was generated from the following file:
- /j/snowflake/release/lsstsw/stack/1a1d771/Linux64/ctrl_pool/20.0.0/python/lsst/ctrl/pool/parallel.py
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)