__all__ = ["CmdLineTask", "TaskRunner", "ButlerInitializedTaskRunner", "LegacyTaskRunner"]
import sys
import traceback
import functools
import contextlib

import lsst.afw.table as afwTable
from lsst.base import disableImplicitThreading, Packages
from lsst.log import Log

from .task import Task, TaskError
from .struct import Struct
from .argumentParser import ArgumentParser
def _runPool(pool, timeout, function, iterable):
    """Wrapper around ``pool.map_async``, to handle a timeout.

    This is required so as to trigger an immediate interrupt on
    KeyboardInterrupt (Ctrl-C); see
    http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
    """
    return pool.map_async(function, iterable).get(timeout)
@contextlib.contextmanager
def profile(filename, log=None):
    """Context manager for profiling with cProfile.

    Parameters
    ----------
    filename : `str`
        Filename to which to write profile (profiling disabled if `None` or
        empty).
    log : `lsst.log.Log`, optional
        Log object for logging the profile operations.

    If profiling is enabled, the context manager returns the cProfile.Profile
    object (otherwise it returns None), which allows additional control over
    profiling. You can obtain this using the "as" clause, e.g.:

    .. code-block:: python

        with profile(filename) as prof:
            ...  # code to profile

    The output cumulative profile can be printed with a command-line like:

    .. code-block:: bash

        python -c 'import pstats; \
            pstats.Stats("<filename>").sort_stats("cumtime").print_stats(30)'
    """
    if not filename:
        # Nothing to do
        yield
        return
    from cProfile import Profile

    profile = Profile()
    if log is not None:
        log.info("Enabling cProfile profiling")
    profile.enable()
    yield profile
    profile.disable()
    profile.dump_stats(filename)
    if log is not None:
        log.info("cProfile stats written to %s", filename)
95 """Run a command-line task, using `multiprocessing` if requested.
99 TaskClass : `lsst.pipe.base.Task` subclass
100 The class of the task to run.
101 parsedCmd : `argparse.Namespace`
102 The parsed command-line arguments, as returned by the task's argument
103 parser's `~lsst.pipe.base.ArgumentParser.parse_args` method.
107 Do not store ``parsedCmd``, as this instance is pickled (if
108 multiprocessing) and parsedCmd may contain non-picklable elements.
109 It certainly contains more data than we need to send to each
110 instance of the task.
111 doReturnResults : `bool`, optional
112 Should run return the collected result from each invocation of the
113 task? This is only intended for unit tests and similar use. It can
114 easily exhaust memory (if the task returns enough data and you call it
115 enough times) and it will fail when using multiprocessing if the
116 returned data cannot be pickled.
118 Note that even if ``doReturnResults`` is False a struct with a single
119 member "exitStatus" is returned, with value 0 or 1 to be returned to
125 Raised if multiprocessing is requested (and the task supports it) but
126 the multiprocessing library cannot be imported.

    Notes
    -----
    Each command-line task (subclass of `lsst.pipe.base.CmdLineTask`) has a
    task runner. By default it is this class, but some tasks require a
    subclass. See the manual :ref:`creating-a-command-line-task` for more
    information. See `CmdLineTask.parseAndRun` to see how a task runner is
    used.

    You may use this task runner for your command-line task if your task has a
    ``runDataRef`` method that takes exactly one argument: a butler data
    reference. Otherwise you must provide a task-specific subclass of
    this runner for your task's ``RunnerClass`` that overrides
    `TaskRunner.getTargetList` and possibly
    `TaskRunner.__call__`. See `TaskRunner.getTargetList` for details.

    This design matches the common pattern for command-line tasks: the
    ``runDataRef`` method takes a single data reference, of some suitable name.
    Additional arguments are rare, and if present, require a subclass of
    `TaskRunner` that passes these additional arguments by name.

    Instances of this class must be picklable in order to be compatible with
    multiprocessing. If multiprocessing is requested
    (``parsedCmd.numProcesses > 1``) then `TaskRunner.run` calls
    `prepareForMultiProcessing` to jettison optional non-picklable elements.
    If your task runner is not compatible with multiprocessing then indicate
    this in your task by setting class variable ``canMultiprocess=False``.
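
    For example, a task whose runner cannot be pickled might declare (a
    hypothetical sketch; ``ExampleTask`` and ``ExampleConfig`` are made-up
    names):

    .. code-block:: python

        class ExampleTask(CmdLineTask):
            ConfigClass = ExampleConfig
            _DefaultName = "example"
            canMultiprocess = False  # always process targets serially

            def runDataRef(self, dataRef):
                ...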

    Due to a `python bug`__, handling a `KeyboardInterrupt` properly `requires
    specifying a timeout`__. This timeout (in seconds) can be specified as the
    ``timeout`` element in the output from `~lsst.pipe.base.ArgumentParser`
    (the ``parsedCmd``), if available, otherwise we use `TaskRunner.TIMEOUT`.

    By default, we disable "implicit" threading -- i.e., as provided by
    underlying numerical libraries such as MKL or BLAS. This is designed to
    avoid thread contention both when a single command line task spawns
    multiple processes and when multiple users are running on a shared system.
    Users can override this behaviour by setting the
    ``LSST_ALLOW_IMPLICIT_THREADS`` environment variable.

    .. __: http://bugs.python.org/issue8296
    .. __: http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
    """

    TIMEOUT = 3600 * 24 * 30
    """Default timeout (seconds) for multiprocessing."""

    def __init__(self, TaskClass, parsedCmd, doReturnResults=False):
        self.TaskClass = TaskClass
        self.doReturnResults = bool(doReturnResults)
        self.config = parsedCmd.config
        self.log = parsedCmd.log
        self.doRaise = bool(parsedCmd.doraise)
        self.clobberConfig = bool(parsedCmd.clobberConfig)
        self.doBackup = not bool(parsedCmd.noBackupConfig)
        self.numProcesses = int(getattr(parsedCmd, 'processes', 1))

        self.timeout = getattr(parsedCmd, 'timeout', None)
        if self.timeout is None or self.timeout <= 0:
            self.timeout = self.TIMEOUT

        if self.numProcesses > 1:
            if not TaskClass.canMultiprocess:
                self.log.warn("This task does not support multiprocessing; using one process")
                self.numProcesses = 1
194 """Prepare this instance for multiprocessing
196 Optional non-picklable elements are removed.
198 This is only called if the task is run under multiprocessing.
203 """Run the task on all targets.
207 parsedCmd : `argparse.Namespace`
208 Parsed command `argparse.Namespace`.
213 A list of results returned by `TaskRunner.__call__`, or an empty
214 list if `TaskRunner.__call__` is not called (e.g. if
215 `TaskRunner.precall` returns `False`). See `TaskRunner.__call__`
220 The task is run under multiprocessing if `TaskRunner.numProcesses`
221 is more than 1; otherwise processing is serial.
226 import multiprocessing
228 pool = multiprocessing.Pool(processes=self.
numProcessesnumProcesses, maxtasksperchild=1)
229 mapFunc = functools.partial(_runPool, pool, self.
timeouttimeout)
234 if self.
precallprecall(parsedCmd):
235 profileName = parsedCmd.profile
if hasattr(parsedCmd,
"profile")
else None
238 if len(targetList) > 0:
239 with profile(profileName, log):
241 resultList =
list(mapFunc(self, targetList))
243 log.warn(
"Not running the task because there is no data to process; "
244 "you may preview data using \"--show data\"")
254 """Get a list of (dataRef, kwargs) for `TaskRunner.__call__`.
258 parsedCmd : `argparse.Namespace`
259 The parsed command object returned by
260 `lsst.pipe.base.argumentParser.ArgumentParser.parse_args`.
262 Any additional keyword arguments. In the default `TaskRunner` this
263 is an empty dict, but having it simplifies overriding `TaskRunner`
264 for tasks whose runDataRef method takes additional arguments
265 (see case (1) below).
269 The default implementation of `TaskRunner.getTargetList` and
270 `TaskRunner.__call__` works for any command-line task whose
271 ``runDataRef`` method takes exactly one argument: a data reference.
272 Otherwise you must provide a variant of TaskRunner that overrides
273 `TaskRunner.getTargetList` and possibly `TaskRunner.__call__`.
278 If your command-line task has a ``runDataRef`` method that takes one
279 data reference followed by additional arguments, then you need only
280 override `TaskRunner.getTargetList` to return the additional
281 arguments as an argument dict. To make this easier, your overridden
282 version of `~TaskRunner.getTargetList` may call
283 `TaskRunner.getTargetList` with the extra arguments as keyword
284 arguments. For example, the following adds an argument dict containing
285 a single key: "calExpList", whose value is the list of data IDs for
286 the calexp ID argument:
288 .. code-block:: python
290 def getTargetList(parsedCmd):
291 return TaskRunner.getTargetList(
293 calExpList=parsedCmd.calexp.idList
296 It is equivalent to this slightly longer version:
298 .. code-block:: python
301 def getTargetList(parsedCmd):
302 argDict = dict(calExpList=parsedCmd.calexp.idList)
303 return [(dataId, argDict) for dataId in parsedCmd.id.idList]
307 If your task does not meet condition (1) then you must override both
308 TaskRunner.getTargetList and `TaskRunner.__call__`. You may do this
309 however you see fit, so long as `TaskRunner.getTargetList`
310 returns a list, each of whose elements is sent to
311 `TaskRunner.__call__`, which runs your task.
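
        For example, a hypothetical sketch of case (2), in which each target
        carries extra state alongside the data reference (``ExtraArgsTaskRunner``
        and the task's ``idList`` argument are made up for illustration):

        .. code-block:: python

            class ExtraArgsTaskRunner(TaskRunner):
                @staticmethod
                def getTargetList(parsedCmd, **kwargs):
                    # each element of this list is passed to __call__
                    return [(ref, parsedCmd.id.idList, kwargs)
                            for ref in parsedCmd.id.refList]

                def __call__(self, args):
                    # error handling and doReturnResults support omitted
                    dataRef, idList, kwargs = args
                    task = self.makeTask(args=args)
                    task.runDataRef(dataRef, idList=idList, **kwargs)
                    return Struct(exitStatus=0)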
        """
        return [(ref, kwargs) for ref in parsedCmd.id.refList]
316 """Create a Task instance.
321 Parsed command-line options (used for extra task args by some task
324 Args tuple passed to `TaskRunner.__call__` (used for extra task
325 arguments by some task runners).
329 ``makeTask`` can be called with either the ``parsedCmd`` argument or
330 ``args`` argument set to None, but it must construct identical Task
331 instances in either case.
333 Subclasses may ignore this method entirely if they reimplement both
334 `TaskRunner.precall` and `TaskRunner.__call__`.

    def _precallImpl(self, task, parsedCmd):
        """The main work of `precall`.

        We write package versions, schemas and configs, or compare these to
        existing files on disk if present.
        """
        if not parsedCmd.noVersions:
            task.writePackageVersions(parsedCmd.butler, clobber=parsedCmd.clobberVersions)
        task.writeConfig(parsedCmd.butler, clobber=self.clobberConfig, doBackup=self.doBackup)
        task.writeSchemas(parsedCmd.butler, clobber=self.clobberConfig, doBackup=self.doBackup)
350 """Hook for code that should run exactly once, before multiprocessing.
354 Must return True if `TaskRunner.__call__` should subsequently be
359 Implementations must take care to ensure that no unpicklable
360 attributes are added to the TaskRunner itself, for compatibility
361 with multiprocessing.
363 The default implementation writes package versions, schemas and
364 configs, or compares them to existing files on disk if present.
366 task = self.
makeTaskmakeTask(parsedCmd=parsedCmd)
373 except Exception
as e:
374 task.log.fatal(
"Failed in task initialization: %s", e)
375 if not isinstance(e, TaskError):
376 traceback.print_exc(file=sys.stderr)
381 """Run the Task on a single target.
386 Arguments for Task.runDataRef()
390 struct : `lsst.pipe.base.Struct`
391 Contains these fields if ``doReturnResults`` is `True`:
393 - ``dataRef``: the provided data reference.
394 - ``metadata``: task metadata after execution of run.
395 - ``result``: result returned by task run, or `None` if the task
397 - ``exitStatus``: 0 if the task completed successfully, 1
400 If ``doReturnResults`` is `False` the struct contains:
402 - ``exitStatus``: 0 if the task completed successfully, 1
407 This default implementation assumes that the ``args`` is a tuple
408 containing a data reference and a dict of keyword arguments.
412 If you override this method and wish to return something when
413 ``doReturnResults`` is `False`, then it must be picklable to
414 support multiprocessing and it should be small enough that pickling
415 and unpickling do not add excessive overhead.
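
        For example, with the default `TaskRunner.getTargetList` each target is
        a ``(dataRef, kwargsDict)`` pair, so a single target can be processed
        like this (a sketch; ``taskRunner`` and ``dataRef`` are assumed to
        already exist):

        .. code-block:: python

            result = taskRunner((dataRef, {}))  # invokes TaskRunner.__call__
            if result.exitStatus != 0:
                print("processing failed for", dataRef.dataId)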
        """
        dataRef, kwargs = args
        if self.log is None:
            self.log = Log.getDefaultLogger()
        if hasattr(dataRef, "dataId"):
            self.log.MDC("LABEL", str(dataRef.dataId))
        elif isinstance(dataRef, (list, tuple)):
            self.log.MDC("LABEL", str([ref.dataId for ref in dataRef if hasattr(ref, "dataId")]))
        task = self.makeTask(args=args)
        result = None  # in case the task fails
        exitStatus = 0  # exit status for the shell
        if self.doRaise:
            result = self.runTask(task, dataRef, kwargs)
        else:
            try:
                result = self.runTask(task, dataRef, kwargs)
            except Exception as e:
                # The shell exit value is the number of dataRefs returning
                # non-zero, so the actual value used here is lost.
                exitStatus = 1

                # don't use a try block as we need to preserve the original exception
                eName = type(e).__name__
                if hasattr(dataRef, "dataId"):
                    task.log.fatal("Failed on dataId=%s: %s: %s", dataRef.dataId, eName, e)
                elif isinstance(dataRef, (list, tuple)):
                    task.log.fatal("Failed on dataIds=[%s]: %s: %s",
                                   ", ".join(str(ref.dataId) for ref in dataRef), eName, e)
                else:
                    task.log.fatal("Failed on dataRef=%s: %s: %s", dataRef, eName, e)

                if not isinstance(e, TaskError):
                    traceback.print_exc(file=sys.stderr)

        # Ensure all errors have been logged before the butler might fail
        task.writeMetadata(dataRef)

        # remove MDC so it does not show up outside of the task context
        self.log.MDCRemove("LABEL")

        if self.doReturnResults:
            return Struct(
                exitStatus=exitStatus,
                dataRef=dataRef,
                metadata=task.metadata,
                result=result,
            )
        else:
            return Struct(
                exitStatus=exitStatus,
            )
474 """Make the actual call to `runDataRef` for this task.
478 task : `lsst.pipe.base.CmdLineTask` class
479 The class of the task to run.
481 Butler data reference that contains the data the task will process.
483 Any additional keyword arguments. See `TaskRunner.getTargetList`
488 The default implementation of `TaskRunner.runTask` works for any
489 command-line task which has a ``runDataRef`` method that takes a data
490 reference and an optional set of additional keyword arguments.
491 This method returns the results generated by the task's `runDataRef`
495 return task.runDataRef(dataRef, **kwargs)
499 r"""A `TaskRunner` for `CmdLineTask`\ s which calls the `Task`\ 's `run`
500 method on a `dataRef` rather than the `runDataRef` method.
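
    For example, a task whose scientific entry point is still named ``run``
    and takes a data reference can select this runner (a hypothetical sketch;
    ``LegacyExampleTask`` and ``ExampleConfig`` are made-up names):

    .. code-block:: python

        class LegacyExampleTask(CmdLineTask):
            ConfigClass = ExampleConfig
            _DefaultName = "legacyExample"
            RunnerClass = LegacyTaskRunner

            def run(self, dataRef):
                ...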
504 """Call `run` for this task instead of `runDataRef`. See
505 `TaskRunner.runTask` above for details.
        """
        return task.run(dataRef, **kwargs)
511 r"""A `TaskRunner` for `CmdLineTask`\ s that require a ``butler`` keyword
512 argument to be passed to their constructor.
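
    For example, a task whose constructor needs a butler to load a reference
    schema could select this runner (a hypothetical sketch; ``SchemaTask``,
    ``ExampleConfig`` and the ``"example_schema"`` dataset are made up):

    .. code-block:: python

        class SchemaTask(CmdLineTask):
            ConfigClass = ExampleConfig
            _DefaultName = "schemaExample"
            RunnerClass = ButlerInitializedTaskRunner

            def __init__(self, butler=None, **kwargs):
                super().__init__(**kwargs)
                self.refSchema = butler.get("example_schema", immediate=True)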
516 """A variant of the base version that passes a butler argument to the
521 parsedCmd : `argparse.Namespace`
522 Parsed command-line options, as returned by the
523 `~lsst.pipe.base.ArgumentParser`; if specified then args is
526 Other arguments; if ``parsedCmd`` is `None` then this must be
532 Raised if ``parsedCmd`` and ``args`` are both `None`.
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRef, kwargs = args
            butler = dataRef.butlerSubset.butler
        else:
            raise RuntimeError("parsedCmd or args must be specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)
545 """Base class for command-line tasks: tasks that may be executed from the
550 See :ref:`task-framework-overview` to learn what tasks are and
551 :ref:`creating-a-command-line-task` for more information about writing
554 Subclasses must specify the following class variables:
556 - ``ConfigClass``: configuration class for your task (a subclass of
557 `lsst.pex.config.Config`, or if your task needs no configuration, then
558 `lsst.pex.config.Config` itself).
559 - ``_DefaultName``: default name used for this task (a `str`).
561 Subclasses may also specify the following class variables:
563 - ``RunnerClass``: a task runner class. The default is ``TaskRunner``,
564 which works for any task with a runDataRef method that takes exactly one
565 argument: a data reference. If your task does not meet this requirement
566 then you must supply a variant of ``TaskRunner``; see ``TaskRunner``
567 for more information.
568 - ``canMultiprocess``: the default is `True`; set `False` if your task
569 does not support multiprocessing.
571 Subclasses must specify a method named ``runDataRef``:
573 - By default ``runDataRef`` accepts a single butler data reference, but
574 you can specify an alternate task runner (subclass of ``TaskRunner``) as
575 the value of class variable ``RunnerClass`` if your run method needs
577 - ``runDataRef`` is expected to return its data in a
578 `lsst.pipe.base.Struct`. This provides safety for evolution of the task
579 since new values may be added without harming existing code.
580 - The data returned by ``runDataRef`` must be picklable if your task is to
581 support multiprocessing.
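
    For example, a minimal command-line task might look like this (a
    hypothetical sketch; ``ExampleConfig`` and ``ExampleTask`` are made-up
    names):

    .. code-block:: python

        import lsst.pex.config as pexConfig
        from lsst.pipe.base import CmdLineTask, Struct

        class ExampleConfig(pexConfig.Config):
            doPrint = pexConfig.Field(dtype=bool, default=True,
                                      doc="log each data ID?")

        class ExampleTask(CmdLineTask):
            ConfigClass = ExampleConfig
            _DefaultName = "example"

            def runDataRef(self, dataRef):
                if self.config.doPrint:
                    self.log.info("Processing %s", dataRef.dataId)
                return Struct()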
    """

    RunnerClass = TaskRunner
    canMultiprocess = True
588 """A hook to allow a task to change the values of its config *after*
589 the camera-specific overrides are loaded but before any command-line
590 overrides are applied.
594 config : instance of task's ``ConfigClass``
599 This is necessary in some cases because the camera-specific overrides
600 may retarget subtasks, wiping out changes made in
601 ConfigClass.setDefaults. See LSST Trac ticket #2282 for more
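
        For example, a task could use this hook to re-assert a setting that a
        camera-specific override may have changed (a hypothetical sketch;
        ``doWriteOutput`` is a made-up config field):

        .. code-block:: python

            @classmethod
            def applyOverrides(cls, config):
                # re-assert a default after camera-specific overrides have run
                config.doWriteOutput = False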

        This is called by `CmdLineTask.parseAndRun`; other ways of
        constructing a config will not apply these overrides.
        """
        pass

    @classmethod
    def parseAndRun(cls, args=None, config=None, log=None, doReturnResults=False):
        """Parse an argument list and run the command.

        Parameters
        ----------
        args : `list`, optional
            List of command-line arguments; if `None` use `sys.argv`.
        config : `lsst.pex.config.Config`-type, optional
            Config for task. If `None` use `Task.ConfigClass`.
        log : `lsst.log.Log`-type, optional
            Log. If `None` use the default log.
        doReturnResults : `bool`, optional
            If `True`, return the results of this task. Default is `False`.
            This is only intended for unit tests and similar use. It can
            easily exhaust memory (if the task returns enough data and you
            call it enough times) and it will fail when using multiprocessing
            if the returned data cannot be pickled.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Fields are:

            ``argumentParser``
                the argument parser (`lsst.pipe.base.ArgumentParser`).
            ``parsedCmd``
                the parsed command returned by the argument parser's
                `~lsst.pipe.base.ArgumentParser.parse_args` method
                (`argparse.Namespace`).
            ``taskRunner``
                the task runner used to run the task (an instance of
                `Task.RunnerClass`).
            ``resultList``
                results returned by the task runner's ``run`` method, one entry
                per invocation (`list`). This will typically be a list of
                `Struct`, each containing at least an ``exitStatus`` integer
                (0 or 1); see `Task.RunnerClass` (`TaskRunner` by default) for
                more information.

        Notes
        -----
        Calling this method with no arguments specified is the standard way to
        run a command-line task from the command-line. For an example see
        ``pipe_tasks`` ``bin/makeSkyMap.py`` or almost any other file in that
        directory.

        If one or more of the dataIds fails then this routine will exit (with
        a status giving the number of failed dataIds) rather than returning
        this struct; this behaviour can be overridden by specifying the
        ``--noExit`` command-line option.
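
        For example, a minimal ``bin`` script typically looks like this (a
        sketch; ``ExampleTask`` and ``example_package`` are hypothetical):

        .. code-block:: python

            #!/usr/bin/env python
            from example_package import ExampleTask

            ExampleTask.parseAndRun()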
        """
        if args is None:
            args = sys.argv[1:]
        commandAsStr = " ".join(sys.argv)

        argumentParser = cls._makeArgumentParser()
        if config is None:
            config = cls.ConfigClass()
        parsedCmd = argumentParser.parse_args(config=config, args=args, log=log,
                                              override=cls.applyOverrides)
        # print this message after parsing the command so the log is fully configured
        parsedCmd.log.info("Running: %s", commandAsStr)

        taskRunner = cls.RunnerClass(TaskClass=cls, parsedCmd=parsedCmd,
                                     doReturnResults=doReturnResults)
        resultList = taskRunner.run(parsedCmd)

        try:
            nFailed = sum((res.exitStatus != 0) for res in resultList)
        except (TypeError, AttributeError) as e:
            # TypeError if resultList is None; AttributeError if an entry lacks exitStatus
            parsedCmd.log.warn("Unable to retrieve exit status (%s); assuming success", e)
            nFailed = 0

        if nFailed > 0:
            if parsedCmd.noExit:
                parsedCmd.log.error("%d dataRefs failed; not exiting as --noExit was set", nFailed)
            else:
                sys.exit(nFailed)

        return Struct(
            argumentParser=argumentParser,
            parsedCmd=parsedCmd,
            taskRunner=taskRunner,
            resultList=resultList,
        )

    @classmethod
    def _makeArgumentParser(cls):
        """Create and return an argument parser.

        Returns
        -------
        parser : `lsst.pipe.base.ArgumentParser`
            The argument parser for this task.

        Notes
        -----
        By default this returns an `~lsst.pipe.base.ArgumentParser` with one
        ID argument named `--id` of dataset type ``raw``.

        Your task subclass may need to override this method to change the
        dataset type or data ref level, or to add additional data ID arguments.
        If you add additional data ID arguments or your task's runDataRef
        method takes more than a single data reference then you will also have
        to provide a task-specific task runner (see TaskRunner for more
        information).
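
        For example, an override that processes coadds instead of raw frames
        might look like this (a sketch; the dataset type and help text are
        illustrative):

        .. code-block:: python

            @classmethod
            def _makeArgumentParser(cls):
                parser = ArgumentParser(name=cls._DefaultName)
                parser.add_id_argument(name="--id", datasetType="deepCoadd",
                                       help="data IDs, e.g. --id tract=12 patch=1,2 filter=g")
                return parser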
        """
        parser = ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument(name="--id", datasetType="raw",
                               help="data IDs, e.g. --id visit=12345 ccd=1,2^0,3")
        return parser
728 """Write the configuration used for processing the data, or check that
729 an existing one is equal to the new one if present.
733 butler : `lsst.daf.persistence.Butler`
734 Data butler used to write the config. The config is written to
735 dataset type `CmdLineTask._getConfigName`.
736 clobber : `bool`, optional
737 A boolean flag that controls what happens if a config already has
740 - `True`: overwrite or rename the existing config, depending on
742 - `False`: raise `TaskError` if this config does not match the
744 doBackup : `bool`, optional
745 Set to `True` to backup the config files if clobbering.
748 if configName
is None:
751 butler.put(self.
configconfig, configName, doBackup=doBackup)
752 elif butler.datasetExists(configName, write=
True):
755 oldConfig = butler.get(configName, immediate=
True)
756 except Exception
as exc:
757 raise type(exc)(f
"Unable to read stored config file {configName} (exc); "
758 "consider using --clobber-config")
760 def logConfigMismatch(msg):
761 self.
loglog.
fatal(
"Comparing configuration: %s", msg)
763 if not self.
configconfig.compare(oldConfig, shortcut=
False, output=logConfigMismatch):
765 f
"Config does not match existing task config {configName!r} on disk; "
766 "tasks configurations must be consistent within the same output repo "
767 "(override with --clobber-config)")
769 butler.put(self.
configconfig, configName)
772 """Write the schemas returned by
773 `lsst.pipe.base.Task.getAllSchemaCatalogs`.
777 butler : `lsst.daf.persistence.Butler`
778 Data butler used to write the schema. Each schema is written to the
779 dataset type specified as the key in the dict returned by
780 `~lsst.pipe.base.Task.getAllSchemaCatalogs`.
781 clobber : `bool`, optional
782 A boolean flag that controls what happens if a schema already has
785 - `True`: overwrite or rename the existing schema, depending on
787 - `False`: raise `TaskError` if this schema does not match the
789 doBackup : `bool`, optional
790 Set to `True` to backup the schema files if clobbering.
794 If ``clobber`` is `False` and an existing schema does not match a
795 current schema, then some schemas may have been saved successfully
796 and others may not, and there is no easy way to tell which is which.
799 schemaDataset = dataset +
"_schema"
801 butler.put(catalog, schemaDataset, doBackup=doBackup)
802 elif butler.datasetExists(schemaDataset, write=
True):
803 oldSchema = butler.get(schemaDataset, immediate=
True).getSchema()
804 if not oldSchema.compare(catalog.getSchema(), afwTable.Schema.IDENTICAL):
806 f
"New schema does not match schema {dataset!r} on disk; "
807 "schemas must be consistent within the same output repo "
808 "(override with --clobber-config)")
810 butler.put(catalog, schemaDataset)
813 """Write the metadata produced from processing the data.
818 Butler data reference used to write the metadata.
819 The metadata is written to dataset type
820 `CmdLineTask._getMetadataName`.
824 if metadataName
is not None:
826 except Exception
as e:
827 self.
loglog.
warn(
"Could not persist metadata for dataId=%s: %s", dataRef.dataId, e)
830 """Compare and write package versions.
834 butler : `lsst.daf.persistence.Butler`
835 Data butler used to read/write the package versions.
836 clobber : `bool`, optional
837 A boolean flag that controls what happens if versions already have
840 - `True`: overwrite or rename the existing version info, depending
842 - `False`: raise `TaskError` if this version info does not match
844 doBackup : `bool`, optional
845 If `True` and clobbering, old package version files are backed up.
846 dataset : `str`, optional
847 Name of dataset to read/write.
852 Raised if there is a version mismatch with current and persisted
853 lists of package versions.
857 Note that this operation is subject to a race condition.
859 packages = Packages.fromSystem()
862 return butler.put(packages, dataset, doBackup=doBackup)
863 if not butler.datasetExists(dataset, write=
True):
864 return butler.put(packages, dataset)
867 old = butler.get(dataset, immediate=
True)
868 except Exception
as exc:
869 raise type(exc)(f
"Unable to read stored version dataset {dataset} ({exc}); "
870 "consider using --clobber-versions or --no-versions")
875 diff = packages.difference(old)
877 versions_str =
"; ".join(f
"{pkg}: {diff[pkg][1]} vs {diff[pkg][0]}" for pkg
in diff)
879 f
"Version mismatch ({versions_str}); consider using --clobber-versions or --no-versions")
882 extra = packages.extra(old)
885 butler.put(old, dataset, doBackup=doBackup)

    def _getConfigName(self):
        """Get the name of the config dataset type, or `None` if config is not
        to be persisted.

        Notes
        -----
        The name may depend on the config; that is why this is not a class
        method.
        """
        return self._DefaultName + "_config"

    def _getMetadataName(self):
        """Get the name of the metadata dataset type, or `None` if metadata is
        not to be persisted.

        Notes
        -----
        The name may depend on the config; that is why this is not a class
        method.
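
        For example, a subclass can disable metadata persistence entirely (a
        sketch; ``NoMetadataTask`` and its base ``ExampleTask`` are
        hypothetical):

        .. code-block:: python

            class NoMetadataTask(ExampleTask):
                def _getMetadataName(self):
                    return None  # metadata will not be persisted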
        """
        return self._DefaultName + "_metadata"