|
def __init__(self, dataset, camera, cameraMapper)
|
def walk(self, root, rules=None)
|
File system scanner for a dataset known to a camera mapper.
Definition at line 399 of file datasetScanner.py.
◆ __init__()
def lsst.datarel.datasetScanner.DatasetScanner.__init__(self, dataset, camera, cameraMapper)
Definition at line 403 of file datasetScanner.py.
403 def __init__(self, dataset, camera, cameraMapper):
404 if not isinstance(cameraMapper, lsst.daf.butlerUtils.CameraMapper):
405 raise TypeError(
'Expecting a lsst.daf.butlerUtils.CameraMapper!')
406 if dataset
not in cameraMapper.mappings:
407 raise NotFoundError(
'Unknown dataset ' + str(dataset))
408 HfsScanner.__init__(self, cameraMapper.mappings[dataset].template)
409 camera = camera.lower()
410 if camera
not in _keyTypes:
411 raise RuntimeError(
'{} camera not supported yet'.
format(camera))
412 for k
in self._formatKeys:
413 if k
not in _keyTypes[camera]:
414 raise RuntimeError(
'{} is not a valid dataId key for camera {}'.
format(k, camera))
415 self._formatKeys[k].munge = _mungeFunctions[camera]
416 def __init__(self, needLockOnRead=True, data=None, cond=None)
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
◆ walk()
def lsst.datarel.datasetScanner.HfsScanner.walk(self, root, rules=None)

(inherited)
Generator that descends the given root directory in top-down
fashion, matching paths corresponding to the template and satisfying
the given rule list. The generator yields tuples of the form
(path, dataId), where path is a dataset file name relative to root,
and dataId is a key value dictionary identifying the file.
Definition at line 281 of file datasetScanner.py.
# NOTE(review): Doxygen code excerpt, not runnable code — the generator
# elided many source lines (e.g. 283-288, 291, 294, 296-313, 316-330),
# so loop bodies and several conditions are missing here. Consult
# datasetScanner.py (line 281) for the full walk() implementation.
281 def walk(self, root, rules=None):
282 """Generator that descends the given root directory in top-down 283 fashion, matching paths corresponding to the template and satisfying 284 the given rule list. The generator yields tuples of the form 285 (path, dataId), where path is a dataset file name relative to root, 286 and dataId is a key value dictionary identifying the file. 289 while os.path.exists(root)
and not oneFound:
290 stack = [(0, root, rules, {})]
292 depth, path, rules, dataId = stack.pop()
293 if os.path.isfile(path):
295 pc = self._pathComponents[depth]
299 if not os.path.exists(os.path.join(path, pc.regex)):
302 entries = os.listdir(path)
309 m = pc.regex.match(e)
314 for i, k
in enumerate(pc.keys):
315 subDataId = self._formatKeys[k].munge(k, m.group(i + 1), subDataId)
320 if subRules
and pc.keys:
325 if k
not in r
or subDataId[k]
in r[k]:
331 p = os.path.join(path, e)
332 if depth < len(self._pathComponents):
334 stack.append((depth, p, subRules, subDataId))
335 elif depth == len(self._pathComponents):
336 if os.path.isfile(p):
338 yield os.path.relpath(p, root), subDataId
341 root = os.path.join(root,
"_parent")
The documentation for this class was generated from the following file:
- /home/jenkins-slave/snowflake/release/lsstsw/stack/Linux64/datarel/14.0+77/python/lsst/datarel/datasetScanner.py