27 """This module defines the ButlerSubset class and the ButlerDataRefs contained
28 within it as well as an iterator over the subset."""
35 """ButlerSubset is a container for ButlerDataRefs. It represents a
36 collection of data ids that can be used to obtain datasets of the type
37 used when creating the collection or a compatible dataset type. It can be
38 thought of as the result of a query for datasets matching a partial data
41 The ButlerDataRefs are generated at a specified level of the data id
42 hierarchy. If that is not the level at which datasets are specified, the
43 ButlerDataRef.subItems() method may be used to dive further into the
46 ButlerSubsets should generally be created using Butler.subset().
48 This mechanism replaces the creation of butlers using partial dataIds.
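For example (a minimal sketch; the repository path, dataset type, and data id
key below are illustrative assumptions, not values defined by this module):

    from lsst.daf.persistence import Butler

    butler = Butler("/path/to/repo")                           # hypothetical repository root
    subset = butler.subset("calexp", dataId={"filter": "r"})   # assumed dataset type and key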
Public methods:

__init__(self, butler, datasetType, level, dataId)
61 """This is a Generation 2 ButlerSubset.
def __init__(self, butler, datasetType, level, dataId):
    Create a ButlerSubset by querying a butler for data ids matching a
    given partial data id for a given dataset type at a given hierarchy
    level.

    @param butler (Butler)      butler that is being queried.
    @param datasetType (str)    the type of dataset to query.
    @param level (str)          the hierarchy level to descend to; if an empty
                                string, the default level is looked up.
    @param dataId (dict)        the (partial or complete) data id.
    # Record the inputs so that the generated ButlerDataRefs can refer back to them.
    self.butler = butler
    self.datasetType = datasetType
    self.dataId = dataId
    self.cache = []
    self.level = level

    keys = self.butler.getKeys(datasetType, level, tag=dataId.tag)
    fmt = list(keys.keys())

    # Query for all id tuples matching the partial data id, then expand each
    # tuple into a complete data id and cache it.
    idTuples = butler.queryMetadata(self.datasetType, fmt, self.dataId)
    for idTuple in idTuples:
        tempId = dict(self.dataId)
        if len(fmt) == 1:
            # A single key comes back as a scalar, not a tuple.
            tempId[fmt[0]] = idTuple
        else:
            for i in range(len(fmt)):
                tempId[fmt[i]] = idTuple[i]
        self.cache.append(tempId)
108 return "ButlerSubset(butler=%s, datasetType=%s, dataId=%s, cache=%s, level=%s)" % (
    Number of ButlerDataRefs in the ButlerSubset.

    return len(self.cache)
    Iterator over the ButlerDataRefs in the ButlerSubset.

    @returns (ButlerSubsetIterator)
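Continuing the sketch above, the container protocol is a thin layer over the
cached data ids:

    print(len(subset))       # number of data ids found by the query
    for dataRef in subset:   # iteration yields ButlerDataRef objects
        print(dataRef.dataId)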
An iterator over the ButlerDataRefs in a ButlerSubset.
A ButlerDataRef is a reference to a potential dataset or group of datasets
that is portable between compatible dataset types. As such, it can be
used to create or retrieve datasets.

ButlerDataRefs are (conceptually) created as elements of a ButlerSubset by
Butler.subset(). They are initially specific to the dataset type passed
to that call, but they may be used with any other compatible dataset type.
Dataset type compatibility must be determined externally (or by trial and
error).

ButlerDataRefs may be created at any level of a data identifier hierarchy.
If the level is not one at which datasets exist, a ButlerSubset
with lower-level ButlerDataRefs can be created using
ButlerDataRef.subItems().
Public methods:

get(self, datasetType=None, **rest)

put(self, obj, datasetType=None, doBackup=False, **rest)

subItems(self, level=None)

datasetExists(self, datasetType=None, write=False, **rest)
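A usage sketch, assuming dataRef is an element of the subset created earlier
and that "calexp" and "src" are compatible dataset types in that repository
(an assumption for illustration):

    for dataRef in subset:
        exposure = dataRef.get()       # dataset type defaults to the subset's ("calexp")
        catalog = dataRef.get("src")   # an assumed compatible dataset type, same data id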
177 """This is a Generation 2 DataRef.
182 For internal use only. ButlerDataRefs should only be created by
183 ButlerSubset and ButlerSubsetIterator.
    return 'ButlerDataRef(butlerSubset=%s, dataId=%s)' % (self.butlerSubset, self.dataId)
def get(self, datasetType=None, **rest):
    Retrieve a dataset of the given type (or the type used when creating
    the ButlerSubset, if None) as specified by the ButlerDataRef.

    @param datasetType (str)  dataset type to retrieve.
    @param **rest             keyword arguments with data identifiers
    @returns object corresponding to the given dataset type.

    if datasetType is None:
        datasetType = self.butlerSubset.datasetType
    return self.butlerSubset.butler.get(datasetType, self.dataId, **rest)
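For instance (a sketch; "calexp_md" is an assumed metadata dataset type):

    exposure = dataRef.get()             # uses the dataset type of the originating subset
    metadata = dataRef.get("calexp_md")  # assumed compatible dataset type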
def put(self, obj, datasetType=None, doBackup=False, **rest):
    Persist a dataset of the given type (or the type used when creating
    the ButlerSubset, if None) as specified by the ButlerDataRef.

    @param obj                object to persist.
    @param datasetType (str)  dataset type to persist.
    @param doBackup           if True, rename existing instead of overwriting
    @param **rest             keyword arguments with data identifiers

    WARNING: Setting doBackup=True is not safe for parallel processing, as it
    may be subject to race conditions.
    if datasetType is None:
        datasetType = self.butlerSubset.datasetType
    self.butlerSubset.butler.put(obj, datasetType, self.dataId, doBackup=doBackup, **rest)
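A sketch of writing a result back through the same data reference (the
"processedExp" dataset type is an assumption for illustration):

    dataRef.put(exposure)                                  # persist under the subset's dataset type
    dataRef.put(exposure, "processedExp", doBackup=True)   # rename any existing dataset first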
def getUri(self, datasetType=None, write=False, **rest):
    """Return the URI for a dataset

    .. warning:: This is intended only for debugging. The URI should
        never be used for anything other than printing.

    .. note:: In the event there are multiple URIs, we return only
        the first one.

    .. note:: getUri() does not currently support composite datasets.

    Parameters
    ----------
    datasetType : `str`, optional
        The dataset type of interest.
    write : `bool`, optional
        Return the URI for writing?
    rest : `dict`, optional
        Keyword arguments for the data id.
    if datasetType is None:
        datasetType = self.butlerSubset.datasetType
    return self.butlerSubset.butler.getUri(datasetType, self.dataId, write=write, **rest)
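As the warning above says, this is for printing only; a minimal sketch:

    print(dataRef.getUri())             # where the default dataset type would be read from
    print(dataRef.getUri(write=True))   # where it would be written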
    Return a list of the levels of the data id hierarchy below this
    ButlerDataRef.

    @returns (iterable) list of strings with level keys."""
    Generate a ButlerSubset at a lower level of the hierarchy than this
    ButlerDataRef, using it as a partial data id. If level is None, a
    default lower level for the original ButlerSubset level and dataset
    type is used.

    As currently implemented, the default sublevels for all the
    repositories used by this Butler instance must match for the Butler to
    be able to select a default sublevel to get the subset.

    @param level (str)  the hierarchy level to descend to.
    @returns (ButlerSubset) resulting from the lower-level query or () if
    there is no lower level.
    if level is None:
        # Collect the default sublevel from each repository's mapper; they
        # must all agree for a default to be usable.
        levelSet = set()
        for repoData in self.butlerSubset.butler._repos.all():
            levelSet.add(repoData.repo._mapper.getDefaultSubLevel(
                self.butlerSubset.level))
        if len(levelSet) > 1:
            raise RuntimeError(
                "Support for multiple levels not implemented.")
        level = levelSet.pop()
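A sketch of descending the hierarchy; the "sensor" level name is an
assumption that depends on the mapper in use:

    for visitRef in subset:                            # e.g. references at the visit level
        for sensorRef in visitRef.subItems("sensor"):  # assumed lower-level key
            print(sensorRef.dataId)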
    Determine if a dataset exists of the given type (or the type used when
    creating the ButlerSubset, if None) as specified by the ButlerDataRef.

    @param datasetType (str)  dataset type to check.
    @param write (bool)       if True, search only in output repositories
    @param **rest             keyword arguments with data identifiers
    if datasetType is None:
        datasetType = self.butlerSubset.datasetType
    return self.butlerSubset.butler.datasetExists(
        datasetType, self.dataId, write=write, **rest)
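A sketch of guarding a read with an existence check:

    if dataRef.datasetExists():   # defaults to the subset's dataset type
        exposure = dataRef.get()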
    Return the butler associated with this data reference.