_syntax.py
#
# LSST Data Management System
# Copyright 2008-2015 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""
Special Python syntactic sugar for Catalogs and Records.

This module is imported by tableLib.py, and should not need to be imported by any other module.
I've moved the code out of the .i file here to avoid recompiling when only pure-Python code is
changed.
"""

import fnmatch
import re
import numpy
import collections

def KeyBaseCov_subfields(self):  # name assumed; the original def line is not present in this listing
    """a tuple of Key subfield extraction indices (the lower-triangular elements)."""
    r = []
    for i in range(self.getSize()):
        for j in range(i+1):
            r.append((i, j))
    return tuple(r)

def KeyBaseCov_subkeys(self):  # name assumed; the original def line is not present in this listing
    """a tuple of subelement Keys (the lower-triangular elements)."""
    r = []
    for i in range(self.getSize()):
        for j in range(i+1):
            r.append(self[i, j])
    return tuple(r)

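# Worked example for the two helpers above (hypothetical key ``covKey``; assumes the helpers
# are bound as the ``subfields``/``subkeys`` properties of covariance keys, as used further
# down in this module):
#
#   >>> covKey.getSize()
#   3
#   >>> covKey.subfields                      # lower-triangular indices, row by row
#   ((0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2))
#   >>> len(covKey.subkeys)                   # one scalar element Key per index above
#   6
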
def Schema_extract(self, *patterns, **kwds):
    """
    Extract a dictionary of {<name>: <schema-item>} in which the field names
    match the given shell-style glob pattern(s).

    Any number of glob patterns may be passed; the result will be the union of the
    results of each glob considered separately.

    Additional optional arguments may be passed as keywords:

    regex ------ A regular expression to be used in addition to any glob patterns passed
                 as positional arguments. Note that this will be compared with re.match,
                 not re.search.

    sub -------- A replacement string template (see re.MatchObject.expand) used to set the
                 dictionary keys of any fields matched by regex. The field name in the
                 SchemaItem is not modified.

    ordered ---- If True, a collections.OrderedDict will be returned instead of a standard
                 dict, with the order corresponding to the definition order of the Schema.

    """
    if kwds.pop("ordered", False):
        d = collections.OrderedDict()
    else:
        d = dict()
    regex = kwds.pop("regex", None)
    sub = kwds.pop("sub", None)
    if sub is not None and regex is None:
        raise ValueError("'sub' keyword argument to extract is invalid without 'regex' argument")
    if kwds:
        raise ValueError("Unrecognized keyword arguments for extract: %s" % ", ".join(kwds.keys()))
    for item in self:
        trueName = item.field.getName()
        names = [trueName]
        for alias, target in self.getAliasMap().iteritems():
            if trueName.startswith(target):
                names.append(trueName.replace(target, alias, 1))
        for name in names:
            if regex is not None:
                m = re.match(regex, name)
                if m is not None:
                    if sub is not None:
                        name = m.expand(sub)
                    d[name] = item
                    continue  # continue middle loop so we don't match the same name twice
            for pattern in patterns:
                if fnmatch.fnmatchcase(name, pattern):
                    d[name] = item
                    break  # break inner loop so we don't match the same name twice
    return d

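# Usage sketch for Schema_extract (hypothetical schema and field names; assumes the function
# is bound as Schema.extract, and a schema containing "a_flux", "a_fluxSigma", and "b_flux"):
#
#   >>> schema.extract("a_*")                            # {"a_flux": <item>, "a_fluxSigma": <item>}
#   >>> schema.extract(regex=r"(\w+)_flux$", sub=r"\1")  # keys renamed via sub: {"a": <item>, "b": <item>}
#   >>> schema.extract("*", ordered=True)                # OrderedDict in Schema definition order
#
# Each <item> is the matched field's schema item, so item.key and item.field remain available.
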
def BaseRecord_extract(self, *patterns, **kwds):
    """
    Extract a dictionary of {<name>: <field-value>} in which the field names
    match the given shell-style glob pattern(s).

    Any number of glob patterns may be passed; the result will be the union of the
    results of each glob considered separately.

    Additional optional arguments may be passed as keywords:

    items ------ The result of a call to self.schema.extract(); this will be used instead
                 of doing any new matching, and allows the pattern matching to be reused
                 to extract values from multiple records. This keyword is incompatible
                 with any positional arguments and the regex, sub, and ordered keyword
                 arguments.

    split ------ If True, fields with named subfields (e.g. points) will be split into
                 separate items in the dict; instead of {"point": lsst.afw.geom.Point2I(2,3)},
                 for instance, you'd get {"point.x": 2, "point.y": 3}.
                 Default is False.

    regex ------ A regular expression to be used in addition to any glob patterns passed
                 as positional arguments. Note that this will be compared with re.match,
                 not re.search.

    sub -------- A replacement string (see re.MatchObject.expand) used to set the
                 dictionary keys of any fields matched by regex.

    ordered ---- If True, a collections.OrderedDict will be returned instead of a standard
                 dict, with the order corresponding to the definition order of the Schema.
                 Default is False.

    """
    d = kwds.pop("items", None)
    split = kwds.pop("split", False)
    if d is None:
        d = self.schema.extract(*patterns, **kwds).copy()
    elif kwds:
        raise ValueError("Unrecognized keyword arguments for extract: %s" % ", ".join(kwds.keys()))
    for name, schemaItem in d.items():  # can't use iteritems because we might be adding/deleting elements
        key = schemaItem.key
        if split and key.HAS_NAMED_SUBFIELDS:
            for subname, subkey in zip(key.subfields, key.subkeys):
                d["%s.%s" % (name, subname)] = self.get(subkey)
            del d[name]
        else:
            d[name] = self.get(schemaItem.key)
    return d

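# Usage sketch for BaseRecord_extract (hypothetical record and catalog; assumes the function is
# bound as BaseRecord.extract, using the docstring's "point" field as the example):
#
#   >>> record.extract("point*")                  # {"point": Point2I(2, 3)}
#   >>> record.extract("point*", split=True)      # {"point.x": 2, "point.y": 3}
#   >>> items = record.schema.extract("point*")   # do the schema matching once ...
#   >>> [rec.extract(items=items) for rec in catalog]   # ... and reuse it for every record
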
def BaseColumnView_extract(self, *patterns, **kwds):
    """
    Extract a dictionary of {<name>: <column-array>} in which the field names
    match the given shell-style glob pattern(s).

    Any number of glob patterns may be passed; the result will be the union of the
    results of each glob considered separately.

    Note that extract("*", copy=True) provides an easy way to transform a row-major
    ColumnView into a possibly more efficient set of contiguous NumPy arrays.

    This routine unpacks Flag columns into full boolean arrays and covariances into dense
    (i.e. non-triangular-packed) arrays with dimension (N,M,M), where N is the number of
    records and M is the dimension of the covariance matrix. Fields with named subfields
    (e.g. points) are always split into separate dictionary items, as is done in
    BaseRecord.extract(..., split=True). String fields are silently ignored.

    Additional optional arguments may be passed as keywords:

    items ------ The result of a call to self.schema.extract(); this will be used instead
                 of doing any new matching, and allows the pattern matching to be reused
                 to extract values from multiple records. This keyword is incompatible
                 with any positional arguments and the regex, sub, and ordered keyword
                 arguments.

    where ------ Any expression that can be passed as indices to a NumPy array, including
                 slices, boolean arrays, and index arrays, that will be used to index
                 each column array. This is applied before arrays are copied when
                 copy is True, so if the indexing results in an implicit copy no
                 unnecessary second copy is performed.

    copy ------- If True, the returned arrays will be contiguous copies rather than strided
                 views into the catalog. This ensures that the lifetime of the returned
                 arrays is not tied to the lifetime of the catalog, and it also may improve
                 the performance if the arrays are used repeatedly.
                 Default is False.

    regex ------ A regular expression to be used in addition to any glob patterns passed
                 as positional arguments. Note that this will be compared with re.match,
                 not re.search.

    sub -------- A replacement string (see re.MatchObject.expand) used to set the
                 dictionary keys of any fields matched by regex.

    ordered ---- If True, a collections.OrderedDict will be returned instead of a standard
                 dict, with the order corresponding to the definition order of the Schema.
                 Default is False.

    """
    copy = kwds.pop("copy", False)
    where = kwds.pop("where", None)
    d = kwds.pop("items", None)
    if d is None:
        d = self.schema.extract(*patterns, **kwds).copy()
    elif kwds:
        raise ValueError("Unrecognized keyword arguments for extract: %s" % ", ".join(kwds.keys()))
    def processArray(a):
        if where is not None:
            a = a[where]
        if copy:
            a = numpy.ascontiguousarray(a)
        return a
    for name, schemaItem in d.items():  # can't use iteritems because we might be adding/deleting elements
        key = schemaItem.key
        if key.HAS_NAMED_SUBFIELDS:
            for subname, subkey in zip(key.subfields, key.subkeys):
                d["%s.%s" % (name, subname)] = processArray(self.get(subkey))
            del d[name]
        elif key.getTypeString().startswith("Cov"):
            # Unpack the lower-triangular covariance elements into a dense (N, M, M) array.
            unpacked = None
            for idx, subkey in zip(key.subfields, key.subkeys):
                i, j = idx
                array = processArray(self.get(subkey))
                if unpacked is None:
                    unpacked = numpy.zeros((array.size, key.getSize(), key.getSize()), dtype=array.dtype)
                unpacked[:, i, j] = array
                if i != j:
                    unpacked[:, j, i] = array
            d[name] = unpacked
        elif key.getTypeString() == "String":
            del d[name]
        else:
            d[name] = processArray(self.get(schemaItem.key))
    return d
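
# Usage sketch for BaseColumnView_extract (hypothetical column view ``columns``; assumes the
# function is bound as BaseColumnView.extract and that the schema includes a covariance field "cov"):
#
#   >>> arrays = columns.extract("*", copy=True)               # contiguous per-column arrays
#   >>> head = columns.extract("*_flux", where=slice(0, 100))  # first 100 rows of the flux columns
#   >>> columns.extract("cov*")["cov"].shape                   # covariance unpacked to (N, M, M)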