LSSTApplications  16.0-10-g0ee56ad+5,16.0-11-ga33d1f2+5,16.0-12-g3ef5c14+3,16.0-12-g71e5ef5+18,16.0-12-gbdf3636+3,16.0-13-g118c103+3,16.0-13-g8f68b0a+3,16.0-15-gbf5c1cb+4,16.0-16-gfd17674+3,16.0-17-g7c01f5c+3,16.0-18-g0a50484+1,16.0-20-ga20f992+8,16.0-21-g0e05fd4+6,16.0-21-g15e2d33+4,16.0-22-g62d8060+4,16.0-22-g847a80f+4,16.0-25-gf00d9b8+1,16.0-28-g3990c221+4,16.0-3-gf928089+3,16.0-32-g88a4f23+5,16.0-34-gd7987ad+3,16.0-37-gc7333cb+2,16.0-4-g10fc685+2,16.0-4-g18f3627+26,16.0-4-g5f3a788+26,16.0-5-gaf5c3d7+4,16.0-5-gcc1f4bb+1,16.0-6-g3b92700+4,16.0-6-g4412fcd+3,16.0-6-g7235603+4,16.0-69-g2562ce1b+2,16.0-8-g14ebd58+4,16.0-8-g2df868b+1,16.0-8-g4cec79c+6,16.0-8-gadf6c7a+1,16.0-8-gfc7ad86,16.0-82-g59ec2a54a+1,16.0-9-g5400cdc+2,16.0-9-ge6233d7+5,master-g2880f2d8cf+3,v17.0.rc1
LSSTDataManagementBasePackage
BaseTable.cc
Go to the documentation of this file.
1 // -*- lsst-c++ -*-
2 
3 #include <memory>
4 
5 #include "boost/shared_ptr.hpp" // only for ndarray
6 
10 #include "lsst/afw/table/Catalog.h"
14 
15 namespace lsst {
16 namespace afw {
17 namespace table {
18 
19 // =============== Block ====================================================================================
20 
21 // This is a block of memory that doles out record-sized chunks when a table asks for them.
22 // It inherits from ndarray::Manager so we can return ndarrays that refer to the memory in the
23 // block with correct reference counting (ndarray::Manager is just an empty base class with an
24 // internal reference count - it's like a shared_ptr without the pointer and template parameter).
25 //
26 // Records are allocated in Blocks for two reasons:
27 // - it allows tables to be either totally contiguous in memory (enabling column views) or
28 // not (enabling dynamic addition of records) all in one class.
29 // - it saves us from ever having to reallocate all the records associated with a table
30 // when we run out of space (that's what a std::vector-like model would require). This keeps
31 // records and/or iterators to them from being invalidated, and it keeps tables from having
32 // to track all the records whose data it owns.
33 
34 namespace {
35 
36 class Block : public ndarray::Manager {
37 public:
38  typedef boost::intrusive_ptr<Block> Ptr;
39 
40  // If the last chunk allocated isn't needed after all (usually because of an exception in a constructor)
41  // we reuse it immediately. If it wasn't the last chunk allocated, it can't be reclaimed until
42  // the entire block goes out of scope.
43  static void reclaim(std::size_t recordSize, void *data, ndarray::Manager::Ptr const &manager) {
44  Ptr block = boost::static_pointer_cast<Block>(manager);
45  if (reinterpret_cast<char *>(data) + recordSize == block->_next) {
46  block->_next -= recordSize;
47  }
48  }
49 
50  // Ensure we have space for at least the given number of records as a contiguous block.
51  // May not actually allocate anything if we already do.
52  static void preallocate(std::size_t recordSize, std::size_t recordCount, ndarray::Manager::Ptr &manager) {
53  Ptr block = boost::static_pointer_cast<Block>(manager);
54  if (!block || static_cast<std::size_t>(block->_end - block->_next) < recordSize * recordCount) {
55  block = Ptr(new Block(recordSize, recordCount));
56  manager = block;
57  }
58  }
59 
60  static std::size_t getBufferSize(std::size_t recordSize, ndarray::Manager::Ptr const &manager) {
61  Ptr block = boost::static_pointer_cast<Block>(manager);
62  return static_cast<std::size_t>(block->_end - block->_next) / recordSize;
63  }
64 
65  // Get the next chunk from the block, making a new block and installing it into the table
66  // if we're all out of space.
67  static void *get(std::size_t recordSize, ndarray::Manager::Ptr &manager) {
68  Ptr block = boost::static_pointer_cast<Block>(manager);
69  if (!block || block->_next == block->_end) {
70  block = Ptr(new Block(recordSize, BaseTable::nRecordsPerBlock));
71  manager = block;
72  }
73  void *r = block->_next;
74  block->_next += recordSize;
75  return r;
76  }
77 
78  // Block is also keeper of the special number that says what alignment boundaries are needed for
79  // schemas. Before we start using a schema, we need to first ensure it meets that requirement,
80  // and pad it if not.
81  static void padSchema(Schema &schema) {
82  static int const MIN_RECORD_ALIGN = sizeof(AllocType);
83  int remainder = schema.getRecordSize() % MIN_RECORD_ALIGN;
84  if (remainder) {
85  detail::Access::padSchema(schema, MIN_RECORD_ALIGN - remainder);
86  }
87  }
88 
89 private:
90  struct AllocType {
91  double element[2];
92  };
93 
94  explicit Block(std::size_t recordSize, std::size_t recordCount)
95  : _mem(new AllocType[(recordSize * recordCount) / sizeof(AllocType)]),
96  _next(reinterpret_cast<char *>(_mem.get())),
97  _end(_next + recordSize * recordCount) {
98  assert((recordSize * recordCount) % sizeof(AllocType) == 0);
99  std::fill(_next, _end, 0); // initialize to zero; we'll later initialize floats to NaN.
100  }
101 
103  char *_next;
104  char *_end;
105 };
106 
107 } // namespace
108 
109 // =============== BaseTable implementation (see header for docs) ===========================================
110 
111 void BaseTable::preallocate(std::size_t n) { Block::preallocate(_schema.getRecordSize(), n, _manager); }
112 
114  if (_manager) {
115  return Block::getBufferSize(_schema.getRecordSize(), _manager);
116  } else {
117  return 0;
118  }
119 }
120 
122  return std::shared_ptr<BaseTable>(new BaseTable(schema));
123 }
124 
126  std::shared_ptr<BaseRecord> output = makeRecord();
127  output->assign(input);
128  return output;
129 }
130 
132  std::shared_ptr<BaseRecord> output = makeRecord();
133  output->assign(input, mapper);
134  return output;
135 }
136 
137 std::shared_ptr<io::FitsWriter> BaseTable::makeFitsWriter(fits::Fits *fitsfile, int flags) const {
138  return std::make_shared<io::FitsWriter>(fitsfile, flags);
139 }
140 
142  return std::shared_ptr<BaseTable>(new BaseTable(*this));
143 }
144 
146  return std::shared_ptr<BaseRecord>(new BaseRecord(shared_from_this()));
147 }
148 
// Construct from a schema.  The table stores its own padded, alias-detached copy.
BaseTable::BaseTable(Schema const &schema) : daf::base::Citizen(typeid(this)), _schema(schema) {
    // Pad the schema so record sizes meet the Block allocator's alignment requirement.
    Block::padSchema(_schema);
    // Detach this table's aliases from any other schemas that shared them.
    _schema.disconnectAliases();
    // Back-reference from the alias map to this table; cleared again in the destructor.
    _schema.getAliasMap()->_table = this;
}
154 
155 BaseTable::~BaseTable() { _schema.getAliasMap()->_table = 0; }
156 
157 namespace {
158 
159 // A Schema Functor used to set floating-point fields to NaN and initialize variable-length arrays
160 // using placement new. All other fields are left alone, as they should already be zero.
struct RecordInitializer {
    // Catch-all overload: non-floating-point elements are left untouched — the Block
    // allocator already zero-filled the record's memory.
    template <typename T>
    static void fill(T *element, int size) {}  // this matches all non-floating-point-element fields.

    // Floating-point fields are initialized to quiet NaN rather than zero.
    static void fill(float *element, int size) {
        std::fill(element, element + size, std::numeric_limits<float>::quiet_NaN());
    }

    static void fill(double *element, int size) {
        std::fill(element, element + size, std::numeric_limits<double>::quiet_NaN());
    }

    // Angle elements are reinterpreted as doubles and NaN-filled the same way.
    static void fill(lsst::geom::Angle *element, int size) {
        fill(reinterpret_cast<double *>(element), size);
    }

    // Generic fields: locate the field's elements inside the record and dispatch to fill().
    template <typename T>
    void operator()(SchemaItem<T> const &item) const {
        fill(reinterpret_cast<typename Field<T>::Element *>(data + item.key.getOffset()),
             item.key.getElementCount());
    }

    // Array fields: a variable-length array stores an ndarray object in-place, which must
    // be constructed; fixed-length arrays are plain elements and are filled like scalars.
    template <typename T>
    void operator()(SchemaItem<Array<T> > const &item) const {
        if (item.key.isVariableLength()) {
            // Use placement new because the memory (for one ndarray) is already allocated
            new (data + item.key.getOffset()) ndarray::Array<T, 1, 1>();
        } else {
            fill(reinterpret_cast<typename Field<T>::Element *>(data + item.key.getOffset()),
                 item.key.getElementCount());
        }
    }

    // String fields: a variable-length string stores a std::string object in-place;
    // fixed-length strings are raw chars and hit the no-op fill (already zeroed).
    void operator()(SchemaItem<std::string> const &item) const {
        if (item.key.isVariableLength()) {
            // Use placement new because the memory (for one std::string) is already allocated
            new (reinterpret_cast<std::string *>(data + item.key.getOffset())) std::string();
        } else {
            fill(reinterpret_cast<char *>(data + item.key.getOffset()), item.key.getElementCount());
        }
    }

    void operator()(SchemaItem<Flag> const &item) const {}  // do nothing for Flag fields; already 0

    char *data;  // start of the record's raw storage
};
207 
208 // A Schema Functor used to destroy variable-length array fields using an explicit call to their
209 // destructor (necessary since we used placement new). All other fields are ignored, as they're POD.
struct RecordDestroyer {
    // Catch-all: POD fields require no destruction.
    template <typename T>
    void operator()(SchemaItem<T> const &item) const {}

    // Variable-length arrays were created with placement new, so their destructor must be
    // invoked explicitly; fixed-length arrays are plain elements and are skipped.
    template <typename T>
    void operator()(SchemaItem<Array<T> > const &item) const {
        typedef ndarray::Array<T, 1, 1> Element;
        if (item.key.isVariableLength()) {
            (*reinterpret_cast<Element *>(data + item.key.getOffset())).~Element();
        }
    }

    // Variable-length strings likewise need an explicit destructor call.
    void operator()(SchemaItem<std::string> const &item) const {
        if (item.key.isVariableLength()) {
            using std::string;  // invoking the destructor on a qualified name doesn't compile in gcc 4.8.1
            // https://stackoverflow.com/q/24593942
            (*reinterpret_cast<string *>(data + item.key.getOffset())).~string();
        }
    }

    char *data;  // start of the record's raw storage
};
232 
233 } // namespace
234 
// Attach freshly-allocated, initialized storage to a newly-created record.
void BaseTable::_initialize(BaseRecord &record) {
    // Grab a record-sized chunk (this may install a new Block into _manager)...
    record._data = Block::get(_schema.getRecordSize(), _manager);
    // ...then NaN-fill floating-point fields and placement-new any variable-length fields.
    RecordInitializer f = {reinterpret_cast<char *>(record._data)};
    _schema.forEach(f);
    record._manager = _manager;  // manager always points to the most recently-used block.
}
241 
// Tear down a record's storage when the record is destroyed.
void BaseTable::_destroy(BaseRecord &record) {
    assert(record._table.get() == this);
    // Run explicit destructors for the in-place variable-length fields (see RecordDestroyer).
    RecordDestroyer f = {reinterpret_cast<char *>(record._data)};
    _schema.forEach(f);
    // Space can only be reclaimed if the record lives in the current (most recent) block;
    // chunks in older blocks are freed when that block's last reference goes away.
    if (record._manager == _manager) Block::reclaim(_schema.getRecordSize(), record._data, _manager);
}
248 
249 /*
250  * JFB has no idea whether the default value below is sensible, or even whether
251  * it should be expressed ultimately as an approximate size in bytes rather than a
252  * number of records; the answer probably depends on both the typical size of
253  * records and the typical number of records.
254  */
256 
257 // =============== BaseCatalog instantiation =================================================================
258 
// Explicit instantiations of the catalog container for mutable and const BaseRecord.
template class CatalogT<BaseRecord>;
template class CatalogT<BaseRecord const>;
261 } // namespace table
262 } // namespace afw
263 } // namespace lsst
Defines the fields and offsets for a table.
Definition: Schema.h:50
char * data
Definition: BaseTable.cc:205
static void padSchema(Schema &schema, int bytes)
Definition: Access.h:79
int getOffset() const noexcept
Return the offset (in bytes) of this field within a record.
Definition: Key.h:87
bool isVariableLength() const noexcept
Return true if the field is variable-length (each record can have a different size array)...
Definition: FieldBase.h:268
A mapping between the keys of two Schemas, used to copy data between them.
Definition: SchemaMapper.h:21
void disconnectAliases()
Sever the connection between this schema and any others with which it shares aliases.
Definition: Schema.cc:729
virtual std::shared_ptr< BaseRecord > _makeRecord()
Default-construct an associated record (protected implementation).
Definition: BaseTable.cc:145
A simple struct that combines the two arguments that must be passed to most cfitsio routines and cont...
Definition: fits.h:296
std::shared_ptr< BaseRecord > copyRecord(BaseRecord const &input)
Deep-copy a record, requiring that it have the same schema as this table.
Definition: BaseTable.cc:125
A class representing an angle.
Definition: Angle.h:127
STL class.
double element[2]
Definition: BaseTable.cc:91
void preallocate(std::size_t nRecords)
Allocate contiguous space for new records in advance.
Definition: BaseTable.cc:111
T remainder(T... args)
static int nRecordsPerBlock
Number of records in each memory block.
Definition: BaseTable.h:59
A base class for image defects.
table::Schema schema
Definition: Camera.cc:161
Tag types used to declare specialized field types.
Definition: misc.h:32
BaseTable(Schema const &schema)
Construct from a schema.
Definition: BaseTable.cc:149
std::shared_ptr< AliasMap > getAliasMap() const
Return the map of aliases.
Definition: Schema.h:269
STL class.
Base class for all records.
Definition: BaseRecord.h:31
static std::shared_ptr< BaseTable > make(Schema const &schema)
Construct a new table.
Definition: BaseTable.cc:121
FieldBase< T >::Element Element
Type used to store field data in the table (a field may have multiple elements).
Definition: Field.h:26
virtual std::shared_ptr< BaseTable > _clone() const
Clone implementation with noncovariant return types.
Definition: BaseTable.cc:141
std::size_t getBufferSize() const
Return the number of additional records space has been already been allocated for.
Definition: BaseTable.cc:113
T fill(T... args)
int getElementCount() const noexcept
Return the number of subfield elements (equal to the size of the string, including a null terminator)...
Definition: FieldBase.h:261
Base class for all tables.
Definition: BaseTable.h:44
SchemaMapper * mapper
Definition: SchemaMapper.cc:78
A simple pair-like struct for mapping a Field (name and description) with a Key (used for actual data...
Definition: SchemaImpl.h:27