BaseTable.cc
// -*- lsst-c++ -*-

#include <memory>

#include "boost/shared_ptr.hpp"  // only for ndarray

#include "lsst/afw/table/BaseTable.h"
#include "lsst/afw/table/BaseRecord.h"
#include "lsst/afw/table/BaseColumnView.h"
#include "lsst/afw/table/io/FitsWriter.h"
#include "lsst/afw/table/detail/Access.h"
#include "lsst/afw/table/Catalog.h"

namespace lsst {
namespace afw {
namespace table {

// =============== Block ====================================================================================

// This is a block of memory that doles out record-sized chunks when a table asks for them.
// It inherits from ndarray::Manager so we can return ndarrays that refer to the memory in the
// block with correct reference counting (ndarray::Manager is just an empty base class with an
// internal reference count - it's like a shared_ptr without the pointer and template parameter).
//
// Records are allocated in Blocks for two reasons:
//  - It allows tables to be either totally contiguous in memory (enabling column views) or
//    not (enabling dynamic addition of records), all in one class.
//  - It saves us from ever having to reallocate all the records associated with a table
//    when we run out of space (which is what a std::vector-like model would require). This keeps
//    records and/or iterators to them from being invalidated, and it keeps tables from having
//    to track all the records whose data they own.

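// To make that trade-off concrete, here is an illustrative sketch (not code from this
// file; it assumes the public BaseCatalog/CatalogT interface) of the two usage patterns:
//
//     BaseCatalog catalog(schema);
//     catalog.getTable()->preallocate(1000);            // reserve one contiguous Block
//     for (int i = 0; i < 1000; ++i) catalog.addNew();  // records all land in that Block
//     auto columns = catalog.getColumnView();           // possible while storage is contiguous
//
// Without the preallocate() call the same loop still works, but the records may be
// spread across several Blocks, trading the column view for flexibility.
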
namespace {

class Block : public ndarray::Manager {
public:
    using Ptr = boost::intrusive_ptr<Block>;

    // If the last chunk allocated isn't needed after all (usually because of an exception in a constructor)
    // we reuse it immediately. If it wasn't the last chunk allocated, it can't be reclaimed until
    // the entire block goes out of scope.
    static void reclaim(std::size_t recordSize, void *data, ndarray::Manager::Ptr const &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (reinterpret_cast<char *>(data) + recordSize == block->_next) {
            block->_next -= recordSize;
        }
    }
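    // For instance (illustrative only): if chunks for records A and then B are handed
    // out and B's constructor throws, B is the last chunk allocated, so reclaim() backs
    // _next up and the next get() reuses B's bytes; if A were the one abandoned instead,
    // its space would simply stay unused until the whole Block is released.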

    // Ensure we have space for at least the given number of records as a contiguous block.
    // May not actually allocate anything if we already do.
    static void preallocate(std::size_t recordSize, std::size_t recordCount, ndarray::Manager::Ptr &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (!block || static_cast<std::size_t>(block->_end - block->_next) < recordSize * recordCount) {
            block = Ptr(new Block(recordSize, recordCount));
            manager = block;
        }
    }

    static std::size_t getBufferSize(std::size_t recordSize, ndarray::Manager::Ptr const &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        return static_cast<std::size_t>(block->_end - block->_next) / recordSize;
    }

    // Get the next chunk from the block, making a new block and installing it into the table
    // if we're all out of space.
    static void *get(std::size_t recordSize, ndarray::Manager::Ptr &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (!block || block->_next == block->_end) {
            block = Ptr(new Block(recordSize, BaseTable::nRecordsPerBlock));
            manager = block;
        }
        void *r = block->_next;
        block->_next += recordSize;
        return r;
    }
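    // E.g. (hypothetical numbers): with nRecordsPerBlock == 100 and no preallocation,
    // the 101st record drains the current Block and triggers a fresh one. Existing
    // records remain valid - nothing is moved - but the table's storage is no longer
    // one contiguous buffer, so a column view over all its records is no longer possible.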

    // Block is also the keeper of the special number that says what alignment boundaries are needed
    // for schemas. Before we start using a schema, we need to first ensure it meets that requirement,
    // and pad it if not.
    static void padSchema(Schema &schema) {
        static int const MIN_RECORD_ALIGN = sizeof(AllocType);
        std::size_t remainder = schema.getRecordSize() % MIN_RECORD_ALIGN;
        if (remainder) {
            detail::Access::padSchema(schema, MIN_RECORD_ALIGN - remainder);
        }
    }
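    // Worked example (hypothetical record sizes): sizeof(AllocType) == 16, so a schema
    // with getRecordSize() == 40 has remainder 40 % 16 == 8 and gets padded by
    // 16 - 8 == 8 bytes, yielding 48-byte records; a 32-byte record is already aligned
    // and is left untouched.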

private:
    struct AllocType {
        double element[2];
    };

    explicit Block(std::size_t recordSize, std::size_t recordCount)
            : _mem(new AllocType[(recordSize * recordCount) / sizeof(AllocType)]),
              _next(reinterpret_cast<char *>(_mem.get())),
              _end(_next + recordSize * recordCount) {
        assert((recordSize * recordCount) % sizeof(AllocType) == 0);
        std::fill(_next, _end, 0);  // initialize to zero; we'll later initialize floats to NaN.
    }

    std::unique_ptr<AllocType[]> _mem;  // owns the Block's memory; _next and _end point into it
    char *_next;
    char *_end;
};

}  // namespace

// =============== BaseTable implementation (see header for docs) ===========================================

void BaseTable::preallocate(std::size_t n) { Block::preallocate(_schema.getRecordSize(), n, _manager); }

std::size_t BaseTable::getBufferSize() const {
    if (_manager) {
        return Block::getBufferSize(_schema.getRecordSize(), _manager);
    } else {
        return 0;
    }
}

std::shared_ptr<BaseTable> BaseTable::make(Schema const &schema) {
    return std::shared_ptr<BaseTable>(new BaseTable(schema));
}

Schema BaseTable::makeMinimalSchema() {
    return Schema();
}
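
// A typical construction sequence, as a sketch (the field added here is purely
// hypothetical):
//
//     Schema schema = BaseTable::makeMinimalSchema();
//     auto key = schema.addField<double>("flux", "an example field");
//     auto table = BaseTable::make(schema);  // the constructor pads the schema (see below)
//     auto record = table->makeRecord();     // draws its storage from the current Block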

std::shared_ptr<BaseRecord> BaseTable::copyRecord(BaseRecord const &input) {
    std::shared_ptr<BaseRecord> output = makeRecord();
    output->assign(input);
    return output;
}

std::shared_ptr<BaseRecord> BaseTable::copyRecord(BaseRecord const &input, SchemaMapper const &mapper) {
    std::shared_ptr<BaseRecord> output = makeRecord();
    output->assign(input, mapper);
    return output;
}

std::shared_ptr<io::FitsWriter> BaseTable::makeFitsWriter(fits::Fits *fitsfile, int flags) const {
    return std::make_shared<io::FitsWriter>(fitsfile, flags);
}

std::shared_ptr<BaseTable> BaseTable::_clone() const {
    return std::shared_ptr<BaseTable>(new BaseTable(*this));
}

std::shared_ptr<BaseRecord> BaseTable::_makeRecord() {
    return constructRecord<BaseRecord>();
}

BaseTable::BaseTable(Schema const &schema) : _schema(schema) {
    Block::padSchema(_schema);
    _schema.disconnectAliases();
    _schema.getAliasMap()->_table = this;
}

BaseTable::~BaseTable() { _schema.getAliasMap()->_table = nullptr; }

namespace {

// A Schema functor used to destroy variable-length array fields via an explicit call to their
// destructor (necessary since we used placement new). All other fields are ignored, as they're POD.
struct RecordDestroyer {
    template <typename T>
    void operator()(SchemaItem<T> const &item) const {}

    template <typename T>
    void operator()(SchemaItem<Array<T> > const &item) const {
        using Element = ndarray::Array<T, 1, 1>;
        if (item.key.isVariableLength()) {
            (*reinterpret_cast<Element *>(data + item.key.getOffset())).~Element();
        }
    }

    void operator()(SchemaItem<std::string> const &item) const {
        if (item.key.isVariableLength()) {
            // invoking the destructor on a qualified name doesn't compile in gcc 4.8.1;
            // see https://stackoverflow.com/q/24593942
            using std::string;
            (*reinterpret_cast<string *>(data + item.key.getOffset())).~string();
        }
    }

    char *data;
};

}  // namespace
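
// The pattern RecordDestroyer depends on, in miniature (a sketch, not code from this
// file): an object placement-new'ed into manually managed memory must be destroyed
// with an explicit destructor call, because delete never runs on that storage:
//
//     alignas(std::string) char buffer[sizeof(std::string)];
//     auto *s = new (buffer) std::string("variable-length payload");
//     using std::string;  // same gcc workaround as above
//     s->~string();       // explicit destructor call; RecordDestroyer does this per field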

detail::RecordData BaseTable::_makeNewRecordData() {
    auto data = Block::get(_schema.getRecordSize(), _manager);
    return detail::RecordData{
            data,
            shared_from_this(),
            _manager  // manager always points to the most recently-used block.
    };
}

void BaseTable::_destroy(BaseRecord &record) {
    assert(record._table.get() == this);
    RecordDestroyer f = {reinterpret_cast<char *>(record._data)};
    _schema.forEach(f);
    if (record._manager == _manager) Block::reclaim(_schema.getRecordSize(), record._data, _manager);
}

/*
 * JFB has no idea whether the default value below is sensible, or even whether
 * it should be expressed ultimately as an approximate size in bytes rather than a
 * number of records; the answer probably depends on both the typical size of
 * records and the typical number of records.
 */
int BaseTable::nRecordsPerBlock = 100;

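// Since nRecordsPerBlock is a public static member (declared in BaseTable.h), an
// application that knows its workload could tune it before creating any tables,
// e.g. (illustrative only):
//
//     BaseTable::nRecordsPerBlock = 10000;  // fewer, larger Blocks for big catalogs
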
// =============== BaseCatalog instantiation =================================================================

template class CatalogT<BaseRecord>;
template class CatalogT<BaseRecord const>;

}  // namespace table
}  // namespace afw
}  // namespace lsst