LSST Applications  21.0.0-147-g0e635eb1+1acddb5be5,22.0.0+052faf71bd,22.0.0+1ea9a8b2b2,22.0.0+6312710a6c,22.0.0+729191ecac,22.0.0+7589c3a021,22.0.0+9f079a9461,22.0.1-1-g7d6de66+b8044ec9de,22.0.1-1-g87000a6+536b1ee016,22.0.1-1-g8e32f31+6312710a6c,22.0.1-10-gd060f87+016f7cdc03,22.0.1-12-g9c3108e+df145f6f68,22.0.1-16-g314fa6d+c825727ab8,22.0.1-19-g93a5c75+d23f2fb6d8,22.0.1-19-gb93eaa13+aab3ef7709,22.0.1-2-g8ef0a89+b8044ec9de,22.0.1-2-g92698f7+9f079a9461,22.0.1-2-ga9b0f51+052faf71bd,22.0.1-2-gac51dbf+052faf71bd,22.0.1-2-gb66926d+6312710a6c,22.0.1-2-gcb770ba+09e3807989,22.0.1-20-g32debb5+b8044ec9de,22.0.1-23-gc2439a9a+fb0756638e,22.0.1-3-g496fd5d+09117f784f,22.0.1-3-g59f966b+1e6ba2c031,22.0.1-3-g849a1b8+f8b568069f,22.0.1-3-gaaec9c0+c5c846a8b1,22.0.1-32-g5ddfab5d3+60ce4897b0,22.0.1-4-g037fbe1+64e601228d,22.0.1-4-g8623105+b8044ec9de,22.0.1-5-g096abc9+d18c45d440,22.0.1-5-g15c806e+57f5c03693,22.0.1-7-gba73697+57f5c03693,master-g6e05de7fdc+c1283a92b8,master-g72cdda8301+729191ecac,w.2021.39
LSST Data Management Base Package
BaseTable.cc
Go to the documentation of this file.
1 // -*- lsst-c++ -*-
2 
3 #include <memory>
4 
5 #include "boost/shared_ptr.hpp" // only for ndarray
6 
13 
14 namespace lsst {
15 namespace afw {
16 namespace table {
17 
18 // =============== Block ====================================================================================
19 
20 // This is a block of memory that doles out record-sized chunks when a table asks for them.
21 // It inherits from ndarray::Manager so we can return ndarrays that refer to the memory in the
22 // block with correct reference counting (ndarray::Manager is just an empty base class with an
23 // internal reference count - it's like a shared_ptr without the pointer and template parameter).
24 //
25 // Records are allocated in Blocks for two reasons:
26 // - it allows tables to be either totally contiguous in memory (enabling column views) or
27 // not (enabling dynamic addition of records) all in one class.
28 // - it saves us from ever having to reallocate all the records associated with a table
29 // when we run out of space (that's what a std::vector-like model would require). This keeps
30 // records and/or iterators to them from being invalidated, and it keeps tables from having
31 // to track all the records whose data it owns.
32 
33 namespace {
34 
35 class Block : public ndarray::Manager {
36 public:
37  using Ptr = boost::intrusive_ptr<Block>;
38 
39  // If the last chunk allocated isn't needed after all (usually because of an exception in a constructor)
40  // we reuse it immediately. If it wasn't the last chunk allocated, it can't be reclaimed until
41  // the entire block goes out of scope.
42  static void reclaim(std::size_t recordSize, void *data, ndarray::Manager::Ptr const &manager) {
43  Ptr block = boost::static_pointer_cast<Block>(manager);
44  if (reinterpret_cast<char *>(data) + recordSize == block->_next) {
45  block->_next -= recordSize;
46  }
47  }
48 
49  // Ensure we have space for at least the given number of records as a contiguous block.
50  // May not actually allocate anything if we already do.
51  static void preallocate(std::size_t recordSize, std::size_t recordCount, ndarray::Manager::Ptr &manager) {
52  Ptr block = boost::static_pointer_cast<Block>(manager);
53  if (!block || static_cast<std::size_t>(block->_end - block->_next) < recordSize * recordCount) {
54  block = Ptr(new Block(recordSize, recordCount));
55  manager = block;
56  }
57  }
58 
59  static std::size_t getBufferSize(std::size_t recordSize, ndarray::Manager::Ptr const &manager) {
60  Ptr block = boost::static_pointer_cast<Block>(manager);
61  return static_cast<std::size_t>(block->_end - block->_next) / recordSize;
62  }
63 
64  // Get the next chunk from the block, making a new block and installing it into the table
65  // if we're all out of space.
66  static void *get(std::size_t recordSize, ndarray::Manager::Ptr &manager) {
67  Ptr block = boost::static_pointer_cast<Block>(manager);
68  if (!block || block->_next == block->_end) {
69  block = Ptr(new Block(recordSize, BaseTable::nRecordsPerBlock));
70  manager = block;
71  }
72  void *r = block->_next;
73  block->_next += recordSize;
74  return r;
75  }
76 
77  // Block is also keeper of the special number that says what alignment boundaries are needed for
78  // schemas. Before we start using a schema, we need to first ensure it meets that requirement,
79  // and pad it if not.
80  static void padSchema(Schema &schema) {
81  static int const MIN_RECORD_ALIGN = sizeof(AllocType);
82  std::size_t remainder = schema.getRecordSize() % MIN_RECORD_ALIGN;
83  if (remainder) {
84  detail::Access::padSchema(schema, MIN_RECORD_ALIGN - remainder);
85  }
86  }
87 
88 private:
89  struct AllocType {
90  double element[2];
91  };
92 
93  explicit Block(std::size_t recordSize, std::size_t recordCount)
94  : _mem(new AllocType[(recordSize * recordCount) / sizeof(AllocType)]),
95  _next(reinterpret_cast<char *>(_mem.get())),
96  _end(_next + recordSize * recordCount) {
97  assert((recordSize * recordCount) % sizeof(AllocType) == 0);
98  std::fill(_next, _end, 0); // initialize to zero; we'll later initialize floats to NaN.
99  }
100 
102  char *_next;
103  char *_end;
104 };
105 
106 } // namespace
107 
108 // =============== BaseTable implementation (see header for docs) ===========================================
109 
110 void BaseTable::preallocate(std::size_t n) { Block::preallocate(_schema.getRecordSize(), n, _manager); }
111 
113  if (_manager) {
114  return Block::getBufferSize(_schema.getRecordSize(), _manager);
115  } else {
116  return 0;
117  }
118 }
119 
122 }
123 
125  return Schema();
126 }
127 
130  output->assign(input);
131  return output;
132 }
133 
136  output->assign(input, mapper);
137  return output;
138 }
139 
141  return std::make_shared<io::FitsWriter>(fitsfile, flags);
142 }
143 
145  return std::shared_ptr<BaseTable>(new BaseTable(*this));
146 }
147 
149  return constructRecord<BaseRecord>();
150 }
151 
153  Block::padSchema(_schema);
154  _schema.disconnectAliases();
155  _schema.getAliasMap()->_table = this;
156 }
157 
158 BaseTable::~BaseTable() { _schema.getAliasMap()->_table = nullptr; }
159 
160 namespace {
161 
162 // A Schema Functor used to destroy variable-length array fields using an explicit call to their
163 // destructor (necessary since we used placement new). All other fields are ignored, as they're POD.
164 struct RecordDestroyer {
165  template <typename T>
166  void operator()(SchemaItem<T> const &item) const {}
167 
168  template <typename T>
169  void operator()(SchemaItem<Array<T> > const &item) const {
170  using Element = ndarray::Array<T, 1, 1>;
171  if (item.key.isVariableLength()) {
172  (*reinterpret_cast<Element *>(data + item.key.getOffset())).~Element();
173  }
174  }
175 
176  void operator()(SchemaItem<std::string> const &item) const {
177  if (item.key.isVariableLength()) {
178  using std::string; // invoking the destructor on a qualified name doesn't compile in gcc 4.8.1
179  // https://stackoverflow.com/q/24593942
180  (*reinterpret_cast<string *>(data + item.key.getOffset())).~string();
181  }
182  }
183 
184  char *data;
185 };
186 
187 } // namespace
188 
189 detail::RecordData BaseTable::_makeNewRecordData() {
190  auto data = Block::get(_schema.getRecordSize(), _manager);
191  return detail::RecordData{
192  data,
194  _manager // manager always points to the most recently-used block.
195  };
196 }
197 
198 void BaseTable::_destroy(BaseRecord &record) {
199  assert(record._table.get() == this);
200  RecordDestroyer f = {reinterpret_cast<char *>(record._data)};
201  _schema.forEach(f);
202  if (record._manager == _manager) Block::reclaim(_schema.getRecordSize(), record._data, _manager);
203 }
204 
205 /*
206  * JFB has no idea whether the default value below is sensible, or even whether
207  * it should be expressed ultimately as an approximate size in bytes rather than a
208  * number of records; the answer probably depends on both the typical size of
209  * records and the typical number of records.
210  */
212 
213 // =============== BaseCatalog instantiation =================================================================
214 
215 template class CatalogT<BaseRecord>;
216 template class CatalogT<BaseRecord const>;
217 } // namespace table
218 } // namespace afw
219 } // namespace lsst
double element[2]
Definition: BaseTable.cc:90
char * data
Definition: BaseTable.cc:184
SchemaMapper * mapper
Definition: SchemaMapper.cc:71
table::Schema schema
Definition: python.h:134
A simple struct that combines the two arguments that must be passed to most cfitsio routines and cont...
Definition: fits.h:297
Base class for all records.
Definition: BaseRecord.h:31
std::shared_ptr< BaseRecord > makeRecord()
Default-construct an associated record.
Definition: BaseTable.h:108
virtual std::shared_ptr< BaseRecord > _makeRecord()
Default-construct an associated record (protected implementation).
Definition: BaseTable.cc:148
virtual std::shared_ptr< io::FitsWriter > makeFitsWriter(fits::Fits *fitsfile, int flags) const
Definition: BaseTable.cc:140
void preallocate(std::size_t nRecords)
Allocate contiguous space for new records in advance.
Definition: BaseTable.cc:110
std::shared_ptr< BaseRecord > copyRecord(BaseRecord const &input)
Deep-copy a record, requiring that it have the same schema as this table.
Definition: BaseTable.cc:128
BaseTable(Schema const &schema)
Construct from a schema.
Definition: BaseTable.cc:152
std::size_t getBufferSize() const
Return the number of additional records space has been already been allocated for.
Definition: BaseTable.cc:112
static Schema makeMinimalSchema()
Return a minimal schema for Base tables and records.
Definition: BaseTable.cc:124
static int nRecordsPerBlock
Number of records in each memory block.
Definition: BaseTable.h:76
static std::shared_ptr< BaseTable > make(Schema const &schema)
Construct a new table.
Definition: BaseTable.cc:120
virtual std::shared_ptr< BaseTable > _clone() const
Clone implementation with noncovariant return types.
Definition: BaseTable.cc:144
Defines the fields and offsets for a table.
Definition: Schema.h:51
void forEach(F &func) const
Apply a functor to each SchemaItem in the Schema.
Definition: Schema.h:214
void disconnectAliases()
Sever the connection between this schema and any others with which it shares aliases.
Definition: Schema.cc:540
std::size_t getRecordSize() const
Return the raw size of a record in bytes.
Definition: Schema.h:149
std::shared_ptr< AliasMap > getAliasMap() const
Return the map of aliases.
Definition: Schema.h:279
A mapping between the keys of two Schemas, used to copy data between them.
Definition: SchemaMapper.h:21
static void padSchema(Schema &schema, std::size_t bytes)
Definition: Access.h:88
T fill(T... args)
A base class for image defects.
T remainder(T... args)
A simple pair-like struct for mapping a Field (name and description) with a Key (used for actual data...
Definition: SchemaImpl.h:22