BaseTable.cc
// -*- lsst-c++ -*-

#include <memory>

#include "boost/shared_ptr.hpp"  // only for ndarray

#include "lsst/afw/table/BaseTable.h"
#include "lsst/afw/table/BaseRecord.h"
#include "lsst/afw/table/Catalog.h"
#include "lsst/afw/table/detail/Access.h"
#include "lsst/afw/table/io/FitsWriter.h"

namespace lsst {
namespace afw {
namespace table {

// =============== Block ====================================================================================

// This is a block of memory that doles out record-sized chunks when a table asks for them.
// It inherits from ndarray::Manager so we can return ndarrays that refer to the memory in the
// block with correct reference counting (ndarray::Manager is just an empty base class with an
// internal reference count - it's like a shared_ptr without the pointer and template parameter).
//
// Records are allocated in Blocks for two reasons:
// - it allows tables to be either totally contiguous in memory (enabling column views) or
//   not (enabling dynamic addition of records), all in one class;
// - it saves us from ever having to reallocate all the records associated with a table
//   when we run out of space (which is what a std::vector-like model would require). This keeps
//   records and/or iterators to them from being invalidated, and it keeps tables from having
//   to track all the records whose data they own.
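//
// For illustration, the two usage patterns this enables (record counts hypothetical):
// - table->preallocate(1000) reserves one Block large enough for 1000 records, so they all
//   land contiguously and a column view over the whole table is possible;
// - repeated makeRecord() calls with no preallocation chain new Blocks of nRecordsPerBlock
//   records as each one fills up, never moving records that already exist.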

namespace {

class Block : public ndarray::Manager {
public:
    using Ptr = boost::intrusive_ptr<Block>;

    // If the last chunk allocated isn't needed after all (usually because of an exception in a constructor)
    // we reuse it immediately. If it wasn't the last chunk allocated, it can't be reclaimed until
    // the entire block goes out of scope.
    static void reclaim(std::size_t recordSize, void *data, ndarray::Manager::Ptr const &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (reinterpret_cast<char *>(data) + recordSize == block->_next) {
            block->_next -= recordSize;
        }
    }
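
    // For example: if get() below has just handed out the chunk ending at _next and the record
    // constructor then throws, reclaim() rewinds _next so the same chunk is handed out again on
    // the next get(); chunks handed out earlier stay where they are until the block is released.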

    // Ensure we have space for at least the given number of records as a contiguous block.
    // May not actually allocate anything if we already do.
    static void preallocate(std::size_t recordSize, std::size_t recordCount, ndarray::Manager::Ptr &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (!block || static_cast<std::size_t>(block->_end - block->_next) < recordSize * recordCount) {
            block = Ptr(new Block(recordSize, recordCount));
            manager = block;
        }
    }

    static std::size_t getBufferSize(std::size_t recordSize, ndarray::Manager::Ptr const &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        return static_cast<std::size_t>(block->_end - block->_next) / recordSize;
    }

    // Get the next chunk from the block, making a new block and installing it into the table
    // if we're all out of space.
    static void *get(std::size_t recordSize, ndarray::Manager::Ptr &manager) {
        Ptr block = boost::static_pointer_cast<Block>(manager);
        if (!block || block->_next == block->_end) {
            block = Ptr(new Block(recordSize, BaseTable::nRecordsPerBlock));
            manager = block;
        }
        void *r = block->_next;
        block->_next += recordSize;
        return r;
    }
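
    // Note that installing a fresh Block here does not free the old one: every record holds a
    // Manager::Ptr to the block it lives in, so a filled block stays alive until the last record
    // (or ndarray column view) referencing it goes away.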

    // Block is also the keeper of the special number that says what alignment boundaries are
    // needed for schemas. Before we start using a schema, we need to first ensure it meets that
    // requirement, and pad it if not.
    static void padSchema(Schema &schema) {
        static int const MIN_RECORD_ALIGN = sizeof(AllocType);
        std::size_t remainder = schema.getRecordSize() % MIN_RECORD_ALIGN;
        if (remainder) {
            detail::Access::padSchema(schema, MIN_RECORD_ALIGN - remainder);
        }
    }
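
    // A worked example: sizeof(AllocType) is 16 on typical platforms, so a schema whose raw
    // record size is 40 bytes has remainder 40 % 16 == 8 and gets padded by 16 - 8 == 8 bytes,
    // giving 48-byte records that always start on a 16-byte boundary.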
87
88private:
89 struct AllocType {
90 double element[2];
91 };
92
93 explicit Block(std::size_t recordSize, std::size_t recordCount)
94 : _mem(new AllocType[(recordSize * recordCount) / sizeof(AllocType)]),
95 _next(reinterpret_cast<char *>(_mem.get())),
96 _end(_next + recordSize * recordCount) {
97 assert((recordSize * recordCount) % sizeof(AllocType) == 0);
98 std::fill(_next, _end, 0); // initialize to zero; we'll later initialize floats to NaN.
99 }
100
102 char *_next;
103 char *_end;
104};
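
// A minimal sketch (not literal code from this file) of how the BaseTable members below drive Block:
//
//     ndarray::Manager::Ptr manager;                   // held by the table as _manager
//     Block::preallocate(recordSize, n, manager);      // optional, for contiguity
//     void *chunk = Block::get(recordSize, manager);   // one chunk per new record
//     Block::reclaim(recordSize, chunk, manager);      // only if record construction fails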

}  // namespace

// =============== BaseTable implementation (see header for docs) ===========================================

void BaseTable::preallocate(std::size_t n) { Block::preallocate(_schema.getRecordSize(), n, _manager); }

std::size_t BaseTable::getBufferSize() const {
    if (_manager) {
        return Block::getBufferSize(_schema.getRecordSize(), _manager);
    } else {
        return 0;
    }
}

std::shared_ptr<BaseTable> BaseTable::make(Schema const &schema) {
    std::shared_ptr<BaseTable> table(new BaseTable(schema));
    table->getSchema().getAliasMap()->setTable(table);
    return table;
}

Schema BaseTable::makeMinimalSchema() { return Schema(); }

std::shared_ptr<BaseRecord> BaseTable::copyRecord(BaseRecord const &input) {
    std::shared_ptr<BaseRecord> output = makeRecord();
    output->assign(input);
    return output;
}

std::shared_ptr<BaseRecord> BaseTable::copyRecord(BaseRecord const &input, SchemaMapper const &mapper) {
    std::shared_ptr<BaseRecord> output = makeRecord();
    output->assign(input, mapper);
    return output;
}

std::shared_ptr<io::FitsWriter> BaseTable::makeFitsWriter(fits::Fits *fitsfile, int flags) const {
    return std::make_shared<io::FitsWriter>(fitsfile, flags);
}

std::shared_ptr<BaseTable> BaseTable::_clone() const {
    std::shared_ptr<BaseTable> table(new BaseTable(*this));
    table->getSchema().getAliasMap()->setTable(table);
    return table;
}

std::shared_ptr<BaseRecord> BaseTable::_makeRecord() { return constructRecord<BaseRecord>(); }

BaseTable::BaseTable(Schema const &schema, std::shared_ptr<daf::base::PropertyList> metadata)
        : _schema(schema), _metadata(metadata) {
    Block::padSchema(_schema);
    _schema.disconnectAliases();
    _schema.getAliasMap()->getTable().reset();
}

BaseTable::BaseTable(BaseTable const &other) : _schema(other._schema), _metadata(other._metadata) {
    _schema.disconnectAliases();
    _schema.getAliasMap()->getTable().reset();
    if (_metadata) _metadata = std::static_pointer_cast<daf::base::PropertyList>(_metadata->deepCopy());
}

BaseTable::~BaseTable() { _schema.getAliasMap()->getTable().reset(); }

namespace {

// A Schema functor used to destroy variable-length array fields via an explicit call to their
// destructor (necessary since we used placement new). All other fields are ignored, as they're POD.
struct RecordDestroyer {
    template <typename T>
    void operator()(SchemaItem<T> const &item) const {}

    template <typename T>
    void operator()(SchemaItem<Array<T> > const &item) const {
        using Element = ndarray::Array<T, 1, 1>;
        if (item.key.isVariableLength()) {
            (*reinterpret_cast<Element *>(data + item.key.getOffset())).~Element();
        }
    }

    void operator()(SchemaItem<std::string> const &item) const {
        if (item.key.isVariableLength()) {
            using std::string;  // invoking the destructor on a qualified name doesn't compile in gcc 4.8.1
            // https://stackoverflow.com/q/24593942
            (*reinterpret_cast<string *>(data + item.key.getOffset())).~string();
        }
    }

    char *data;
};

}  // namespace
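
// To illustrate why RecordDestroyer is needed: a variable-length Array<float> field stores a full
// ndarray::Array<float, 1, 1> object that was placement-new'ed into the record's raw memory, so its
// destructor must be invoked by hand; fixed-length fields are plain bytes and need no such call.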

detail::RecordData BaseTable::_makeNewRecordData() {
    auto data = Block::get(_schema.getRecordSize(), _manager);
    return detail::RecordData{
            data, shared_from_this(),
            _manager  // manager always points to the most recently-used block.
    };
}

void BaseTable::_destroy(BaseRecord &record) {
    assert(record._table.get() == this);
    RecordDestroyer f = {reinterpret_cast<char *>(record._data)};
    _schema.forEach(f);
    if (record._manager == _manager) Block::reclaim(_schema.getRecordSize(), record._data, _manager);
}
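
// _destroy runs the field destructors unconditionally, but memory is reclaimed only when the record
// came from the table's current block; chunks in retired blocks stay allocated until the block itself
// is released (see Block::reclaim above).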

/*
 * JFB has no idea whether the default value below is sensible, or even whether
 * it should be expressed ultimately as an approximate size in bytes rather than a
 * number of records; the answer probably depends on both the typical size of
 * records and the typical number of records.
 */
int BaseTable::nRecordsPerBlock = 100;

// =============== BaseCatalog instantiation =================================================================

template class CatalogT<BaseRecord>;
template class CatalogT<BaseRecord const>;
}  // namespace table
}  // namespace afw
}  // namespace lsst