apdbCassandraSchema.py
# This file is part of dax_apdb.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ["ApdbCassandraSchema", "CreateTableOptions", "TableOptions"]

import enum
import logging
from collections.abc import Mapping
from typing import TYPE_CHECKING, cast

import felis.datamodel
import pydantic

from .. import schema_model
from ..apdbSchema import ApdbSchema, ApdbTables

if TYPE_CHECKING:
    import cassandra.cluster


_LOG = logging.getLogger(__name__)


class InconsistentSchemaError(RuntimeError):
    """Exception raised when schema state is inconsistent."""


class TableOptions(pydantic.BaseModel):
    """Set of per-table options for creating Cassandra tables."""

    model_config = pydantic.ConfigDict(extra="forbid")

    tables: list[str]
    """List of table names for which the options should be applied."""

    options: str
    """Options string, used verbatim in the WITH clause of CREATE TABLE."""


class CreateTableOptions(pydantic.BaseModel):
    """Set of options for creating Cassandra tables."""

    model_config = pydantic.ConfigDict(extra="forbid")

    table_options: list[TableOptions] = pydantic.Field(default_factory=list)
    """Collection of per-table options."""

    default_table_options: str = ""
    """Default options used for tables that are not in the above list."""

    def get_options(self, table_name: str) -> str:
71 """Find table options for a given table name."""
        for table_options in self.table_options:
            if table_name in table_options.tables:
                return table_options.options
        return self.default_table_options


@enum.unique
class ExtraTables(enum.Enum):
    """Names of the extra tables used by Cassandra implementation."""

    ApdbReplicaChunks = "ApdbReplicaChunks"
    """Name of the table for replica chunk records."""

    DiaObjectChunks = "DiaObjectChunks"
    """Name of the table for DIAObject chunk data."""

    DiaSourceChunks = "DiaSourceChunks"
    """Name of the table for DIASource chunk data."""

    DiaForcedSourceChunks = "DiaForcedSourceChunks"
    """Name of the table for DIAForcedSource chunk data."""

    DiaSourceToPartition = "DiaSourceToPartition"
    """Maps diaSourceId to its partition values (pixel and time)."""

    DiaObjectLastToPartition = "DiaObjectLastToPartition"
    """Maps last diaObjectId version to its partition (pixel)."""

    def table_name(self, prefix: str = "") -> str:
101 """Return full table name."""
        return prefix + self.value

    @classmethod
    def replica_chunk_tables(cls) -> Mapping[ExtraTables, ApdbTables]:
        """Return mapping of tables used for replica chunks storage to their
        corresponding regular tables.
        """
        return {
            cls.DiaObjectChunks: ApdbTables.DiaObject,
            cls.DiaSourceChunks: ApdbTables.DiaSource,
            cls.DiaForcedSourceChunks: ApdbTables.DiaForcedSource,
        }


class ApdbCassandraSchema(ApdbSchema):
    """Class for management of APDB schema.

    Parameters
    ----------
    session : `cassandra.cluster.Session`
        Cassandra session object.
    keyspace : `str`
        Keyspace name for all tables.
    schema_file : `str`
        Name of the YAML schema file.
    schema_name : `str`, optional
        Name of the schema in YAML files.
    prefix : `str`, optional
        Prefix to add to all schema elements.
    time_partition_tables : `bool`, optional
        If `True` then schema will have a separate table for each time
        partition.
    enable_replica : `bool`, optional
        If `True` then use additional tables for replica chunks.
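
    Examples
    --------
    A minimal usage sketch, assuming a connected Cassandra ``session`` and a
    schema file (both hypothetical here):

    >>> schema = ApdbCassandraSchema(  # doctest: +SKIP
    ...     session=session,
    ...     keyspace="apdb",
    ...     schema_file="apdb-schema.yaml",
    ...     enable_replica=True,
    ... )
    >>> schema.makeSchema(replication_factor=1)  # doctest: +SKIP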
136 """

    _type_map = {
        felis.datamodel.DataType.double: "DOUBLE",
        felis.datamodel.DataType.float: "FLOAT",
        felis.datamodel.DataType.timestamp: "TIMESTAMP",
        felis.datamodel.DataType.long: "BIGINT",
        felis.datamodel.DataType.int: "INT",
        felis.datamodel.DataType.short: "SMALLINT",
        felis.datamodel.DataType.byte: "TINYINT",
        felis.datamodel.DataType.binary: "BLOB",
        felis.datamodel.DataType.char: "TEXT",
        felis.datamodel.DataType.string: "TEXT",
        felis.datamodel.DataType.unicode: "TEXT",
        felis.datamodel.DataType.text: "TEXT",
        felis.datamodel.DataType.boolean: "BOOLEAN",
        schema_model.ExtraDataTypes.UUID: "UUID",
    }
154 """Map YAML column types to Cassandra"""

    _time_partitioned_tables = [
        ApdbTables.DiaObject,
        ApdbTables.DiaSource,
        ApdbTables.DiaForcedSource,
    ]
    _spatially_partitioned_tables = [ApdbTables.DiaObjectLast]

    def __init__(
        self,
        session: cassandra.cluster.Session,
        keyspace: str,
        schema_file: str,
        schema_name: str = "ApdbSchema",
        prefix: str = "",
        time_partition_tables: bool = False,
        enable_replica: bool = False,
    ):
        super().__init__(schema_file, schema_name)

        self._session = session
        self._keyspace = keyspace
        self._prefix = prefix
        self._time_partition_tables = time_partition_tables
        self._enable_replica = enable_replica
        self._has_replica_chunks: bool | None = None

        self._apdb_tables = self._apdb_tables_schema(time_partition_tables)
        self._extra_tables = self._extra_tables_schema()

    def _apdb_tables_schema(self, time_partition_tables: bool) -> Mapping[ApdbTables, schema_model.Table]:
        """Generate schema for regular APDB tables."""
        apdb_tables: dict[ApdbTables, schema_model.Table] = {}

        # Add columns and index for partitioning.
        for table, apdb_table_def in self.tableSchemas.items():
            part_columns = []
            add_columns = []
            primary_key = apdb_table_def.primary_key[:]
            if table in self._spatially_partitioned_tables:
                # DiaObjectLast does not need temporal partitioning.
                part_columns = ["apdb_part"]
                add_columns = part_columns
            elif table in self._time_partitioned_tables:
                if time_partition_tables:
                    part_columns = ["apdb_part"]
                else:
                    part_columns = ["apdb_part", "apdb_time_part"]
                add_columns = part_columns
            elif table is ApdbTables.SSObject:
                # For SSObject there is no natural partition key, but we have
                # to partition it because there are too many of them. We
                # partition on its primary key (and drop the separate primary
                # key index).
                part_columns = ["ssObjectId"]
                primary_key = []
            elif table is ApdbTables.metadata:
                # Metadata is in one partition because we want to read all of
                # it in one query; add an extra column for partition.
                part_columns = ["meta_part"]
                add_columns = part_columns
            else:
                # TODO: Do not know what to do with the other tables.
                continue

            column_defs = []
            if add_columns:
                column_defs = [
                    schema_model.Column(
                        id=f"#{name}", name=name, datatype=felis.datamodel.DataType.long, nullable=False
                    )
                    for name in add_columns
                ]

            annotations = dict(apdb_table_def.annotations)
            annotations["cassandra:apdb_column_names"] = [column.name for column in apdb_table_def.columns]
            if part_columns:
                annotations["cassandra:partitioning_columns"] = part_columns

            apdb_tables[table] = schema_model.Table(
                id=apdb_table_def.id,
                name=apdb_table_def.name,
                columns=column_defs + apdb_table_def.columns,
                primary_key=primary_key,
                indexes=[],
                constraints=[],
                annotations=annotations,
            )

        return apdb_tables

    def _extra_tables_schema(self) -> Mapping[ExtraTables, schema_model.Table]:
        """Generate schema for extra tables."""
        extra_tables: dict[ExtraTables, schema_model.Table] = {}

        # This table maps DiaSource ID to its partitions in DiaSource table
        # and DiaSourceChunks tables.
        extra_tables[ExtraTables.DiaSourceToPartition] = schema_model.Table(
            id="#" + ExtraTables.DiaSourceToPartition.value,
            name=ExtraTables.DiaSourceToPartition.table_name(self._prefix),
            columns=[
                schema_model.Column(
                    id="#diaSourceId",
                    name="diaSourceId",
                    datatype=felis.datamodel.DataType.long,
                    nullable=False,
                ),
                schema_model.Column(
                    id="#apdb_part", name="apdb_part", datatype=felis.datamodel.DataType.long, nullable=False
                ),
                schema_model.Column(
                    id="#apdb_time_part",
                    name="apdb_time_part",
                    datatype=felis.datamodel.DataType.int,
                    nullable=False,
                ),
                schema_model.Column(
                    id="#apdb_replica_chunk",
                    name="apdb_replica_chunk",
                    datatype=felis.datamodel.DataType.long,
                    nullable=True,
                ),
            ],
            primary_key=[],
            indexes=[],
            constraints=[],
            annotations={"cassandra:partitioning_columns": ["diaSourceId"]},
        )

        # This table maps diaObjectId to its partition in DiaObjectLast table.
        extra_tables[ExtraTables.DiaObjectLastToPartition] = schema_model.Table(
            id="#" + ExtraTables.DiaObjectLastToPartition.value,
            name=ExtraTables.DiaObjectLastToPartition.table_name(self._prefix),
            columns=[
                schema_model.Column(
                    id="#diaObjectId",
                    name="diaObjectId",
                    datatype=felis.datamodel.DataType.long,
                    nullable=False,
                ),
                schema_model.Column(
                    id="#apdb_part", name="apdb_part", datatype=felis.datamodel.DataType.long, nullable=False
                ),
            ],
            primary_key=[],
            indexes=[],
            constraints=[],
            annotations={"cassandra:partitioning_columns": ["diaObjectId"]},
        )

        replica_chunk_column = schema_model.Column(
            id="#apdb_replica_chunk",
            name="apdb_replica_chunk",
            datatype=felis.datamodel.DataType.long,
            nullable=False,
        )

        if not self._enable_replica:
            return extra_tables

        # Table containing replica chunk (insert) IDs. This one is not really
        # partitioned, but a partition key must still be defined.
        extra_tables[ExtraTables.ApdbReplicaChunks] = schema_model.Table(
            id="#" + ExtraTables.ApdbReplicaChunks.value,
            name=ExtraTables.ApdbReplicaChunks.table_name(self._prefix),
            columns=[
                schema_model.Column(
                    id="#partition", name="partition", datatype=felis.datamodel.DataType.int, nullable=False
                ),
                replica_chunk_column,
                schema_model.Column(
                    id="#last_update_time",
                    name="last_update_time",
                    datatype=felis.datamodel.DataType.timestamp,
                    nullable=False,
                ),
                schema_model.Column(
                    id="#unique_id",
                    name="unique_id",
                    datatype=schema_model.ExtraDataTypes.UUID,
                    nullable=False,
                ),
            ],
            primary_key=[replica_chunk_column],
            indexes=[],
            constraints=[],
            annotations={"cassandra:partitioning_columns": ["partition"]},
        )

        for chunk_table_enum, apdb_table_enum in ExtraTables.replica_chunk_tables().items():
            apdb_table_def = self.tableSchemas[apdb_table_enum]

            extra_tables[chunk_table_enum] = schema_model.Table(
                id="#" + chunk_table_enum.value,
                name=chunk_table_enum.table_name(self._prefix),
                columns=[replica_chunk_column] + apdb_table_def.columns,
                primary_key=apdb_table_def.primary_key[:],
                indexes=[],
                constraints=[],
                annotations={
                    "cassandra:partitioning_columns": ["apdb_replica_chunk"],
                    "cassandra:apdb_column_names": [column.name for column in apdb_table_def.columns],
                },
            )

        return extra_tables

    @property
    def has_replica_chunks(self) -> bool:
        """Whether insert ID tables are to be used (`bool`)."""
        if self._has_replica_chunks is None:
            self._has_replica_chunks = self._check_replica_chunks()
        return self._has_replica_chunks

    def _check_replica_chunks(self) -> bool:
        """Check whether database has tables for tracking insert IDs."""
        table_name = ExtraTables.ApdbReplicaChunks.table_name(self._prefix)
        query = "SELECT count(*) FROM system_schema.tables WHERE keyspace_name = %s and table_name = %s"
        result = self._session.execute(query, (self._keyspace, table_name))
        row = result.one()
        return bool(row[0])

    def empty(self) -> bool:
        """Return True if database schema is empty.

        Returns
        -------
        empty : `bool`
            `True` if none of the required APDB tables exist in the database,
            `False` if all required tables exist.

        Raises
        ------
        InconsistentSchemaError
            Raised when some of the required tables exist but not all.
        """
        query = "SELECT table_name FROM system_schema.tables WHERE keyspace_name = %s"
        result = self._session.execute(query, (self._keyspace,))
        table_names = set(row[0] for row in result.all())

        existing_tables = []
        missing_tables = []
        for table_enum in self._apdb_tables:
            table_name = table_enum.table_name(self._prefix)
            if self._time_partition_tables and table_enum in self._time_partitioned_tables:
                # Check prefix for time-partitioned tables.
                exists = any(table.startswith(f"{table_name}_") for table in table_names)
            else:
                exists = table_name in table_names
            if exists:
                existing_tables.append(table_name)
            else:
                missing_tables.append(table_name)

        if not missing_tables:
            return False
        elif not existing_tables:
            return True
        else:
            raise InconsistentSchemaError(
                f"Only some required APDB tables exist: {existing_tables}, missing tables: {missing_tables}"
            )

    def existing_tables(self, *args: ApdbTables) -> dict[ApdbTables, list[str]]:
        """Return the list of existing table names for given tables.

        Parameters
        ----------
        *args : `ApdbTables`
            Tables for which to return their existing table names.

        Returns
        -------
        tables : `dict` [`ApdbTables`, `list` [`str`]]
            Mapping of the APDB table to the list of the existing table names.
            More than one name can be present in the list if configuration
            specifies per-partition tables.
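
        For example, when per-partition tables are configured the result may
        look like this (partition numbers are hypothetical)::

            {ApdbTables.DiaObject: ["DiaObject_100", "DiaObject_101"]}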
432 """
434 # Some of the tables should have per-partition tables.
435 query = "SELECT table_name FROM system_schema.tables WHERE keyspace_name = %s"
436 result = self._session.execute(query, (self._keyspace,))
437 table_names = set(row[0] for row in result.all())

            tables = {}
            for table_enum in args:
                base_name = table_enum.table_name(self._prefix)
                if table_enum in self._time_partitioned_tables:
                    tables[table_enum] = [
                        table for table in table_names if table.startswith(f"{base_name}_")
                    ]
                else:
                    tables[table_enum] = [base_name]
            return tables
        else:
            # Do not check that they exist, we know that they should.
            return {table_enum: [table_enum.table_name(self._prefix)] for table_enum in args}

    def tableName(self, table_name: ApdbTables | ExtraTables) -> str:
        """Return Cassandra table name for APDB table."""
        return table_name.table_name(self._prefix)

    def keyspace(self) -> str:
        """Return Cassandra keyspace for APDB tables."""
        return self._keyspace

    def getColumnMap(self, table_name: ApdbTables | ExtraTables) -> Mapping[str, schema_model.Column]:
        """Return mapping of column names to Column definitions.

        Parameters
        ----------
        table_name : `ApdbTables` or `ExtraTables`
            One of the known APDB table names.

        Returns
        -------
        column_map : `dict`
            Mapping of column names to `Column` instances.
        """
        table_schema = self._table_schema(table_name)
        cmap = {column.name: column for column in table_schema.columns}
        return cmap

    def apdbColumnNames(self, table_name: ApdbTables | ExtraTables) -> list[str]:
        """Return a list of column names for a table as defined in APDB
        schema.

        Parameters
        ----------
        table_name : `ApdbTables` or `ExtraTables`
            Enum for a table in APDB schema.

        Returns
        -------
        columns : `list` of `str`
            Names of regular columns in the table.
        """
        table_schema = self._table_schema(table_name)
        return table_schema.annotations["cassandra:apdb_column_names"]

    def partitionColumns(self, table_name: ApdbTables | ExtraTables) -> list[str]:
        """Return a list of columns used for table partitioning.

        Parameters
        ----------
        table_name : `ApdbTables` or `ExtraTables`
            Table name in APDB schema.

        Returns
        -------
        columns : `list` of `str`
            Names of columns used for partitioning.
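
        For example, the spatially partitioned ``DiaObjectLast`` table uses a
        single pixelization column, so this returns ``["apdb_part"]``.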
505 """
506 table_schema = self._table_schema(table_name)
507 return table_schema.annotations.get("cassandra:partitioning_columns", [])

    def clusteringColumns(self, table_name: ApdbTables | ExtraTables) -> list[str]:
        """Return a list of columns used for clustering.

        Parameters
        ----------
        table_name : `ApdbTables` or `ExtraTables`
            Table name in APDB schema.

        Returns
        -------
        columns : `list` of `str`
            Names of columns used for clustering.
        """
        table_schema = self._table_schema(table_name)
        return [column.name for column in table_schema.primary_key]

    def makeSchema(
        self,
        *,
        drop: bool = False,
        part_range: tuple[int, int] | None = None,
        replication_factor: int | None = None,
        table_options: CreateTableOptions | None = None,
    ) -> None:
        """Create or re-create all tables.

        Parameters
        ----------
        drop : `bool`
            If `True` then drop tables before creating new ones. Note that
            only tables are dropped, not the whole keyspace.
        part_range : `tuple` [ `int` ] or `None`
            Start and end partition number for time partitions; the end is
            not inclusive. Used to create per-partition DiaObject, DiaSource,
            and DiaForcedSource tables. If `None` then per-partition tables
            are not created.
        replication_factor : `int`, optional
            Replication factor used when creating a new keyspace; if the
            keyspace already exists its replication factor is not changed.
        table_options : `CreateTableOptions`, optional
            Options used when creating Cassandra tables.
        """
        # Try to create keyspace if it does not exist.
        if replication_factor is None:
            replication_factor = 1

        # If keyspace exists, check its replication factor.
        query = "SELECT replication FROM system_schema.keyspaces WHERE keyspace_name = %s"
        result = self._session.execute(query, (self._keyspace,))
        if row := result.one():
            # Check replication factor, ignore strategy class.
            repl_config = cast(Mapping[str, str], row[0])
            current_repl = int(repl_config["replication_factor"])
            if replication_factor != current_repl:
                raise ValueError(
                    f"New replication factor {replication_factor} differs from the replication factor "
                    f"for already existing keyspace: {current_repl}"
                )
        else:
            # Need a new keyspace.
            query = (
                f'CREATE KEYSPACE "{self._keyspace}"'
                " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': "
                f"{replication_factor}"
                "}"
            )
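            # With replication_factor=1 and a keyspace named "apdb"
            # (hypothetical name), the generated statement would read:
            #   CREATE KEYSPACE "apdb" WITH replication =
            #       {'class': 'SimpleStrategy', 'replication_factor': 1}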
            self._session.execute(query)

        for table in self._apdb_tables:
            self._makeTableSchema(table, drop, part_range, table_options)
        for extra_table in self._extra_tables:
            self._makeTableSchema(extra_table, drop, part_range, table_options)
        # Reset cached information.
        self._has_replica_chunks = None

    def _makeTableSchema(
        self,
        table: ApdbTables | ExtraTables,
        drop: bool = False,
        part_range: tuple[int, int] | None = None,
        table_options: CreateTableOptions | None = None,
    ) -> None:
        _LOG.debug("Making table %s", table)

        fullTable = table.table_name(self._prefix)

        table_list = [fullTable]
        if part_range is not None:
            if table in self._time_partitioned_tables:
                partitions = range(*part_range)
                table_list = [f"{fullTable}_{part}" for part in partitions]

        if drop:
            queries = [
                f'DROP TABLE IF EXISTS "{self._keyspace}"."{table_name}"' for table_name in table_list
            ]
            futures = [self._session.execute_async(query, timeout=None) for query in queries]
            for future in futures:
                _LOG.debug("wait for query: %s", future.query)
                future.result()
                _LOG.debug("query finished: %s", future.query)

        queries = []
        options = table_options.get_options(fullTable).strip() if table_options else None
        for table_name in table_list:
            if_not_exists = "" if drop else "IF NOT EXISTS"
            columns = ", ".join(self._tableColumns(table))
            query = f'CREATE TABLE {if_not_exists} "{self._keyspace}"."{table_name}" ({columns})'
            if options:
                query = f"{query} WITH {options}"
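                # The resulting statement looks like (hypothetical table name
                # and options):
                #   CREATE TABLE IF NOT EXISTS "apdb"."DiaObject_100" (...)
                #       WITH gc_grace_seconds = 3600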
            _LOG.debug("query: %s", query)
            queries.append(query)
        futures = [self._session.execute_async(query, timeout=None) for query in queries]
        for future in futures:
            _LOG.debug("wait for query: %s", future.query)
            future.result()
            _LOG.debug("query finished: %s", future.query)

    def _tableColumns(self, table_name: ApdbTables | ExtraTables) -> list[str]:
        """Return set of columns in a table.

        Parameters
        ----------
        table_name : `ApdbTables` or `ExtraTables`
            Name of the table.

        Returns
        -------
        column_defs : `list`
            List of strings in the format "column_name type".
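
        For a spatially partitioned table the result might look like this
        (hypothetical columns)::

            ['"apdb_part" BIGINT', '"diaObjectId" BIGINT',
             'PRIMARY KEY ("apdb_part", "diaObjectId")']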
635 """
        table_schema = self._table_schema(table_name)

        # Partition columns are required; clustering columns may be empty.
        part_columns = table_schema.annotations.get("cassandra:partitioning_columns", [])
        clust_columns = [column.name for column in table_schema.primary_key]
        _LOG.debug("part_columns: %s", part_columns)
        _LOG.debug("clust_columns: %s", clust_columns)
        if not part_columns:
            raise ValueError(f"Table {table_name} configuration is missing partition index")

        # All columns.
        column_defs = []
        for column in table_schema.columns:
            ctype = self._type_map[column.datatype]
            column_defs.append(f'"{column.name}" {ctype}')

        # Primary key definition.
        part_columns = [f'"{col}"' for col in part_columns]
        clust_columns = [f'"{col}"' for col in clust_columns]
        if len(part_columns) > 1:
            columns = ", ".join(part_columns)
            part_columns = [f"({columns})"]
        pkey = ", ".join(part_columns + clust_columns)
        _LOG.debug("pkey: %s", pkey)
        column_defs.append(f"PRIMARY KEY ({pkey})")

        return column_defs

    def _table_schema(self, table: ApdbTables | ExtraTables) -> schema_model.Table:
        """Return schema definition for a table."""
        if isinstance(table, ApdbTables):
            table_schema = self._apdb_tables[table]
        else:
            table_schema = self._extra_tables[table]
        return table_schema