from __future__ import annotations

__all__ = ["ApdbCassandra"]

import datetime
import logging
import random
import uuid
import warnings
from collections import defaultdict
from collections.abc import Iterable, Iterator, Mapping, Set
from typing import TYPE_CHECKING, Any, cast

import astropy.time
import felis.datamodel
import numpy as np
import pandas
try:
    import cassandra.query
    from cassandra.query import UNSET_VALUE

    CASSANDRA_IMPORTED = True
except ImportError:
    CASSANDRA_IMPORTED = False
from lsst import sphgeom
from lsst.utils.iteration import chunk_iterable

from ..apdb import Apdb, ApdbConfig
from ..apdbConfigFreezer import ApdbConfigFreezer
from ..apdbReplica import ApdbTableData, ReplicaChunk
from ..apdbSchema import ApdbSchema, ApdbTables
from ..monitor import MonAgent
from ..schema_model import Table
from ..timer import Timer
from ..versionTuple import IncompatibleVersionError, VersionTuple
from .apdbCassandraAdmin import ApdbCassandraAdmin
from .apdbCassandraReplica import ApdbCassandraReplica
from .apdbCassandraSchema import ApdbCassandraSchema, CreateTableOptions, ExtraTables
from .apdbMetadataCassandra import ApdbMetadataCassandra
from .cassandra_utils import (
    execute_concurrent,
    literal,
    quote_id,
    select_concurrent,
)
from .config import ApdbCassandraConfig, ApdbCassandraConnectionConfig, ApdbCassandraTimePartitionRange
from .connectionContext import ConnectionContext, DbVersions
from .exceptions import CassandraMissingError
from .partitioner import Partitioner
from .sessionFactory import SessionContext, SessionFactory
if TYPE_CHECKING:
    from ..apdbMetadata import ApdbMetadata
    from ..apdbUpdateRecord import ApdbUpdateRecord
_LOG = logging.getLogger(__name__)

_MON = MonAgent(__name__)

"""Version for the code controlling non-replication tables. This needs to be
updated following compatibility rules when schema produced by this code
changes.
"""
95 """Implementation of APDB database with Apache Cassandra backend.
99 config : `ApdbCassandraConfig`
100 Configuration object.
104 if not CASSANDRA_IMPORTED:
116 """Establish connection if not established and return context."""
123 schema_version=self.
_schema.schemaVersion(),
126 ApdbCassandraReplica.apdbReplicaImplementationVersion()
131 _LOG.debug(
"Current versions: %s", current_versions)
134 if _LOG.isEnabledFor(logging.DEBUG):
135 _LOG.debug(
"ApdbCassandra Configuration: %s", self.
_connection_context.config.model_dump())
    def _timer(self, name: str, *, tags: Mapping[str, str | int] | None = None) -> Timer:
        """Create `Timer` instance given its name."""
        return Timer(name, _MON, tags=tags)
    def _versionCheck(self, current_versions: DbVersions, db_versions: DbVersions) -> None:
        """Check schema version compatibility."""
        if not current_versions.schema_version.checkCompatibility(db_versions.schema_version):
            raise IncompatibleVersionError(
                f"Configured schema version {current_versions.schema_version} "
                f"is not compatible with database version {db_versions.schema_version}"
            )
        if not current_versions.code_version.checkCompatibility(db_versions.code_version):
            raise IncompatibleVersionError(
                f"Current code version {current_versions.code_version} "
                f"is not compatible with database version {db_versions.code_version}"
            )

        # Check replica versions only when both sides define one.
        match current_versions.replica_version, db_versions.replica_version:
            case None, None:
                pass
            case current, stored if current is not None and stored is not None:
                if not current.checkCompatibility(stored):
                    raise IncompatibleVersionError(
                        f"Current replication code version {current} "
                        f"is not compatible with database version {stored}"
                    )
            case _:
                raise IncompatibleVersionError(
                    f"Current replication code version {current_versions.replica_version} "
                    f"is not compatible with database version {db_versions.replica_version}"
                )
174 """Return version number for current APDB implementation.
178 version : `VersionTuple`
179 Version of the code defined in implementation class.
    def tableDef(self, table: ApdbTables) -> Table | None:
        return self._schema.tableSchemas.get(table)
    @classmethod
    def init_database(
        cls,
        hosts: tuple[str, ...],
        keyspace: str,
        *,
        schema_file: str | None = None,
        ss_schema_file: str | None = None,
        read_sources_months: int | None = None,
        read_forced_sources_months: int | None = None,
        enable_replica: bool = False,
        replica_skips_diaobjects: bool = False,
        port: int | None = None,
        username: str | None = None,
        prefix: str | None = None,
        part_pixelization: str | None = None,
        part_pix_level: int | None = None,
        time_partition_tables: bool = True,
        time_partition_start: str | None = None,
        time_partition_end: str | None = None,
        read_consistency: str | None = None,
        write_consistency: str | None = None,
        read_timeout: int | None = None,
        write_timeout: int | None = None,
        ra_dec_columns: tuple[str, str] | None = None,
        replication_factor: int | None = None,
        drop: bool = False,
        table_options: CreateTableOptions | None = None,
    ) -> ApdbCassandraConfig:
220 """Initialize new APDB instance and make configuration object for it.
224 hosts : `tuple` [`str`, ...]
225 List of host names or IP addresses for Cassandra cluster.
227 Name of the keyspace for APDB tables.
228 schema_file : `str`, optional
229 Location of (YAML) configuration file with APDB schema. If not
230 specified then default location will be used.
231 ss_schema_file : `str`, optional
232 Location of (YAML) configuration file with SSO schema. If not
233 specified then default location will be used.
234 read_sources_months : `int`, optional
235 Number of months of history to read from DiaSource.
236 read_forced_sources_months : `int`, optional
237 Number of months of history to read from DiaForcedSource.
238 enable_replica : `bool`, optional
239 If True, make additional tables used for replication to PPDB.
240 replica_skips_diaobjects : `bool`, optional
241 If `True` then do not fill regular ``DiaObject`` table when
242 ``enable_replica`` is `True`.
243 port : `int`, optional
244 Port number to use for Cassandra connections.
245 username : `str`, optional
246 User name for Cassandra connections.
247 prefix : `str`, optional
248 Optional prefix for all table names.
249 part_pixelization : `str`, optional
250 Name of the MOC pixelization used for partitioning.
251 part_pix_level : `int`, optional
253 time_partition_tables : `bool`, optional
254 Create per-partition tables.
255 time_partition_start : `str`, optional
256 Starting time for per-partition tables, in yyyy-mm-ddThh:mm:ss
258 time_partition_end : `str`, optional
259 Ending time for per-partition tables, in yyyy-mm-ddThh:mm:ss
261 read_consistency : `str`, optional
262 Name of the consistency level for read operations.
263 write_consistency : `str`, optional
264 Name of the consistency level for write operations.
265 read_timeout : `int`, optional
266 Read timeout in seconds.
267 write_timeout : `int`, optional
268 Write timeout in seconds.
269 ra_dec_columns : `tuple` [`str`, `str`], optional
270 Names of ra/dec columns in DiaObject table.
271 replication_factor : `int`, optional
272 Replication factor used when creating new keyspace, if keyspace
273 already exists its replication factor is not changed.
274 drop : `bool`, optional
275 If `True` then drop existing tables before re-creating the schema.
276 table_options : `CreateTableOptions`, optional
277 Options used when creating Cassandra tables.
281 config : `ApdbCassandraConfig`
282 Resulting configuration object for a created APDB instance.
291 "idle_heartbeat_interval": 0,
292 "idle_heartbeat_timeout": 30,
293 "control_connection_timeout": 100,
297 contact_points=hosts,
299 enable_replica=enable_replica,
300 replica_skips_diaobjects=replica_skips_diaobjects,
301 connection_config=connection_config,
303 config.partitioning.time_partition_tables = time_partition_tables
304 if schema_file
is not None:
305 config.schema_file = schema_file
306 if ss_schema_file
is not None:
307 config.ss_schema_file = ss_schema_file
308 if read_sources_months
is not None:
309 config.read_sources_months = read_sources_months
310 if read_forced_sources_months
is not None:
311 config.read_forced_sources_months = read_forced_sources_months
313 config.connection_config.port = port
314 if username
is not None:
315 config.connection_config.username = username
316 if prefix
is not None:
317 config.prefix = prefix
318 if part_pixelization
is not None:
319 config.partitioning.part_pixelization = part_pixelization
320 if part_pix_level
is not None:
321 config.partitioning.part_pix_level = part_pix_level
322 if time_partition_start
is not None:
323 config.partitioning.time_partition_start = time_partition_start
324 if time_partition_end
is not None:
325 config.partitioning.time_partition_end = time_partition_end
326 if read_consistency
is not None:
327 config.connection_config.read_consistency = read_consistency
328 if write_consistency
is not None:
329 config.connection_config.write_consistency = write_consistency
330 if read_timeout
is not None:
331 config.connection_config.read_timeout = read_timeout
332 if write_timeout
is not None:
333 config.connection_config.write_timeout = write_timeout
334 if ra_dec_columns
is not None:
335 config.ra_dec_columns = ra_dec_columns
337 cls.
_makeSchema(config, drop=drop, replication_factor=replication_factor, table_options=table_options)
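    # Illustrative usage sketch (hypothetical host names and keyspace, not part of the
    # original module):
    #
    #     config = ApdbCassandra.init_database(
    #         hosts=("cassandra01.example.org", "cassandra02.example.org"),
    #         keyspace="apdb",
    #         enable_replica=True,
    #         replication_factor=3,
    #     )
    #     apdb = ApdbCassandra(config)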
342 """Return `ApdbReplica` instance for this database."""
    @classmethod
    def _makeSchema(
        cls,
        config: ApdbConfig,
        *,
        drop: bool = False,
        replication_factor: int | None = None,
        table_options: CreateTableOptions | None = None,
    ) -> None:
        if not isinstance(config, ApdbCassandraConfig):
            raise TypeError(f"Unexpected type of configuration object: {type(config)}")

        simple_schema = ApdbSchema(config.schema_file, config.ss_schema_file)
        schema = ApdbCassandraSchema(
            session,
            keyspace=config.keyspace,
            table_schemas=simple_schema.tableSchemas,
            prefix=config.prefix,
            time_partition_tables=config.partitioning.time_partition_tables,
            enable_replica=config.enable_replica,
            replica_skips_diaobjects=config.replica_skips_diaobjects,
        )

        # Pre-compute the range of time partitions when per-partition tables are used.
        part_range_config: ApdbCassandraTimePartitionRange | None = None
        if config.partitioning.time_partition_tables:
            partitioner = Partitioner(config)
            time_partition_start = astropy.time.Time(
                config.partitioning.time_partition_start, format="isot", scale="tai"
            )
            time_partition_end = astropy.time.Time(
                config.partitioning.time_partition_end, format="isot", scale="tai"
            )
            part_range_config = ApdbCassandraTimePartitionRange(
                start=partitioner.time_partition(time_partition_start),
                end=partitioner.time_partition(time_partition_end),
            )
            schema.makeSchema(
                drop=drop,
                part_range=part_range_config,
                replication_factor=replication_factor,
                table_options=table_options,
            )
        else:
            schema.makeSchema(
                drop=drop, replication_factor=replication_factor, table_options=table_options
            )
        meta_table_name = ApdbTables.metadata.table_name(config.prefix)
        metadata = ApdbMetadataCassandra(
            session, meta_table_name, config.keyspace, "read_tuples", "write"
        )

        # Fill version numbers, overwrite them if they are already there.
        metadata.set(
            ConnectionContext.metadataSchemaVersionKey, str(simple_schema.schemaVersion()), force=True
        )
        if config.enable_replica:
            metadata.set(
                ConnectionContext.metadataReplicaVersionKey,
                str(ApdbCassandraReplica.apdbReplicaImplementationVersion()),
                force=True,
            )

        # Store the frozen part of the configuration in metadata.
        freezer = ApdbConfigFreezer[ApdbCassandraConfig](ConnectionContext.frozen_parameters)
        metadata.set(ConnectionContext.metadataConfigKey, freezer.to_json(config), force=True)

        if part_range_config:
            part_range_config.save_to_meta(metadata)
    def getDiaObjects(self, region: sphgeom.Region) -> pandas.DataFrame:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        sp_where, num_sp_part = context.partitioner.spatial_where(region, for_prepare=True)
        _LOG.debug("getDiaObjects: #partitions: %s", len(sp_where))

        column_names = context.schema.apdbColumnNames(ApdbTables.DiaObjectLast)
        what = ",".join(quote_id(column) for column in column_names)

        table_name = context.schema.tableName(ApdbTables.DiaObjectLast)
        query = f'SELECT {what} from "{self._keyspace}"."{table_name}"'
        statements: list[tuple] = []
        for where, params in sp_where:
            full_query = f"{query} WHERE {where}"
            if params:
                statement = context.preparer.prepare(full_query)
            else:
                # Without parameters it is a literal query, no need to prepare it.
                statement = cassandra.query.SimpleStatement(full_query)
            statements.append((statement, params))
        _LOG.debug("getDiaObjects: #queries: %s", len(statements))
        with _MON.context_tags({"table": "DiaObject"}):
            _MON.add_record(
                "select_query_stats", values={"num_sp_part": num_sp_part, "num_queries": len(statements)}
            )
            with self._timer("select_time") as timer:
                objects = cast(
                    pandas.DataFrame,
                    select_concurrent(
                        context.session,
                        statements,
                        "read_pandas_multi",
                        config.connection_config.read_concurrency,
                    ),
                )
                timer.add_values(row_count=len(objects))

        _LOG.debug("found %s DiaObjects", objects.shape[0])
        return objects
    def getDiaSources(
        self,
        region: sphgeom.Region,
        object_ids: Iterable[int] | None,
        visit_time: astropy.time.Time,
        start_time: astropy.time.Time | None = None,
    ) -> pandas.DataFrame | None:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        months = config.read_sources_months
        if start_time is None and months == 0:
            return None

        mjd_end = float(visit_time.tai.mjd)
        if start_time is None:
            mjd_start = mjd_end - months * 30
        else:
            mjd_start = float(start_time.tai.mjd)

        return self._getSources(region, object_ids, mjd_start, mjd_end, ApdbTables.DiaSource)
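    # Worked example for the time window above (hypothetical numbers): with
    # read_sources_months = 12 and a visit at MJD 60000.0 (TAI), the window is
    # mjd_start = 60000.0 - 12 * 30 = 59640.0 and mjd_end = 60000.0, i.e. a month is
    # approximated as 30 days.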
    def getDiaForcedSources(
        self,
        region: sphgeom.Region,
        object_ids: Iterable[int] | None,
        visit_time: astropy.time.Time,
        start_time: astropy.time.Time | None = None,
    ) -> pandas.DataFrame | None:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        months = config.read_forced_sources_months
        if start_time is None and months == 0:
            return None

        mjd_end = float(visit_time.tai.mjd)
        if start_time is None:
            mjd_start = mjd_end - months * 30
        else:
            mjd_start = float(start_time.tai.mjd)

        return self._getSources(region, object_ids, mjd_start, mjd_end, ApdbTables.DiaForcedSource)
    def containsVisitDetector(
        self,
        visit: int,
        detector: int,
        region: sphgeom.Region,
        visit_time: astropy.time.Time,
    ) -> bool:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        if context.has_visit_detector_table:
            table_name = context.schema.tableName(ExtraTables.ApdbVisitDetector)
            query = (
                f'SELECT count(*) FROM "{self._keyspace}"."{table_name}" WHERE visit = %s AND detector = %s'
            )
            with self._timer("contains_visit_detector_time"):
                result = context.session.execute(query, (visit, detector))
                return bool(result.one()[0])
        # Fallback: look for existing DiaSource or DiaForcedSource records in the
        # spatial and temporal region of this visit.
        sp_where, _ = context.partitioner.spatial_where(region, use_ranges=True, for_prepare=True)
        visit_detector_where = ("visit = ? AND detector = ?", (visit, detector))

        # Restrict the time range to within one hour of the visit time.
        mjd_start = float(visit_time.tai.mjd) - 1.0 / 24
        mjd_end = float(visit_time.tai.mjd) + 1.0 / 24

        statements: list[tuple] = []
        for table_type in ApdbTables.DiaSource, ApdbTables.DiaForcedSource:
            tables, temporal_where = context.partitioner.temporal_where(
                table_type, mjd_start, mjd_end, query_per_time_part=True, for_prepare=True
            )
            for table in tables:
                prefix = f'SELECT apdb_part FROM "{self._keyspace}"."{table}"'
                suffix = "PER PARTITION LIMIT 1 LIMIT 1 ALLOW FILTERING"
                statements.extend(
                    self._combine_where(prefix, sp_where, temporal_where, visit_detector_where, suffix)
                )

        with self._timer("contains_visit_detector_time"):
            result = cast(
                list[tuple[int] | None],
                select_concurrent(
                    context.session, statements, "read_tuples", config.connection_config.read_concurrency
                ),
            )
        return bool(result)
    def store(
        self,
        visit_time: astropy.time.Time,
        objects: pandas.DataFrame,
        sources: pandas.DataFrame | None = None,
        forced_sources: pandas.DataFrame | None = None,
    ) -> None:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        if context.has_visit_detector_table:
            # Record all visit/detector combinations seen in the input catalogs.
            visit_detector: set[tuple[int, int]] = set()
            for df in sources, forced_sources:
                if df is not None and not df.empty:
                    df = df[["visit", "detector"]]
                    for visit, detector in df.itertuples(index=False):
                        visit_detector.add((visit, detector))

            if visit_detector:
                table_name = context.schema.tableName(ExtraTables.ApdbVisitDetector)
                query = f'INSERT INTO "{self._keyspace}"."{table_name}" (visit, detector) VALUES (%s, %s)'
                for item in visit_detector:
                    context.session.execute(query, item, execution_profile="write")

        objects = self._fix_input_timestamps(objects)
        if sources is not None:
            sources = self._fix_input_timestamps(sources)
        if forced_sources is not None:
            forced_sources = self._fix_input_timestamps(forced_sources)

        replica_chunk: ReplicaChunk | None = None
        if context.schema.replication_enabled:
            replica_chunk = ReplicaChunk.make_replica_chunk(visit_time, config.replica_chunk_seconds)
            self._storeReplicaChunk(replica_chunk)

        # Fill the DiaObject tables.
        self._storeDiaObjects(objects, visit_time, replica_chunk)

        if sources is not None and len(sources) > 0:
            subchunk = self._storeDiaSources(ApdbTables.DiaSource, sources, replica_chunk)
            self._storeDiaSourcesPartitions(sources, visit_time, replica_chunk, subchunk)

        if forced_sources is not None and len(forced_sources) > 0:
            self._storeDiaSources(ApdbTables.DiaForcedSource, forced_sources, replica_chunk)
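    # Illustrative usage sketch (hypothetical DataFrames, not part of the original
    # module): catalogs are plain pandas DataFrames whose columns follow the APDB schema.
    #
    #     apdb.store(
    #         visit_time=astropy.time.Time.now(),
    #         objects=objects_df,
    #         sources=sources_df,
    #         forced_sources=forced_df,
    #     )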
    def reassignDiaSources(self, idMap: Mapping[int, int]) -> None:
        # docstring is inherited from a base class
        context = self._context
        config = context.config

        if self._schema.has_mjd_timestamps:
            reassign_time_column = "ssObjectReassocTimeMjdTai"
            reassignTime = float(astropy.time.Time.now().tai.mjd)
        else:
            reassign_time_column = "ssObjectReassocTime"
            # Current time in milliseconds since epoch.
            reassignTime = int(datetime.datetime.now(tz=datetime.UTC).timestamp() * 1000)

        # To update a DiaSource we need its partitioning values; look them up
        # in the DiaSourceToPartition table.
        table_name = context.schema.tableName(ExtraTables.DiaSourceToPartition)

        selects: list[tuple] = []
        for ids in chunk_iterable(idMap.keys(), 1_000):
            ids_str = ",".join(str(item) for item in ids)
            selects.append(
                (
                    'SELECT "diaSourceId", "apdb_part", "apdb_time_part", "apdb_replica_chunk" '
                    f'FROM "{self._keyspace}"."{table_name}" WHERE "diaSourceId" IN ({ids_str})',
                    (),
                )
            )

        # Resulting rows are (diaSourceId, apdb_part, apdb_time_part, apdb_replica_chunk).
        result = cast(
            list[tuple[int, int, int, int | None]],
            select_concurrent(
                context.session, selects, "read_tuples", config.connection_config.read_concurrency
            ),
        )

        id2partitions: dict[int, tuple[int, int]] = {}
        id2chunk_id: dict[int, int] = {}
        for row in result:
            id2partitions[row[0]] = row[1:3]
            if row[3] is not None:
                id2chunk_id[row[0]] = row[3]

        # Check that all DiaSource IDs are known.
        if set(id2partitions) != set(idMap):
            missing = ",".join(str(item) for item in set(idMap) - set(id2partitions))
            raise ValueError(f"Following DiaSource IDs do not exist in the database: {missing}")

        queries: list[tuple[cassandra.query.PreparedStatement, tuple]] = []
        for diaSourceId, ssObjectId in idMap.items():
            apdb_part, apdb_time_part = id2partitions[diaSourceId]
            values: tuple
            if config.partitioning.time_partition_tables:
                table_name = context.schema.tableName(ApdbTables.DiaSource, apdb_time_part)
                query = (
                    f'UPDATE "{self._keyspace}"."{table_name}"'
                    f' SET "ssObjectId" = ?, "diaObjectId" = NULL, "{reassign_time_column}" = ?'
                    ' WHERE "apdb_part" = ? AND "diaSourceId" = ?'
                )
                values = (ssObjectId, reassignTime, apdb_part, diaSourceId)
            else:
                table_name = context.schema.tableName(ApdbTables.DiaSource)
                query = (
                    f'UPDATE "{self._keyspace}"."{table_name}"'
                    f' SET "ssObjectId" = ?, "diaObjectId" = NULL, "{reassign_time_column}" = ?'
                    ' WHERE "apdb_part" = ? AND "apdb_time_part" = ? AND "diaSourceId" = ?'
                )
                values = (ssObjectId, reassignTime, apdb_part, apdb_time_part, diaSourceId)
            queries.append((context.preparer.prepare(query), values))

        if id2chunk_id:
            warnings.warn("Replication of reassigned DiaSource records is not implemented.", stacklevel=2)

        _LOG.debug("%s: will update %d records", table_name, len(idMap))
        with self._timer("source_reassign_time") as timer:
            execute_concurrent(context.session, queries, execution_profile="write")
            timer.add_values(source_count=len(idMap))
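    # For illustration (hypothetical keyspace and partition): with per-partition tables
    # enabled, each prepared statement has roughly the form
    #
    #     UPDATE "apdb"."<DiaSource table for apdb_time_part>"
    #         SET "ssObjectId" = ?, "diaObjectId" = NULL, "<reassign time column>" = ?
    #         WHERE "apdb_part" = ? AND "diaSourceId" = ?
    #
    # with one statement and one value tuple per reassigned DiaSource.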
    def countUnassociatedObjects(self) -> int:
        # docstring is inherited from a base class
        raise NotImplementedError()

    @property
    def metadata(self) -> ApdbMetadata:
        # docstring is inherited from a base class
        context = self._context
        return context.metadata

    @property
    def admin(self) -> ApdbCassandraAdmin:
    def _getSources(
        self,
        region: sphgeom.Region,
        object_ids: Iterable[int] | None,
        mjd_start: float,
        mjd_end: float,
        table_name: ApdbTables,
    ) -> pandas.DataFrame:
        """Return catalog of DiaSource instances given set of DiaObject IDs.

        Parameters
        ----------
        region : `lsst.sphgeom.Region`
            Spherical region.
        object_ids : iterable [`int`], or `None`
            Collection of DiaObject IDs
        mjd_start : `float`
            Lower bound of time interval.
        mjd_end : `float`
            Upper bound of time interval.
        table_name : `ApdbTables`
            Name of the table to query.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records. Empty catalog is returned if
            ``object_ids`` is empty.
        """
        context = self._context
        config = context.config

        object_id_set: Set[int] = set()
        if object_ids is not None:
            object_id_set = set(object_ids)
            if len(object_id_set) == 0:
                return self._make_empty_catalog(table_name)

        sp_where, num_sp_part = context.partitioner.spatial_where(region, for_prepare=True)
        tables, temporal_where = context.partitioner.temporal_where(
            table_name, mjd_start, mjd_end, for_prepare=True, partitons_range=context.time_partitions_range
        )
        if not tables:
            start = astropy.time.Time(mjd_start, format="mjd", scale="tai")
            end = astropy.time.Time(mjd_end, format="mjd", scale="tai")
            raise ValueError(
                f"Query time range ({start.isot} - {end.isot}) does not overlap database time partitions."
            )
        column_names = context.schema.apdbColumnNames(table_name)
        what = ",".join(quote_id(column) for column in column_names)

        statements: list[tuple] = []
        for table in tables:
            prefix = f'SELECT {what} from "{self._keyspace}"."{table}"'
            statements += list(self._combine_where(prefix, sp_where, temporal_where))
        _LOG.debug("_getSources %s: #queries: %s", table_name, len(statements))

        with _MON.context_tags({"table": table_name.name}):
            _MON.add_record(
                "select_query_stats", values={"num_sp_part": num_sp_part, "num_queries": len(statements)}
            )
            with self._timer("select_time") as timer:
                catalog = cast(
                    pandas.DataFrame,
                    select_concurrent(
                        context.session,
                        statements,
                        "read_pandas_multi",
                        config.connection_config.read_concurrency,
                    ),
                )
                timer.add_values(row_count_from_db=len(catalog))

                # Filter by the requested DiaObject IDs.
                if len(object_id_set) > 0:
                    catalog = cast(pandas.DataFrame, catalog[catalog["diaObjectId"].isin(object_id_set)])

                # Precise filtering on midpointMjdTai.
                catalog = cast(pandas.DataFrame, catalog[catalog["midpointMjdTai"] > mjd_start])

                timer.add_values(row_count=len(catalog))

        _LOG.debug("found %d %ss", catalog.shape[0], table_name.name)
        return catalog
    def _storeReplicaChunk(self, replica_chunk: ReplicaChunk) -> None:
        context = self._context
        config = context.config

        # Timestamp is stored as milliseconds since epoch.
        timestamp = int(replica_chunk.last_update_time.unix_tai * 1000)

        table_name = context.schema.tableName(ExtraTables.ApdbReplicaChunks)

        columns = ["partition", "apdb_replica_chunk", "last_update_time", "unique_id"]
        values = [partition, replica_chunk.id, timestamp, replica_chunk.unique_id]
        if context.has_chunk_sub_partitions:
            columns.append("has_subchunks")
            values.append(True)

        column_list = ", ".join(columns)
        placeholders = ",".join(["%s"] * len(columns))
        query = f'INSERT INTO "{self._keyspace}"."{table_name}" ({column_list}) VALUES ({placeholders})'

        context.session.execute(
            query,
            values,
            timeout=config.connection_config.write_timeout,
            execution_profile="write",
        )
858 """Return existing mapping of diaObjectId to its last partition."""
860 config = context.config
862 table_name = context.schema.tableName(ExtraTables.DiaObjectLastToPartition)
865 for id_chunk
in chunk_iterable(ids, 10_000):
866 id_chunk_list = list(id_chunk)
868 f
'SELECT "diaObjectId", apdb_part FROM "{self._keyspace}"."{table_name}" '
869 f
'WHERE "diaObjectId" in ({",".join(str(oid) for oid in id_chunk_list)})'
871 queries.append((query, ()))
872 object_count += len(id_chunk_list)
874 with self.
_timer(
"query_object_last_partitions")
as timer:
881 config.connection_config.read_concurrency,
884 timer.add_values(object_count=object_count, row_count=len(data.rows()))
886 if data.column_names() != [
"diaObjectId",
"apdb_part"]:
887 raise RuntimeError(f
"Unexpected column names in query result: {data.column_names()}")
889 return {row[0]: row[1]
for row
in data.rows()}
892 """Objects in DiaObjectsLast can move from one spatial partition to
893 another. For those objects inserting new version does not replace old
894 one, so we need to explicitly remove old versions before inserting new
900 new_partitions = dict(zip(objs[
"diaObjectId"], objs[
"apdb_part"]))
903 moved_oids: dict[int, tuple[int, int]] = {}
904 for oid, old_part
in old_partitions.items():
905 new_part = new_partitions.get(oid, old_part)
906 if new_part != old_part:
907 moved_oids[oid] = (old_part, new_part)
908 _LOG.debug(
"DiaObject IDs that moved to new partition: %s", moved_oids)
912 table_name = context.schema.tableName(ApdbTables.DiaObjectLast)
913 query = f
'DELETE FROM "{self._keyspace}"."{table_name}" WHERE apdb_part = ? AND "diaObjectId" = ?'
914 statement = context.preparer.prepare(query)
916 for oid, (old_part, _)
in moved_oids.items():
917 queries.append((statement, (old_part, oid)))
918 with self.
_timer(
"delete_object_last")
as timer:
919 execute_concurrent(context.session, queries, execution_profile=
"write")
920 timer.add_values(row_count=len(moved_oids))
923 table_name = context.schema.tableName(ExtraTables.DiaObjectLastToPartition)
924 query = f
'INSERT INTO "{self._keyspace}"."{table_name}" ("diaObjectId", apdb_part) VALUES (?,?)'
925 statement = context.preparer.prepare(query)
928 for oid, new_part
in new_partitions.items():
929 queries.append((statement, (oid, new_part)))
931 with self.
_timer(
"update_object_last_partition")
as timer:
932 execute_concurrent(context.session, queries, execution_profile=
"write")
933 timer.add_values(row_count=len(queries))
    def _storeDiaObjects(
        self, objs: pandas.DataFrame, visit_time: astropy.time.Time, replica_chunk: ReplicaChunk | None
    ) -> None:
        """Store catalog of DiaObjects from current visit.

        Parameters
        ----------
        objs : `pandas.DataFrame`
            Catalog with DiaObject records
        visit_time : `astropy.time.Time`
            Time of the current visit.
        replica_chunk : `ReplicaChunk` or `None`
            Replica chunk identifier if replication is configured.
        """
        if len(objs) == 0:
            _LOG.debug("No objects to write to database.")
            return

        context = self._context
        config = context.config

        if context.has_dia_object_last_to_partition:
            self._deleteMovingObjects(objs)

        timestamp: float | datetime.datetime
        if self._schema.has_mjd_timestamps:
            validity_start_column = "validityStartMjdTai"
            timestamp = float(visit_time.tai.mjd)
        else:
            validity_start_column = "validityStart"
            timestamp = visit_time.datetime

        # DiaObjectLast gets a validity start column only if its schema defines one.
        extra_columns: dict[str, Any] = {}
        if context.schema.check_column(ApdbTables.DiaObjectLast, validity_start_column):
            extra_columns[validity_start_column] = timestamp
        self._storeObjectsPandas(objs, ApdbTables.DiaObjectLast, extra_columns=extra_columns)

        extra_columns[validity_start_column] = timestamp
        visit_time_part = context.partitioner.time_partition(visit_time)
        time_part: int | None = visit_time_part
        if (time_partitions_range := context.time_partitions_range) is not None:
            self._check_time_partitions([visit_time_part], time_partitions_range)
        if not config.partitioning.time_partition_tables:
            extra_columns["apdb_time_part"] = time_part
            time_part = None

        # If the replica table holds the full DiaObject copy the regular table may be skipped.
        if replica_chunk is None or not config.replica_skips_diaobjects:
            self._storeObjectsPandas(
                objs, ApdbTables.DiaObject, extra_columns=extra_columns, time_part=time_part
            )

        if replica_chunk is not None:
            extra_columns = {"apdb_replica_chunk": replica_chunk.id, validity_start_column: timestamp}
            table = ExtraTables.DiaObjectChunks
            if context.has_chunk_sub_partitions:
                table = ExtraTables.DiaObjectChunks2
                extra_columns["apdb_replica_subchunk"] = random.randrange(config.replica_sub_chunk_count)
            self._storeObjectsPandas(objs, table, extra_columns=extra_columns, time_part=None)
    def _storeDiaSources(
        self,
        table_name: ApdbTables,
        sources: pandas.DataFrame,
        replica_chunk: ReplicaChunk | None,
    ) -> int | None:
        """Store catalog of DIASources or DIAForcedSources from current visit.

        Parameters
        ----------
        table_name : `ApdbTables`
            Table where to store the data.
        sources : `pandas.DataFrame`
            Catalog containing DiaSource records
        replica_chunk : `ReplicaChunk` or `None`
            Replica chunk identifier if replication is configured.

        Returns
        -------
        subchunk : `int` or `None`
            Subchunk number for resulting replica data, `None` if replication
            is not enabled or subchunking is not enabled.
        """
        context = self._context
        config = context.config

        # Time partitioning is based on midpointMjdTai of each source.
        tp_sources = sources.copy(deep=False)
        tp_sources["apdb_time_part"] = tp_sources["midpointMjdTai"].apply(context.partitioner.time_partition)
        if (time_partitions_range := context.time_partitions_range) is not None:
            self._check_time_partitions(tp_sources["apdb_time_part"], time_partitions_range)

        extra_columns: dict[str, Any] = {}
        if not config.partitioning.time_partition_tables:
            # Time partition is stored as a regular column.
            self._storeObjectsPandas(tp_sources, table_name)
        else:
            # Group records by time partition.
            partitions = set(tp_sources["apdb_time_part"])
            if len(partitions) == 1:
                # Single partition, write the whole catalog at once.
                time_part = partitions.pop()
                self._storeObjectsPandas(sources, table_name, time_part=time_part)
            else:
                for time_part, sub_frame in tp_sources.groupby(by="apdb_time_part"):
                    sub_frame.drop(columns="apdb_time_part", inplace=True)
                    self._storeObjectsPandas(sub_frame, table_name, time_part=time_part)

        subchunk: int | None = None
        if replica_chunk is not None:
            extra_columns = {"apdb_replica_chunk": replica_chunk.id}
            if context.has_chunk_sub_partitions:
                subchunk = random.randrange(config.replica_sub_chunk_count)
                extra_columns["apdb_replica_subchunk"] = subchunk
                if table_name is ApdbTables.DiaSource:
                    extra_table = ExtraTables.DiaSourceChunks2
                else:
                    extra_table = ExtraTables.DiaForcedSourceChunks2
            else:
                if table_name is ApdbTables.DiaSource:
                    extra_table = ExtraTables.DiaSourceChunks
                else:
                    extra_table = ExtraTables.DiaForcedSourceChunks
            self._storeObjectsPandas(sources, extra_table, extra_columns=extra_columns)

        return subchunk
    def _check_time_partitions(
        self, partitions: Iterable[int], time_partitions_range: ApdbCassandraTimePartitionRange
    ) -> None:
        """Check that time partitions for new data actually exist.

        Parameters
        ----------
        partitions : `~collections.abc.Iterable` [`int`]
            Time partitions for new data.
        time_partitions_range : `ApdbCassandraTimePartitionRange`
            Current time partition range.
        """
        partitions = set(partitions)
        min_part = min(partitions)
        max_part = max(partitions)
        if min_part < time_partitions_range.start or max_part > time_partitions_range.end:
            raise ValueError(
                "Attempt to store data for time partitions that do not yet exist. "
                f"Partitions for new records: {min_part}-{max_part}. "
                f"Database partitions: {time_partitions_range.start}-{time_partitions_range.end}."
            )
        if max_part == time_partitions_range.end:
            warnings.warn(
                "Writing into the last temporal partition. Partition range needs to be extended soon.",
            )
    def _storeDiaSourcesPartitions(
        self,
        sources: pandas.DataFrame,
        visit_time: astropy.time.Time,
        replica_chunk: ReplicaChunk | None,
        subchunk: int | None,
    ) -> None:
        """Store mapping of diaSourceId to its partitioning values.

        Parameters
        ----------
        sources : `pandas.DataFrame`
            Catalog containing DiaSource records
        visit_time : `astropy.time.Time`
            Time of the current visit.
        replica_chunk : `ReplicaChunk` or `None`
            Replication chunk, or `None` when replication is disabled.
        subchunk : `int` or `None`
            Replication sub-chunk, or `None` when replication is disabled or
            sub-chunking is not used.
        """
        context = self._context

        id_map = cast(pandas.DataFrame, sources[["diaSourceId", "apdb_part"]])
        extra_columns = {
            "apdb_time_part": context.partitioner.time_partition(visit_time),
            "apdb_replica_chunk": replica_chunk.id if replica_chunk is not None else None,
        }
        if context.has_chunk_sub_partitions:
            extra_columns["apdb_replica_subchunk"] = subchunk

        self._storeObjectsPandas(
            id_map, ExtraTables.DiaSourceToPartition, extra_columns=extra_columns, time_part=None
        )
    def _storeObjectsPandas(
        self,
        records: pandas.DataFrame,
        table_name: ApdbTables | ExtraTables,
        extra_columns: Mapping | None = None,
        time_part: int | None = None,
    ) -> None:
        """Store generic objects.

        Takes Pandas catalog and stores a bunch of records in a table.

        Parameters
        ----------
        records : `pandas.DataFrame`
            Catalog containing object records
        table_name : `ApdbTables`
            Name of the table as defined in APDB schema.
        extra_columns : `dict`, optional
            Mapping (column_name, column_value) which gives fixed values for
            columns in each row, overrides values in ``records`` if matching
            columns exist there.
        time_part : `int`, optional
            If not `None` then insert into a per-partition table.

        Notes
        -----
        If Pandas catalog contains additional columns not defined in table
        schema they are ignored. Catalog does not have to contain all columns
        defined in a table, but partition and clustering keys must be present
        in a catalog or ``extra_columns``.
        """
        context = self._context

        if extra_columns is None:
            extra_columns = {}
        extra_fields = list(extra_columns.keys())

        # Everything not covered by extra_columns comes from the DataFrame.
        df_fields = [column for column in records.columns if column not in extra_fields]

        column_map = context.schema.getColumnMap(table_name)
        # List of columns as defined in the schema.
        fields = [column_map[field].name for field in df_fields if field in column_map]
        fields += extra_fields

        # Check that all partitioning and clustering columns are defined.
        partition_columns = context.schema.partitionColumns(table_name)
        required_columns = partition_columns + context.schema.clusteringColumns(table_name)
        missing_columns = [column for column in required_columns if column not in fields]
        if missing_columns:
            raise ValueError(f"Primary key columns are missing from catalog: {missing_columns}")

        qfields = [quote_id(field) for field in fields]
        qfields_str = ",".join(qfields)
        with self._timer("insert_build_time", tags={"table": table_name.name}):
            # Group rows by partition key so that each batch spans a single partition.
            values_by_key: dict[tuple, list[list]] = defaultdict(list)
            for rec in records.itertuples(index=False):
                values = []
                partitioning_values: dict[str, Any] = {}
                for field in df_fields:
                    if field not in column_map:
                        continue
                    value = getattr(rec, field)
                    if column_map[field].datatype is felis.datamodel.DataType.timestamp:
                        if isinstance(value, pandas.Timestamp):
                            value = value.to_pydatetime()
                        elif value is pandas.NaT:
                            value = None
                        else:
                            # Assume it is seconds since epoch; Cassandra timestamps
                            # are in milliseconds.
                            value = int(value * 1000)
                    value = literal(value)
                    values.append(UNSET_VALUE if value is None else value)
                    if field in partition_columns:
                        partitioning_values[field] = value
                for field in extra_fields:
                    value = literal(extra_columns[field])
                    values.append(UNSET_VALUE if value is None else value)
                    if field in partition_columns:
                        partitioning_values[field] = value

                key = tuple(partitioning_values[field] for field in partition_columns)
                values_by_key[key].append(values)

        table = context.schema.tableName(table_name, time_part)

        holders = ",".join(["?"] * len(qfields))
        query = f'INSERT INTO "{self._keyspace}"."{table}" ({qfields_str}) VALUES ({holders})'
        statement = context.preparer.prepare(query)

        queries = []
        batch_size = self._batch_size(table_name)
        for key_values in values_by_key.values():
            for values_chunk in chunk_iterable(key_values, batch_size):
                batch = cassandra.query.BatchStatement()
                for row_values in values_chunk:
                    batch.add(statement, row_values)
                queries.append((batch, None))
                assert batch.routing_key is not None and batch.keyspace is not None

        _LOG.debug("%s: will store %d records", context.schema.tableName(table_name), records.shape[0])
        with self._timer("insert_time", tags={"table": table_name.name}) as timer:
            execute_concurrent(context.session, queries, execution_profile="write")
            timer.add_values(row_count=len(records), num_batches=len(queries))
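    # For illustration (hypothetical table and batch size): for three rows that share one
    # partition key and a batch size of 2, a single prepared statement of the form
    #
    #     INSERT INTO "apdb"."DiaObject" ("diaObjectId","ra","dec",...) VALUES (?,?,?,...)
    #
    # is bound into two BatchStatements (two rows and one row), both routed to the same
    # partition.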
    def _storeUpdateRecords(
        self, records: Iterable[ApdbUpdateRecord], chunk: ReplicaChunk, *, store_chunk: bool = False
    ) -> None:
        """Store ApdbUpdateRecords in the replica table for those records.

        Parameters
        ----------
        records : `list` [`ApdbUpdateRecord`]
            Records to store.
        chunk : `ReplicaChunk`
            Replica chunk for these records.
        store_chunk : `bool`
            If True then also store replica chunk.

        Raises
        ------
        TypeError
            Raised if replication is not enabled for this instance.
        """
        context = self._context
        config = context.config

        if not context.schema.replication_enabled:
            raise TypeError("Replication is not enabled for this APDB instance.")

        if store_chunk:
            self._storeReplicaChunk(chunk)

        apdb_replica_chunk = chunk.id
        update_unique_id = uuid.uuid4()

        # Column order below must match the row values assembled for each record.
        rows: list[list] = []
        columns = ["apdb_replica_chunk", "update_time_ns", "update_order", "update_unique_id"]
        for record in records:
            rows.append(
                [
                    apdb_replica_chunk,
                    record.update_time_ns,
                    record.update_order,
                    update_unique_id,
                ]
            )
        if context.has_chunk_sub_partitions:
            subchunk = random.randrange(config.replica_sub_chunk_count)
            columns.append("apdb_replica_subchunk")
            for row in rows:
                row.append(subchunk)

        table_name = context.schema.tableName(ExtraTables.ApdbUpdateRecordChunks)
        placeholders = ", ".join(["%s"] * len(columns))
        columns_str = ", ".join(columns)
        query = f'INSERT INTO "{self._keyspace}"."{table_name}" ({columns_str}) VALUES ({placeholders})'
        queries = [(query, row) for row in rows]

        with self._timer("store_update_record") as timer:
            execute_concurrent(context.session, queries, execution_profile="write")
            timer.add_values(row_count=len(queries))
1313 """Calculate spatial partition for each record and add it to a
1318 df : `pandas.DataFrame`
1319 DataFrame which has to contain ra/dec columns, names of these
1320 columns are defined by configuration ``ra_dec_columns`` field.
1324 df : `pandas.DataFrame`
1325 DataFrame with ``apdb_part`` column which contains pixel index
1326 for ra/dec coordinates.
1330 This overrides any existing column in a DataFrame with the same name
1331 (``apdb_part``). Original DataFrame is not changed, copy of a DataFrame
1335 config = context.config
1338 apdb_part = np.zeros(df.shape[0], dtype=np.int64)
1339 ra_col, dec_col = config.ra_dec_columns
1340 for i, (ra, dec)
in enumerate(zip(df[ra_col], df[dec_col])):
1342 idx = context.partitioner.pixel(uv3d)
1345 df[
"apdb_part"] = apdb_part
1349 """Make an empty catalog for a table with a given name.
1353 table_name : `ApdbTables`
1358 catalog : `pandas.DataFrame`
1361 table = self.
_schema.tableSchemas[table_name]
1364 columnDef.name: pandas.Series(dtype=self.
_schema.column_dtype(columnDef.datatype))
1365 for columnDef
in table.columns
1367 return pandas.DataFrame(data)
    def _combine_where(
        self,
        prefix: str,
        where1: list[tuple[str, tuple]],
        where2: list[tuple[str, tuple]],
        where3: tuple[str, tuple] | None = None,
        suffix: str | None = None,
    ) -> Iterator[tuple[cassandra.query.Statement, tuple]]:
        """Make cartesian product of two parts of WHERE clause into a series
        of statements to execute.

        Parameters
        ----------
        prefix : `str`
            Initial statement prefix that comes before WHERE clause, e.g.
            "SELECT * from Table"
        """
        context = self._context

        for expr1, params1 in where1:
            for expr2, params2 in where2:
                wheres = []
                params = params1 + params2
                if expr1:
                    wheres.append(expr1)
                if expr2:
                    wheres.append(expr2)
                if where3:
                    wheres.append(where3[0])
                    params = params + where3[1]
                full_query = prefix
                if wheres:
                    full_query += " WHERE " + " AND ".join(wheres)
                if suffix:
                    full_query += " " + suffix
                if params:
                    statement = context.preparer.prepare(full_query)
                else:
                    # Without parameters it is a literal query, no need to prepare it.
                    statement = cassandra.query.SimpleStatement(full_query)
                yield (statement, params)
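    # For illustration (hypothetical clauses): with
    #
    #     where1 = [("apdb_part = ?", (1,)), ("apdb_part = ?", (2,))]
    #     where2 = [('"apdb_time_part" = ?', (100,))]
    #
    # the generator yields two statements of the form
    # '<prefix> WHERE apdb_part = ? AND "apdb_time_part" = ?' with parameter tuples
    # (1, 100) and (2, 100).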
1420 """Update timestamp columns in input DataFrame to be naive datetime
1423 Clients may or may not generate aware timestamps, code in this class
1424 assumes that timestamps are naive, so we convert them to UTC and
1428 columns = [column
for column, dtype
in df.dtypes.items()
if isinstance(dtype, pandas.DatetimeTZDtype)]
1429 for column
in columns:
1431 df[column] = df[column].dt.tz_convert(
None)
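    # For illustration: a column of dtype "datetime64[ns, US/Pacific]" holding
    # 2025-01-01 12:00:00-08:00 becomes naive "datetime64[ns]" 2025-01-01 20:00:00 (the
    # UTC equivalent) after tz_convert(None).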
1435 """Calculate batch size based on config parameters."""
1437 config = context.config
1441 if 0 < config.batch_statement_limit < batch_size:
1442 batch_size = config.batch_statement_limit
1443 if config.batch_size_limit > 0:
1451 row_size = context.schema.table_row_size(table)
1452 row_size += 4 * len(context.schema.getColumnMap(table))
1453 batch_size = min(batch_size, (config.batch_size_limit // row_size) + 1)
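    # Worked example (hypothetical config values): with batch_statement_limit = 1000 and
    # batch_size_limit = 1_000_000 bytes, a table with an estimated row size of 400 bytes
    # (including the 4-byte per-column overhead) gives
    #     batch_size = min(1000, 1_000_000 // 400 + 1) = min(1000, 2501) = 1000.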
# ApdbCassandra API summary (flattened signatures):
#
#   int _batch_size(self, ApdbTables|ExtraTables table)
#   int|None _storeDiaSources(self, ApdbTables table_name, pandas.DataFrame sources, ReplicaChunk|None replica_chunk)
#   pandas.DataFrame _fix_input_timestamps(self, pandas.DataFrame df)
#   None _versionCheck(self, DbVersions current_versions, DbVersions db_versions)
#   pandas.DataFrame|None getDiaForcedSources(self, sphgeom.Region region, Iterable[int]|None object_ids, astropy.time.Time visit_time, astropy.time.Time|None start_time=None)
#   None reassignDiaSources(self, Mapping[int, int] idMap)
#   pandas.DataFrame getDiaObjects(self, sphgeom.Region region)
#   bool containsVisitDetector(self, int visit, int detector, sphgeom.Region region, astropy.time.Time visit_time)
#   None _storeDiaObjects(self, pandas.DataFrame objs, astropy.time.Time visit_time, ReplicaChunk|None replica_chunk)
#   pandas.DataFrame _make_empty_catalog(self, ApdbTables table_name)
#   None _makeSchema(cls, ApdbConfig config, *, bool drop=False, int|None replication_factor=None, CreateTableOptions|None table_options=None)
#   None _check_time_partitions(self, Iterable[int] partitions, ApdbCassandraTimePartitionRange time_partitions_range)
#   __init__(self, ApdbCassandraConfig config)
#   ApdbMetadata metadata(self)
#   pandas.DataFrame _add_apdb_part(self, pandas.DataFrame df)
#   VersionTuple apdbImplementationVersion(cls)
#   None store(self, astropy.time.Time visit_time, pandas.DataFrame objects, pandas.DataFrame|None sources=None, pandas.DataFrame|None forced_sources=None)
#   None _storeObjectsPandas(self, pandas.DataFrame records, ApdbTables|ExtraTables table_name, Mapping|None extra_columns=None, int|None time_part=None)
#   Mapping[int, int] _queryDiaObjectLastPartitions(self, Iterable[int] ids)
#   ApdbCassandraReplica get_replica(self)
#   pandas.DataFrame|None getDiaSources(self, sphgeom.Region region, Iterable[int]|None object_ids, astropy.time.Time visit_time, astropy.time.Time|None start_time=None)
#   None _storeDiaSourcesPartitions(self, pandas.DataFrame sources, astropy.time.Time visit_time, ReplicaChunk|None replica_chunk, int|None subchunk)
#   None _deleteMovingObjects(self, pandas.DataFrame objs)
#   None _storeReplicaChunk(self, ReplicaChunk replica_chunk)
#   None _storeUpdateRecords(self, Iterable[ApdbUpdateRecord] records, ReplicaChunk chunk, *, bool store_chunk=False)
#   pandas.DataFrame _getSources(self, sphgeom.Region region, Iterable[int]|None object_ids, float mjd_start, float mjd_end, ApdbTables table_name)
#   ApdbCassandraConfig init_database(cls, tuple[str,...] hosts, str keyspace, *, str|None schema_file=None, str|None ss_schema_file=None, int|None read_sources_months=None, int|None read_forced_sources_months=None, bool enable_replica=False, bool replica_skips_diaobjects=False, int|None port=None, str|None username=None, str|None prefix=None, str|None part_pixelization=None, int|None part_pix_level=None, bool time_partition_tables=True, str|None time_partition_start=None, str|None time_partition_end=None, str|None read_consistency=None, str|None write_consistency=None, int|None read_timeout=None, int|None write_timeout=None, tuple[str, str]|None ra_dec_columns=None, int|None replication_factor=None, bool drop=False, CreateTableOptions|None table_options=None)
#   int countUnassociatedObjects(self)
#   Timer _timer(self, str name, *, Mapping[str, str|int]|None tags=None)
#   Table|None tableDef(self, ApdbTables table)
#   ConnectionContext|None _connection_context
#   ConnectionContext _context(self)
#   ApdbCassandraAdmin admin(self)
#   Iterator[tuple[cassandra.query.Statement, tuple]] _combine_where(self, str prefix, list[tuple[str, tuple]] where1, list[tuple[str, tuple]] where2, tuple[str, tuple]|None where3=None, str|None suffix=None)
#   ApdbCassandraConfig getConfig(self)
#
# lsst.sphgeom.Region is a minimal interface for 2-dimensional regions on the unit sphere.
# lsst.sphgeom.UnitVector3d is a unit vector in ℝ³ with components stored in double precision.