"""Module defining Apdb class and related methods.
"""

from __future__ import annotations

__all__ = ["ApdbSqlConfig", "ApdbSql"]

from contextlib import contextmanager
import logging
from typing import cast, Any, Dict, Iterable, Iterator, List, Mapping, Optional, Tuple

import numpy as np
import pandas

import lsst.daf.base as dafBase
from lsst.pex.config import ChoiceField, Field, ListField
from lsst.sphgeom import HtmPixelization, LonLat, Region, UnitVector3d
import sqlalchemy
from sqlalchemy import func, sql
from sqlalchemy.pool import NullPool

from .apdb import Apdb, ApdbConfig
from .apdbSchema import ApdbTables, TableDef
from .apdbSqlSchema import ApdbSqlSchema
from .timer import Timer

_LOG = logging.getLogger(__name__)
def _split(seq: Iterable, nItems: int) -> Iterator[List]:
    """Split a sequence into smaller sequences"""
    seq = list(seq)
    while seq:
        yield seq[:nItems]
        del seq[:nItems]
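# Example (illustrative): list(_split([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]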
def _coerce_uint64(df: pandas.DataFrame) -> pandas.DataFrame:
    """Change type of the uint64 columns to int64, return copy of data frame.
    """
    names = [c[0] for c in df.dtypes.items() if c[1] == np.uint64]
    return df.astype({name: np.int64 for name in names})
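# Example (illustrative sketch): unsigned 64-bit ID columns are converted so
# that they survive database drivers without unsigned 64-bit support.
#
#     df = pandas.DataFrame({"diaObjectId": np.array([1, 2], dtype=np.uint64)})
#     assert _coerce_uint64(df)["diaObjectId"].dtype == np.int64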
def _make_midPointTai_start(visit_time: dafBase.DateTime, months: int) -> float:
    """Calculate starting point for time-based source search.

    Parameters
    ----------
    visit_time : `lsst.daf.base.DateTime`
        Time of current visit.
    months : `int`
        Number of months in the sources history.

    Returns
    -------
    midPointTai_start : `float`
        A ``midPointTai`` starting point, MJD time.
    """
    return visit_time.get(system=dafBase.DateTime.MJD) - months * 30
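# Example (illustrative): for a visit at MJD 59000.0 and a 12-month history the
# search window starts at 59000.0 - 12 * 30 = 58640.0 (MJD); a month is always
# approximated as 30 days here.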
@contextmanager
def _ansi_session(engine: sqlalchemy.engine.Engine) -> Iterator[sqlalchemy.engine.Connection]:
    """Returns a connection, makes sure that ANSI mode is set for MySQL
    """
    with engine.begin() as conn:
        if engine.name == 'mysql':
            conn.execute(sql.text("SET SESSION SQL_MODE = 'ANSI'"))
        yield conn
class ApdbSqlConfig(ApdbConfig):
    """APDB configuration class for SQL implementation (ApdbSql).
    """
    db_url = Field(
        dtype=str,
        doc="SQLAlchemy database connection URI"
    )
    isolation_level = ChoiceField(
        dtype=str,
        doc="Transaction isolation level, if unset then backend-default value "
            "is used, except for SQLite backend where we use READ_UNCOMMITTED. "
            "Some backends may not support every allowed value.",
        allowed={
            "READ_COMMITTED": "Read committed",
            "READ_UNCOMMITTED": "Read uncommitted",
            "REPEATABLE_READ": "Repeatable read",
            "SERIALIZABLE": "Serializable"
        },
        default=None,
        optional=True
    )
    connection_pool = Field(
        dtype=bool,
        doc="If False then disable SQLAlchemy connection pool. "
            "Do not use connection pool when forking.",
        default=True
    )
    connection_timeout = Field(
        dtype=float,
        doc="Maximum time to wait for database lock to be released before "
            "exiting. Defaults to sqlalchemy defaults if not set.",
        default=None,
        optional=True
    )
    sql_echo = Field(
        dtype=bool,
        doc="If True then pass SQLAlchemy echo option.",
        default=False
    )
    dia_object_index = ChoiceField(
        dtype=str,
        doc="Indexing mode for DiaObject table",
        allowed={
            'baseline': "Index defined in baseline schema",
            'pix_id_iov': "(pixelId, objectId, iovStart) PK",
            'last_object_table': "Separate DiaObjectLast table"
        }
    )
    htm_level = Field(
        dtype=int,
        doc="HTM indexing level"
    )
    htm_max_ranges = Field(
        dtype=int,
        doc="Max number of ranges in HTM envelope"
    )
    htm_index_column = Field(
        dtype=str,
        default="pixelId",
        doc="Name of a HTM index column for DiaObject and DiaSource tables"
    )
    ra_dec_columns = ListField(
        dtype=str,
        default=["ra", "decl"],
        doc="Names of ra/dec columns in DiaObject table"
    )
    dia_object_columns = ListField(
        dtype=str,
        default=[],
        doc="List of columns to read from DiaObject, by default read all columns",
    )
    object_last_replace = Field(
        dtype=bool,
        doc="If True (default) then use \"upsert\" for DiaObjectsLast table",
        default=True
    )
    prefix = Field(
        dtype=str,
        doc="Prefix to add to table names and index names",
        default=""
    )
    explain = Field(
        dtype=bool,
        doc="If True then run EXPLAIN SQL command on each executed query",
        default=False
    )
    timer = Field(
        dtype=bool,
        doc="If True then print/log timing information",
        default=False
    )

    def validate(self) -> None:
        super().validate()
        if len(self.ra_dec_columns) != 2:
            raise ValueError("ra_dec_columns must have exactly two column names")
class ApdbSql(Apdb):
    """Implementation of APDB interface based on SQL database.

    The implementation is configured via standard ``pex_config`` mechanism
    using `ApdbSqlConfig` configuration class. For an example of different
    configurations check ``config/`` folder.

    Parameters
    ----------
    config : `ApdbSqlConfig`
        Configuration object.
    """

    ConfigClass = ApdbSqlConfig
    def __init__(self, config: ApdbSqlConfig):

        self.config = config

        _LOG.debug("APDB Configuration:")
        _LOG.debug("    dia_object_index: %s", self.config.dia_object_index)
        _LOG.debug("    read_sources_months: %s", self.config.read_sources_months)
        _LOG.debug("    read_forced_sources_months: %s", self.config.read_forced_sources_months)
        _LOG.debug("    dia_object_columns: %s", self.config.dia_object_columns)
        _LOG.debug("    object_last_replace: %s", self.config.object_last_replace)
        _LOG.debug("    schema_file: %s", self.config.schema_file)
        _LOG.debug("    extra_schema_file: %s", self.config.extra_schema_file)
        _LOG.debug("    schema prefix: %s", self.config.prefix)

        # Build the sqlalchemy engine; disable the connection pool via
        # NullPool when requested (pooled connections must not be shared
        # across forks).
        kw = dict(echo=self.config.sql_echo)
        conn_args: Dict[str, Any] = dict()
        if not self.config.connection_pool:
            kw.update(poolclass=NullPool)
        if self.config.isolation_level is not None:
            kw.update(isolation_level=self.config.isolation_level)
        elif self.config.db_url.startswith("sqlite"):
            # Use READ_UNCOMMITTED as default value for sqlite.
            kw.update(isolation_level="READ_UNCOMMITTED")
        if self.config.connection_timeout is not None:
            if self.config.db_url.startswith("sqlite"):
                conn_args.update(timeout=self.config.connection_timeout)
            elif self.config.db_url.startswith(("postgresql", "mysql")):
                conn_args.update(connect_timeout=self.config.connection_timeout)
        kw.update(connect_args=conn_args)
        self._engine = sqlalchemy.create_engine(self.config.db_url, **kw)

        self._schema = ApdbSqlSchema(engine=self._engine,
                                     dia_object_index=self.config.dia_object_index,
                                     schema_file=self.config.schema_file,
                                     schema_name=self.config.schema_name,
                                     prefix=self.config.prefix,
                                     htm_index_column=self.config.htm_index_column)

        self.pixelator = HtmPixelization(self.config.htm_level)
    def tableRowCount(self) -> Dict[str, int]:
        """Returns dictionary with the table names and row counts.

        Used by ``ap_proto`` to keep track of the size of the database tables.
        Depending on database technology this could be expensive operation.

        Returns
        -------
        row_counts : `dict`
            Dict where key is a table name and value is a row count.
        """
        res = {}
        tables: List[sqlalchemy.schema.Table] = [
            self._schema.objects, self._schema.sources, self._schema.forcedSources]
        if self.config.dia_object_index == 'last_object_table':
            tables.append(self._schema.objects_last)
        for table in tables:
            stmt = sql.select([func.count()]).select_from(table)
            count = self._engine.scalar(stmt)
            res[table.name] = count

        return res
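    # Example (illustrative): the returned mapping is keyed by table name,
    # e.g. {"DiaObject": ..., "DiaSource": ..., "DiaForcedSource": ...}.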
    def tableDef(self, table: ApdbTables) -> Optional[TableDef]:
        # docstring is inherited from a base class
        return self._schema.tableSchemas.get(table)
    def getDiaObjects(self, region: Region) -> pandas.DataFrame:
        # docstring is inherited from a base class

        # decide what table and columns to use
        table: sqlalchemy.schema.Table
        if self.config.dia_object_index == 'last_object_table':
            table = self._schema.objects_last
        else:
            table = self._schema.objects
        if not self.config.dia_object_columns:
            query = table.select()
        else:
            columns = [table.c[col] for col in self.config.dia_object_columns]
            query = sql.select(columns)

        # build selection
        query = query.where(self._filterRegion(table, region))

        # select latest version of objects
        if self.config.dia_object_index != 'last_object_table':
            query = query.where(table.c.validityEnd == None)  # noqa: E711

        _LOG.debug("query: %s", query)

        if self.config.explain:
            # run the same query with explain
            self._explain(query, self._engine)

        # execute select
        with Timer('DiaObject select', self.config.timer):
            with self._engine.begin() as conn:
                objects = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaObjects", len(objects))
        return objects
    def getDiaSources(self, region: Region,
                      object_ids: Optional[Iterable[int]],
                      visit_time: dafBase.DateTime) -> Optional[pandas.DataFrame]:
        # docstring is inherited from a base class
        if self.config.read_sources_months == 0:
            _LOG.debug("Skip DiaSources fetching")
            return None

        if object_ids is None:
            # region-based select
            return self._getDiaSourcesInRegion(region, visit_time)
        else:
            return self._getDiaSourcesByIDs(list(object_ids), visit_time)
    def getDiaForcedSources(self, region: Region,
                            object_ids: Optional[Iterable[int]],
                            visit_time: dafBase.DateTime) -> Optional[pandas.DataFrame]:
        """Return catalog of DiaForcedSource instances from a given region.

        Parameters
        ----------
        region : `lsst.sphgeom.Region`
            Region to search for DIASources.
        object_ids : iterable [ `int` ], optional
            List of DiaObject IDs to further constrain the set of returned
            sources. If list is empty then empty catalog is returned with a
            correct schema.
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`, or `None`
            Catalog containing DiaSource records. `None` is returned if
            ``read_forced_sources_months`` configuration parameter is set
            to 0.

        Raises
        ------
        NotImplementedError
            Raised if ``object_ids`` is `None`.

        Notes
        -----
        Even though base class allows `None` to be passed for ``object_ids``,
        this class requires ``object_ids`` to be not-`None`.
        `NotImplementedError` is raised if `None` is passed.

        This method returns DiaForcedSource catalog for a region with
        additional filtering based on DiaObject IDs. Only a subset of
        DiaSource history is returned limited by
        ``read_forced_sources_months`` config parameter, w.r.t.
        ``visit_time``. If ``object_ids`` is empty then an empty catalog is
        always returned with a correct schema (columns/types).
        """
        if self.config.read_forced_sources_months == 0:
            _LOG.debug("Skip DiaForcedSources fetching")
            return None

        if object_ids is None:
            # This implementation does not support region-based selection.
            raise NotImplementedError("Region-based selection is not supported")

        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_forced_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.forcedSources
        with Timer('DiaForcedSource select', self.config.timer):
            sources = self._getSourcesByIDs(table, list(object_ids), midPointTai_start)

        _LOG.debug("found %s DiaForcedSources", len(sources))
        return sources
    def getDiaObjectsHistory(self,
                             start_time: dafBase.DateTime,
                             end_time: dafBase.DateTime,
                             region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.objects
        query = table.select()

        # build selection
        time_filter = sql.expression.and_(
            table.columns["validityStart"] >= start_time.toPython(),
            table.columns["validityStart"] < end_time.toPython()
        )
        if region:
            where = sql.expression.and_(self._filterRegion(table, region), time_filter)
            query = query.where(where)
        else:
            query = query.where(time_filter)

        # execute select
        with Timer('DiaObject history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaObjects history records", len(catalog))
        return catalog
    def getDiaSourcesHistory(self,
                             start_time: dafBase.DateTime,
                             end_time: dafBase.DateTime,
                             region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.sources
        query = table.select()

        # build selection
        time_filter = sql.expression.and_(
            table.columns["midPointTai"] >= start_time.get(system=dafBase.DateTime.MJD),
            table.columns["midPointTai"] < end_time.get(system=dafBase.DateTime.MJD)
        )
        if region:
            where = sql.expression.and_(self._filterRegion(table, region), time_filter)
            query = query.where(where)
        else:
            query = query.where(time_filter)

        # execute select
        with Timer('DiaSource history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaSource history records", len(catalog))
        return catalog
    def getDiaForcedSourcesHistory(self,
                                   start_time: dafBase.DateTime,
                                   end_time: dafBase.DateTime,
                                   region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.forcedSources
        query = table.select()

        # build selection; the DiaForcedSource table has no HTM index column
        # (the index column exists for DiaObject and DiaSource tables only),
        # so ``region`` is not used for filtering here.
        time_filter = sql.expression.and_(
            table.columns["midPointTai"] >= start_time.get(system=dafBase.DateTime.MJD),
            table.columns["midPointTai"] < end_time.get(system=dafBase.DateTime.MJD)
        )
        query = query.where(time_filter)

        # execute select
        with Timer('DiaForcedSource history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaForcedSource history records", len(catalog))
        return catalog
    def getSSObjects(self) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.ssObjects
        query = table.select()

        if self.config.explain:
            # run the same query with explain
            self._explain(query, self._engine)

        # execute select
        with Timer('SSObject select', self.config.timer):
            with self._engine.begin() as conn:
                objects = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s SSObjects", len(objects))
        return objects
    def store(self,
              visit_time: dafBase.DateTime,
              objects: pandas.DataFrame,
              sources: Optional[pandas.DataFrame] = None,
              forced_sources: Optional[pandas.DataFrame] = None) -> None:
        # docstring is inherited from a base class

        # fill HTM index column for DiaObjects
        objects = self._add_obj_htm_index(objects)
        self._storeDiaObjects(objects, visit_time)

        if sources is not None:
            # copy HTM index column from matching DiaObjects to DiaSources
            sources = self._add_src_htm_index(sources, objects)
            self._storeDiaSources(sources)

        if forced_sources is not None:
            self._storeDiaForcedSources(forced_sources)
    def storeSSObjects(self, objects: pandas.DataFrame) -> None:
        # docstring is inherited from a base class
        idColumn = "ssObjectId"
        table = self._schema.ssObjects

        # everything to be done in single transaction
        with self._engine.begin() as conn:

            # find record IDs that already exist in the table
            ids = sorted(objects[idColumn])
            query = sql.select([table.columns[idColumn]]).where(table.columns[idColumn].in_(ids))
            result = conn.execute(query)
            knownIds = set(row[idColumn] for row in result)

            filter = objects[idColumn].isin(knownIds)
            toUpdate = cast(pandas.DataFrame, objects[filter])
            toInsert = cast(pandas.DataFrame, objects[~filter])

            # insert new records
            if len(toInsert) > 0:
                toInsert.to_sql(ApdbTables.SSObject.table_name(), conn, if_exists='append', index=False)

            # update existing records
            if len(toUpdate) > 0:
                whereKey = f"{idColumn}_param"
                query = table.update().where(table.columns[idColumn] == sql.bindparam(whereKey))
                toUpdate = toUpdate.rename({idColumn: whereKey}, axis="columns")
                values = toUpdate.to_dict("records")
                result = conn.execute(query, values)
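    # Example (illustrative; column names other than ssObjectId are
    # hypothetical): records are split into inserts and updates based on
    # existing ssObjectId values, emulating an upsert.
    #
    #     objects = pandas.DataFrame({"ssObjectId": [1, 2], "arc": [0.3, 0.4]})
    #     apdb.storeSSObjects(objects)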
    def reassignDiaSources(self, idMap: Mapping[int, int]) -> None:
        # docstring is inherited from a base class
        table = self._schema.sources
        query = table.update().where(table.columns["diaSourceId"] == sql.bindparam("srcId"))

        with self._engine.begin() as conn:
            # Update one record at a time and check that it actually exists;
            # executemany does not reliably report per-row counts.
            missing_ids: List[int] = []
            for key, value in idMap.items():
                params = dict(srcId=key, diaObjectId=0, ssObjectId=value)
                result = conn.execute(query, params)
                if result.rowcount == 0:
                    missing_ids.append(key)
            if missing_ids:
                missing = ",".join(str(item) for item in missing_ids)
                raise ValueError(f"Following DiaSource IDs do not exist in the database: {missing}")
    def dailyJob(self) -> None:
        # docstring is inherited from a base class
        if self._engine.name == 'postgresql':
            # do VACUUM on all tables
            _LOG.info("Running VACUUM on all tables")
            connection = self._engine.raw_connection()
            # same value as psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
            ISOLATION_LEVEL_AUTOCOMMIT = 0
            connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
            cursor = connection.cursor()
            cursor.execute("VACUUM ANALYSE")
    def countUnassociatedObjects(self) -> int:
        # docstring is inherited from a base class

        # Count the latest versions of DiaObjects that have exactly one
        # associated DiaSource.
        table: sqlalchemy.schema.Table = self._schema.objects
        stmt = sql.select([func.count()]).select_from(table).where(table.c.nDiaSources == 1)
        stmt = stmt.where(table.c.validityEnd == None)  # noqa: E711

        with self._engine.begin() as conn:
            count = conn.scalar(stmt)

        return count
    def _getDiaSourcesInRegion(self, region: Region, visit_time: dafBase.DateTime
                               ) -> pandas.DataFrame:
        """Returns catalog of DiaSource instances from given region.

        Parameters
        ----------
        region : `lsst.sphgeom.Region`
            Region to search for DIASources.
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records.
        """
        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.sources
        query = table.select()

        # build selection
        time_filter = table.columns["midPointTai"] > midPointTai_start
        where = sql.expression.and_(self._filterRegion(table, region), time_filter)
        query = query.where(where)

        # execute select
        with Timer('DiaSource select', self.config.timer):
            with _ansi_session(self._engine) as conn:
                sources = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaSources", len(sources))
        return sources
    def _getDiaSourcesByIDs(self, object_ids: List[int], visit_time: dafBase.DateTime
                            ) -> pandas.DataFrame:
        """Returns catalog of DiaSource instances given set of DiaObject IDs.

        Parameters
        ----------
        object_ids :
            Collection of DiaObject IDs
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records.
        """
        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.sources
        with Timer('DiaSource select', self.config.timer):
            sources = self._getSourcesByIDs(table, object_ids, midPointTai_start)

        _LOG.debug("found %s DiaSources", len(sources))
        return sources
    def _getSourcesByIDs(self, table: sqlalchemy.schema.Table,
                         object_ids: List[int],
                         midPointTai_start: float
                         ) -> pandas.DataFrame:
        """Returns catalog of DiaSource or DiaForcedSource instances given set
        of DiaObject IDs.

        Parameters
        ----------
        table : `sqlalchemy.schema.Table`
            Table to query, either DiaSource or DiaForcedSource.
        object_ids :
            Collection of DiaObject IDs
        midPointTai_start : `float`
            Earliest midPointTai to retrieve.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records. An empty catalog with a
            correct schema is returned when ``object_ids`` is empty.
        """
        sources: Optional[pandas.DataFrame] = None
        with _ansi_session(self._engine) as conn:
            if len(object_ids) <= 0:
                _LOG.debug("ID list is empty, just fetch empty result")
                query = table.select().where(False)
                sources = pandas.read_sql_query(query, conn)
            else:
                for ids in _split(sorted(object_ids), 1000):
                    query = f'SELECT * FROM "{table.name}" WHERE '

                    # select by object id
                    ids_str = ",".join(str(id) for id in ids)
                    query += f'"diaObjectId" IN ({ids_str})'
                    query += f' AND "midPointTai" > {midPointTai_start}'

                    # execute select
                    df = pandas.read_sql_query(sql.text(query), conn)
                    if sources is None:
                        sources = df
                    else:
                        sources = sources.append(df)
        assert sources is not None, "Catalog cannot be None"
        return sources
    def _storeDiaObjects(self, objs: pandas.DataFrame, visit_time: dafBase.DateTime) -> None:
        """Store catalog of DiaObjects from current visit.

        Parameters
        ----------
        objs : `pandas.DataFrame`
            Catalog with DiaObject records.
        visit_time : `lsst.daf.base.DateTime`
            Time of the visit.
        """
        ids = sorted(objs['diaObjectId'])
        _LOG.debug("first object ID: %d", ids[0])

        table: sqlalchemy.schema.Table = self._schema.objects

        dt = visit_time.toPython()

        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            ids_str = ",".join(str(id) for id in ids)

            if self.config.dia_object_index == 'last_object_table':

                # insert latest versions into the separate last-object table
                table = self._schema.objects_last
                do_replace = self.config.object_last_replace
                # Note: pandas.to_sql() below cannot do a native "upsert", so
                # for DataFrame input existing records are always deleted
                # first, regardless of the ``object_last_replace`` setting.
                if not do_replace or isinstance(objs, pandas.DataFrame):
                    query = 'DELETE FROM "' + table.name + '" '
                    query += 'WHERE "diaObjectId" IN (' + ids_str + ') '

                    if self.config.explain:
                        # run the same query with explain
                        self._explain(query, conn)

                    with Timer(table.name + ' delete', self.config.timer):
                        res = conn.execute(sql.text(query))
                    _LOG.debug("deleted %s objects", res.rowcount)

                extra_columns: Dict[str, Any] = dict(lastNonForcedSource=dt)
                with Timer("DiaObjectLast insert", self.config.timer):
                    objs = _coerce_uint64(objs)
                    for col, data in extra_columns.items():
                        objs[col] = data
                    objs.to_sql("DiaObjectLast", conn, if_exists='append',
                                index=False)
            else:

                # truncate existing validity intervals
                table = self._schema.objects
                query = 'UPDATE "' + table.name + '" '
                query += "SET \"validityEnd\" = '" + str(dt) + "' "
                query += 'WHERE "diaObjectId" IN (' + ids_str + ') '
                query += 'AND "validityEnd" IS NULL'

                if self.config.explain:
                    # run the same query with explain
                    self._explain(query, conn)

                with Timer(table.name + ' truncate', self.config.timer):
                    res = conn.execute(sql.text(query))
                _LOG.debug("truncated %s intervals", res.rowcount)

            # insert new versions
            table = self._schema.objects
            extra_columns = dict(lastNonForcedSource=dt, validityStart=dt,
                                 validityEnd=None)
            with Timer("DiaObject insert", self.config.timer):
                objs = _coerce_uint64(objs)
                for col, data in extra_columns.items():
                    objs[col] = data
                objs.to_sql("DiaObject", conn, if_exists='append',
                            index=False)
    def _storeDiaSources(self, sources: pandas.DataFrame) -> None:
        """Store catalog of DiaSources from current visit.

        Parameters
        ----------
        sources : `pandas.DataFrame`
            Catalog containing DiaSource records
        """
        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            with Timer("DiaSource insert", self.config.timer):
                sources = _coerce_uint64(sources)
                sources.to_sql("DiaSource", conn, if_exists='append', index=False)
    def _storeDiaForcedSources(self, sources: pandas.DataFrame) -> None:
        """Store a set of DiaForcedSources from current visit.

        Parameters
        ----------
        sources : `pandas.DataFrame`
            Catalog containing DiaForcedSource records
        """
        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            with Timer("DiaForcedSource insert", self.config.timer):
                sources = _coerce_uint64(sources)
                sources.to_sql("DiaForcedSource", conn, if_exists='append', index=False)
    def _explain(self, query: str, conn: sqlalchemy.engine.Connection) -> None:
        """Run the query with explain"""

        _LOG.info("explain for query: %s...", query[:64])

        if conn.engine.name == 'mysql':
            query = "EXPLAIN EXTENDED " + query
        else:
            query = "EXPLAIN " + query

        res = conn.execute(sql.text(query))
        if res.returns_rows:
            _LOG.info("explain: %s", res.keys())
            for row in res:
                _LOG.info("explain: %s", row)
        else:
            _LOG.info("EXPLAIN returned nothing")
    def _htm_indices(self, region: Region) -> List[Tuple[int, int]]:
        """Generate a set of HTM indices covering specified region.

        Parameters
        ----------
        region : `sphgeom.Region`
            Region that needs to be indexed.

        Returns
        -------
        ranges : `list` of `tuple`
            Sequence of ranges, range is a tuple (minHtmID, maxHtmID).
        """
        _LOG.debug('region: %s', region)
        indices = self.pixelator.envelope(region, self.config.htm_max_ranges)

        if _LOG.isEnabledFor(logging.DEBUG):
            for irange in indices.ranges():
                _LOG.debug('range: %s %s', self.pixelator.toString(irange[0]),
                           self.pixelator.toString(irange[1]))

        return indices.ranges()
    def _filterRegion(self, table: sqlalchemy.schema.Table, region: Region) -> sql.ClauseElement:
        """Make SQLAlchemy expression for selecting records in a region."""
        htm_index_column = table.columns[self.config.htm_index_column]
        exprlist = []
        pixel_ranges = self._htm_indices(region)
        for low, upper in pixel_ranges:
            # envelope ranges are half-open, make the upper bound inclusive
            upper -= 1
            if low == upper:
                exprlist.append(htm_index_column == low)
            else:
                exprlist.append(sql.expression.between(htm_index_column, low, upper))

        return sql.expression.or_(*exprlist)
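    # Example (illustrative, with the default "pixelId" index column): for an
    # envelope with ranges [(512, 516), (520, 521)] the resulting clause is
    # equivalent to
    #
    #     "pixelId" BETWEEN 512 AND 515 OR "pixelId" = 520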
    def _add_obj_htm_index(self, df: pandas.DataFrame) -> pandas.DataFrame:
        """Calculate HTM index for each record and add it to a DataFrame.

        Notes
        -----
        This overrides any existing column in a DataFrame with the same name
        (pixelId). Original DataFrame is not changed, copy of a DataFrame is
        returned.
        """
        # calculate HTM index for every DiaObject
        htm_index = np.zeros(df.shape[0], dtype=np.int64)
        ra_col, dec_col = self.config.ra_dec_columns
        for i, (ra, dec) in enumerate(zip(df[ra_col], df[dec_col])):
            uv3d = UnitVector3d(LonLat.fromDegrees(ra, dec))
            idx = self.pixelator.index(uv3d)
            htm_index[i] = idx
        df = df.copy()
        df[self.config.htm_index_column] = htm_index
        return df
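    # Example (illustrative sketch, assuming an `apdb` instance and the
    # default "ra"/"decl" column names): each (ra, dec) pair is mapped to its
    # HTM pixel.
    #
    #     df = pandas.DataFrame({"ra": [10.0], "decl": [-5.0], "diaObjectId": [1]})
    #     df = apdb._add_obj_htm_index(df)  # adds/overwrites the "pixelId" column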
    def _add_src_htm_index(self, sources: pandas.DataFrame, objs: pandas.DataFrame) -> pandas.DataFrame:
        """Add pixelId column to DiaSource catalog.

        Notes
        -----
        This method copies pixelId value from a matching DiaObject record.
        DiaObject catalog needs to have a pixelId column filled by
        ``_add_obj_htm_index`` method and DiaSource records need to be
        associated to DiaObjects via ``diaObjectId`` column.

        This overrides any existing column in a DataFrame with the same name
        (pixelId). Original DataFrame is not changed, copy of a DataFrame is
        returned.
        """
        pixel_id_map: Dict[int, int] = {
            diaObjectId: pixelId for diaObjectId, pixelId
            in zip(objs["diaObjectId"], objs[self.config.htm_index_column])
        }

        htm_index = np.zeros(sources.shape[0], dtype=np.int64)
        for i, diaObjId in enumerate(sources["diaObjectId"]):
            htm_index[i] = pixel_id_map[diaObjId]
        sources = sources.copy()
        sources[self.config.htm_index_column] = htm_index
        return sources