22"""Module defining Apdb class and related methods.
25from __future__
import annotations
27__all__ = [
"ApdbSqlConfig",
"ApdbSql"]
31from contextlib
import contextmanager
32from typing
import Any, Dict, List, Optional, Tuple, cast
38from felis.simple
import Table
40from lsst.sphgeom import HtmPixelization, LonLat, Region, UnitVector3d
41from lsst.utils.iteration
import chunk_iterable
42from sqlalchemy
import func, sql
43from sqlalchemy.pool
import NullPool
45from .apdb
import Apdb, ApdbConfig
46from .apdbSchema
import ApdbTables
47from .apdbSqlSchema
import ApdbSqlSchema
48from .timer
import Timer
50_LOG = logging.getLogger(__name__)
def _coerce_uint64(df: pandas.DataFrame) -> pandas.DataFrame:
    """Change the type of uint64 columns to int64, return copy of data frame.
    """
    names = [c[0] for c in df.dtypes.items() if c[1] == np.uint64]
    return df.astype({name: np.int64 for name in names})
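
# Illustrative example (not part of the original module): uint64 columns are
# coerced because most DBAPI drivers have no unsigned 64-bit integer type:
#
#     df = pandas.DataFrame({"diaObjectId": np.array([1, 2], dtype=np.uint64)})
#     assert _coerce_uint64(df)["diaObjectId"].dtype == np.int64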
def _make_midPointTai_start(visit_time: dafBase.DateTime, months: int) -> float:
    """Calculate starting point for time-based source search.

    Parameters
    ----------
    visit_time : `lsst.daf.base.DateTime`
        Time of current visit.
    months : `int`
        Number of months in the sources history.

    Returns
    -------
    time : `float`
        A ``midPointTai`` starting point, MJD time.
    """
    # A month is approximated as 30 days here.
    return visit_time.get(system=dafBase.DateTime.MJD) - months * 30
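
# Worked example (illustrative): for a visit at MJD 59580.0 and a 12-month
# window the search starts at 59580.0 - 12 * 30 = 59220.0, since a month is
# approximated as 30 days:
#
#     visit_time = dafBase.DateTime(59580.0, dafBase.DateTime.MJD, dafBase.DateTime.TAI)
#     assert _make_midPointTai_start(visit_time, 12) == 59220.0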
@contextmanager
def _ansi_session(engine: sqlalchemy.engine.Engine) -> Iterator[sqlalchemy.engine.Connection]:
    """Returns a connection, makes sure that ANSI mode is set for MySQL.
    """
    with engine.begin() as conn:
        if engine.name == 'mysql':
            conn.execute(sql.text("SET SESSION SQL_MODE = 'ANSI'"))
        yield conn
    return
92 """APDB configuration class for SQL implementation (ApdbSql).
95 doc="SQLAlchemy database connection URI"
97 isolation_level = ChoiceField[str](
98 doc=
"Transaction isolation level, if unset then backend-default value "
99 "is used, except for SQLite backend where we use READ_UNCOMMITTED. "
100 "Some backends may not support every allowed value.",
102 "READ_COMMITTED":
"Read committed",
103 "READ_UNCOMMITTED":
"Read uncommitted",
104 "REPEATABLE_READ":
"Repeatable read",
105 "SERIALIZABLE":
"Serializable"
110 connection_pool = Field[bool](
111 doc=
"If False then disable SQLAlchemy connection pool. "
112 "Do not use connection pool when forking.",
115 connection_timeout = Field[float](
116 doc=
"Maximum time to wait time for database lock to be released before "
117 "exiting. Defaults to sqlalchemy defaults if not set.",
121 sql_echo = Field[bool](
122 doc=
"If True then pass SQLAlchemy echo option.",
125 dia_object_index = ChoiceField[str](
126 doc=
"Indexing mode for DiaObject table",
128 'baseline':
"Index defined in baseline schema",
129 'pix_id_iov':
"(pixelId, objectId, iovStart) PK",
130 'last_object_table':
"Separate DiaObjectLast table"
134 htm_level = Field[int](
135 doc=
"HTM indexing level",
138 htm_max_ranges = Field[int](
139 doc=
"Max number of ranges in HTM envelope",
142 htm_index_column = Field[str](
144 doc=
"Name of a HTM index column for DiaObject and DiaSource tables"
146 ra_dec_columns = ListField[str](
147 default=[
"ra",
"decl"],
148 doc=
"Names ra/dec columns in DiaObject table"
150 dia_object_columns = ListField[str](
151 doc=
"List of columns to read from DiaObject, by default read all columns",
154 object_last_replace = Field[bool](
155 doc=
"If True (default) then use \"upsert\" for DiaObjectsLast table",
157 deprecated=
"This field is not used and will be removed on 2022-21-31."
160 doc=
"Prefix to add to table names and index names",
163 namespace = Field[str](
165 "Namespace or schema name for all tables in APDB database. "
166 "Presently only makes sense for PostgresQL backend. "
167 "If schema with this name does not exist it will be created when "
168 "APDB tables are created."
173 explain = Field[bool](
174 doc=
"If True then run EXPLAIN SQL command on each executed query",
178 doc=
"If True then print/log timing information",
185 raise ValueError(
"ra_dec_columns must have exactly two column names")
189 """Implementation of APDB interface based on SQL database.
191 The implementation is configured via standard ``pex_config`` mechanism
192 using `ApdbSqlConfig` configuration
class. For an example of different
193 configurations check ``config/`` folder.
197 config : `ApdbSqlConfig`
198 Configuration object.
201 ConfigClass = ApdbSqlConfig
    def __init__(self, config: ApdbSqlConfig):
        config.validate()
        self.config = config

        _LOG.debug("APDB Configuration:")
        _LOG.debug("    dia_object_index: %s", self.config.dia_object_index)
        _LOG.debug("    read_sources_months: %s", self.config.read_sources_months)
        _LOG.debug("    read_forced_sources_months: %s", self.config.read_forced_sources_months)
        _LOG.debug("    dia_object_columns: %s", self.config.dia_object_columns)
        _LOG.debug("    schema_file: %s", self.config.schema_file)
        _LOG.debug("    extra_schema_file: %s", self.config.extra_schema_file)
        _LOG.debug("    schema prefix: %s", self.config.prefix)

        # engine is reused between multiple processes, make sure that we don't
        # share connections by disabling pool (by using NullPool class)
        kw: MutableMapping[str, Any] = dict(echo=self.config.sql_echo)
        conn_args: Dict[str, Any] = dict()
        if not self.config.connection_pool:
            kw.update(poolclass=NullPool)
        if self.config.isolation_level is not None:
            kw.update(isolation_level=self.config.isolation_level)
        elif self.config.db_url.startswith("sqlite"):
            # use READ_UNCOMMITTED as the default value for sqlite
            kw.update(isolation_level="READ_UNCOMMITTED")
        if self.config.connection_timeout is not None:
            if self.config.db_url.startswith("sqlite"):
                conn_args.update(timeout=self.config.connection_timeout)
            elif self.config.db_url.startswith(("postgresql", "mysql")):
                conn_args.update(connect_timeout=self.config.connection_timeout)
        kw.update(connect_args=conn_args)
        self._engine = sqlalchemy.create_engine(self.config.db_url, **kw)

        self._schema = ApdbSqlSchema(engine=self._engine,
                                     dia_object_index=self.config.dia_object_index,
                                     schema_file=self.config.schema_file,
                                     schema_name=self.config.schema_name,
                                     prefix=self.config.prefix,
                                     namespace=self.config.namespace,
                                     htm_index_column=self.config.htm_index_column)

        self.pixelator = HtmPixelization(self.config.htm_level)
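
    # Construction sketch (illustrative): the engine, schema, and pixelator
    # are created eagerly, so a bad ``db_url`` fails here rather than at the
    # first query:
    #
    #     apdb = ApdbSql(config)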
247 """Returns dictionary with the table names and row counts.
249 Used by ``ap_proto`` to keep track of the size of the database tables.
250 Depending on database technology this could be expensive operation.
255 Dict where key is a table name
and value
is a row count.
258 tables: List[sqlalchemy.schema.Table] = [
260 if self.
config.dia_object_index ==
'last_object_table':
261 tables.append(self.
_schema.objects_last)
263 stmt = sql.select([func.count()]).select_from(table)
264 count = self.
_engine.scalar(stmt)
265 res[table.name] = count
    def tableDef(self, table: ApdbTables) -> Optional[Table]:
        # docstring is inherited from a base class
        return self._schema.tableSchemas.get(table)
    def getDiaObjects(self, region: Region) -> pandas.DataFrame:
        # docstring is inherited from a base class

        # decide what columns we need
        table: sqlalchemy.schema.Table
        if self.config.dia_object_index == 'last_object_table':
            table = self._schema.objects_last
        else:
            table = self._schema.objects
        if not self.config.dia_object_columns:
            query = table.select()
        else:
            columns = [table.c[col] for col in self.config.dia_object_columns]
            query = sql.select(columns)

        # build selection
        query = query.where(self._filterRegion(table, region))

        # select latest version of objects
        if self.config.dia_object_index != 'last_object_table':
            query = query.where(table.c.validityEnd == None)  # noqa: E711

        # execute select
        with Timer('DiaObject select', self.config.timer):
            with self._engine.begin() as conn:
                objects = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaObjects", len(objects))
        return objects
    def getDiaSources(self, region: Region,
                      object_ids: Optional[Iterable[int]],
                      visit_time: dafBase.DateTime) -> Optional[pandas.DataFrame]:
        # docstring is inherited from a base class
        if self.config.read_sources_months == 0:
            _LOG.debug("Skip DiaSources fetching")
            return None

        if object_ids is None:
            # region-based select
            return self._getDiaSourcesInRegion(region, visit_time)
        else:
            return self._getDiaSourcesByIDs(list(object_ids), visit_time)
    def getDiaForcedSources(self, region: Region,
                            object_ids: Optional[Iterable[int]],
                            visit_time: dafBase.DateTime) -> Optional[pandas.DataFrame]:
        """Return catalog of DiaForcedSource instances from a given region.

        Parameters
        ----------
        region : `lsst.sphgeom.Region`
            Region to search for DIASources.
        object_ids : iterable [ `int` ], optional
            List of DiaObject IDs to further constrain the set of returned
            sources. If list is empty then empty catalog is returned with a
            correct schema.
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`, or `None`
            Catalog containing DiaSource records. `None` is returned if
            ``read_forced_sources_months`` configuration parameter is set to 0.

        Raises
        ------
        NotImplementedError
            Raised if ``object_ids`` is `None`.

        Notes
        -----
        Even though base class allows `None` to be passed for ``object_ids``,
        this class requires ``object_ids`` to be not-`None`.
        `NotImplementedError` is raised if `None` is passed.

        This method returns DiaForcedSource catalog for a region with
        additional filtering based on DiaObject IDs. Only a subset of
        DiaSource history is returned, limited by
        ``read_forced_sources_months`` config parameter w.r.t. ``visit_time``.
        If ``object_ids`` is empty then an empty catalog is always returned
        with a correct schema (columns/types).
        """
        if self.config.read_forced_sources_months == 0:
            _LOG.debug("Skip DiaForcedSources fetching")
            return None

        if object_ids is None:
            # This implementation does not support region-based selection.
            raise NotImplementedError("Region-based selection is not supported")

        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_forced_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.forcedSources
        with Timer('DiaForcedSource select', self.config.timer):
            sources = self._getSourcesByIDs(table, list(object_ids), midPointTai_start)

        _LOG.debug("found %s DiaForcedSources", len(sources))
        return sources
    def getDiaObjectsHistory(self,
                             start_time: dafBase.DateTime,
                             end_time: dafBase.DateTime,
                             region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.objects
        query = table.select()

        # build selection
        time_filter = sql.expression.and_(
            table.columns["validityStart"] >= start_time.toPython(),
            table.columns["validityStart"] < end_time.toPython()
        )
        if region:
            where = sql.expression.and_(self._filterRegion(table, region), time_filter)
            query = query.where(where)
        else:
            query = query.where(time_filter)

        # execute select
        with Timer('DiaObject history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaObjects history records", len(catalog))
        return catalog
    def getDiaSourcesHistory(self,
                             start_time: dafBase.DateTime,
                             end_time: dafBase.DateTime,
                             region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.sources
        query = table.select()

        # build selection
        time_filter = sql.expression.and_(
            table.columns["midPointTai"] >= start_time.get(system=dafBase.DateTime.MJD),
            table.columns["midPointTai"] < end_time.get(system=dafBase.DateTime.MJD)
        )
        if region:
            where = sql.expression.and_(self._filterRegion(table, region), time_filter)
            query = query.where(where)
        else:
            query = query.where(time_filter)

        # execute select
        with Timer('DiaSource history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaSource history records", len(catalog))
        return catalog
    def getDiaForcedSourcesHistory(self,
                                   start_time: dafBase.DateTime,
                                   end_time: dafBase.DateTime,
                                   region: Optional[Region] = None) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.forcedSources
        query = table.select()

        # build selection
        time_filter = sql.expression.and_(
            table.columns["midPointTai"] >= start_time.get(system=dafBase.DateTime.MJD),
            table.columns["midPointTai"] < end_time.get(system=dafBase.DateTime.MJD)
        )
        # forced sources table has no pixelId column, region is not used
        query = query.where(time_filter)

        # execute select
        with Timer('DiaForcedSource history select', self.config.timer):
            with self._engine.begin() as conn:
                catalog = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaForcedSource history records", len(catalog))
        return catalog
    def getSSObjects(self) -> pandas.DataFrame:
        # docstring is inherited from a base class
        table = self._schema.ssObjects
        query = table.select()

        # execute select
        with Timer('SSObject select', self.config.timer):
            with self._engine.begin() as conn:
                objects = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s SSObjects", len(objects))
        return objects
    def store(self,
              visit_time: dafBase.DateTime,
              objects: pandas.DataFrame,
              sources: Optional[pandas.DataFrame] = None,
              forced_sources: Optional[pandas.DataFrame] = None) -> None:
        # docstring is inherited from a base class

        # fill pixelId column for DiaObjects
        objects = self._add_obj_htm_index(objects)
        self._storeDiaObjects(objects, visit_time)

        if sources is not None:
            # copy pixelId column from DiaObjects to DiaSources
            sources = self._add_src_htm_index(sources, objects)
            self._storeDiaSources(sources)

        if forced_sources is not None:
            self._storeDiaForcedSources(forced_sources)
    def storeSSObjects(self, objects: pandas.DataFrame) -> None:
        # docstring is inherited from a base class
        idColumn = "ssObjectId"
        table = self._schema.ssObjects

        with self._engine.begin() as conn:
            # We need to know which records to insert and which to update.
            # Some types like np.int64 can cause issues with sqlalchemy,
            # convert them to int.
            ids = sorted(int(oid) for oid in objects[idColumn])
            query = sql.select(table.columns[idColumn], table.columns[idColumn].in_(ids))
            result = conn.execute(query)
            knownIds = set(row[idColumn] for row in result)

            filter = objects[idColumn].isin(knownIds)
            toUpdate = cast(pandas.DataFrame, objects[filter])
            toInsert = cast(pandas.DataFrame, objects[~filter])

            # insert new records
            if len(toInsert) > 0:
                toInsert.to_sql(table.name, conn, if_exists='append', index=False, schema=table.schema)

            # update existing records
            if len(toUpdate) > 0:
                whereKey = f"{idColumn}_param"
                query = table.update().where(table.columns[idColumn] == sql.bindparam(whereKey))
                toUpdate = toUpdate.rename({idColumn: whereKey}, axis="columns")
                values = toUpdate.to_dict("records")
                result = conn.execute(query, values)
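
    # The split above is a portable "upsert": one SELECT finds IDs already in
    # the table, new rows go through ``DataFrame.to_sql``, existing rows
    # through one bulk UPDATE with bound parameters. Sketch (illustrative,
    # science columns omitted):
    #
    #     catalog = pandas.DataFrame({"ssObjectId": [1, 2]})
    #     apdb.storeSSObjects(catalog)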
    def reassignDiaSources(self, idMap: Mapping[int, int]) -> None:
        # docstring is inherited from a base class
        table = self._schema.sources
        query = table.update().where(table.columns["diaSourceId"] == sql.bindparam("srcId"))

        with self._engine.begin() as conn:
            # Need to make sure that every ID exists in the database, update
            # one by one and check the row count.
            missing_ids: List[int] = []
            for key, value in idMap.items():
                params = dict(srcId=key, diaObjectId=0, ssObjectId=value)
                result = conn.execute(query, params)
                if result.rowcount == 0:
                    missing_ids.append(key)
            if missing_ids:
                missing = ",".join(str(item) for item in missing_ids)
                raise ValueError(f"Following DiaSource IDs do not exist in the database: {missing}")
    def dailyJob(self) -> None:
        # docstring is inherited from a base class

        if self._engine.name == 'postgresql':
            # do VACUUM on all tables
            _LOG.info("Running VACUUM on all tables")
            connection = self._engine.raw_connection()
            ISOLATION_LEVEL_AUTOCOMMIT = 0
            connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
            cursor = connection.cursor()
            cursor.execute("VACUUM ANALYSE")
    def countUnassociatedObjects(self) -> int:
        # docstring is inherited from a base class
        table: sqlalchemy.schema.Table = self._schema.objects

        # count the latest versions of objects with a single source
        stmt = sql.select([func.count()]).select_from(table).where(table.c.nDiaSources == 1)
        stmt = stmt.where(table.c.validityEnd == None)  # noqa: E711

        with self._engine.begin() as conn:
            count = conn.scalar(stmt)

        return count
    def _getDiaSourcesInRegion(self, region: Region, visit_time: dafBase.DateTime
                               ) -> pandas.DataFrame:
        """Returns catalog of DiaSource instances from given region.

        Parameters
        ----------
        region : `lsst.sphgeom.Region`
            Region to search for DIASources.
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records.
        """
        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.sources
        query = table.select()

        # build selection
        time_filter = table.columns["midPointTai"] > midPointTai_start
        where = sql.expression.and_(self._filterRegion(table, region), time_filter)
        query = query.where(where)

        # execute select
        with Timer('DiaSource select', self.config.timer):
            with _ansi_session(self._engine) as conn:
                sources = pandas.read_sql_query(query, conn)
        _LOG.debug("found %s DiaSources", len(sources))
        return sources
    def _getDiaSourcesByIDs(self, object_ids: List[int], visit_time: dafBase.DateTime
                            ) -> pandas.DataFrame:
        """Returns catalog of DiaSource instances given set of DiaObject IDs.

        Parameters
        ----------
        object_ids : `list` [ `int` ]
            Collection of DiaObject IDs.
        visit_time : `lsst.daf.base.DateTime`
            Time of the current visit.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records.
        """
        midPointTai_start = _make_midPointTai_start(visit_time, self.config.read_sources_months)
        _LOG.debug("midPointTai_start = %.6f", midPointTai_start)

        table: sqlalchemy.schema.Table = self._schema.sources
        with Timer('DiaSource select', self.config.timer):
            sources = self._getSourcesByIDs(table, object_ids, midPointTai_start)

        _LOG.debug("found %s DiaSources", len(sources))
        return sources
    def _getSourcesByIDs(self, table: sqlalchemy.schema.Table,
                         object_ids: List[int],
                         midPointTai_start: float
                         ) -> pandas.DataFrame:
        """Returns catalog of DiaSource or DiaForcedSource instances given set
        of DiaObject IDs.

        Parameters
        ----------
        table : `sqlalchemy.schema.Table`
            Database table to query.
        object_ids : `list` [ `int` ]
            Collection of DiaObject IDs.
        midPointTai_start : `float`
            Earliest midPointTai to retrieve.

        Returns
        -------
        catalog : `pandas.DataFrame`
            Catalog containing DiaSource records. An empty catalog with a
            correct schema is returned when ``object_ids`` is empty.
        """
        sources: Optional[pandas.DataFrame] = None
        with _ansi_session(self._engine) as conn:
            if len(object_ids) <= 0:
                _LOG.debug("ID list is empty, just fetch empty result")
                query = table.select().where(False)
                sources = pandas.read_sql_query(query, conn)
            else:
                for ids in chunk_iterable(sorted(object_ids), 1000):
                    query = table.select()

                    # Some types like np.int64 can cause issues with
                    # sqlalchemy, convert them to int.
                    int_ids = [int(oid) for oid in ids]

                    # select by object ID and history start time
                    query = query.where(
                        sql.expression.and_(
                            table.columns["diaObjectId"].in_(int_ids),
                            table.columns["midPointTai"] > midPointTai_start,
                        )
                    )

                    # execute select
                    df = pandas.read_sql_query(query, conn)
                    if sources is None:
                        sources = df
                    else:
                        sources = sources.append(df)
        assert sources is not None, "Catalog cannot be None"
        return sources
    def _storeDiaObjects(self, objs: pandas.DataFrame, visit_time: dafBase.DateTime) -> None:
        """Store catalog of DiaObjects from current visit.

        Parameters
        ----------
        objs : `pandas.DataFrame`
            Catalog with DiaObject records.
        visit_time : `lsst.daf.base.DateTime`
            Time of the visit.
        """
        # Some types like np.int64 can cause issues with sqlalchemy, convert
        # them to int.
        ids = sorted(int(oid) for oid in objs['diaObjectId'])
        _LOG.debug("first object ID: %d", ids[0])

        table: sqlalchemy.schema.Table = self._schema.objects

        dt = visit_time.toPython()

        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            if self.config.dia_object_index == 'last_object_table':

                # insert and replace all records in LAST table
                table = self._schema.objects_last

                # delete existing records first
                query = table.delete().where(
                    table.columns["diaObjectId"].in_(ids)
                )

                with Timer(table.name + ' delete', self.config.timer):
                    res = conn.execute(query)
                _LOG.debug("deleted %s objects", res.rowcount)

                # restrict catalog to the columns present in the LAST table
                last_column_names = [column.name for column in table.columns]
                last_objs = objs[last_column_names]

                extra_columns: Dict[str, Any] = dict(lastNonForcedSource=dt)
                with Timer("DiaObjectLast insert", self.config.timer):
                    last_objs = _coerce_uint64(last_objs)
                    for col, data in extra_columns.items():
                        last_objs[col] = data
                    last_objs.to_sql(table.name, conn, if_exists='append',
                                     index=False, schema=table.schema)
            else:

                # truncate existing validity intervals
                table = self._schema.objects
                query = table.update().values(validityEnd=dt).where(
                    sql.expression.and_(
                        table.columns["diaObjectId"].in_(ids),
                        table.columns["validityEnd"].is_(None),
                    )
                )

                with Timer(table.name + ' truncate', self.config.timer):
                    res = conn.execute(query)
                _LOG.debug("truncated %s intervals", res.rowcount)

            # insert new versions
            table = self._schema.objects
            extra_columns = dict(lastNonForcedSource=dt, validityStart=dt,
                                 validityEnd=None)
            with Timer("DiaObject insert", self.config.timer):
                objs = _coerce_uint64(objs)
                # use a shared index so the constant columns align on concat
                columns: List[pandas.Series] = []
                for col, data in extra_columns.items():
                    columns.append(pandas.Series([data]*len(objs), name=col))
                objs.set_index(columns[0].index, inplace=True)
                objs = pandas.concat([objs] + columns, axis="columns")
                objs.to_sql(table.name, conn, if_exists='append',
                            index=False, schema=table.schema)
    def _storeDiaSources(self, sources: pandas.DataFrame) -> None:
        """Store catalog of DiaSources from current visit.

        Parameters
        ----------
        sources : `pandas.DataFrame`
            Catalog containing DiaSource records.
        """
        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            with Timer("DiaSource insert", self.config.timer):
                sources = _coerce_uint64(sources)
                table = self._schema.sources
                sources.to_sql(table.name, conn, if_exists='append',
                               index=False, schema=table.schema)
    def _storeDiaForcedSources(self, sources: pandas.DataFrame) -> None:
        """Store a set of DiaForcedSources from current visit.

        Parameters
        ----------
        sources : `pandas.DataFrame`
            Catalog containing DiaForcedSource records.
        """
        # everything to be done in single transaction
        with _ansi_session(self._engine) as conn:

            with Timer("DiaForcedSource insert", self.config.timer):
                sources = _coerce_uint64(sources)
                table = self._schema.forcedSources
                sources.to_sql(table.name, conn, if_exists='append',
                               index=False, schema=table.schema)
    def _explain(self, query: str, conn: sqlalchemy.engine.Connection) -> None:
        """Run the query with EXPLAIN and log the result.
        """
        _LOG.info("explain for query: %s...", query[:64])

        if conn.engine.name == 'mysql':
            query = "EXPLAIN EXTENDED " + query
        else:
            query = "EXPLAIN " + query

        res = conn.execute(sql.text(query))
        if res.returns_rows:
            _LOG.info("explain: %s", res.keys())
            for row in res:
                _LOG.info("explain: %s", row)
        else:
            _LOG.info("EXPLAIN returned nothing")
    def _htm_indices(self, region: Region) -> List[Tuple[int, int]]:
        """Generate a set of HTM indices covering specified region.

        Parameters
        ----------
        region : `sphgeom.Region`
            Region that needs to be indexed.

        Returns
        -------
        ranges : `list` of `tuple`
            Sequence of ranges, range is a tuple (minHtmID, maxHtmID).
        """
        _LOG.debug('region: %s', region)
        indices = self.pixelator.envelope(region, self.config.htm_max_ranges)

        return indices.ranges()
    def _filterRegion(self, table: sqlalchemy.schema.Table, region: Region) -> sql.ClauseElement:
        """Make SQLAlchemy expression for selecting records in a region.
        """
        htm_index_column = table.columns[self.config.htm_index_column]
        exprlist = []
        pixel_ranges = self._htm_indices(region)
        for low, upper in pixel_ranges:
            # ranges are half-open, make upper bound inclusive
            upper -= 1
            if low == upper:
                exprlist.append(htm_index_column == low)
            else:
                exprlist.append(sql.expression.between(htm_index_column, low, upper))

        return sql.expression.or_(*exprlist)
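
    # Example of the generated clause (illustrative): for pixel ranges
    # [(10, 12), (20, 21)] the half-open upper bounds become inclusive and
    # the expression renders roughly as:
    #
    #     pixelId BETWEEN 10 AND 11 OR pixelId = 20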
    def _add_obj_htm_index(self, df: pandas.DataFrame) -> pandas.DataFrame:
        """Calculate HTM index for each record and add it to a DataFrame.

        Notes
        -----
        This overrides any existing column in a DataFrame with the same name
        (pixelId). Original DataFrame is not changed, copy of a DataFrame is
        returned.
        """
        # calculate HTM index for every record
        htm_index = np.zeros(df.shape[0], dtype=np.int64)
        ra_col, dec_col = self.config.ra_dec_columns
        for i, (ra, dec) in enumerate(zip(df[ra_col], df[dec_col])):
            uv3d = UnitVector3d(LonLat.fromDegrees(ra, dec))
            idx = self.pixelator.index(uv3d)
            htm_index[i] = idx

        df = df.copy()
        df[self.config.htm_index_column] = htm_index

        return df
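
    # Indexing sketch (illustrative): each (ra, dec) row maps to an HTM pixel
    # ID at the configured level:
    #
    #     uv3d = UnitVector3d(LonLat.fromDegrees(35.0, -4.0))
    #     pixel_id = HtmPixelization(20).index(uv3d)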
    def _add_src_htm_index(self, sources: pandas.DataFrame, objs: pandas.DataFrame) -> pandas.DataFrame:
        """Add pixelId column to DiaSource catalog.

        Notes
        -----
        This method copies pixelId value from a matching DiaObject record.
        DiaObject catalog needs to have a pixelId column filled by the
        ``_add_obj_htm_index`` method and DiaSource records need to be
        associated to DiaObjects via the ``diaObjectId`` column.

        This overrides any existing column in a DataFrame with the same name
        (pixelId). Original DataFrame is not changed, copy of a DataFrame is
        returned.
        """
        pixel_id_map: Dict[int, int] = {
            diaObjectId: pixelId for diaObjectId, pixelId
            in zip(objs["diaObjectId"], objs[self.config.htm_index_column])
        }

        htm_index = np.zeros(sources.shape[0], dtype=np.int64)
        for i, diaObjId in enumerate(sources["diaObjectId"]):
            htm_index[i] = pixel_id_map[diaObjId]
        sources = sources.copy()
        sources[self.config.htm_index_column] = htm_index

        return sources