LSST Applications  21.0.0-142-gef555c1e+42c9bccae2,22.0.0+052faf71bd,22.0.0+1c4650f311,22.0.0+40ce427c77,22.0.0+5b6c068b1a,22.0.0+7589c3a021,22.0.0+81ed51be6d,22.0.1-1-g7d6de66+6cae67f2c6,22.0.1-1-g87000a6+314cd8b7ea,22.0.1-1-g8760c09+052faf71bd,22.0.1-1-g8e32f31+5b6c068b1a,22.0.1-10-g779eefa+a163f08322,22.0.1-12-g3bd7ecb+bbeacc25a9,22.0.1-15-g63cc0c1+2a7037787d,22.0.1-17-ge5a99e88+3d2c1afe2e,22.0.1-19-g88addfe+6cae67f2c6,22.0.1-2-g1cb3e5b+84de06d286,22.0.1-2-g8ef0a89+6cae67f2c6,22.0.1-2-g92698f7+1c4650f311,22.0.1-2-ga9b0f51+052faf71bd,22.0.1-2-gb66926d+5b6c068b1a,22.0.1-2-gcb770ba+0723a13595,22.0.1-2-ge470956+ff9f1dc8d5,22.0.1-22-g608e23ac+2ac85e833c,22.0.1-29-g184b6e44e+8b185d4e2d,22.0.1-3-g59f966b+11ba4df19d,22.0.1-3-g8c1d971+f90df4c6d0,22.0.1-3-g997b569+d69a7aa2f8,22.0.1-3-gaaec9c0+4d194bf81c,22.0.1-4-g1930a60+283d9d2f1a,22.0.1-4-g5b7b756+c1283a92b8,22.0.1-4-g8623105+6cae67f2c6,22.0.1-7-gba73697+283d9d2f1a,22.0.1-8-g47d23f5+43acea82f3,master-g5f2689bdc5+40ce427c77,w.2021.38
LSST Data Management Base Package
tests.py
Go to the documentation of this file.
1 #
2 # LSST Data Management System
3 #
4 # Copyright 2008-2017 AURA/LSST.
5 #
6 # This product includes software developed by the
7 # LSST Project (http://www.lsst.org/).
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the LSST License Statement and
20 # the GNU General Public License along with this program. If not,
21 # see <https://www.lsstcorp.org/LegalNotices/>.
22 #
23 """Support code for running unit tests"""
24 
25 import contextlib
26 import gc
27 import inspect
28 import os
29 import subprocess
30 import sys
31 import unittest
32 import warnings
33 import numpy
34 import psutil
35 import functools
36 import tempfile
37 import shutil
38 import itertools
39 
# Public API re-exported by ``from lsst.utils.tests import *``.  The product
# variants of the parameterization decorators and temporaryDirectory are
# defined in this module and are part of the public interface, so they are
# listed here as well.
__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual",
           "debugger", "classParameters", "methodParameters", "classParametersProduct",
           "methodParametersProduct", "temporaryDirectory"]
43 
# Initialize the list of open files to an empty set.
# init() replaces this with a snapshot of the files open at that time;
# MemoryTestCase.testFileDescriptorLeaks diffs against this baseline to
# detect file descriptors leaked during the tests.
open_files = set()
46 
47 
def _get_open_files():
    """Return the set of file paths currently open in this process.

    Returns
    -------
    open_files : `set` of `str`
        Paths of all files the current process has open.
    """
    # psutil reports one record per open file handle; only the path matters
    # for leak detection.
    return {record.path for record in psutil.Process().open_files()}
58 
59 
def init():
    """Initialize the memory tester and file descriptor leak tester.

    Takes a fresh snapshot of the files currently open in this process so
    that subsequent leak checks are relative to this baseline.
    """
    global open_files
    # Re-baseline: anything already open now will not be reported as a leak.
    open_files = _get_open_files()
65 
66 
def sort_tests(tests):
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    combined = unittest.TestSuite()
    deferred = []
    for candidate in tests:
        try:
            # Peek at the first test in the suite to learn its class
            # ancestry.  A for loop with an immediate break is used rather
            # than next() because a suite may contain no tests at all, and
            # the Python community prefers a loop over catching
            # StopIteration.
            ancestry = None
            for test in candidate:
                ancestry = inspect.getmro(test.__class__)
                break
            if ancestry is not None and MemoryTestCase in ancestry:
                deferred.append(candidate)
            else:
                combined.addTests(candidate)
        except TypeError:
            # Not iterable: candidate is a single test case, not a suite.
            if isinstance(candidate, MemoryTestCase):
                deferred.append(candidate)
            else:
                combined.addTest(candidate)
    # Memory tests run last.
    combined.addTests(deferred)
    return combined
108 
109 
def suiteClassWrapper(tests):
    """Combine test suites into one, with memory tests sorted to the end.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        Combined suite, ordered by `sort_tests`.
    """
    ordered = sort_tests(tests)
    return unittest.TestSuite(ordered)
112 
113 
# Replace the suiteClass callable in the defaultTestLoader
# so that we can reorder the test ordering. This will have
# no effect if no memory test cases are found.
# NOTE: this runs at import time, so merely importing this module changes
# how unittest discovery builds suites for the whole process.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
118 
119 
class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks.

    Intended to be the last test case run in a module: `sort_tests`
    reorders suites so subclasses of this class execute after all others.
    """

    @classmethod
    def tearDownClass(cls):
        """Reset the leak counter when the tests have been completed"""
        init()

    # NOTE(review): the method header below was missing from the extracted
    # source (a line was dropped); reconstructed so the docstring and body
    # form a valid test method again.
    def testFileDescriptorLeaks(self):
        """Check if any file descriptors are open since init() called."""
        # Collect garbage first so objects holding files are finalized
        # before we take the snapshot.
        gc.collect()
        global open_files
        now_open = _get_open_files()

        # Some files are opened out of the control of the stack.
        now_open = set(f for f in now_open if not f.endswith(".car")
                       and not f.startswith("/proc/")
                       and not f.endswith(".ttf")
                       and not (f.startswith("/var/lib/") and f.endswith("/passwd"))
                       and not f.endswith("astropy.log"))

        # Anything open now that was not open at init() time is a leak.
        diff = now_open.difference(open_files)
        if diff:
            for f in diff:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
146 
147 
class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """

    # Number of executables found by create_executable_tests();
    # -1 indicates discovery has not yet been attempted.
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls):
        """Abort testing if automated test creation was enabled and
        no tests were found."""
        # (Fixed: the extracted source had this attribute name garbled.)
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self):
        """This test exists to ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test."""
        pass

    def assertExecutable(self, executable, root_dir=None, args=None, msg=None):
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """

        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Form the argument list for subprocess
        sp_args = [executable]
        argstr = "no arguments"
        if args is not None:
            sp_args.extend(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))
        failmsg = None
        try:
            output = subprocess.check_output(sp_args)
        except subprocess.CalledProcessError as e:
            # Keep the captured output so it is still printed before failing.
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))
        if failmsg:
            if msg is None:
                msg = failmsg
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file, executables=None):
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """

        # Get the search directory from the reference file
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Look for executables to test by walking the tree
            executables = []
            for root, dirs, files in os.walk(ref_dir):
                for f in files:
                    # Skip Python files. Shared libraries are executable.
                    if not f.endswith(".py") and not f.endswith(".so"):
                        full_path = os.path.join(root, f)
                        if os.access(full_path, os.X_OK):
                            executables.append(full_path)

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        # (Fixed: the extracted source had this attribute name garbled.)
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class
        # (Fixed: the extracted source had this method name garbled.)
        for e in executables:
            cls._build_test_method(e, ref_dir)
308 
309 
@contextlib.contextmanager
def getTempFilePath(ext, expectOutput=True):
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Notes
    -----
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    stack = inspect.stack()
    # Walk up the call stack to find the name of the outermost function in
    # the caller's file, so nested helpers report the top-level test name.
    # Iteration starts at index 2; presumably indices 0-1 are this generator
    # and the contextlib machinery — TODO confirm.
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            # Immediate caller defines the file we attribute the temp file to.
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function; keep walking up
            # within the same file.
            callerFuncName = frameInfo.function
        else:
            # Left the caller's file: stop at the outermost function found.
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No tests/.tests directory: fall back to the current directory.
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and remove.
        # Use stacklevel 3 so that the warning is reported from the end of the with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Code below only runs if the with-block exited without an exception,
    # so a file from a failed test is deliberately left for inspection.
    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of the with block
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
408 
409 
class TestCase(unittest.TestCase):
    """Subclass of unittest.TestCase that adds some custom assertions for
    convenience.

    The assertion methods (e.g. ``assertFloatsAlmostEqual``) are free
    functions in this module attached to this class by the `inTestCase`
    decorator.
    """
414 
415 
def inTestCase(func):
    """Attach a free function to the custom `TestCase` class as a method,
    while still leaving it available as a free function.

    Parameters
    ----------
    func : callable
        The function to attach; its ``__name__`` becomes the method name.

    Returns
    -------
    func : callable
        The unmodified input function, so this can be used as a decorator.
    """
    setattr(TestCase, func.__name__, func)
    return func
422 
423 
def debugger(*exceptions):
    """Decorator to enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    When no exceptions are given, this defaults to `Exception`, i.e. the
    debugger is entered for any uncaught exception.

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Parameters
    ----------
    *exceptions : `type`
        Exception classes whose traceback should be handed to the debugger.

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.
    """
    # (Doc fix: the docstring previously claimed the default was
    # AssertionError, but the code has always defaulted to Exception.)
    if not exceptions:
        exceptions = (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                # Import lazily so the debugger machinery is only loaded
                # when an exception actually fires.
                import sys
                import pdb
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator
456 
457 
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None):
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    # Lazy import so modules without matplotlib can still import this file.
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared color scale for lhs/rhs (top row) and for diff (bottom row).
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    # 2x3 grid: each image plotted twice, once per color scale.
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            # Red overlay marks the emphasized pixels.
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    # Leave room on the right for the two colorbars.
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
520 
521 
@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None,
                            ignoreNaNs=False):
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by prevoius two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because if
        # they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    # Any remaining non-finite values (inf, or NaN when ignoreNaNs is off)
    # are an immediate failure regardless of tolerances.
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            # Default: compare relative to the elementwise larger magnitude.
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element is bad only if it fails BOTH tolerance tests.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        # Inverted mode: fail when anything is equal; 'bad' now marks the
        # elements that compared equal, for reporting below.
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            # Scalar comparison: report the single pair with the applicable
            # tolerance(s).
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            # Array comparison: summarize, then optionally plot and list
            # the offending elements.
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we can treat
                # them the same (diff and absDiff are arrays if either rhs or lhs is),
                # and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
678 
679 
@inTestCase
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    This is the inverse of `~lsst.utils.tests.assertFloatsAlmostEqual`, which
    it invokes with ``invert=True``; see that function for the full set of
    supported keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)
705 
706 
@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    This delegates to `~lsst.utils.tests.assertFloatsAlmostEqual` with
    ``rtol=atol=0`` so that only exact equality passes; see that function
    for the full set of supported keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)
732 
733 
734 def _settingsIterator(settings):
735  """Return an iterator for the provided test settings
736 
737  Parameters
738  ----------
739  settings : `dict` (`str`: iterable)
740  Lists of test parameters. Each should be an iterable of the same length.
741  If a string is provided as an iterable, it will be converted to a list
742  of a single string.
743 
744  Raises
745  ------
746  AssertionError
747  If the ``settings`` are not of the same length.
748 
749  Yields
750  ------
751  parameters : `dict` (`str`: anything)
752  Set of parameters.
753  """
754  for name, values in settings.items():
755  if isinstance(values, str):
756  # Probably meant as a single-element string, rather than an iterable of chars
757  settings[name] = [values]
758  num = len(next(iter(settings.values()))) # Number of settings
759  for name, values in settings.items():
760  assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
761  for ii in range(num):
762  values = [settings[kk][ii] for kk in settings]
763  yield dict(zip(settings, values))
764 
765 
def classParameters(**settings):
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    def decorator(cls):
        # Inject each generated subclass into the module that defined the
        # decorated class, under a name carrying the parameter values.
        namespace = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(value) for value in params.values())
            generatedName = f"{cls.__name__}_{suffix}"
            members = dict(cls.__dict__)
            members.update(params)
            namespace[generatedName] = type(generatedName, (cls,), members)
        # No return value: the undecorated class name is deliberately
        # removed so only the generated variants are discovered.
    return decorator
808 
809 
def methodParameters(**settings):
    """Method decorator for unit tests

    This decorator iterates over the supplied settings, using
    ``TestCase.subTest`` to communicate the values in the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    ::

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Run the wrapped test once per parameter set; subTest reports
            # the parameter values on failure without stopping the loop.
            for parameters in _settingsIterator(settings):
                callKwargs = dict(kwargs, **parameters)
                with self.subTest(**parameters):
                    func(self, *args, **callKwargs)
        return wrapper
    return decorator
844 
845 
846 def _cartesianProduct(settings):
847  """Return the cartesian product of the settings
848 
849  Parameters
850  ----------
851  settings : `dict` mapping `str` to `iterable`
852  Parameter combinations.
853 
854  Returns
855  -------
856  product : `dict` mapping `str` to `iterable`
857  Parameter combinations covering the cartesian product (all possible
858  combinations) of the input parameters.
859 
860  Example
861  -------
862 
863  cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]})
864 
865  returns:
866 
867  {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]}
868  """
869  product = {kk: [] for kk in settings}
870  for values in itertools.product(*settings.values()):
871  for kk, vv in zip(settings.keys(), values):
872  product[kk].append(vv)
873  return product
874 
875 
def classParametersProduct(**settings):
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    cartesian product of the supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable.

    Examples
    --------
    ::

        @classParametersProduct(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate four classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_1_4(unittest.TestCase):
            foo = 1
            bar = 4
            ...

        class MyTestCase_2_3(unittest.TestCase):
            foo = 2
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    # Expand to all combinations, then reuse the plain parameterizer.
    expanded = _cartesianProduct(settings)
    return classParameters(**expanded)
921 
922 
def methodParametersProduct(**settings):
    """Method decorator for unit tests

    This decorator iterates over the cartesian product of the supplied
    settings, using ``TestCase.subTest`` to communicate the values in the
    event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The parameter combinations to test. Each should be an iterable.

    Examples
    --------
    ::

        @methodParametersProduct(foo=[1, 2], bar=["black", "white"])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar="black")
        testSomething(foo=1, bar="white")
        testSomething(foo=2, bar="black")
        testSomething(foo=2, bar="white")
    """
    # Expand to all combinations, then reuse the plain parameterizer.
    expanded = _cartesianProduct(settings)
    return methodParameters(**expanded)
950 
951 
@contextlib.contextmanager
def temporaryDirectory():
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.

    Yields
    ------
    tmpdir : `str`
        Path to the newly-created temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Remove the directory even when the with-body raises (previously
        # an exception leaked the directory); ignore_errors tolerates
        # filesystems where removal can fail.
        shutil.rmtree(tmpdir, ignore_errors=True)
table::Key< int > type
Definition: Detector.cc:163
def assertExecutable(self, executable, root_dir=None, args=None, msg=None)
Definition: tests.py:171
def create_executable_tests(cls, ref_file, executables=None)
Definition: tests.py:254
def _build_test_method(cls, executable, root_dir)
Definition: tests.py:222
daf::base::PropertySet * set
Definition: fits.cc:912
std::shared_ptr< FrameSet > append(FrameSet const &first, FrameSet const &second)
Construct a FrameSet that performs two transformations in series.
Definition: functional.cc:33
bool all(CoordinateExpr< N > const &expr) noexcept
Return true if all elements are true.
def format(config, name=None, writeSourceLine=True, prefix="", verbose=False)
Definition: history.py:174
def assertFloatsEqual(testCase, lhs, rhs, **kwargs)
Definition: tests.py:708
def classParameters(**settings)
Definition: tests.py:766
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon, atol=sys.float_info.epsilon, relTo=None, printFailures=True, plotOnFailure=False, plotFileName=None, invert=False, msg=None, ignoreNaNs=False)
Definition: tests.py:527
def temporaryDirectory()
Definition: tests.py:953
def methodParameters(**settings)
Definition: tests.py:810
def methodParametersProduct(**settings)
Definition: tests.py:923
def debugger(*exceptions)
Definition: tests.py:424
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds)
Definition: tests.py:681
def getTempFilePath(ext, expectOutput=True)
Definition: tests.py:311
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None)
Definition: tests.py:458
def sort_tests(tests)
Definition: tests.py:67
def inTestCase(func)
Definition: tests.py:416
def suiteClassWrapper(tests)
Definition: tests.py:110
def classParametersProduct(**settings)
Definition: tests.py:876
def init()
Definition: tests.py:60