23 """Support code for running unit tests"""
# Public names re-exported by ``from lsst.utils.tests import *``.
__all__ = [
    "init",
    "MemoryTestCase",
    "ExecutablesTestCase",
    "getTempFilePath",
    "TestCase",
    "assertFloatsAlmostEqual",
    "assertFloatsNotEqual",
    "assertFloatsEqual",
    "debugger",
    "classParameters",
    "methodParameters"]
def _get_open_files():
    """Return a set containing the list of files currently open in this
    process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    # NOTE(review): the body was reconstructed; psutil is imported at module
    # level in this file -- confirm against upstream.
    return {entry.path for entry in psutil.Process().open_files()}
60 """Initialize the memory tester and file descriptor leak tester."""
63 open_files = _get_open_files()
67 """Sort supplied test suites such that MemoryTestCases are at the end.
69 `lsst.utils.tests.MemoryTestCase` tests should always run after any other
75 Sequence of test suites.
79 suite : `unittest.TestSuite`
80 A combined `~unittest.TestSuite` with
81 `~lsst.utils.tests.MemoryTestCase` at the end.
84 suite = unittest.TestSuite()
86 for test_suite
in tests:
93 for method
in test_suite:
94 bases = inspect.getmro(method.__class__)
96 if bases
is not None and MemoryTestCase
in bases:
97 memtests.append(test_suite)
99 suite.addTests(test_suite)
101 if isinstance(test_suite, MemoryTestCase):
102 memtests.append(test_suite)
104 suite.addTest(test_suite)
105 suite.addTests(memtests)
# Replace the suiteClass callable on the default test loader so that suites
# built by it are reordered (presumably via sort_tests above, putting
# MemoryTestCase last -- confirm suiteClassWrapper's definition). This has
# no effect if tests are not loaded through defaultTestLoader.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
120 """Check for resource leaks."""
124 """Reset the leak counter when the tests have been completed"""
128 """Check if any file descriptors are open since init() called."""
131 now_open = _get_open_files()
134 now_open =
set(f
for f
in now_open
if not f.endswith(
".car")
and
135 not f.startswith(
"/proc/")
and
136 not f.endswith(
".ttf")
and
137 not (f.startswith(
"/var/lib/")
and f.endswith(
"/passwd"))
and
138 not f.endswith(
"astropy.log"))
140 diff = now_open.difference(open_files)
143 print(
"File open: %s" % f)
144 self.fail(
"Failed to close %d file%s" % (len(diff),
"s" if len(diff) != 1
else ""))
148 """Test that executables can be run and return good status.
150 The test methods are dynamically created. Callers
151 must subclass this class in their own test file and invoke
152 the create_executable_tests() class method to register the tests.
154 TESTS_DISCOVERED = -1
158 """Abort testing if automated test creation was enabled and
159 no tests were found."""
162 raise RuntimeError(
"No executables discovered.")
165 """This test exists to ensure that there is at least one test to be
166 executed. This allows the test runner to trigger the class set up
167 machinery to test whether there are some executables to test."""
171 """Check an executable runs and returns good status.
173 Prints output to standard out. On bad exit status the test
174 fails. If the executable can not be located the test is skipped.
179 Path to an executable. ``root_dir`` is not used if this is an
181 root_dir : `str`, optional
182 Directory containing executable. Ignored if `None`.
183 args : `list` or `tuple`, optional
184 Arguments to be provided to the executable.
185 msg : `str`, optional
186 Message to use when the test fails. Can be `None` for default
192 The executable did not return 0 exit status.
195 if root_dir
is not None and not os.path.isabs(executable):
196 executable = os.path.join(root_dir, executable)
199 sp_args = [executable]
200 argstr =
"no arguments"
203 argstr =
'arguments "' +
" ".join(args) +
'"'
205 print(
"Running executable '{}' with {}...".
format(executable, argstr))
206 if not os.path.exists(executable):
207 self.skipTest(
"Executable {} is unexpectedly missing".
format(executable))
210 output = subprocess.check_output(sp_args)
211 except subprocess.CalledProcessError
as e:
213 failmsg =
"Bad exit status from '{}': {}".
format(executable, e.returncode)
214 print(output.decode(
'utf-8'))
221 def _build_test_method(cls, executable, root_dir):
222 """Build a test method and attach to class.
224 A test method is created for the supplied excutable located
225 in the supplied root directory. This method is attached to the class
226 so that the test runner will discover the test and run it.
231 The class in which to create the tests.
233 Name of executable. Can be absolute path.
235 Path to executable. Not used if executable path is absolute.
237 if not os.path.isabs(executable):
238 executable = os.path.abspath(os.path.join(root_dir, executable))
241 test_name =
"test_exe_" + executable.replace(
"/",
"_")
244 def test_executable_runs(*args):
246 self.assertExecutable(executable)
249 test_executable_runs.__name__ = test_name
250 setattr(cls, test_name, test_executable_runs)
254 """Discover executables to test and create corresponding test methods.
256 Scans the directory containing the supplied reference file
257 (usually ``__file__`` supplied from the test class) to look for
258 executables. If executables are found a test method is created
259 for each one. That test method will run the executable and
260 check the returned value.
262 Executable scripts with a ``.py`` extension and shared libraries
263 are ignored by the scanner.
265 This class method must be called before test discovery.
270 Path to a file within the directory to be searched.
271 If the files are in the same location as the test file, then
272 ``__file__`` can be used.
273 executables : `list` or `tuple`, optional
274 Sequence of executables that can override the automated
275 detection. If an executable mentioned here is not found, a
276 skipped test will be created for it, rather than a failed
281 >>> cls.create_executable_tests(__file__)
285 ref_dir = os.path.abspath(os.path.dirname(ref_file))
287 if executables
is None:
290 for root, dirs, files
in os.walk(ref_dir):
293 if not f.endswith(
".py")
and not f.endswith(
".so"):
294 full_path = os.path.join(root, f)
295 if os.access(full_path, os.X_OK):
296 executables.append(full_path)
305 for e
in executables:
@contextlib.contextmanager
def getTempFilePath(ext, expectOutput=True):
    """Return a path suitable for a temporary file and try to delete the
    file on success

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file is remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file if left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function

    Notes
    -----
    ::

        # file tests/testFoo.py
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
    """
    # Walk up the stack to find the outermost function in the caller's
    # file; its name and file name become part of the temp file name.
    stack = inspect.stack()
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No .tests directory beside the caller: fall back to the
        # current working directory.
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # The random component should make this impossible; warn and try
        # to remove it. stacklevel=3 attributes the warning to the
        # caller's "with" statement.
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless of the expectation above.
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # stacklevel=3 attributes the warning to the end of the
            # caller's "with" block.
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
410 """Subclass of unittest.TestCase that adds some custom assertions for
416 """A decorator to add a free function to our custom TestCase class, while also
417 making it available as a free function.
419 setattr(TestCase, func.__name__, func)
424 """Decorator to enter the debugger when there's an uncaught exception
426 To use, just slap a ``@debugger()`` on your function.
428 You may provide specific exception classes to catch as arguments to
429 the decorator function, e.g.,
430 ``@debugger(RuntimeError, NotImplementedError)``.
431 This defaults to just `AssertionError`, for use on `unittest.TestCase`
434 Code provided by "Rosh Oxymoron" on StackOverflow:
435 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails
439 Consider using ``pytest --pdb`` instead of this decorator.
442 exceptions = (Exception, )
446 def wrapper(*args, **kwargs):
448 return f(*args, **kwargs)
452 pdb.post_mortem(sys.exc_info()[2])
458 """Plot the comparison of two 2-d NumPy arrays.
462 lhs : `numpy.ndarray`
463 LHS values to compare; a 2-d NumPy array
464 rhs : `numpy.ndarray`
465 RHS values to compare; a 2-d NumPy array
466 bad : `numpy.ndarray`
467 A 2-d boolean NumPy array of values to emphasize in the plots
468 diff : `numpy.ndarray`
469 difference array; a 2-d NumPy array, or None to show lhs-rhs
471 Filename to save the plot to. If None, the plot will be displayed in
476 This method uses `matplotlib` and imports it internally; it should be
477 wrapped in a try/except block within packages that do not depend on
478 `matplotlib` (including `~lsst.utils`).
480 from matplotlib
import pyplot
486 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
487 badImage[:, :, 0] = 255
488 badImage[:, :, 1] = 0
489 badImage[:, :, 2] = 0
490 badImage[:, :, 3] = 255*bad
491 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
492 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
493 vmin2 = numpy.min(diff)
494 vmax2 = numpy.max(diff)
495 for n, (image, title)
in enumerate([(lhs,
"lhs"), (rhs,
"rhs"), (diff,
"diff")]):
496 pyplot.subplot(2, 3, n + 1)
497 im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
498 vmin=vmin1, vmax=vmax1)
500 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
503 pyplot.subplot(2, 3, n + 4)
504 im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
505 vmin=vmin2, vmax=vmax2)
507 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
510 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
511 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
512 pyplot.colorbar(im1, cax=cax1)
513 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
514 pyplot.colorbar(im2, cax=cax2)
516 pyplot.savefig(plotFileName)
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # Both tolerances given: only elements outside BOTH are bad.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        # Inverted comparison: fail when any elements ARE equal.
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "are different"

    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # If any operand is an array, broadcast the scalars up to
                # arrays of the same shape so they can all be indexed by
                # ``bad`` uniformly.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
653 """Fail a test if the given floating point values are equal to within the
656 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
657 ``rtol=atol=0``) for more information.
661 testCase : `unittest.TestCase`
662 Instance the test is part of.
663 lhs : scalar or array-like
664 LHS value(s) to compare; may be a scalar or array-like of any
666 rhs : scalar or array-like
667 RHS value(s) to compare; may be a scalar or array-like of any
673 The values are almost equal.
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """
    Assert that lhs == rhs (both numeric types, whether scalar or array).

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``rtol=atol=0``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    # Zero tolerances turn the "almost equal" check into exact equality.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)
705 def _settingsIterator(settings):
706 """Return an iterator for the provided test settings
710 settings : `dict` (`str`: iterable)
711 Lists of test parameters. Each should be an iterable of the same length.
712 If a string is provided as an iterable, it will be converted to a list
718 If the ``settings`` are not of the same length.
722 parameters : `dict` (`str`: anything)
725 for name, values
in settings.items():
726 if isinstance(values, str):
728 settings[name] = [values]
729 num = len(
next(
iter(settings.values())))
730 for name, values
in settings.items():
731 assert len(values) == num, f
"Length mismatch for setting {name}: {len(values)} vs {num}"
732 for ii
in range(num):
733 values = [settings[kk][ii]
for kk
in settings]
734 yield dict(zip(settings.keys(), values))
738 """Class decorator for generating unit tests
740 This decorator generates classes with class variables according to the
741 supplied ``settings``.
745 **settings : `dict` (`str`: iterable)
746 The lists of test parameters to set as class variables in turn. Each
747 should be an iterable of the same length.
753 @classParameters(foo=[1, 2], bar=[3, 4])
754 class MyTestCase(unittest.TestCase):
757 will generate two classes, as if you wrote::
759 class MyTestCase_1_3(unittest.TestCase):
764 class MyTestCase_2_4(unittest.TestCase):
769 Note that the values are embedded in the class name.
772 module = sys.modules[cls.__module__].__dict__
773 for params
in _settingsIterator(settings):
774 name = f
"{cls.__name__}_{'_'.join(str(vv) for vv in params.values())}"
775 bindings = dict(cls.__dict__)
776 bindings.update(params)
777 module[name] =
type(name, (cls,), bindings)
782 """Method decorator for unit tests
784 This decorator iterates over the supplied settings, using
785 ``TestCase.subTest`` to communicate the values in the event of a failure.
789 **settings : `dict` (`str`: iterable)
790 The lists of test parameters. Each should be an iterable of the same
797 @methodParameters(foo=[1, 2], bar=[3, 4])
798 def testSomething(self, foo, bar):
803 testSomething(foo=1, bar=3)
804 testSomething(foo=2, bar=4)
807 @functools.wraps(func)
808 def wrapper(self, *args, **kwargs):
809 for params
in _settingsIterator(settings):
810 kwargs.update(params)
811 with self.subTest(**params):
812 func(self, *args, **kwargs)
@contextlib.contextmanager
def temporaryDirectory():
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.

    Yields
    ------
    `str`
        Path to the newly-created temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Always attempt removal, even when the with-body raised; without
        # the try/finally an exception in the body would leak the
        # directory. Deletion errors are deliberately ignored because some
        # filesystems can fail the delete.
        shutil.rmtree(tmpdir, ignore_errors=True)