import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment


# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5   # error in a child process

_FORMAT_TEST_RESULT = {
    PASSED: '%s passed',
    FAILED: '%s failed',
    ENV_CHANGED: '%s failed (env changed)',
    SKIPPED: '%s skipped',
    RESOURCE_DENIED: '%s skipped (resource denied)',
    INTERRUPTED: '%s interrupted',
    CHILD_ERROR: '%s crashed',
}

# Minimum duration of a test to display its duration or to mention that
# the test is running in the background
PROGRESS_MIN_TIME = 30.0   # seconds

# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support'
]

# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()


def format_test_result(test_name, result):
    fmt = _FORMAT_TEST_RESULT.get(result, "%s")
    return fmt % test_name
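
# Illustrative only: a minimal sketch of how the result constants above map
# to progress messages via format_test_result(); the test name is a
# hypothetical example, not something this module requires.
#
#     >>> format_test_result('test_os', PASSED)
#     'test_os passed'
#     >>> format_test_result('test_os', ENV_CHANGED)
#     'test_os failed (env changed)'
#     >>> format_test_result('test_os', -99)   # unknown codes fall back to the bare name
#     'test_os'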


def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    names = os.listdir(testdir)
    tests = []
    others = set(stdtests) | nottests
    for name in names:
        mod, ext = os.path.splitext(name)
        if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
            tests.append(mod)
    return stdtests + sorted(tests)
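
# Illustrative only: assuming a hypothetical test directory that contains
# test_os.py, test_grammar.py and helper.py, findtests() returns the
# STDTESTS first, followed by the remaining discovered tests in sorted order
# (helper.py is skipped because its name does not start with "test_", and
# test_grammar appears only once because it is already in STDTESTS):
#
#     >>> findtests('/path/to/Lib/test')   # hypothetical path
#     ['test_grammar', 'test_opcodes', ..., 'test_support', 'test_os']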


def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time), where result is one of the
    constants:

        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.match_tests = ns.match_tests
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same instance for all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (e.g. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
runtest.stringio = None
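
# Illustrative only: a minimal sketch of how a caller might drive runtest()
# and act on its (result, test_time) tuple; `ns` stands for the options
# namespace regrtest builds from the command line, and `bad_tests` is a
# hypothetical accumulator, not part of this module.
#
#     >>> result, test_time = runtest(ns, 'test_os')
#     >>> print(format_test_result('test_os', result))
#     test_os passed
#     >>> if result not in (PASSED, SKIPPED, RESOURCE_DENIED):
#     ...     bad_tests.append('test_os')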


def runtest_inner(ns, test, display_failure=True):
    support.unload(test)

    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        if test.startswith('test.') or ns.testdir:
            abstest = test
        else:
            # Always import it from the test package
            abstest = 'test.' + test
        clear_caches()
        with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
            start_time = time.time()
            the_module = importlib.import_module(abstest)
            # If the test has a test_main, that will run the appropriate
            # tests. If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                def test_runner():
                    loader = unittest.TestLoader()
                    tests = loader.loadTestsFromModule(the_module)
                    for error in loader.errors:
                        print(error, file=sys.stderr)
                    if loader.errors:
                        raise Exception("errors while loading tests")
                    support.run_unittest(tests)
            test_runner()
            if ns.huntrleaks:
                refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
            test_time = time.time() - start_time
    except support.ResourceDenied as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return SKIPPED, test_time
    except KeyboardInterrupt:
        raise
    except support.TestFailed as msg:
        if not ns.pgo:
            if display_failure:
                print("test", test, "failed --", msg, file=sys.stderr,
                      flush=True)
            else:
                print("test", test, "failed", file=sys.stderr, flush=True)
        return FAILED, test_time
    except:
        msg = traceback.format_exc()
        if not ns.pgo:
            print("test", test, "crashed --", msg, file=sys.stderr,
                  flush=True)
        return FAILED, test_time
    else:
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time
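
# Illustrative only: runtest_inner() reports wall-clock duration alongside
# the result code; a caller could use PROGRESS_MIN_TIME to decide whether
# the duration is worth mentioning (a sketch, not regrtest's exact logic).
#
#     >>> result, test_time = runtest_inner(ns, 'test_io')
#     >>> if test_time >= PROGRESS_MIN_TIME:
#     ...     print('%s took %.0f sec' % ('test_io', test_time))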


def cleanup_test_droppings(testname, verbose):
    import shutil
    import stat
    import gc

    # First kill any dangling references to open files etc.
    # This can also issue some ResourceWarnings which would otherwise get
    # triggered during the following test run, and possibly produce failures.
    gc.collect()

    # Try to clean up junk commonly left behind.  While tests shouldn't leave
    # any files or directories behind, a test that fails may not get the
    # chance to clean up after itself.  The consequences can be especially
    # nasty on Windows, since if a test leaves a file open, it cannot be
    # deleted by name (while there's nothing we can do about that here
    # either, we can display the name of the offending test, which is a
    # real help).
    for name in (support.TESTFN,
                 "db_home",
                ):
        if not os.path.exists(name):
            continue

        if os.path.isdir(name):
            kind, nuker = "directory", shutil.rmtree
        elif os.path.isfile(name):
            kind, nuker = "file", os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % name)

        if verbose:
            print("%r left behind %s %r" % (testname, kind, name))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if hasattr(os, 'chmod'):
                os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            nuker(name)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, name, msg)), file=sys.stderr)


def findtestdir(path=None):
    return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir