From 6bc56686ff3d45849cc73a07d0070a9d936b1093 Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Mon, 26 Jun 2017 14:18:51 +0200
Subject: [PATCH 1/3] bpo-30523: regrtest --list-cases --match (#2401)

* regrtest --list-cases now supports --match and --match-file options.
  Example: ./python -m test --list-cases -m FileTests test_os
* --list-cases now also sets support.verbose to False to prevent
  messages to stdout when loading test modules.
* Add support._match_test() private function.

(cherry picked from commit ace56d583664f855d89d1219ece7c21c2fddcf30)
---
 Lib/test/libregrtest/main.py |  6 +++++-
 Lib/test/support/__init__.py | 32 ++++++++++++++++++--------------
 Lib/test/test_regrtest.py    |  9 +++++++++
 3 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 527de177792ce5..1a776556929f28 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -256,9 +256,13 @@ def _list_cases(self, suite):
             if isinstance(test, unittest.TestSuite):
                 self._list_cases(test)
             elif isinstance(test, unittest.TestCase):
-                print(test.id())
+                if support._match_test(test):
+                    print(test.id())
 
     def list_cases(self):
+        support.verbose = False
+        support.match_tests = self.ns.match_tests
+
         for test in self.selected:
             abstest = get_abs_module(self.ns, test)
             try:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index be926ea6c71d2a..677743bb389802 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -1898,6 +1898,23 @@ def _run_suite(suite):
         raise TestFailed(err)
 
 
+def _match_test(test):
+    global match_tests
+
+    if match_tests is None:
+        return True
+    test_id = test.id()
+
+    for match_test in match_tests:
+        if fnmatch.fnmatchcase(test_id, match_test):
+            return True
+
+        for name in test_id.split("."):
+            if fnmatch.fnmatchcase(name, match_test):
+                return True
+    return False
+
+
 def run_unittest(*classes):
     """Run tests from unittest.TestCase-derived classes."""
     valid_types = (unittest.TestSuite, unittest.TestCase)
@@ -1912,20 +1929,7 @@ def run_unittest(*classes):
             suite.addTest(cls)
         else:
             suite.addTest(unittest.makeSuite(cls))
-    def case_pred(test):
-        if match_tests is None:
-            return True
-        test_id = test.id()
-
-        for match_test in match_tests:
-            if fnmatch.fnmatchcase(test_id, match_test):
-                return True
-
-            for name in test_id.split("."):
-                if fnmatch.fnmatchcase(name, match_test):
-                    return True
-        return False
-    _filter_suite(suite, case_pred)
+    _filter_suite(suite, _match_test)
     _run_suite(suite)
 
 #=======================================================================
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 5c6154a347d332..6f4fa79c5e8cd6 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -837,11 +837,20 @@ def test_method2(self):
                     pass
         """)
         testname = self.create_test(code=code)
+
+        # Test --list-cases
         all_methods = ['%s.Tests.test_method1' % testname,
                        '%s.Tests.test_method2' % testname]
         output = self.run_tests('--list-cases', testname)
         self.assertEqual(output.splitlines(), all_methods)
 
+        # Test --list-cases with --match
+        all_methods = ['%s.Tests.test_method1' % testname]
+        output = self.run_tests('--list-cases',
+                                '-m', 'test_method1',
+                                testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
     def test_crashed(self):
         # Any code which causes a crash
         code = 'import faulthandler; faulthandler._sigsegv()'
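Editor's note on PATCH 1/3: the matching rule that support._match_test() applies is that a --match pattern is compared case-sensitively (fnmatch) against the full dotted test id and against each individual component of the id. The snippet below is a minimal standalone sketch of that rule, not part of the patch; the helper name matches() and the sample test ids are invented for illustration.

import fnmatch

def matches(test_id, patterns):
    # No patterns means every test case is listed/run.
    if not patterns:
        return True
    for pattern in patterns:
        # Match against the full dotted test id...
        if fnmatch.fnmatchcase(test_id, pattern):
            return True
        # ...and against each individual component of the id.
        for name in test_id.split("."):
            if fnmatch.fnmatchcase(name, pattern):
                return True
    return False

# What a command like "./python -m test --list-cases -m FileTests test_os" relies on:
assert matches("test.test_os.FileTests.test_access", ["FileTests"])
assert matches("test.test_os.FileTests.test_access", ["test_acc*"])
assert not matches("test.test_os.FileTests.test_access", ["StatAttributeTests"])
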
From 2e92628cc84df7bb42274bc126e3dd18bd996948 Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Mon, 26 Jun 2017 18:33:19 +0200
Subject: [PATCH 2/3] bpo-30764: regrtest: add --fail-env-changed option (#2402)

* bpo-30764: regrtest: change exit code on failure

* Exit code 2 if failed tests ("bad")
* Exit code 3 if interrupted

* bpo-30764: regrtest: add --fail-env-changed option

If the option is set, mark a test as failed if it alters the
environment, for example if it creates a file without removing it.

(cherry picked from commit 63f54c68936d648c70ca411661e4208329edcf26)
---
 Lib/test/libregrtest/cmdline.py |  3 ++
 Lib/test/libregrtest/main.py    | 10 ++++++-
 Lib/test/test_regrtest.py       | 53 +++++++++++++++++++++++++--------
 3 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index bf64062ef964b8..2315cd59b4e679 100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@ -255,6 +255,9 @@ def _create_parser():
                             ' , don\'t execute them')
     group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                        help='enable Profile Guided Optimization training')
+    group.add_argument('--fail-env-changed', action='store_true',
+                       help='if a test file alters the environment, mark '
+                            'the test as failed')
 
     return parser
 
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 1a776556929f28..571eb6112520eb 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -478,6 +478,8 @@ def finalize(self):
             result = "FAILURE"
         elif self.interrupted:
             result = "INTERRUPTED"
+        elif self.environment_changed and self.ns.fail_env_changed:
+            result = "ENV CHANGED"
         else:
             result = "SUCCESS"
         print("Tests result: %s" % result)
@@ -538,7 +540,13 @@ def _main(self, tests, kwargs):
             self.rerun_failed_tests()
 
         self.finalize()
-        sys.exit(len(self.bad) > 0 or self.interrupted)
+        if self.bad:
+            sys.exit(2)
+        if self.interrupted:
+            sys.exit(130)
+        if self.ns.fail_env_changed and self.environment_changed:
+            sys.exit(3)
+        sys.exit(0)
 
 
 def removepy(names):
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 6f4fa79c5e8cd6..0e676ee33a9c0c 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -377,19 +377,19 @@ def parse_executed_tests(self, output):
         return list(match.group(1) for match in parser)
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
-                             omitted=(), randomize=False, interrupted=False):
+                             env_changed=(), omitted=(),
+                             randomize=False, interrupted=False,
+                             fail_env_changed=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
         if isinstance(failed, str):
             failed = [failed]
+        if isinstance(env_changed, str):
+            env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        ntest = len(tests)
-        nskipped = len(skipped)
-        nfailed = len(failed)
-        nomitted = len(omitted)
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -415,11 +415,17 @@ def list_regex(line_format, tests):
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
 
+        if env_changed:
+            regex = list_regex('%s test%s altered the execution environment',
+                               env_changed)
+            self.check_line(output, regex)
+
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        good = ntest - nskipped - nfailed - nomitted
+        good = (len(tests) - len(skipped) - len(failed)
+                - len(omitted) - len(env_changed))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -429,10 +435,12 @@ def list_regex(line_format, tests):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
-        if nfailed:
+        if failed:
             result = 'FAILURE'
         elif interrupted:
             result = 'INTERRUPTED'
+        elif fail_env_changed and env_changed:
+            result = 'ENV CHANGED'
         else:
             result = 'SUCCESS'
         self.check_line(output, 'Tests result: %s' % result)
@@ -604,7 +612,7 @@ def test_failing(self):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
 
-        output = self.run_tests(*tests, exitcode=1)
+        output = self.run_tests(*tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
@@ -703,7 +711,7 @@ def test_fromfile(self):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=1)
+        output = self.run_tests(test, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -732,7 +740,7 @@ def test_slow_interrupted(self):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=1)
+        output = self.run_tests(*args, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -772,7 +780,7 @@ def test_run(self):
                 builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=1)
+        output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)
 
     @unittest.skipUnless(Py_DEBUG, 'need a debug build')
@@ -804,7 +812,7 @@ def test_leak(self):
         filename = 'reflog.txt'
         self.addCleanup(support.unlink, filename)
         output = self.run_tests('--huntrleaks', '3:3:', test,
-                                exitcode=1,
+                                exitcode=2,
                                 stderr=subprocess.STDOUT)
         self.check_executed_tests(output, [test], failed=test)
 
@@ -858,7 +866,7 @@ def test_crashed(self):
         ok_test = self.create_test(name="ok")
         tests = [crash_test, ok_test]
 
-        output = self.run_tests("-j2", *tests, exitcode=1)
+        output = self.run_tests("-j2", *tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)
 
@@ -907,6 +915,25 @@ def test_method4(self):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)
 
+    def test_env_changed(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_env_changed(self):
+                    open("env_changed", "w").close()
+        """)
+        testname = self.create_test(code=code)
+
+        # don't fail by default
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname], env_changed=testname)
+
+        # fail with --fail-env-changed
+        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        self.check_executed_tests(output, [testname], env_changed=testname,
+                                  fail_env_changed=True)
+
 if __name__ == '__main__':
     unittest.main()
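Editor's note on PATCH 2/3: after this change regrtest reports its outcome through distinct exit codes: 0 for success, 2 when tests failed, 130 when the run was interrupted by SIGINT, and 3 when a test altered the execution environment and --fail-env-changed was passed. The wrapper below is an illustrative sketch of how a build script might consume those codes; it is not part of the patch, and the "./python" path and the helper name run_test_suite() are assumptions.

import subprocess
import sys

# Exit codes introduced by this patch: 0 = success, 2 = tests failed,
# 3 = environment altered (only with --fail-env-changed), 130 = SIGINT.
RESULTS = {0: "SUCCESS", 2: "FAILURE", 3: "ENV CHANGED", 130: "INTERRUPTED"}

def run_test_suite(python="./python"):
    # "./python" is an assumed path to a freshly built interpreter.
    proc = subprocess.run([python, "-m", "test", "--fail-env-changed"])
    print("Tests result:", RESULTS.get(proc.returncode,
                                       "UNKNOWN (%s)" % proc.returncode))
    return proc.returncode

if __name__ == "__main__":
    sys.exit(run_test_suite())
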
From 40590e4fbfd0583f1a61298ccb069550f3eaaa25 Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Tue, 27 Jun 2017 02:02:04 +0200
Subject: [PATCH 3/3] bpo-30776: reduce regrtest -R false positives (#2422)

* Change the regrtest --huntrleaks checker to decide if a test file
  leaks or not. Require that each run leaks at least 1 reference.
* Warmup runs are now completely ignored: ignored in the checker test
  and not used anymore to compute the sum.
* Add an unit test for a reference leak.
Example of reference differences previously considered a failure
(leak) and now considered as success (success, no leak):

[3, 0, 0]
[0, 1, 0]
[8, -8, 1]

(cherry picked from commit 48b5c422ffb03affb00c184b9a99e5537be92732)
---
 Lib/test/libregrtest/refleak.py | 25 ++++++++++++++--
 Lib/test/test_regrtest.py       | 53 ++++++++++++++++++++++-----------
 2 files changed, 57 insertions(+), 21 deletions(-)

diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
index c69fc0f147706b..8e18d75b217e6e 100644
--- a/Lib/test/libregrtest/refleak.py
+++ b/Lib/test/libregrtest/refleak.py
@@ -93,9 +93,21 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         rc_before = rc_after
         fd_before = fd_after
     print(file=sys.stderr)
+
     # These checkers return False on success, True on failure
     def check_rc_deltas(deltas):
-        return any(deltas)
+        # bpo-30776: Try to ignore false positives:
+        #
+        #   [3, 0, 0]
+        #   [0, 1, 0]
+        #   [8, -8, 1]
+        #
+        # Expected leaks:
+        #
+        #   [5, 5, 6]
+        #   [10, 1, 1]
+        return all(delta >= 1 for delta in deltas)
+
     def check_alloc_deltas(deltas):
         # At least 1/3rd of 0s
         if 3 * deltas.count(0) < len(deltas):
@@ -104,14 +116,21 @@ def check_alloc_deltas(deltas):
         if not set(deltas) <= {1,0,-1}:
             return True
         return False
+
+    def check_fd_deltas(deltas):
+        return any(deltas)
+
     failed = False
     for deltas, item_name, checker in [
         (rc_deltas, 'references', check_rc_deltas),
         (alloc_deltas, 'memory blocks', check_alloc_deltas),
-        (fd_deltas, 'file descriptors', check_rc_deltas)]:
+        (fd_deltas, 'file descriptors', check_fd_deltas)
+    ]:
+        # ignore warmup runs
+        deltas = deltas[nwarmup:]
         if checker(deltas):
             msg = '%s leaked %s %s, sum=%s' % (
-                test, deltas[nwarmup:], item_name, sum(deltas))
+                test, deltas, item_name, sum(deltas))
             print(msg, file=sys.stderr, flush=True)
             with open(fname, "a") as refrep:
                 print(msg, file=refrep)
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 0e676ee33a9c0c..a544f880d2be01 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -783,6 +783,40 @@ def test_run(self):
         output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)
 
+    def check_leak(self, code, what):
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks', '3:3:', test,
+                                exitcode=2,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+            self.assertIn(line2, reflog)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks(self):
+        # test --huntrleaks
+        code = textwrap.dedent("""
+            import unittest
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    GLOBAL_LIST.append(object())
+        """)
+        self.check_leak(code, 'references')
+
     @unittest.skipUnless(Py_DEBUG, 'need a debug build')
     def test_huntrleaks_fd_leak(self):
         # test --huntrleaks for file descriptor leak
@@ -807,24 +841,7 @@ def test_leak(self):
                 fd = os.open(__file__, os.O_RDONLY)
                 # bug: never cloes the file descriptor
         """)
-        test = self.create_test('huntrleaks', code=code)
-
-        filename = 'reflog.txt'
-        self.addCleanup(support.unlink, filename)
-        output = self.run_tests('--huntrleaks', '3:3:', test,
-                                exitcode=2,
-                                stderr=subprocess.STDOUT)
-        self.check_executed_tests(output, [test], failed=test)
-
-        line = 'beginning 6 repetitions\n123456\n......\n'
-        self.check_line(output, re.escape(line))
-
-        line2 = '%s leaked [1, 1, 1] file descriptors, sum=3\n' % test
-        self.assertIn(line2, output)
-
-        with open(filename) as fp:
-            reflog = fp.read()
-            self.assertIn(line2, reflog)
+        self.check_leak(code, 'file descriptors')
 
     def test_list_tests(self):
         # test --list-tests
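
Editor's note on PATCH 3/3: the behaviour change reduces to the new check_rc_deltas() predicate: once the warmup runs are dropped, a reference leak is reported only if every remaining run leaked at least one reference, instead of the old any(deltas) test. Below is a minimal illustrative sketch (not part of the patch) that reuses the delta lists quoted in the commit message; the helper name leaks_references() is invented for the example.

def leaks_references(deltas):
    # New rule from check_rc_deltas(): with warmup runs already dropped,
    # report a leak only if every remaining run leaked at least 1 reference.
    return all(delta >= 1 for delta in deltas)

# Previously flagged by any(deltas), now treated as noise:
for deltas in ([3, 0, 0], [0, 1, 0], [8, -8, 1]):
    assert not leaks_references(deltas)

# Genuine leaks are still reported:
for deltas in ([5, 5, 6], [10, 1, 1]):
    assert leaks_references(deltas)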