diff --git a/.gitignore b/.gitignore index 6535968a..ee5e45bf 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,6 @@ pyperformance/tests/data/cpython/ # Created by the tox program .tox/ + +# coverage +.coverage diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index 38cd40b5..72cb6730 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -6,6 +6,7 @@ async_tree async_tree_cpu_io_mixed async_tree_io async_tree_memoization +coverage generators chameleon chaos diff --git a/pyperformance/data-files/benchmarks/bm_coverage/pyproject.toml b/pyperformance/data-files/benchmarks/bm_coverage/pyproject.toml new file mode 100644 index 00000000..0dac4af7 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_coverage/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "pyperformance_bm_coverage" +requires-python = ">=3.8" +dependencies = [ + "pyperf", + "coverage", +] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "coverage" diff --git a/pyperformance/data-files/benchmarks/bm_coverage/requirements.txt b/pyperformance/data-files/benchmarks/bm_coverage/requirements.txt new file mode 100644 index 00000000..755f8b95 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_coverage/requirements.txt @@ -0,0 +1 @@ +coverage==6.4.1 diff --git a/pyperformance/data-files/benchmarks/bm_coverage/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_coverage/run_benchmark.py new file mode 100644 index 00000000..6e8c029b --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_coverage/run_benchmark.py @@ -0,0 +1,29 @@ +""" +Benchmark coverage performance with a recursive fibonacci function. 
+"""
+
+import coverage
+import pyperf
+
+
+def fibonacci(n: int) -> int:  # exponential recursion: many traced line events per call
+    if n <= 1:
+        return n
+    return fibonacci(n - 1) + fibonacci(n - 2)
+
+
+def bench_coverage(loops: int) -> float:  # returns elapsed seconds, as pyperf time funcs require
+    range_it = range(loops)
+    cov = coverage.Coverage()
+    cov.start()
+    t0 = pyperf.perf_counter()
+    for _ in range_it:
+        fibonacci(25)
+    cov.stop()  # NOTE(review): stop() is inside the timed region, so its teardown cost is measured once per call — confirm this is intended
+    return pyperf.perf_counter() - t0
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner()
+    runner.metadata['description'] = "Benchmark coverage"
+    runner.bench_time_func('coverage', bench_coverage)