Thanks to visit codestin.com
Credit goes to github.com

Skip to content

TST: increase coverage on tk tests #22498

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Mar 23, 2022
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 29 additions & 29 deletions lib/matplotlib/tests/test_backend_tk.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
import functools
import inspect
import importlib
import os
import platform
import re
import subprocess
import sys

import pytest

from matplotlib.testing import subprocess_run_helper
from matplotlib import _c_internal_utils

_test_timeout = 60 # A reasonably safe value for slower architectures.


Expand All @@ -18,30 +20,33 @@ def _isolated_tk_test(success_count, func=None):

TkAgg tests seem to have interactions between tests, so isolate each test
in a subprocess. See GH#18261

The decorated function must be fully self-contained, and thus perform
all the imports it needs. Because its source is extracted and run by
itself, coverage will consider it as not being run, so it should be marked
with ``# pragma: no cover``
"""

if func is None:
return functools.partial(_isolated_tk_test, success_count)

# Remove decorators.
source = re.search(r"(?ms)^def .*", inspect.getsource(func)).group(0)

if "MPL_TEST_ESCAPE_HATCH" in os.environ:
# set in subprocess_run_helper() below
return func
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, we are checking coverage of tests too? I guess I don't fully follow what coverage is reporting here...

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, we look at the coverage on the tests as well (@dopplershift has argued that tests are the only place where insisting on 100% coverage actually makes sense — if some of the code in your test suite is not running, what are you even doing?!).

The change here is that instead of extracting the source and shoving it through python -c, we are importing the test module and then running the actual test function in a subprocess. This bit of logic exists so that when pytest discovers the tests it will go through the logic to spawn a subprocess to import and run the function, but when we import the module inside that subprocess we need to actually run the function rather than spawn yet another subprocess. Setting an environment variable in the subprocess call was the simplest way I found to do this.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And I now see the warning that this is not actually covered, and I am confused....

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(@dopplershift has argued that tests are the only place where insisting on 100% coverage actually makes sense — if some of the code in your test suite is not running, what are you even doing?!)

There can be many reasons you don't run all of your test suite at a given time. Some optional dependencies may not be available, you could have very costly tests that you run only at larger intervals, ...

Out of curiosity, does coverage stretch across all the running CI jobs or where does it get its data from?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah, at the end of all of our CI jobs we have something like

      - name: Upload code coverage
        uses: codecov/codecov-action@v2

that pushes coverage information to codecov and then they merge all of them together.

If you click through the coverage UI enough you can get to https://codecov.io/gh/matplotlib/matplotlib/commit/153bfa18377c1a15a76906cfb77973971e0cee95/build which will show you which CI runs contributed to a given report.


@pytest.mark.skipif(
not importlib.util.find_spec('tkinter'),
reason="missing tkinter"
)
@pytest.mark.skipif(
sys.platform == "linux" and not _c_internal_utils.display_is_valid(),
reason="$DISPLAY and $WAYLAND_DISPLAY are unset"
)
@functools.wraps(func)
def test_func():
# even if the package exists, may not actually be importable this can
# be the case on some CI systems.
pytest.importorskip('tkinter')
try:
proc = subprocess.run(
[sys.executable, "-c", f"{source}\n{func.__name__}()"],
env={**os.environ, "MPLBACKEND": "TkAgg"},
timeout=_test_timeout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
proc = subprocess_run_helper(
func, timeout=_test_timeout,
MPLBACKEND="TkAgg",
MPL_TEST_ESCAPE_HATCH="1"
)
except subprocess.TimeoutExpired:
pytest.fail("Subprocess timed out")
Expand All @@ -59,9 +64,8 @@ def test_func():
return test_func


@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@_isolated_tk_test(success_count=6) # len(bad_boxes)
def test_blit(): # pragma: no cover
def test_blit():
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.backends.backend_tkagg # noqa
Expand All @@ -88,9 +92,8 @@ def test_blit(): # pragma: no cover
print("success")


@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@_isolated_tk_test(success_count=1)
def test_figuremanager_preserves_host_mainloop(): # pragma: no cover
def test_figuremanager_preserves_host_mainloop():
import tkinter
import matplotlib.pyplot as plt
success = []
Expand All @@ -116,10 +119,9 @@ def legitimate_quit():
@pytest.mark.skipif(platform.python_implementation() != 'CPython',
reason='PyPy does not support Tkinter threading: '
'https://foss.heptapod.net/pypy/pypy/-/issues/1929')
@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@pytest.mark.flaky(reruns=3)
@_isolated_tk_test(success_count=1)
def test_figuremanager_cleans_own_mainloop(): # pragma: no cover
def test_figuremanager_cleans_own_mainloop():
import tkinter
import time
import matplotlib.pyplot as plt
Expand All @@ -144,10 +146,9 @@ def target():
thread.join()


@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@pytest.mark.flaky(reruns=3)
@_isolated_tk_test(success_count=0)
def test_never_update(): # pragma: no cover
def test_never_update():
import tkinter
del tkinter.Misc.update
del tkinter.Misc.update_idletasks
Expand All @@ -171,9 +172,8 @@ def test_never_update(): # pragma: no cover
# checks them.


@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@_isolated_tk_test(success_count=2)
def test_missing_back_button(): # pragma: no cover
def test_missing_back_button():
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk

Expand All @@ -190,7 +190,7 @@ class Toolbar(NavigationToolbar2Tk):

@pytest.mark.backend('TkAgg', skip_on_importerror=True)
@_isolated_tk_test(success_count=1)
def test_canvas_focus(): # pragma: no cover
def test_canvas_focus():
import tkinter as tk
import matplotlib.pyplot as plt
success = []
Expand Down