From 5869fbf9ed081b7ef87e1225a1131ce5fcd4b030 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 21 Sep 2017 11:15:05 +0200
Subject: [PATCH 1/2] [doc build] Remove nose from CIs and documentation

---
 .travis.yml                                  | 13 ++-----
 CONTRIBUTING.md                              |  4 +-
 Makefile                                     | 17 +++------
 README.rst                                   | 10 +----
 appveyor.yml                                 |  9 ++---
 build_tools/appveyor/requirements.txt        |  3 +-
 build_tools/circle/build_doc.sh              |  2 +-
 build_tools/travis/after_success.sh          |  2 +-
 build_tools/travis/install.sh                | 28 +++-----------
 build_tools/travis/test_script.sh            | 40 +++++++++++---------
 doc/developers/advanced_installation.rst     | 19 +++-------
 doc/developers/contributing.rst              | 10 ++---
 setup.cfg                                    | 27 ++-----------
 setup32.cfg                                  | 22 -----------
 sklearn/cross_validation.py                  |  6 ---
 sklearn/externals/copy_joblib.sh             |  4 --
 sklearn/externals/test_externals_setup.py    | 10 -----
 sklearn/model_selection/_split.py            |  3 --
 sklearn/model_selection/_validation.py       |  3 --
 sklearn/tree/tests/test_tree.py              |  1 -
 sklearn/utils/tests/test_estimator_checks.py | 20 ++++++++--
 21 files changed, 80 insertions(+), 173 deletions(-)
 delete mode 100644 setup32.cfg
 delete mode 100644 sklearn/externals/test_externals_setup.py

diff --git a/.travis.yml b/.travis.yml
index f7d01c7cbd4ac..4737365c7bd07 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -42,24 +42,19 @@ matrix:
     - env: DISTRIB="conda" PYTHON_VERSION="3.6.2" INSTALL_MKL="true"
            NUMPY_VERSION="1.13.1" SCIPY_VERSION="0.19.1" PANDAS_VERSION="0.20.3"
            CYTHON_VERSION="0.26.1" COVERAGE=true
-      if: type != cron
-    # This environment use pytest to run the tests. It uses the newest
-    # supported Anaconda release (5.0.0). It also runs tests requiring Pandas.
-    - env: USE_PYTEST="true" DISTRIB="conda" PYTHON_VERSION="3.6.2"
-           INSTALL_MKL="true" NUMPY_VERSION="1.13.1" SCIPY_VERSION="0.19.1"
-           PANDAS_VERSION="0.20.3" CYTHON_VERSION="0.26.1"
-           TEST_DOCSTRINGS="true"
+           CHECK_PYTEST_SOFT_DEPENDENCY="true"
       if: type != cron
     # flake8 linting on diff wrt common ancestor with upstream/master
     - env: RUN_FLAKE8="true" SKIP_TESTS="true"
            DISTRIB="conda" PYTHON_VERSION="3.5" INSTALL_MKL="true"
-           NUMPY_VERSION="1.13.1" SCIPY_VERSION="0.19.1" CYTHON_VERSION="0.26.1"
+           NUMPY_VERSION="1.13.1" SCIPY_VERSION="0.19.1"
+           CYTHON_VERSION="0.26.1"
       if: type != cron
     # This environment tests scikit-learn against numpy and scipy master
     # installed from their CI wheels in a virtualenv with the Python
    # interpreter provided by travis.
     - python: 3.6
-      env: USE_PYTEST="true" DISTRIB="scipy-dev-wheels"
+      env: DISTRIB="scipy-dev-wheels"
       if: type = cron
 
 install: source build_tools/travis/install.sh
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6f643fc46c4e5..c217e73354f81 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -139,8 +139,8 @@ tools:
   - Code with good unittest **coverage** (at least 80%), check with:
 
     ```bash
-    $ pip install nose coverage
-    $ nosetests --with-coverage path/to/tests_for_package
+    $ pip install pytest pytest-cov
+    $ pytest --cov sklearn path/to/tests_for_package
    ```
 
   - No pyflakes warnings, check with:
diff --git a/Makefile b/Makefile
index aa6203f3cdbe7..6725a7441f75a 100644
--- a/Makefile
+++ b/Makefile
@@ -4,17 +4,12 @@
 PYTHON ?= python
 CYTHON ?= cython
-NOSETESTS ?= nosetests
+PYTEST ?= pytest
 CTAGS ?= ctags
 
 # skip doctests on 32bit python
 BITS := $(shell python -c 'import struct; print(8 * struct.calcsize("P"))')
 
-ifeq ($(BITS),32)
-    NOSETESTS:=$(NOSETESTS) -c setup32.cfg
-endif
-
-
 all: clean inplace test
 
 clean-ctags:
@@ -29,19 +24,17 @@ inplace:
 	$(PYTHON) setup.py build_ext -i
 
 test-code: in
-	$(NOSETESTS) -s -v sklearn
+	$(PYTEST) --showlocals -v sklearn
 
 test-sphinxext:
-	$(NOSETESTS) -s -v doc/sphinxext/
+	$(PYTEST) --showlocals -v doc/sphinxext/
 
 test-doc:
 ifeq ($(BITS),64)
-	$(NOSETESTS) -s -v doc/*.rst doc/modules/ doc/datasets/ \
-	doc/developers doc/tutorial/basic doc/tutorial/statistical_inference \
-	doc/tutorial/text_analytics
+	$(PYTEST) $(shell find doc -name '*.rst' | sort)
 endif
 
 test-coverage:
 	rm -rf coverage .coverage
-	$(NOSETESTS) -s -v --with-coverage sklearn
+	$(PYTEST) sklearn --showlocals -v --cov sklearn
 
 test: test-code test-sphinxext test-doc
diff --git a/README.rst b/README.rst
index 64c8875be6c07..a1ec4c06f5358 100644
--- a/README.rst
+++ b/README.rst
@@ -110,15 +110,9 @@ Testing
 ~~~~~~~
 
 After installation, you can launch the test suite from outside the
-source directory (you will need to have the ``nose`` package installed)::
+source directory (you will need to have the ``pytest`` package installed)::
 
-    nosetests -v sklearn
-
-Under Windows, it is recommended to use the following command (adjust the path
-to the ``python.exe`` program) as using the ``nosetests.exe`` program can badly
-interact with tests that use ``multiprocessing``::
-
-    C:\Python34\python.exe -c "import nose; nose.main()" -v sklearn
+    pytest sklearn
 
 See the web page http://scikit-learn.org/stable/developers/advanced_installation.html#testing
 for more information.
diff --git a/appveyor.yml b/appveyor.yml
index 768089e880e25..dac1417cafd4c 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -78,12 +78,11 @@ build: false
 test_script:
   # Change to a non-source folder to make sure we run the tests on the
   # installed library.
-  - "mkdir empty_folder"
-  - "cd empty_folder"
-  - "python -c \"import nose; nose.main()\" --with-timer --timer-top-n 20 -s sklearn"
-
+  - mkdir "../empty_folder"
+  - cd "../empty_folder"
+  - pytest --showlocals --durations=20 --pyargs sklearn
   # Move back to the project folder
-  - "cd .."
+  - cd "../scikit-learn"
 
 artifacts:
  # Archive the generated wheel package in the ci.appveyor.com build report.
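A note on the pytest invocation that replaces the quoted nose command above: `--pyargs sklearn`
makes pytest import `sklearn` as an installed package and collect its tests, instead of collecting
from the source checkout; `--durations=20` reports the 20 slowest tests (the role nose-timer used
to play), and `--showlocals` prints local variables in failing tracebacks. A minimal sketch of the
intended workflow (the folder name is illustrative):

    mkdir ../empty_folder && cd ../empty_folder   # run from outside the source tree
    pytest --showlocals --durations=20 --pyargs sklearn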
diff --git a/build_tools/appveyor/requirements.txt b/build_tools/appveyor/requirements.txt
index 0b9c63f72789f..26fa790392b31 100644
--- a/build_tools/appveyor/requirements.txt
+++ b/build_tools/appveyor/requirements.txt
@@ -10,7 +10,6 @@ numpy==1.9.3
 scipy==0.16.0
 cython
-nose
-nose-timer
+pytest
 wheel
 wheelhouse_uploader
diff --git a/build_tools/circle/build_doc.sh b/build_tools/circle/build_doc.sh
index b3f785254c2ae..8edcd4e1b5a79 100755
--- a/build_tools/circle/build_doc.sh
+++ b/build_tools/circle/build_doc.sh
@@ -107,7 +107,7 @@ conda update --yes --quiet conda
 # Configure the conda environment and put it in the path using the
 # provided versions
 conda create -n $CONDA_ENV_NAME --yes --quiet python numpy scipy \
-  cython nose coverage matplotlib sphinx=1.6.2 pillow
+  cython pytest coverage matplotlib sphinx=1.6.2 pillow
 
 source activate testenv
 pip install sphinx-gallery numpydoc
diff --git a/build_tools/travis/after_success.sh b/build_tools/travis/after_success.sh
index aead28cb2865d..f15aaabd07097 100755
--- a/build_tools/travis/after_success.sh
+++ b/build_tools/travis/after_success.sh
@@ -8,7 +8,7 @@ set -e
 
 if [[ "$COVERAGE" == "true" ]]; then
     # Need to run codecov from a git checkout, so we copy .coverage
-    # from TEST_DIR where nosetests has been run
+    # from TEST_DIR where pytest has been run
     cp $TEST_DIR/.coverage $TRAVIS_BUILD_DIR
     cd $TRAVIS_BUILD_DIR
     # Ignore codecov failures as the codecov server is not
diff --git a/build_tools/travis/install.sh b/build_tools/travis/install.sh
index ad402bb35ae02..f1f574ac822af 100755
--- a/build_tools/travis/install.sh
+++ b/build_tools/travis/install.sh
@@ -39,31 +39,20 @@ if [[ "$DISTRIB" == "conda" ]]; then
 
     # Configure the conda environment and put it in the path using the
     # provided versions
-    if [[ "$USE_PYTEST" == "true" ]]; then
-        TEST_RUNNER_PACKAGE=pytest
-    else
-        TEST_RUNNER_PACKAGE=nose
-    fi
-
     if [[ "$INSTALL_MKL" == "true" ]]; then
         conda create -n testenv --yes python=$PYTHON_VERSION pip \
-            $TEST_RUNNER_PACKAGE numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION \
+            pytest pytest-cov numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION \
             mkl cython=$CYTHON_VERSION \
             ${PANDAS_VERSION+pandas=$PANDAS_VERSION}
     else
         conda create -n testenv --yes python=$PYTHON_VERSION pip \
-            $TEST_RUNNER_PACKAGE numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION \
+            pytest pytest-cov numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION \
             nomkl cython=$CYTHON_VERSION \
             ${PANDAS_VERSION+pandas=$PANDAS_VERSION}
     fi
     source activate testenv
 
-    if [[ $USE_PYTEST != "true" ]]; then
-        # Install nose-timer via pip
-        pip install nose-timer
-    fi
-
 elif [[ "$DISTRIB" == "ubuntu" ]]; then
     # At the time of writing numpy 1.9.1 is included in the travis
     # virtualenv but we want to use the numpy installed through apt-get
@@ -73,7 +62,7 @@ elif [[ "$DISTRIB" == "ubuntu" ]]; then
     # and scipy
     virtualenv --system-site-packages testvenv
     source testvenv/bin/activate
-    pip install nose nose-timer cython==$CYTHON_VERSION
+    pip install pytest pytest-cov cython==$CYTHON_VERSION
 
 elif [[ "$DISTRIB" == "scipy-dev-wheels" ]]; then
    # Set up our own virtualenv environment to avoid travis' numpy.
@@ -86,12 +75,7 @@ elif [[ "$DISTRIB" == "scipy-dev-wheels" ]]; then
     echo "Installing numpy and scipy master wheels"
     dev_url=https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com
     pip install --pre --upgrade --timeout=60 -f $dev_url numpy scipy pandas cython
-    if [[ $USE_PYTEST == "true" ]]; then
-        pip install pytest
-    else
-        # Install nose-timer via pip
-        pip install nose nose-timer
-    fi
+    pip install pytest pytest-cov
 fi
 
 if [[ "$COVERAGE" == "true" ]]; then
@@ -102,8 +86,8 @@ if [[ "$TEST_DOCSTRINGS" == "true" ]]; then
     pip install sphinx numpydoc  # numpydoc requires sphinx
 fi
 
-if [[ "$SKIP_TESTS" == "true" ]]; then
-    echo "No need to build scikit-learn when not running the tests"
+if [[ "$SKIP_TESTS" == "true" && "$CHECK_PYTEST_SOFT_DEPENDENCY" != "true" ]]; then
+    echo "No need to build scikit-learn"
 else
     # Build scikit-learn in the install.sh script to collapse the verbose
     # build output in the travis output when it succeeds.
diff --git a/build_tools/travis/test_script.sh b/build_tools/travis/test_script.sh
index 0ed6f5e3b87a0..ef6f15d77286e 100755
--- a/build_tools/travis/test_script.sh
+++ b/build_tools/travis/test_script.sh
@@ -21,15 +21,12 @@ except ImportError:
 python -c "import multiprocessing as mp; print('%d CPUs' % mp.cpu_count())"
 
 run_tests() {
-    if [[ "$USE_PYTEST" == "true" ]]; then
-        TEST_CMD="pytest --showlocals --durations=20 --pyargs"
-    else
-        TEST_CMD="nosetests --with-timer --timer-top-n 20"
-    fi
+    TEST_CMD="pytest --showlocals --durations=20 --pyargs"
+
     # Get into a temp directory to run test from the installed scikit-learn and
     # check if we do not leave artifacts
     mkdir -p $TEST_DIR
-    # We need the setup.cfg for the nose settings
+    # We need the setup.cfg for the pytest settings
     cp setup.cfg $TEST_DIR
     cd $TEST_DIR
 
@@ -39,23 +36,18 @@ run_tests() {
     export SKLEARN_SKIP_NETWORK_TESTS=1
 
     if [[ "$COVERAGE" == "true" ]]; then
-        TEST_CMD="$TEST_CMD --with-coverage"
+        TEST_CMD="$TEST_CMD --cov sklearn"
     fi
     $TEST_CMD sklearn
 
     # Going back to git checkout folder needed to test documentation
     cd $OLDPWD
 
-    if [[ "$USE_PYTEST" == "true" ]]; then
-        # Do not run doctests in scipy-dev-wheels build for now
-        # (broken by numpy 1.14.dev array repr/str formatting
-        # change even with np.set_printoptions(sign='legacy')).
-        # See https://github.com/numpy/numpy/issues/9804 for more details
-        if [[ "$DISTRIB" != "scipy-dev-wheels" ]]; then
-            pytest $(find doc -name '*.rst' | sort)
-        fi
-    else
-        # Makefile is using nose
+    # Do not run doctests in scipy-dev-wheels build for now
+    # (broken by numpy 1.14.dev array repr/str formatting
+    # change even with np.set_printoptions(sign='legacy')).
+    # See https://github.com/numpy/numpy/issues/9804 for more details
+    if [[ "$DISTRIB" != "scipy-dev-wheels" ]]; then
         make test-doc
     fi
 }
@@ -67,3 +59,17 @@ fi
 if [[ "$SKIP_TESTS" != "true" ]]; then
     run_tests
 fi
+
+if [[ "$CHECK_PYTEST_SOFT_DEPENDENCY" == "true" ]]; then
+    conda remove -y py pytest || echo "pytest not found by conda"
+    pip uninstall -y py pytest || echo "pytest not found by pip"
+    if [[ "$COVERAGE" == "true" ]]; then
+        CMD="coverage run -a"
+    else
+        CMD="python"
+    fi
+    # .coverage from running the tests is in TEST_DIR
+    cd $TEST_DIR
+    $CMD -m sklearn.utils.tests.test_estimator_checks
+    cd $OLDPWD
+fi
diff --git a/doc/developers/advanced_installation.rst b/doc/developers/advanced_installation.rst
index 4a9e82ff17f5d..1566fa5302ba7 100644
--- a/doc/developers/advanced_installation.rst
+++ b/doc/developers/advanced_installation.rst
@@ -376,24 +376,17 @@ Testing
 Testing scikit-learn once installed
 -----------------------------------
 
-Testing requires having the `nose
-`_ library. After
+Testing requires having the `pytest
+`_ library. After
 installation, the package can be tested by executing *from outside* the
 source directory::
 
-    $ nosetests -v sklearn
-
-Under Windows, it is recommended to use the following command (adjust the path
-to the ``python.exe`` program) as using the ``nosetests.exe`` program can badly
-interact with tests that use ``multiprocessing``::
-
-    C:\Python34\python.exe -c "import nose; nose.main()" -v sklearn
+    $ pytest sklearn
 
 This should give you a lot of output (and some warnings) but
 eventually should finish with a message similar to::
 
-    Ran 3246 tests in 260.618s
-    OK (SKIP=20)
+    =========== 8304 passed, 26 skipped, 4659 warnings in 557.76 seconds ===========
 
 Otherwise, please consider posting an issue into the `bug tracker
 `_ or to the
@@ -411,9 +404,9 @@ source directory::
 
     python setup.py build_ext --inplace
 
-Test can now be run using nosetests::
+Tests can now be run using pytest::
 
-    nosetests -v sklearn/
+    pytest sklearn
 
 This is automated by the commands::
 
diff --git a/doc/developers/contributing.rst b/doc/developers/contributing.rst
index 72e68bc458750..292453d7a8f4c 100644
--- a/doc/developers/contributing.rst
+++ b/doc/developers/contributing.rst
@@ -280,8 +280,8 @@ You can also check for common programming errors with the following tools:
 
   * Code with a good unittest coverage (at least 90%, better 100%), check
     with::
 
-      $ pip install nose coverage
-      $ nosetests --with-coverage path/to/tests_for_package
+      $ pip install pytest pytest-cov
+      $ pytest --cov sklearn path/to/tests_for_package
 
     see also :ref:`testing_coverage`
@@ -519,13 +519,13 @@ Testing and improving test coverage
 
 High-quality `unit testing `_
 is a corner-stone of the scikit-learn development process. For this
-purpose, we use the `nose `_
+purpose, we use the `pytest `_
 package. The tests are functions appropriately named, located in `tests`
 subdirectories, that check the validity of the algorithms and the
 different options of the code.
 
 The full scikit-learn tests can be run using 'make' in the root folder.
-Alternatively, running 'nosetests' in a folder will run all the tests of
+Alternatively, running 'pytest' in a folder will run all the tests of
 the corresponding subpackages.
 
 We expect code coverage of new features to be at least around 90%.
@@ -533,7 +533,7 @@ We expect code coverage of new features to be at least around 90%.
 .. note:: **Workflow to improve test coverage**
 
    To test code coverage, you need to install the `coverage
-   `_ package in addition to nose.
+   `_ package in addition to pytest.
 
    1. Run 'make test-coverage'. The output lists for each file the line
      numbers that are not tested.
diff --git a/setup.cfg b/setup.cfg
index 378905311e17e..f204ab560d2e8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,30 +1,9 @@
 [aliases]
-# python2.7 has upgraded unittest and it is no longer compatible with some
-# of our tests, so we run all through nose
-test = nosetests
-
-[nosetests]
-# nosetests skips test files with the executable bit by default
-# which can silently hide failing tests.
-# There are no executable scripts within the scikit-learn project
-# so let's turn the --exe flag on to avoid skipping tests by
-# mistake.
-exe = 1
-cover-html = 1
-cover-html-dir = coverage
-cover-package = sklearn
-
-detailed-errors = 1
-with-doctest = 1
-doctest-tests = 1
-doctest-extension = rst
-doctest-fixtures = _fixture
-ignore-files=^setup\.py$
-#doctest-options = +ELLIPSIS,+NORMALIZE_WHITESPACE
+test = pytest
 
 [tool:pytest]
-# disable-pytest-warnings should be removed once we drop nose and we
-# rewrite tests using yield with parametrize
+# disable-pytest-warnings should be removed once we rewrite tests
+# using yield with parametrize
 addopts =
     --doctest-modules
     --disable-pytest-warnings
diff --git a/setup32.cfg b/setup32.cfg
deleted file mode 100644
index 7294cb813fc8d..0000000000000
--- a/setup32.cfg
+++ /dev/null
@@ -1,22 +0,0 @@
-# This config file is here to skip doctests on 32bit linux when running make or make test
-# For newer versions of nose, we can simply use "NOSE_IGNORE_CONFIG_FILES", which
-# we should do in the future.
-
-[aliases]
-# python2.7 has upgraded unittest and it is no longer compatible with some
-# of our tests, so we run all through nose
-test = nosetests
-
-[nosetests]
-# nosetests skips test files with the executable bit by default
-# which can silently hide failing tests.
-# There are no executable scripts within the scikit-learn project
-# so let's turn the --exe flag on to avoid skipping tests by
-# mistake.
-exe = 1
-cover-html = 1
-cover-html-dir = coverage
-cover-package = sklearn
-
-detailed-errors = 1
-with-doctest = 0
diff --git a/sklearn/cross_validation.py b/sklearn/cross_validation.py
index 7646459da3936..3198c39139210 100644
--- a/sklearn/cross_validation.py
+++ b/sklearn/cross_validation.py
@@ -1953,9 +1953,6 @@ def permutation_test_score(estimator, X, y, cv=None,
     return score, permutation_scores, pvalue
 
 
-permutation_test_score.__test__ = False  # to avoid a pb with nosetests
-
-
 def train_test_split(*arrays, **options):
     """Split arrays or matrices into random train and test subsets
 
@@ -2070,6 +2067,3 @@ def train_test_split(*arrays, **options):
     train, test = next(iter(cv))
     return list(chain.from_iterable((safe_indexing(a, train),
                                      safe_indexing(a, test)) for a in arrays))
-
-
-train_test_split.__test__ = False  # to avoid a pb with nosetests
diff --git a/sklearn/externals/copy_joblib.sh b/sklearn/externals/copy_joblib.sh
index f6db76c9df5b3..398b567b81743 100755
--- a/sklearn/externals/copy_joblib.sh
+++ b/sklearn/externals/copy_joblib.sh
@@ -24,7 +24,3 @@ find joblib -name "*.bak" | xargs rm
 # Remove the tests folders to speed-up test time for scikit-learn.
 # joblib is already tested on its own CI infrastructure upstream.
 rm -r joblib/test
-
-# Remove joblib/testing.py which is only used in tests and has a
-# pytest dependency (needed until we drop nose)
-rm joblib/testing.py
diff --git a/sklearn/externals/test_externals_setup.py b/sklearn/externals/test_externals_setup.py
deleted file mode 100644
index d3198050a33b1..0000000000000
--- a/sklearn/externals/test_externals_setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Fixtures to get the external bundled dependencies tested.
-
-This module gets loaded by test discovery scanners (such as nose) in
-their collection scan.
-"""
-
-import sys
-import os
-sys.path.append(os.path.abspath(os.path.dirname(__file__)))
diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py
index 24d9423b22278..b12f309de33cd 100644
--- a/sklearn/model_selection/_split.py
+++ b/sklearn/model_selection/_split.py
@@ -2059,9 +2059,6 @@ def train_test_split(*arrays, **options):
                                      safe_indexing(a, test)) for a in arrays))
 
 
-train_test_split.__test__ = False  # to avoid a pb with nosetests
-
-
 def _build_repr(self):
     # XXX This is copied from BaseEstimator's get_params
     cls = self.__class__
diff --git a/sklearn/model_selection/_validation.py b/sklearn/model_selection/_validation.py
index fdf6fa6912544..24362620c809f 100644
--- a/sklearn/model_selection/_validation.py
+++ b/sklearn/model_selection/_validation.py
@@ -948,9 +948,6 @@ def permutation_test_score(estimator, X, y, groups=None, cv=None,
     return score, permutation_scores, pvalue
 
 
-permutation_test_score.__test__ = False  # to avoid a pb with nosetests
-
-
 def _permutation_test_score(estimator, X, y, groups, cv, scorer):
     """Auxiliary function for permutation_test_score"""
     avg_score = []
diff --git a/sklearn/tree/tests/test_tree.py b/sklearn/tree/tests/test_tree.py
index 71ee8fa2bcb61..c209c93ae3c99 100644
--- a/sklearn/tree/tests/test_tree.py
+++ b/sklearn/tree/tests/test_tree.py
@@ -1549,7 +1549,6 @@ def test_1d_input():
 
 
 def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
-    # Private function to keep pretty printing in nose yielded tests
     est = TreeEstimator(random_state=0)
     est.fit(X, y, sample_weight=sample_weight)
     assert_equal(est.tree_.max_depth, 1)
diff --git a/sklearn/utils/tests/test_estimator_checks.py b/sklearn/utils/tests/test_estimator_checks.py
index 1b3a1ea7e597a..1c737ea74a650 100644
--- a/sklearn/utils/tests/test_estimator_checks.py
+++ b/sklearn/utils/tests/test_estimator_checks.py
@@ -1,6 +1,10 @@
-import scipy.sparse as sp
-import numpy as np
+import unittest
 import sys
+
+import numpy as np
+
+import scipy.sparse as sp
+
 from sklearn.externals.six.moves import cStringIO as StringIO
 from sklearn.externals import joblib
 
@@ -246,8 +250,18 @@ def __init__(self):
            'should not be initialized in the constructor.+'
            "Attribute 'you_should_not_set_this_' was found.+"
            'in estimator estimator_name')
-
     assert_raises_regex(AssertionError, msg,
                         check_no_fit_attributes_set_in_init,
                         'estimator_name',
                         NonConformantEstimator)
+
+
+if __name__ == '__main__':
+    main_module = sys.modules['__main__']
+    test_functions = [getattr(main_module, name) for name in dir(main_module)
+                      if name.startswith('test_')]
+    test_cases = [unittest.FunctionTestCase(fn) for fn in test_functions]
+    suite = unittest.TestSuite()
+    suite.addTests(test_cases)
+    runner = unittest.TextTestRunner()
+    runner.run(suite)

From eb024104efe00eeca5fffe856ce74359bab4eea7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Mon, 13 Nov 2017 09:59:53 +0100
Subject: [PATCH 2/2] Tackle comments + some tweaks

---
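Note on the CHECK_PYTEST_SOFT_DEPENDENCY job touched below: its goal is to guarantee that the
estimator checks still run when pytest is not installed. A minimal sketch of what the updated
script does (environment names are illustrative):

    # remove pytest (and its 'py' dependency), then run the checks with plain Python
    conda remove -y py pytest || pip uninstall -y py pytest
    python -m sklearn.utils.tests.test_estimator_checks

    # when COVERAGE=true, append to the .coverage file produced by the main test run
    coverage run --append -m sklearn.utils.tests.test_estimator_checks

coverage's --append is the long form of the -a flag used in the first patch; both append new data
to an existing .coverage file instead of overwriting it.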
 build_tools/travis/test_script.sh            |  7 ++++---
 sklearn/utils/tests/test_estimator_checks.py | 10 +++++++++-
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/build_tools/travis/test_script.sh b/build_tools/travis/test_script.sh
index ef6f15d77286e..c4309c7cb3683 100755
--- a/build_tools/travis/test_script.sh
+++ b/build_tools/travis/test_script.sh
@@ -61,10 +61,11 @@ if [[ "$SKIP_TESTS" != "true" ]]; then
 fi
 
 if [[ "$CHECK_PYTEST_SOFT_DEPENDENCY" == "true" ]]; then
-    conda remove -y py pytest || echo "pytest not found by conda"
-    pip uninstall -y py pytest || echo "pytest not found by pip"
+    conda remove -y py pytest || pip uninstall -y py pytest
     if [[ "$COVERAGE" == "true" ]]; then
-        CMD="coverage run -a"
+        # Need to append the coverage to the existing .coverage generated by
+        # running the tests
+        CMD="coverage run --append"
     else
         CMD="python"
     fi
diff --git a/sklearn/utils/tests/test_estimator_checks.py b/sklearn/utils/tests/test_estimator_checks.py
index 1c737ea74a650..ae3d6fc8f108c 100644
--- a/sklearn/utils/tests/test_estimator_checks.py
+++ b/sklearn/utils/tests/test_estimator_checks.py
@@ -256,7 +256,9 @@ def __init__(self):
                         NonConformantEstimator)
 
 
-if __name__ == '__main__':
+def run_tests_without_pytest():
+    """Runs the tests in this file without using pytest.
+    """
     main_module = sys.modules['__main__']
     test_functions = [getattr(main_module, name) for name in dir(main_module)
                       if name.startswith('test_')]
@@ -265,3 +267,9 @@ def __init__(self):
     suite.addTests(test_cases)
     runner = unittest.TextTestRunner()
     runner.run(suite)
+
+
+if __name__ == '__main__':
+    # This module is run as a script to check that we have no dependency on
+    # pytest for estimator checks.
+    run_tests_without_pytest()
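A rough command reference for the nose-to-pytest switch made by these two patches, assuming
pytest and pytest-cov are installed (old nose equivalents shown for comparison):

    pip install pytest pytest-cov        # was: pip install nose coverage
    pytest sklearn                       # was: nosetests -v sklearn
    pytest sklearn --cov sklearn         # was: nosetests -s -v --with-coverage sklearn
    pytest --pyargs sklearn              # run the tests of the *installed* package
    python -m sklearn.utils.tests.test_estimator_checks   # estimator checks without pytest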