diff --git a/sklearn/linear_model/tests/test_least_angle.py b/sklearn/linear_model/tests/test_least_angle.py
index a8b0e939c080d..4321c39b45e92 100644
--- a/sklearn/linear_model/tests/test_least_angle.py
+++ b/sklearn/linear_model/tests/test_least_angle.py
@@ -3,12 +3,10 @@
 import numpy as np
 import pytest
 from scipy import linalg
-
 from sklearn.base import clone
 from sklearn.model_selection import train_test_split
 from sklearn.utils._testing import assert_allclose
 from sklearn.utils._testing import assert_array_almost_equal
-from sklearn.utils._testing import assert_raises
 from sklearn.utils._testing import ignore_warnings
 from sklearn.utils._testing import TempMemmap
 from sklearn.utils.fixes import np_version, parse_version
@@ -96,8 +94,8 @@ def test_lars_path_gram_equivalent(method, return_path):
 def test_x_none_gram_none_raises_value_error():
     # Test that lars_path with no X and Gram raises exception
     Xy = np.dot(X.T, y)
-    assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,
-                  Xy=Xy)
+    with pytest.raises(ValueError):
+        linear_model.lars_path(None, y, Gram=None, Xy=Xy)
 
 
 def test_all_precomputed():
@@ -489,7 +487,8 @@ def test_lasso_lars_ic():
 
     # test error on unknown IC
     lars_broken = linear_model.LassoLarsIC('')
-    assert_raises(ValueError, lars_broken.fit, X, y)
+    with pytest.raises(ValueError):
+        lars_broken.fit(X, y)
 
 
 def test_lars_path_readonly_data():
diff --git a/sklearn/linear_model/tests/test_omp.py b/sklearn/linear_model/tests/test_omp.py
index 3cbda003f0148..1d2eb6a239786 100644
--- a/sklearn/linear_model/tests/test_omp.py
+++ b/sklearn/linear_model/tests/test_omp.py
@@ -4,7 +4,6 @@
 import numpy as np
 import pytest
 
-from sklearn.utils._testing import assert_raises
 from sklearn.utils._testing import assert_array_equal
 from sklearn.utils._testing import assert_array_almost_equal
 from sklearn.utils._testing import ignore_warnings
@@ -33,14 +32,14 @@ def test_correct_shapes():
     assert (orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape ==
-                (n_features,))
+            (n_features,))
     assert (orthogonal_mp(X, y, n_nonzero_coefs=5).shape ==
-                (n_features, 3))
+            (n_features, 3))
 
 
 def test_correct_shapes_gram():
     assert (orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape ==
-                (n_features,))
+            (n_features,))
     assert (orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape ==
-                (n_features, 3))
+            (n_features, 3))
 
 
 def test_n_nonzero_coefs():
@@ -88,15 +87,18 @@ def test_unreachable_accuracy():
             n_nonzero_coefs=n_features))
 
 
-def test_bad_input():
-    assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
-    assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
-    assert_raises(ValueError, orthogonal_mp, X, y,
-                  n_nonzero_coefs=n_features + 1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
-    assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
-                  n_nonzero_coefs=n_features + 1)
+# Each invalid keyword must raise for both the X-based and the Gram-based API.
+@pytest.mark.parametrize(
+    "omp_func, positional_params",
+    [(orthogonal_mp, (X, y)), (orthogonal_mp_gram, (G, Xy))]
+)
+@pytest.mark.parametrize(
+    "keyword_params",
+    [{"tol": -1}, {"n_nonzero_coefs": -1}, {"n_nonzero_coefs": n_features + 1}]
+)
+def test_bad_input(omp_func, positional_params, keyword_params):
+    with pytest.raises(ValueError):
+        omp_func(*positional_params, **keyword_params)
 
 
 def test_perfect_signal_recovery():
diff --git a/sklearn/linear_model/tests/test_theil_sen.py b/sklearn/linear_model/tests/test_theil_sen.py
index c670fc3979b80..125c89599af83 100644
--- a/sklearn/linear_model/tests/test_theil_sen.py
+++ b/sklearn/linear_model/tests/test_theil_sen.py
@@ -17,7 +17,7 @@
 from sklearn.linear_model import LinearRegression, TheilSenRegressor
 from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point
 from sklearn.linear_model._theil_sen import _modified_weiszfeld_step
-from sklearn.utils._testing import assert_almost_equal, assert_raises
+from sklearn.utils._testing import assert_almost_equal
 
 
 @contextmanager
@@ -209,19 +209,22 @@ def test_calc_breakdown_point():
 def test_checksubparams_negative_subpopulation():
     X, y, w, c = gen_toy_problem_1d()
     theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)
 
 
 def test_checksubparams_too_few_subsamples():
     X, y, w, c = gen_toy_problem_1d()
     theil_sen = TheilSenRegressor(n_subsamples=1, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)
 
 
 def test_checksubparams_too_many_subsamples():
     X, y, w, c = gen_toy_problem_1d()
     theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)
 
 
 def test_checksubparams_n_subsamples_if_less_samples_than_features():
@@ -230,7 +233,8 @@ def test_checksubparams_n_subsamples_if_less_samples_than_features():
     X = random_state.normal(size=(n_samples, n_features))
     y = random_state.normal(size=n_samples)
     theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
-    assert_raises(ValueError, theil_sen.fit, X, y)
+    with pytest.raises(ValueError):
+        theil_sen.fit(X, y)
 
 
 def test_subpopulation():