diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py
index 216026a66d797..dfbdc2bf590fb 100644
--- a/sklearn/linear_model/_glm/glm.py
+++ b/sklearn/linear_model/_glm/glm.py
@@ -13,6 +13,7 @@
 
 from ...base import BaseEstimator, RegressorMixin
 from ...utils.optimize import _check_optimize_result
+from ...utils import check_scalar
 from ...utils.validation import check_is_fitted, _check_sample_weight
 from ..._loss.glm_distribution import (
     ExponentialDispersionModel,
@@ -209,12 +210,13 @@ def fit(self, X, y, sample_weight=None):
                     "got (link={0})".format(self.link)
                 )
 
-        if not isinstance(self.alpha, numbers.Number) or self.alpha < 0:
-            raise ValueError(
-                "Penalty term must be a non-negative number; got (alpha={0})".format(
-                    self.alpha
-                )
-            )
+        check_scalar(
+            self.alpha,
+            name="alpha",
+            target_type=numbers.Real,
+            min_val=0.0,
+            include_boundaries="left",
+        )
         if not isinstance(self.fit_intercept, bool):
             raise ValueError(
                 "The argument fit_intercept must be bool; got {0}".format(
@@ -227,17 +229,25 @@ def fit(self, X, y, sample_weight=None):
                 "'lbfgs'; got {0}".format(self.solver)
             )
         solver = self.solver
-        if not isinstance(self.max_iter, numbers.Integral) or self.max_iter <= 0:
-            raise ValueError(
-                "Maximum number of iteration must be a positive "
-                "integer;"
-                " got (max_iter={0!r})".format(self.max_iter)
-            )
-        if not isinstance(self.tol, numbers.Number) or self.tol <= 0:
-            raise ValueError(
-                "Tolerance for stopping criteria must be "
-                "positive; got (tol={0!r})".format(self.tol)
-            )
+        check_scalar(
+            self.max_iter,
+            name="max_iter",
+            target_type=numbers.Integral,
+            min_val=1,
+        )
+        check_scalar(
+            self.tol,
+            name="tol",
+            target_type=numbers.Real,
+            min_val=0.0,
+            include_boundaries="neither",
+        )
+        check_scalar(
+            self.verbose,
+            name="verbose",
+            target_type=numbers.Integral,
+            min_val=0,
+        )
         if not isinstance(self.warm_start, bool):
             raise ValueError(
                 "The argument warm_start must be bool; got {0}".format(self.warm_start)
diff --git a/sklearn/linear_model/_glm/tests/test_glm.py b/sklearn/linear_model/_glm/tests/test_glm.py
index 2180f3c88f87b..b90e273cbd246 100644
--- a/sklearn/linear_model/_glm/tests/test_glm.py
+++ b/sklearn/linear_model/_glm/tests/test_glm.py
@@ -110,16 +110,6 @@ def test_glm_link_auto(family, expected_link_class):
     assert isinstance(glm._link_instance, expected_link_class)
 
 
-@pytest.mark.parametrize("alpha", ["not a number", -4.2])
-def test_glm_alpha_argument(alpha):
-    """Test GLM for invalid alpha argument."""
-    y = np.array([1, 2])
-    X = np.array([[1], [2]])
-    glm = GeneralizedLinearRegressor(family="normal", alpha=alpha)
-    with pytest.raises(ValueError, match="Penalty term must be a non-negative"):
-        glm.fit(X, y)
-
-
 @pytest.mark.parametrize("fit_intercept", ["not bool", 1, 0, [True]])
 def test_glm_fit_intercept_argument(fit_intercept):
     """Test GLM for invalid fit_intercept argument."""
@@ -140,23 +130,73 @@ def test_glm_solver_argument(solver):
         glm.fit(X, y)
 
 
-@pytest.mark.parametrize("max_iter", ["not a number", 0, -1, 5.5, [1]])
-def test_glm_max_iter_argument(max_iter):
-    """Test GLM for invalid max_iter argument."""
-    y = np.array([1, 2])
-    X = np.array([[1], [2]])
-    glm = GeneralizedLinearRegressor(max_iter=max_iter)
-    with pytest.raises(ValueError, match="must be a positive integer"):
-        glm.fit(X, y)
-
-
-@pytest.mark.parametrize("tol", ["not a number", 0, -1.0, [1e-3]])
-def test_glm_tol_argument(tol):
-    """Test GLM for invalid tol argument."""
+@pytest.mark.parametrize(
+    "Estimator",
+    [GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor, TweedieRegressor],
+)
+@pytest.mark.parametrize(
+    "params, err_type, err_msg",
+    [
+        ({"max_iter": 0}, ValueError, "max_iter == 0, must be >= 1"),
+        ({"max_iter": -1}, ValueError, "max_iter == -1, must be >= 1"),
+        (
+            {"max_iter": "not a number"},
+            TypeError,
+            "max_iter must be an instance of <class 'numbers.Integral'>, not <class"
+            " 'str'>",
+        ),
+        (
+            {"max_iter": [1]},
+            TypeError,
+            "max_iter must be an instance of <class 'numbers.Integral'>,"
+            " not <class 'list'>",
+        ),
+        (
+            {"max_iter": 5.5},
+            TypeError,
+            "max_iter must be an instance of <class 'numbers.Integral'>,"
+            " not <class 'float'>",
+        ),
+        ({"alpha": -1}, ValueError, "alpha == -1, must be >= 0.0"),
+        (
+            {"alpha": "1"},
+            TypeError,
+            "alpha must be an instance of <class 'numbers.Real'>, not <class 'str'>",
+        ),
+        ({"tol": -1.0}, ValueError, "tol == -1.0, must be > 0."),
+        ({"tol": 0.0}, ValueError, "tol == 0.0, must be > 0.0"),
+        ({"tol": 0}, ValueError, "tol == 0, must be > 0.0"),
+        (
+            {"tol": "1"},
+            TypeError,
+            "tol must be an instance of <class 'numbers.Real'>, not <class 'str'>",
+        ),
+        (
+            {"tol": [1e-3]},
+            TypeError,
+            "tol must be an instance of <class 'numbers.Real'>, not <class 'list'>",
+        ),
+        ({"verbose": -1}, ValueError, "verbose == -1, must be >= 0."),
+        (
+            {"verbose": "1"},
+            TypeError,
+            "verbose must be an instance of <class 'numbers.Integral'>, not <class"
+            " 'str'>",
+        ),
+        (
+            {"verbose": 1.0},
+            TypeError,
+            "verbose must be an instance of <class 'numbers.Integral'>, not <class"
+            " 'float'>",
+        ),
+    ],
+)
+def test_glm_scalar_argument(Estimator, params, err_type, err_msg):
+    """Test GLM for invalid parameter arguments."""
     y = np.array([1, 2])
     X = np.array([[1], [2]])
-    glm = GeneralizedLinearRegressor(tol=tol)
-    with pytest.raises(ValueError, match="stopping criteria must be positive"):
+    glm = Estimator(**params)
+    with pytest.raises(err_type, match=err_msg):
         glm.fit(X, y)
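
Note (not part of the diff): the rewritten tests match the error messages raised by sklearn.utils.check_scalar itself rather than hand-written messages, so err_type distinguishes ValueError (out-of-range value) from TypeError (wrong type). The minimal sketch below calls check_scalar directly with the same arguments that fit() now passes; it assumes a scikit-learn version in which check_scalar accepts the include_boundaries keyword (as used above), and the exact message wording may differ between releases.

import numbers

from sklearn.utils import check_scalar

# alpha: any real number >= 0.0 passes; "left" includes the lower boundary.
try:
    check_scalar(
        -1,
        name="alpha",
        target_type=numbers.Real,
        min_val=0.0,
        include_boundaries="left",
    )
except ValueError as exc:
    print(exc)  # e.g. "alpha == -1, must be >= 0.0."

# tol: must be strictly positive, so 0.0 is rejected ("neither" excludes boundaries).
try:
    check_scalar(
        0.0,
        name="tol",
        target_type=numbers.Real,
        min_val=0.0,
        include_boundaries="neither",
    )
except ValueError as exc:
    print(exc)  # e.g. "tol == 0.0, must be > 0.0."

# A value of the wrong type raises TypeError instead of ValueError.
try:
    check_scalar("1", name="verbose", target_type=numbers.Integral, min_val=0)
except TypeError as exc:
    print(exc)  # e.g. "verbose must be an instance of <class 'numbers.Integral'>, ..."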