diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst index 317d0b6cb5ed8..3ad182acabb36 100644 --- a/doc/whats_new/v1.1.rst +++ b/doc/whats_new/v1.1.rst @@ -520,24 +520,24 @@ Changelog `n_iter_`, the number of iterations of the libsvm optimization routine. :pr:`21408` by :user:`Juan Martín Loyola `. +- |Enhancement| :class:`svm.SVR`, :class:`svm.SVC`, :class:`svm.NuSVR`, + :class:`svm.OneClassSVM`, :class:`svm.NuSVC` now raise an error + when the dual-gap estimation produces non-finite parameter weights. + :pr:`22149` by :user:`Christian Ritter ` and + :user:`Norbert Preining `. + - |Fix| :class:`smv.NuSVC`, :class:`svm.NuSVR`, :class:`svm.SVC`, :class:`svm.SVR`, :class:`svm.OneClassSVM` now validate input parameters in `fit` instead of `__init__`. :pr:`21436` by :user:`Haidar Almubarak `. :mod:`sklearn.tree` -....................... +................... - |Fix| Fix a bug in the Poisson splitting criterion for :class:`tree.DecisionTreeRegressor`. :pr:`22191` by :user:`Christian Lorentzen `. -- |Enhancement| :func:`svm.SVR`, :func:`svm.SVC`, :func:`svm.NuSVR`, - :func:`svm.OneClassSVM`, :func:`svm.NuSVC` now raise an error - when the dual-gap estimation produce non-finite parameter weights. - :pr:`22149` by :user:`Christian Ritter ` and - :user:`Norbert Preining `. - :mod:`sklearn.utils` .................... 
diff --git a/sklearn/ensemble/tests/test_forest.py b/sklearn/ensemble/tests/test_forest.py index 8408c870aa2d0..24c16f0558a2c 100644 --- a/sklearn/ensemble/tests/test_forest.py +++ b/sklearn/ensemble/tests/test_forest.py @@ -229,7 +229,7 @@ def test_poisson_vs_mse(): forest_mse.fit(X_train, y_train) dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) - for X, y, val in [(X_train, y_train, "train"), (X_test, y_test, "test")]: + for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]: metric_poi = mean_poisson_deviance(y, forest_poi.predict(X)) # squared_error forest might produce non-positive predictions => clip # If y = 0 for those, the poisson deviance gets too good. @@ -244,9 +244,9 @@ def test_poisson_vs_mse(): # As squared_error might correctly predict 0 in train set, its train # score can be better than Poisson. This is no longer the case for the # test set. But keep the above comment for clipping in mind. - if val == "test": + if data_name == "test": assert metric_poi < metric_mse - assert metric_poi < 0.5 * metric_dummy + assert metric_poi < 0.8 * metric_dummy @pytest.mark.parametrize("criterion", ("poisson", "squared_error"))