diff --git a/sklearn/linear_model/_ridge.py b/sklearn/linear_model/_ridge.py
index 827366fab2a25..fda2e778b17c4 100644
--- a/sklearn/linear_model/_ridge.py
+++ b/sklearn/linear_model/_ridge.py
@@ -2248,12 +2248,7 @@ def _score(self, *, predictions, y, n_y, scorer, score_params):
                     ]
                 )
             else:
-                _score = scorer(
-                    identity_estimator,
-                    predictions.ravel(),
-                    y.ravel(),
-                    **score_params,
-                )
+                _score = scorer(identity_estimator, predictions, y, **score_params)
 
         return _score
 
diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py
index 008ccf11d6ac3..cb42e1a473647 100644
--- a/sklearn/linear_model/tests/test_ridge.py
+++ b/sklearn/linear_model/tests/test_ridge.py
@@ -2252,6 +2252,32 @@ def test_ridge_cv_values_deprecated():
         ridge.cv_values_
 
 
+def test_ridge_cv_multioutput_sample_weight(global_random_seed):
+    """Check that `RidgeCV` works properly with multioutput and sample_weight
+    when `scoring != None`.
+
+    We check that the error reported by `RidgeCV` is close to a naive LOO-CV
+    using a `Ridge` estimator.
+    """
+    X, y = make_regression(n_targets=2, random_state=global_random_seed)
+    sample_weight = np.ones(shape=(X.shape[0],))
+
+    ridge_cv = RidgeCV(scoring="neg_mean_squared_error", store_cv_results=True)
+    ridge_cv.fit(X, y, sample_weight=sample_weight)
+
+    cv = LeaveOneOut()
+    ridge = Ridge(alpha=ridge_cv.alpha_)
+    y_pred_loo = np.squeeze(
+        [
+            ridge.fit(X[train], y[train], sample_weight=sample_weight[train]).predict(
+                X[test]
+            )
+            for train, test in cv.split(X)
+        ]
+    )
+    assert_allclose(ridge_cv.best_score_, -mean_squared_error(y, y_pred_loo))
+
+
 # Metadata Routing Tests
 # ======================
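
Note (not part of the patch): before this change, `_score` raveled 2D predictions and targets before calling the scorer, so a `sample_weight` of shape (n_samples,) no longer lined up with the flattened arrays of shape (n_samples * n_targets,). A minimal sketch of that failure mode at the metric level, using hypothetical data rather than anything from the patch:

    import numpy as np
    from sklearn.metrics import mean_squared_error

    # Illustrative shapes only; the real values come from the LOO predictions
    # inside RidgeCV.
    rng = np.random.RandomState(0)
    n_samples, n_targets = 10, 2
    y = rng.randn(n_samples, n_targets)
    pred = rng.randn(n_samples, n_targets)
    sample_weight = np.ones(n_samples)

    # 2D inputs: the metric handles multioutput natively, one weight per sample.
    mean_squared_error(y, pred, sample_weight=sample_weight)

    # Raveled inputs: 20 flattened values vs. 10 weights -> ValueError
    # ("inconsistent numbers of samples") from the metric's length check.
    try:
        mean_squared_error(y.ravel(), pred.ravel(), sample_weight=sample_weight)
    except ValueError as exc:
        print(exc)

Passing the 2D `predictions` and `y` straight through, as the fix does, lets the scorer apply the per-sample weights and average over targets itself.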