diff --git a/sklearn/metrics/_regression.py b/sklearn/metrics/_regression.py
index 197db45e5ac59..fc8ccaac82c62 100644
--- a/sklearn/metrics/_regression.py
+++ b/sklearn/metrics/_regression.py
@@ -1304,6 +1304,18 @@ def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
     return 1 - numerator / denominator
 
 
+@validate_params(
+    {
+        "y_true": ["array-like"],
+        "y_pred": ["array-like"],
+        "sample_weight": ["array-like", None],
+        "alpha": [Interval(Real, 0, 1, closed="both")],
+        "multioutput": [
+            StrOptions({"raw_values", "uniform_average"}),
+            "array-like",
+        ],
+    }
+)
 def d2_pinball_score(
     y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
 ):
@@ -1327,7 +1339,7 @@ def d2_pinball_score(
     y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
         Estimated target values.
 
-    sample_weight : array-like of shape (n_samples,), optional
+    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
 
     alpha : float, default=0.5
@@ -1434,15 +1446,9 @@ def d2_pinball_score(
         if multioutput == "raw_values":
             # return scores individually
             return output_scores
-        elif multioutput == "uniform_average":
+        else:  # multioutput == "uniform_average"
             # passing None as weights to np.average results in uniform mean
             avg_weights = None
-        else:
-            raise ValueError(
-                "multioutput is expected to be 'raw_values' "
-                "or 'uniform_average' but we got %r"
-                " instead." % multioutput
-            )
     else:
         avg_weights = multioutput
 
diff --git a/sklearn/metrics/tests/test_regression.py b/sklearn/metrics/tests/test_regression.py
index 9da59e0764394..241a9ba4f2855 100644
--- a/sklearn/metrics/tests/test_regression.py
+++ b/sklearn/metrics/tests/test_regression.py
@@ -351,9 +351,6 @@ def test_regression_multioutput_array():
     with pytest.raises(ValueError, match=err_msg):
         mean_pinball_loss(y_true, y_pred, multioutput="variance_weighted")
 
-    with pytest.raises(ValueError, match=err_msg):
-        d2_pinball_score(y_true, y_pred, multioutput="variance_weighted")
-
     pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
     mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values")
     r = r2_score(y_true, y_pred, multioutput="raw_values")
diff --git a/sklearn/tests/test_public_functions.py b/sklearn/tests/test_public_functions.py
index 4014b03607ee3..b51334e8b35f5 100644
--- a/sklearn/tests/test_public_functions.py
+++ b/sklearn/tests/test_public_functions.py
@@ -117,6 +117,7 @@ def _check_function_param_validation(
     "sklearn.metrics.cluster.contingency_matrix",
     "sklearn.metrics.cohen_kappa_score",
     "sklearn.metrics.confusion_matrix",
+    "sklearn.metrics.d2_pinball_score",
     "sklearn.metrics.det_curve",
     "sklearn.metrics.hamming_loss",
     "sklearn.metrics.mean_absolute_error",
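
A minimal usage sketch of what this patch changes for callers, assuming it is applied: with `@validate_params` in place, an unsupported `multioutput` string is rejected by the common parameter-validation machinery before the function body runs, which is why the hand-rolled `ValueError` branch and its dedicated test above can be dropped. The error raised by the decorator (`InvalidParameterError`) subclasses `ValueError`, so existing `except ValueError` handling keeps working. The inputs below are taken from `test_regression_multioutput_array`.

```python
# Sketch only: demonstrates the validation behaviour added by @validate_params.
import numpy as np
from sklearn.metrics import d2_pinball_score

y_true = np.array([[0.5, 1.0], [-1.0, 1.0], [7.0, -6.0]])
y_pred = np.array([[0.0, 2.0], [-1.0, 2.0], [8.0, -5.0]])

# Supported options behave as before.
print(d2_pinball_score(y_true, y_pred, multioutput="raw_values"))

# An unsupported string is now caught by the decorator rather than by code
# inside the function; InvalidParameterError is a ValueError subclass.
try:
    d2_pinball_score(y_true, y_pred, multioutput="variance_weighted")
except ValueError as exc:
    print(f"rejected by parameter validation: {exc}")
```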