From ffecfe9791fca1d5e0b6fdc30f7fc3eff32faf01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Mart=C3=ADn=20Loyola?=
Date: Thu, 3 Feb 2022 15:42:52 -0300
Subject: [PATCH 1/3] Remove make_scorer from FUNCTION_DOCSTRING_IGNORE_LIST

---
 sklearn/tests/test_docstrings.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/sklearn/tests/test_docstrings.py b/sklearn/tests/test_docstrings.py
index b2b0f18eb1ab3..2a17f6d579cd3 100644
--- a/sklearn/tests/test_docstrings.py
+++ b/sklearn/tests/test_docstrings.py
@@ -86,7 +86,6 @@
     "sklearn.metrics._ranking.roc_curve",
     "sklearn.metrics._ranking.top_k_accuracy_score",
     "sklearn.metrics._regression.mean_pinball_loss",
-    "sklearn.metrics._scorer.make_scorer",
     "sklearn.metrics.cluster._bicluster.consensus_score",
     "sklearn.metrics.cluster._supervised.adjusted_mutual_info_score",
     "sklearn.metrics.cluster._supervised.adjusted_rand_score",

From 6bb67ef394a4c503d3b57cdadb9e314be36f52c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Mart=C3=ADn=20Loyola?=
Date: Thu, 3 Feb 2022 15:44:04 -0300
Subject: [PATCH 2/3] Fix numpydocs from make_scorer

---
 sklearn/metrics/_scorer.py | 42 +++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/sklearn/metrics/_scorer.py b/sklearn/metrics/_scorer.py
index 7a20ffd32c954..a63e9c8331bfd 100644
--- a/sklearn/metrics/_scorer.py
+++ b/sklearn/metrics/_scorer.py
@@ -604,52 +604,41 @@ def make_scorer(
     ----------
     score_func : callable
         Score function (or loss function) with signature
-        ``score_func(y, y_pred, **kwargs)``.
+        `score_func(y, y_pred, **kwargs)`.
 
     greater_is_better : bool, default=True
-        Whether score_func is a score function (default), meaning high is good,
-        or a loss function, meaning low is good. In the latter case, the
-        scorer object will sign-flip the outcome of the score_func.
+        Whether `score_func` is a score function (default), meaning high is
+        good, or a loss function, meaning low is good. In the latter case, the
+        scorer object will sign-flip the outcome of the `score_func`.
 
     needs_proba : bool, default=False
-        Whether score_func requires predict_proba to get probability estimates
-        out of a classifier.
+        Whether `score_func` requires `predict_proba` to get probability
+        estimates out of a classifier.
 
         If True, for binary `y_true`, the score function is supposed to accept
         a 1D `y_pred` (i.e., probability of the positive class, shape
         `(n_samples,)`).
 
     needs_threshold : bool, default=False
-        Whether score_func takes a continuous decision certainty.
+        Whether `score_func` takes a continuous decision certainty.
         This only works for binary classification using estimators that
-        have either a decision_function or predict_proba method.
+        have either a `decision_function` or `predict_proba` method.
 
         If True, for binary `y_true`, the score function is supposed to accept
        a 1D `y_pred` (i.e., probability of the positive class or the decision
         function, shape `(n_samples,)`).
 
-        For example ``average_precision`` or the area under the roc curve
+        For example `average_precision` or the area under the roc curve
         can not be computed using discrete predictions alone.
 
     **kwargs : additional arguments
-        Additional parameters to be passed to score_func.
+        Additional parameters to be passed to `score_func`.
 
     Returns
     -------
     scorer : callable
         Callable object that returns a scalar score; greater is better.
 
-    Examples
-    --------
-    >>> from sklearn.metrics import fbeta_score, make_scorer
-    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
-    >>> ftwo_scorer
-    make_scorer(fbeta_score, beta=2)
-    >>> from sklearn.model_selection import GridSearchCV
-    >>> from sklearn.svm import LinearSVC
-    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
-    ...                     scoring=ftwo_scorer)
-
     Notes
     -----
     If `needs_proba=False` and `needs_threshold=False`, the score
@@ -660,6 +649,17 @@ def make_scorer(
     `needs_threshold=True`, the score function is supposed to accept the
     output of :term:`decision_function` or :term:`predict_proba` when
     :term:`decision_function` is not present.
+
+    Examples
+    --------
+    >>> from sklearn.metrics import fbeta_score, make_scorer
+    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
+    >>> ftwo_scorer
+    make_scorer(fbeta_score, beta=2)
+    >>> from sklearn.model_selection import GridSearchCV
+    >>> from sklearn.svm import LinearSVC
+    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
+    ...                     scoring=ftwo_scorer)
     """
     sign = 1 if greater_is_better else -1
     if needs_proba and needs_threshold:

From d80ec2fee060e93920f7f63932d2101539dcecf0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Mart=C3=ADn=20Loyola?=
Date: Thu, 3 Feb 2022 15:45:12 -0300
Subject: [PATCH 3/3] Fix broken links

---
 sklearn/metrics/_scorer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sklearn/metrics/_scorer.py b/sklearn/metrics/_scorer.py
index a63e9c8331bfd..1e8e330d1af81 100644
--- a/sklearn/metrics/_scorer.py
+++ b/sklearn/metrics/_scorer.py
@@ -591,8 +591,8 @@ def make_scorer(
     :func:`~sklearn.model_selection.cross_val_score`. It takes a score
     function, such as :func:`~sklearn.metrics.accuracy_score`,
     :func:`~sklearn.metrics.mean_squared_error`,
-    :func:`~sklearn.metrics.adjusted_rand_index` or
-    :func:`~sklearn.metrics.average_precision`
+    :func:`~sklearn.metrics.adjusted_rand_score` or
+    :func:`~sklearn.metrics.average_precision_score`
     and returns a callable that scores an estimator's output.
     The signature of the call is `(estimator, X, y)` where `estimator`
     is the model to be evaluated, `X` is the data and `y` is the
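Editor's note, not part of the patch series: a minimal sketch of how the `greater_is_better` and `needs_proba` flags documented in PATCH 2/3 interact, assuming scikit-learn is installed. `log_loss` is used only as an example of a loss metric (lower is better) that consumes probabilities; the scorer therefore sign-flips its result and, for binary targets, passes the positive-class column of `predict_proba` to it.

# Illustrative sketch only (not from the patch): make_scorer flag behaviour.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, make_scorer

X, y = make_classification(random_state=0)
clf = LogisticRegression(random_state=0).fit(X, y)

# log_loss is a loss, so greater_is_better=False makes the scorer negate it;
# needs_proba=True routes predict_proba output (1D for binary y) to log_loss.
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True)

# Prints the negation of log_loss(y, clf.predict_proba(X)[:, 1]): greater is better.
print(neg_log_loss_scorer(clf, X, y))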