From 1c530b9d7801c6ac1e666e8d45f1d52277391a25 Mon Sep 17 00:00:00 2001
From: Matt Hall
Date: Tue, 3 Dec 2019 20:49:25 +0000
Subject: [PATCH 1/2] Long sentence was hard to parse and ambiguous

---
 sklearn/metrics/_classification.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index 8a975a6f59802..42f58a6129756 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -1916,10 +1916,10 @@ def classification_report(y_true, y_pred, labels=None, target_names=None,
 
     The reported averages include macro average (averaging the unweighted
     mean per label), weighted average (averaging the support-weighted mean
-    per label), sample average (only for multilabel classification) and
-    micro average (averaging the total true positives, false negatives and
-    false positives) it is only shown for multi-label or multi-class
-    with a subset of classes because it is accuracy otherwise.
+    per label), and sample average (only for multilabel classification).
+    Micro average (averaging the total true positives, false negatives and
+    false positives) is only shown for multi-label or multi-class
+    with a subset of classes, because it is accuracy otherwise.
 
     See also :func:`precision_recall_fscore_support` for more details
     on averages.

From f59560f518cbf47654bd1003368986feeaaefcc4 Mon Sep 17 00:00:00 2001
From: Hanmin Qin
Date: Fri, 6 Dec 2019 16:14:24 +0800
Subject: [PATCH 2/2] Update _classification.py

---
 sklearn/metrics/_classification.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index 42f58a6129756..10f91934f79da 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -1919,7 +1919,7 @@ def classification_report(y_true, y_pred, labels=None, target_names=None,
     per label), and sample average (only for multilabel classification).
     Micro average (averaging the total true positives, false negatives and
     false positives) is only shown for multi-label or multi-class
-    with a subset of classes, because it corresponds to accuracy otherwise.
+    with a subset of classes, because it corresponds to accuracy otherwise.
 
     See also :func:`precision_recall_fscore_support` for more details
     on averages.
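
A minimal sketch of the behaviour the reworded docstring describes: with all
classes included, micro-averaged precision, recall and F1 collapse to plain
accuracy, so classification_report prints an "accuracy" row instead of a
redundant "micro avg" row; restricting the report to a subset of labels
breaks that equivalence, and the "micro avg" row reappears. The toy labels
below are made up for illustration.

from sklearn.metrics import (accuracy_score, classification_report,
                             precision_recall_fscore_support)

y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 1]

# With every class counted, micro precision, recall and F1 all equal
# accuracy (4 of 6 predictions correct here).
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='micro')
print(p, r, f, accuracy_score(y_true, y_pred))  # 0.666... four times

# Hence the report shows an "accuracy" row rather than "micro avg".
print(classification_report(y_true, y_pred))

# With only a subset of classes scored, micro average is informative
# again, so the report shows it.
print(classification_report(y_true, y_pred, labels=[0, 1]))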