@@ -314,7 +314,7 @@ def average_precision_score(y_true, y_score, average="macro",
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.

     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -426,7 +426,7 @@ def _average_binary_score(binary_metric, y_true, y_score, average,
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.

     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -527,7 +527,7 @@ def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary indicator format.
+        True binary labels in binary label indicators.

     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
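Note: the renamed "binary label indicators" input is simply a 0/1 matrix with one column per class, and the ranking metrics touched above (average_precision_score, roc_auc_score) pair it with an equally shaped matrix of scores. A minimal usage sketch, with scores invented for this note rather than taken from the patch:

>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([[1, 0], [0, 1], [1, 1]])   # binary label indicators
>>> y_score = np.array([[0.9, 0.2], [0.3, 0.8], [0.6, 0.7]])
>>> roc_auc_score(y_true, y_score, average="macro")  # mean of per-column AUCs
1.0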
@@ -986,10 +986,10 @@ def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.

     normalize : bool, optional (default=True)
@@ -1025,17 +1025,10 @@ def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
     >>> zero_one_loss(y_true, y_pred, normalize=False)
     1

-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:

-    >>> zero_one_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
     0.5
-
-    and with a list of labels format:
-
-    >>> zero_one_loss([(1, ), (3, )], [(1, 2), tuple()])
-    1.0
-
-
     """
     score = accuracy_score(y_true, y_pred,
                            normalize=normalize,
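Note: the list-of-labels doctest removed above still has an equivalent under the retained format: binarize the label sets into an indicator matrix first. A sketch assuming a scikit-learn version that ships sklearn.preprocessing.MultiLabelBinarizer (newer than the code being patched here):

>>> from sklearn.metrics import zero_one_loss
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer(classes=[1, 2, 3])   # fix the column order
>>> y_true = mlb.fit_transform([(1,), (3,)])       # [[1, 0, 0], [0, 0, 1]]
>>> y_pred = mlb.fit_transform([(1, 2), ()])       # [[1, 1, 0], [0, 0, 0]]
>>> zero_one_loss(y_true, y_pred)                  # neither row matches exactly
1.0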
@@ -1064,7 +1057,7 @@ def log_loss(y_true, y_pred, eps=1e-15, normalize=True):

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels for n_samples samples.

     y_pred : array-like of float, shape = (n_samples, n_classes)
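Note: log_loss is the one function in this patch whose y_pred holds probabilities rather than labels, so only its y_true line is reworded. An illustrative call, with numbers invented for this note:

>>> from sklearn.metrics import log_loss
>>> y_true = [0, 1, 1]
>>> y_prob = [[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]]  # columns follow sorted labels
>>> log_loss(y_true, y_prob)  # doctest: +ELLIPSIS
0.2283...

which is -(log 0.9 + log 0.8 + log 0.7) / 3.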
@@ -1139,10 +1132,10 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True):

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.

     normalize : bool, optional (default=True)
@@ -1187,17 +1180,11 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True):
     >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
     2

-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:

-    >>> jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]),\
+    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
             np.ones((2, 2)))
     0.75
-
-    and with a list of labels format:
-
-    >>> jaccard_similarity_score([(1, ), (3, )], [(1, 2), tuple()])
-    0.25
-
     """

     # Compute accuracy for each possible representation
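Note: with binary label indicators the per-sample Jaccard score is |true ∩ pred| / |true ∪ pred|, so the 0.75 above is the mean of 1/2 (first row) and 2/2 (second row). A NumPy cross-check of that arithmetic:

>>> import numpy as np
>>> t, p = np.array([[0, 1], [1, 1]]), np.ones((2, 2))
>>> inter = np.logical_and(t, p).sum(axis=1)   # per-sample |true & pred|
>>> union = np.logical_or(t, p).sum(axis=1)    # per-sample |true | pred|
>>> np.mean(inter / union.astype(float))       # mean of 1/2 and 2/2
0.75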
@@ -1252,10 +1239,10 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.

     normalize : bool, optional (default=True)
@@ -1295,16 +1282,10 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
     >>> accuracy_score(y_true, y_pred, normalize=False)
     2

-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:

-    >>> accuracy_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.ones((2, 2)))
+    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
     0.5
-
-    and with a list of labels format:
-
-    >>> accuracy_score([(1, ), (3, )], [(1, 2), tuple()])
-    0.0
-
     """

     # Compute accuracy for each possible representation
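Note: for label-indicator input, accuracy_score computes subset accuracy: a sample counts only when its entire row of labels matches. That is why the example above yields 0.5, since only the second row equals its prediction. With normalize=False the raw match count comes back instead:

>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)), normalize=False)
1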
@@ -1343,10 +1324,10 @@ def f1_score(y_true, y_pred, labels=None, pos_label=1, average='weighted',

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     labels : array
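Note: a quick multiclass call for f1_score with the averaging this docstring describes, using toy labels invented for this note:

>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
0.26...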
@@ -1426,10 +1407,10 @@ def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     beta: float
@@ -1585,10 +1566,10 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     beta : float, 1.0 by default
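Note: precision_recall_fscore_support bundles all four statistics; when an average is requested, the per-class support is reduced away and returned as None. Reusing the toy labels from the f1_score note above, with outputs elided to two decimals:

>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)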
@@ -1830,10 +1811,10 @@ def precision_score(y_true, y_pred, labels=None, pos_label=1,

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     labels : array
@@ -1912,10 +1893,10 @@ def recall_score(y_true, y_pred, labels=None, pos_label=1, average='weighted',

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     labels : array
@@ -1987,10 +1968,10 @@ def classification_report(y_true, y_pred, labels=None, target_names=None,

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) target values.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Estimated targets as returned by a classifier.

     labels : array, shape = [n_labels]
@@ -2081,10 +2062,10 @@ def hamming_loss(y_true, y_pred, classes=None):

     Parameters
     ----------
-    y_true : array-like or list of labels or label indicator matrix
+    y_true : array-like or label indicator matrix
         Ground truth (correct) labels.

-    y_pred : array-like or list of labels or label indicator matrix
+    y_pred : array-like or label indicator matrix
         Predicted labels, as returned by a classifier.

     classes : array, shape = [n_labels], optional
@@ -2132,16 +2113,10 @@ def hamming_loss(y_true, y_pred, classes=None):
     >>> hamming_loss(y_true, y_pred)
     0.25

-    In the multilabel case with binary indicator format:
+    In the multilabel case with binary label indicators:

-    >>> hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.zeros((2, 2)))
+    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
     0.75
-
-    and with a list of labels format:
-
-    >>> hamming_loss([(1, 2), (3, )], [(1, 2), tuple()])  # doctest: +ELLIPSIS
-    0.166...
-
     """
     y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
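Note: on indicator matrices the Hamming loss reduces to the fraction of label entries that disagree, which makes the new doctest's 0.75 easy to verify by hand (3 of the 4 entries differ). A NumPy cross-check:

>>> import numpy as np
>>> y_true = np.array([[0, 1], [1, 1]])
>>> y_pred = np.zeros((2, 2))
>>> np.mean(y_true != y_pred)  # 3 of 4 entries disagree
0.75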