@@ -918,8 +918,8 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
     Examples
     --------
     >>> from sklearn.metrics import precision_recall_fscore_support
-    >>> y_true = np.array([0, 1, 2, 0, 1, 2])
-    >>> y_pred = np.array([0, 2, 1, 0, 0, 1])
+    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
+    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
     >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
     ... # doctest: +ELLIPSIS
     (0.22..., 0.33..., 0.26..., None)
@@ -930,6 +930,16 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
     ... # doctest: +ELLIPSIS
     (0.22..., 0.33..., 0.26..., None)
 
+    It is possible to compute per-label precisions, recalls, F1-scores and
+    supports instead of averaging:
+    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
+    ... labels=['pig', 'dog', 'cat'])
+    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
+    (array([ 0. ,  0. ,  0.66...]),
+    array([ 0.,  0.,  1.]),
+    array([ 0. ,  0. ,  0.8]),
+    array([2, 2, 2]))
+
     """
     average_options = (None, 'micro', 'macro', 'weighted', 'samples')
     if average not in average_options and average != 'binary':
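
For context, here is a minimal usage sketch of the per-label mode that the new doctest demonstrates. It is not part of the patch itself; it reuses the label names from the doctest above and assumes scikit-learn and NumPy are installed.

# Hedged usage sketch (not from the patch): with average=None, the function
# returns one entry per label, in the order given by `labels`, so the four
# arrays can be zipped back together for a per-class report.
import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])

labels = ['pig', 'dog', 'cat']
precision, recall, fscore, support = precision_recall_fscore_support(
    y_true, y_pred, average=None, labels=labels)

for label, p, r, f, s in zip(labels, precision, recall, fscore, support):
    print("%s: precision=%.2f recall=%.2f f1=%.2f support=%d"
          % (label, p, r, f, s))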