@@ -918,8 +918,8 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
     Examples
     --------
     >>> from sklearn.metrics import precision_recall_fscore_support
-    >>> y_true = np.array([0, 1, 2, 0, 1, 2])
-    >>> y_pred = np.array([0, 2, 1, 0, 0, 1])
+    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
+    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
     >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
     ... # doctest: +ELLIPSIS
     (0.22..., 0.33..., 0.26..., None)
@@ -930,6 +930,16 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
     ... # doctest: +ELLIPSIS
     (0.22..., 0.33..., 0.26..., None)
 
+    It is possible to compute per-label precisions, recalls, F1-scores and
+    supports instead of averaging:
+    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
+    ... labels=['pig', 'dog', 'cat'])
+    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
+    (array([ 0. ,  0. ,  0.66...]),
+    array([ 0.,  0.,  1.]),
+    array([ 0. ,  0. ,  0.8]),
+    array([2, 2, 2]))
+
933943 """
934944 average_options = (None , 'micro' , 'macro' , 'weighted' , 'samples' )
935945 if average not in average_options and average != 'binary' :
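
For reference, a minimal standalone sketch of the per-label call that this patch adds to the docstring (assuming only NumPy and scikit-learn; the 'cat'/'dog'/'pig' data and the label order come from the new doctest):

    import numpy as np
    from sklearn.metrics import precision_recall_fscore_support

    y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])

    # With average=None the metrics are returned per label, ordered as in
    # `labels`; here 'cat' is the only label predicted correctly, so only
    # its position has nonzero precision/recall/F1, and every label has
    # support 2.
    p, r, f1, support = precision_recall_fscore_support(
        y_true, y_pred, average=None, labels=['pig', 'dog', 'cat'])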