@@ -227,18 +227,13 @@ def roc_auc_score(y_true, y_score, multiclass="ovr", average="macro",
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-<<<<<<< 68c38761be8d86c944012b67d8d84feb3606ce6f
         True binary labels in binary label indicators.
         The multiclass case expects shape = [n_samples] and labels
         with values from 0 to (n_classes-1), inclusive.
-=======
-        True binary labels or binary label indicators.
->>>>>>> [MRG+1] Completely support binary y_true in roc_auc_score (#9828)
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
-<<<<<<< 68c38761be8d86c944012b67d8d84feb3606ce6f
         class, confidence values, or non-thresholded measure of decisions
         (as returned by "decision_function" on some classifiers).
         The multiclass case expects shape = [n_samples, n_classes]
         where the scores correspond to probability estimates.
@@ -253,11 +248,6 @@ def roc_auc_score(y_true, y_score, multiclass="ovr", average="macro",
         ``'ovo'``:
             Calculate metrics for the multiclass case using the one-vs-one
            approach.
-=======
-        (as returned by "decision_function" on some classifiers). For binary
-        y_true, y_score is supposed to be the score of the class with greater
-        label.
->>>>>>> [MRG+1] Completely support binary y_true in roc_auc_score (#9828)
 
     average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
         If ``None``, the scores for each class are returned. Otherwise,
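The binary case that #9828 targets can be sketched in a doctest (values here are
illustrative, not from the patch; the expected 0.75 follows because three of the
four positive/negative score pairs are ranked correctly):

    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75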
@@ -287,6 +277,9 @@ def roc_auc_score(y_true, y_score, multiclass="ovr", average="macro",
     .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
 
+    .. [2] Fawcett T. An introduction to ROC analysis. Pattern Recognition
+           Letters, 2006, 27(8):861-874.
+
     See also
     --------
     average_precision_score : Area under the precision-recall curve
@@ -589,6 +582,8 @@ def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
     .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
 
+    .. [2] Fawcett T. An introduction to ROC analysis. Pattern Recognition
+           Letters, 2006, 27(8):861-874.
 
     Examples
     --------
@@ -598,11 +593,11 @@ def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
     >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
     >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
     >>> fpr
-    array([ 0. ,  0.5,  0.5,  1. ])
+    array([ 0. ,  0. ,  0.5,  0.5,  1. ])
     >>> tpr
-    array([ 0.5,  0.5,  1. ,  1. ])
+    array([ 0. ,  0.5,  0.5,  1. ,  1. ])
     >>> thresholds
-    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
+    array([ 1.8 ,  0.8 ,  0.4 ,  0.35,  0.1 ])
 
     """
     fps, tps, thresholds = _binary_clf_curve(
@@ -626,8 +621,9 @@ def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
         tps = tps[optimal_idxs]
         thresholds = thresholds[optimal_idxs]
 
-    if tps.size == 0 or fps[0] != 0:
+    if tps.size == 0 or fps[0] != 0 or tps[0] != 0:
         # Add an extra threshold position if necessary
+        # to make sure that the curve starts at (0, 0)
         tps = np.r_[0, tps]
         fps = np.r_[0, fps]
         thresholds = np.r_[thresholds[0] + 1, thresholds]
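For the doctest values above, `_binary_clf_curve` yields `fps = [0, 1, 1, 2]`,
`tps = [1, 1, 2, 2]` and `thresholds = [0.8, 0.4, 0.35, 0.1]` (with only four
points, `drop_intermediate` removes nothing). Since `tps[0] != 0`, the widened
condition now fires and the (0, 0) point is prepended, which is exactly where
the extra leading entries and the `1.8` threshold in the new doctest come from.
A standalone numpy sketch of just this prepend step (`.tolist()` used to
sidestep numpy's version-dependent array repr):

    >>> import numpy as np
    >>> fps = np.array([0, 1, 1, 2])
    >>> tps = np.array([1, 1, 2, 2])
    >>> thresholds = np.array([0.8, 0.4, 0.35, 0.1])
    >>> tps = np.r_[0, tps]        # prepend the (0, 0) point
    >>> fps = np.r_[0, fps]
    >>> thresholds = np.r_[thresholds[0] + 1, thresholds]
    >>> (fps / fps[-1]).tolist()   # fpr as reported by roc_curve
    [0.0, 0.0, 0.5, 0.5, 1.0]
    >>> (tps / tps[-1]).tolist()   # tpr
    [0.0, 0.5, 0.5, 1.0, 1.0]
    >>> thresholds.tolist()
    [1.8, 0.8, 0.4, 0.35, 0.1]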