@@ -174,6 +174,7 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
 
     # Compute accuracy for each possible representation
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type.startswith('multilabel'):
         differing_labels = count_nonzero(y_true - y_pred, axis=1)
         score = differing_labels == 0
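Note: the one-line guard added throughout this diff calls sklearn.utils.check_consistent_length, which raises a ValueError when its arguments have differing numbers of samples and skips arguments that are None, so the default sample_weight=None path is unaffected. A minimal standalone sketch of that behavior (the example arrays are illustrative only):

    import numpy as np
    from sklearn.utils import check_consistent_length

    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])

    check_consistent_length(y_true, y_pred, None)        # passes: None is ignored
    check_consistent_length(y_true, y_pred, np.ones(4))  # passes: all lengths are 4

    try:
        check_consistent_length(y_true, y_pred, np.ones(3))  # 3 weights vs. 4 samples
    except ValueError as exc:
        print(exc)  # Found input variables with inconsistent numbers of samples: [4, 4, 3]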
@@ -263,7 +264,7 @@ def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
     else:
         sample_weight = np.asarray(sample_weight)
 
-    check_consistent_length(sample_weight, y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
 
     n_labels = labels.size
     label_to_ind = dict((y, x) for x, y in enumerate(labels))
@@ -444,6 +445,7 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True,
 
     # Compute accuracy for each possible representation
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type.startswith('multilabel'):
         with np.errstate(divide='ignore', invalid='ignore'):
             # oddly, we may get an "invalid" rather than a "divide" error here
@@ -519,6 +521,7 @@ def matthews_corrcoef(y_true, y_pred, sample_weight=None):
     -0.33...
     """
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type not in {"binary", "multiclass"}:
         raise ValueError("%s is not supported" % y_type)
 
@@ -1023,6 +1026,7 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
         raise ValueError("beta should be >0 in the F-beta score")
 
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     present_labels = unique_labels(y_true, y_pred)
 
     if average == 'binary':
@@ -1550,6 +1554,7 @@ def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
         labels = classes
 
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
 
     if labels is None:
         labels = unique_labels(y_true, y_pred)
@@ -1638,7 +1643,7 @@ def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
     The logarithm used is the natural logarithm (base-e).
     """
     y_pred = check_array(y_pred, ensure_2d=False)
-    check_consistent_length(y_pred, y_true)
+    check_consistent_length(y_pred, y_true, sample_weight)
 
     lb = LabelBinarizer()
 
@@ -1911,6 +1916,7 @@ def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
     y_prob = column_or_1d(y_prob)
     assert_all_finite(y_true)
     assert_all_finite(y_prob)
+    check_consistent_length(y_true, y_prob, sample_weight)
 
     if pos_label is None:
         pos_label = y_true.max()
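With these checks in place, a metric called with a sample_weight of the wrong length fails fast with the ValueError shown above instead of a less obvious downstream numpy error. A quick usage sketch against the patched metrics (accuracy_score shown; the example arrays are illustrative):

    import numpy as np
    from sklearn.metrics import accuracy_score

    y_true = [0, 1, 1, 0]
    y_pred = [0, 1, 0, 0]

    # Raises ValueError immediately: 3 weights for 4 samples.
    accuracy_score(y_true, y_pred, sample_weight=np.ones(3))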