@@ -174,6 +174,7 @@ def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
 
     # Compute accuracy for each possible representation
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type.startswith('multilabel'):
         differing_labels = count_nonzero(y_true - y_pred, axis=1)
         score = differing_labels == 0
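For context, a minimal sketch of what the added check does. This assumes `check_consistent_length` from `sklearn.utils.validation`, which raises a ValueError when its array-like arguments disagree in length and skips arguments that are None, so the default `sample_weight=None` still passes:

    import numpy as np
    from sklearn.utils.validation import check_consistent_length

    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])

    check_consistent_length(y_true, y_pred, None)        # fine: None is ignored
    check_consistent_length(y_true, y_pred, np.ones(4))  # fine: lengths match
    try:
        check_consistent_length(y_true, y_pred, np.ones(3))
    except ValueError as exc:
        # roughly: "Found input variables with inconsistent numbers of
        # samples: [4, 4, 3]"
        print(exc)

Before this patch, an `accuracy_score` call with a mismatched `sample_weight` would typically only fail later inside `np.average`; with the check it fails fast with a clearer message.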
@@ -337,7 +338,7 @@ def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
     else:
         sample_weight = np.asarray(sample_weight)
 
-    check_consistent_length(sample_weight, y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
 
     n_labels = labels.size
     label_to_ind = dict((y, x) for x, y in enumerate(labels))
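The reorder in this hunk is behavior-preserving; the only visible difference is the error message, since `check_consistent_length` reports lengths in argument order. Putting `y_true` and `y_pred` first presumably keeps `confusion_matrix` consistent with the checks added to the other metrics. A hypothetical mismatch (4 samples, 1 weight):

    check_consistent_length(sample_weight, y_true, y_pred)
    # old order -> "... inconsistent numbers of samples: [1, 4, 4]"
    check_consistent_length(y_true, y_pred, sample_weight)
    # new order -> "... inconsistent numbers of samples: [4, 4, 1]"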
@@ -518,6 +519,7 @@ def jaccard_similarity_score(y_true, y_pred, normalize=True,
 
     # Compute accuracy for each possible representation
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type.startswith('multilabel'):
         with np.errstate(divide='ignore', invalid='ignore'):
             # oddly, we may get an "invalid" rather than a "divide" error here
@@ -593,6 +595,7 @@ def matthews_corrcoef(y_true, y_pred, sample_weight=None):
     -0.33...
     """
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     if y_type not in {"binary", "multiclass"}:
         raise ValueError("%s is not supported" % y_type)
 
@@ -1097,6 +1100,7 @@ def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
         raise ValueError("beta should be >0 in the F-beta score")
 
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
     present_labels = unique_labels(y_true, y_pred)
 
     if average == 'binary':
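Since `precision_score`, `recall_score`, `f1_score` and `fbeta_score` all delegate to `precision_recall_fscore_support` in this module, a single check here should cover all of them. A sketch of the expected effect (the exact message wording is approximate):

    from sklearn.metrics import f1_score

    # expected: ValueError from check_consistent_length, raised before
    # any scores are computed
    f1_score([0, 1, 1, 0], [0, 1, 0, 0], sample_weight=[1.0, 2.0])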
@@ -1624,6 +1628,7 @@ def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
         labels = classes
 
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+    check_consistent_length(y_true, y_pred, sample_weight)
 
     if labels is None:
         labels = unique_labels(y_true, y_pred)
@@ -1712,7 +1717,7 @@ def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
     The logarithm used is the natural logarithm (base-e).
     """
     y_pred = check_array(y_pred, ensure_2d=False)
-    check_consistent_length(y_pred, y_true)
+    check_consistent_length(y_pred, y_true, sample_weight)
 
     lb = LabelBinarizer()
 
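`log_loss` already compared `y_pred` against `y_true`; this hunk only folds `sample_weight` into the existing call. A sketch of a call the extended check is expected to reject (previously the mismatch would surface, if at all, only in the final weighted average):

    from sklearn.metrics import log_loss

    # expected: ValueError, lengths reported as [3, 3, 2]
    log_loss(["ham", "spam", "spam"],
             [[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]],
             sample_weight=[1.0, 1.0])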
@@ -1985,6 +1990,7 @@ def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
     y_prob = column_or_1d(y_prob)
     assert_all_finite(y_true)
     assert_all_finite(y_prob)
+    check_consistent_length(y_true, y_prob, sample_weight)
 
     if pos_label is None:
         pos_label = y_true.max()
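Note that unlike the other hunks, this one appears to add the first length comparison of any kind to `brier_score_loss`: `column_or_1d` and `assert_all_finite` each validate a single array in isolation, so a `y_true`/`y_prob` mismatch previously went undetected until the arrays were combined. A sketch of the new failure mode:

    from sklearn.metrics import brier_score_loss

    # expected with this patch: ValueError, lengths reported as [3, 2]
    brier_score_loss([1, 0, 1], [0.9, 0.1])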