@@ -7,7 +7,6 @@
 from sklearn.utils.testing import assert_raises_regexp
 from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import ignore_warnings
-from sklearn.utils.testing import assert_equal
 from sklearn.utils.testing import assert_not_equal
 
 from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
@@ -270,13 +269,12 @@ def test_scorer_sample_weight():
             ignored = scorer(estimator[name], X_test[10:], y_test[10:])
             unweighted = scorer(estimator[name], X_test, y_test)
             assert_not_equal(weighted, unweighted,
-                             "scorer {0} behaves identically when called with "
-                             "sample weights: {1} vs {2}".format(name,
-                                                                 weighted,
-                                                                 unweighted))
-            assert_equal(weighted, ignored,
-                         "scorer {0} behaves differently when ignoring "
-                         "samples and setting sample_weight to 0: "
+                             msg="scorer {0} behaves identically when "
+                             "called with sample weights: {1} vs "
+                             "{2}".format(name, weighted, unweighted))
+            assert_almost_equal(weighted, ignored,
+                                err_msg="scorer {0} behaves differently when "
+                                "ignoring samples and setting sample_weight to 0: "
                          "{1} vs {2}".format(name, weighted, ignored))
 
         except TypeError as e:
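Why `assert_equal` became `assert_almost_equal`: scores are floating-point, and dropping samples outright versus keeping them with `sample_weight=0` follow different summation paths, so the two results can agree only up to rounding error. Below is a minimal, self-contained sketch of that distinction using plain NumPy; it is illustrative only, not part of the patch, and the array names are made up:

```python
import numpy as np
from numpy.testing import assert_almost_equal

rng = np.random.RandomState(0)
values = rng.rand(100)

# "Ignore" the first ten samples by giving them zero weight...
weights = np.ones(100)
weights[:10] = 0.0
zero_weighted = np.average(values, weights=weights)

# ...versus dropping them from the array entirely.
dropped = np.average(values[10:])

# The two averages are mathematically equal, but the summation order
# differs, so bit-for-bit equality may not hold; agreement to 7 decimal
# places (numpy's default) is the appropriate check.
assert_almost_equal(zero_weighted, dropped, decimal=7)
```

Note that the message keyword changes along with the assertion: the unittest-backed `assert_not_equal` takes `msg`, while NumPy's `assert_almost_equal` expects `err_msg`.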