MNT Catches scipy 1.3.0 for using deprecated tostring method (#18755) · scikit-learn/scikit-learn@8aa6f07 · GitHub
[go: up one dir, main page]

Skip to content

Commit 8aa6f07

Browse files
authored
MNT Catches scipy 1.3.0 for using deprecated tostring method (#18755)
1 parent 5e85a65 commit 8aa6f07

File tree

4 files changed

+39
-9
lines changed

4 files changed

+39
-9
lines changed

sklearn/ensemble/tests/test_voting.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
"""Testing for the VotingClassifier and VotingRegressor"""
22

3+
import warnings
34
import pytest
45
import re
56
import numpy as np
@@ -370,7 +371,11 @@ def test_set_estimator_drop():
370371
('nb', clf3)],
371372
voting='hard', weights=[1, 1, 0.5])
372373
with pytest.warns(None) as record:
373-
eclf2.set_params(rf='drop').fit(X, y)
374+
with warnings.catch_warnings():
375+
# scipy 1.3.0 uses tostring which is deprecated in numpy
376+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
377+
eclf2.set_params(rf='drop').fit(X, y)
378+
374379
assert not record
375380
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
376381

@@ -382,7 +387,11 @@ def test_set_estimator_drop():
382387

383388
eclf1.set_params(voting='soft').fit(X, y)
384389
with pytest.warns(None) as record:
385-
eclf2.set_params(voting='soft').fit(X, y)
390+
with warnings.catch_warnings():
391+
# scipy 1.3.0 uses tostring which is deprecated in numpy
392+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
393+
eclf2.set_params(voting='soft').fit(X, y)
394+
386395
assert not record
387396
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
388397
assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
@@ -403,7 +412,10 @@ def test_set_estimator_drop():
403412
voting='soft', weights=[1, 0.5],
404413
flatten_transform=False)
405414
with pytest.warns(None) as record:
406-
eclf2.set_params(rf='drop').fit(X1, y1)
415+
with warnings.catch_warnings():
416+
# scipy 1.3.0 uses tostring which is deprecated in numpy
417+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
418+
eclf2.set_params(rf='drop').fit(X1, y1)
407419
assert not record
408420
assert_array_almost_equal(eclf1.transform(X1),
409421
np.array([[[0.7, 0.3], [0.3, 0.7]],

sklearn/gaussian_process/tests/test_gpc.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
44
# License: BSD 3 clause
55

6+
import warnings
67
import numpy as np
78

89
from scipy.optimize import approx_fprime
@@ -201,7 +202,10 @@ def test_warning_bounds():
201202
RBF(length_scale_bounds=[1e3, 1e5]))
202203
gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
203204
with pytest.warns(None) as record:
204-
gpc_sum.fit(X, y)
205+
with warnings.catch_warnings():
206+
# scipy 1.3.0 uses tostring which is deprecated in numpy
207+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
208+
gpc_sum.fit(X, y)
205209

206210
assert len(record) == 2
207211
assert record[0].message.args[0] == ("The optimal value found for "
@@ -224,7 +228,10 @@ def test_warning_bounds():
224228
gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)
225229

226230
with pytest.warns(None) as record:
227-
gpc_dims.fit(X_tile, y)
231+
with warnings.catch_warnings():
232+
# scipy 1.3.0 uses tostring which is deprecated in numpy
233+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
234+
gpc_dims.fit(X_tile, y)
228235

229236
assert len(record) == 2
230237
assert record[0].message.args[0] == ("The optimal value found for "

sklearn/gaussian_process/tests/test_gpr.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
import sys
88
import numpy as np
9+
import warnings
910

1011
from scipy.optimize import approx_fprime
1112

@@ -486,7 +487,10 @@ def test_warning_bounds():
486487
RBF(length_scale_bounds=[1e3, 1e5]))
487488
gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
488489
with pytest.warns(None) as record:
489-
gpr_sum.fit(X, y)
490+
with warnings.catch_warnings():
491+
# scipy 1.3.0 uses tostring which is deprecated in numpy
492+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
493+
gpr_sum.fit(X, y)
490494

491495
assert len(record) == 2
492496
assert record[0].message.args[0] == ("The optimal value found for "
@@ -509,7 +513,10 @@ def test_warning_bounds():
509513
gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
510514

511515
with pytest.warns(None) as record:
512-
gpr_dims.fit(X_tile, y)
516+
with warnings.catch_warnings():
517+
# scipy 1.3.0 uses tostring which is deprecated in numpy
518+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
519+
gpr_dims.fit(X_tile, y)
513520

514521
assert len(record) == 2
515522
assert record[0].message.args[0] == ("The optimal value found for "

sklearn/linear_model/tests/test_logistic.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import os
22
import sys
3+
import warnings
34
import numpy as np
45
from numpy.testing import assert_allclose, assert_almost_equal
56
from numpy.testing import assert_array_almost_equal, assert_array_equal
@@ -391,8 +392,11 @@ def test_logistic_regression_path_convergence_fail():
391392
# advice (scaling the data) and to the logistic regression specific
392393
# documentation that includes hints on the solver configuration.
393394
with pytest.warns(ConvergenceWarning) as record:
394-
_logistic_regression_path(
395-
X, y, Cs=Cs, tol=0., max_iter=1, random_state=0, verbose=0)
395+
with warnings.catch_warnings():
396+
# scipy 1.3.0 uses tostring which is deprecated in numpy
397+
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
398+
_logistic_regression_path(
399+
X, y, Cs=Cs, tol=0., max_iter=1, random_state=0, verbose=0)
396400

397401
assert len(record) == 1
398402
warn_msg = record[0].message.args[0]

0 commit comments

Comments (0)