|
8 | 8 | from sklearn.utils.testing import assert_array_equal
|
9 | 9 | from sklearn.utils.testing import assert_array_almost_equal
|
10 | 10 | from sklearn.utils.testing import assert_almost_equal
|
| 11 | +from sklearn.utils.testing import assert_array_less |
11 | 12 | from sklearn.utils.testing import SkipTest
|
| 13 | +from sklearn.utils import check_random_state |
12 | 14 | from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
|
13 | 15 | from sklearn.linear_model import Ridge
|
14 | 16 | from sklearn import datasets
|
@@ -60,6 +62,39 @@ def test_toy_bayesian_ridge_object():
|
60 | 62 | assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
|
61 | 63 |
|
62 | 64 |
|
def test_prediction_bayesian_ridge_ard_with_constant_input():
    # Edge case: when the target vector is constant, both BayesianRidge and
    # ARDRegression should predict that same constant for every sample.
    n_samples, n_features = 4, 5
    rng = check_random_state(42)
    target_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, target_value)
    expected = np.full(n_samples, target_value)

    for estimator in (BayesianRidge(), ARDRegression()):
        estimator.fit(X, y)
        assert_array_almost_equal(estimator.predict(X), expected)
| 80 | + |
def test_std_bayesian_ridge_ard_with_constant_input():
    # Edge case: with a constant target vector, the predictive standard
    # deviation of BayesianRidge and ARDRegression should be relatively
    # small (strictly below 0.01 is asserted here).
    n_samples, n_features = 4, 5
    rng = check_random_state(42)
    target_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, target_value)
    upper_bound = 0.01

    for estimator in (BayesianRidge(), ARDRegression()):
        estimator.fit(X, y)
        _, y_std = estimator.predict(X, return_std=True)
        assert_array_less(y_std, upper_bound)
| 97 | + |
63 | 98 | def test_toy_ard_object():
|
64 | 99 | # Test BayesianRegression ARD classifier
|
65 | 100 | X = np.array([[1], [2], [3]])
|
|
0 commit comments