@@ -26,11 +26,6 @@ def X_y_data():
     return X, y
 
 
-@pytest.fixture
-def default_solver():
-    return "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
-
-
 @pytest.mark.skipif(
     parse_version(sp_version.base_version) >= parse_version("1.11"),
     reason="interior-point solver is not available in SciPy 1.11",
@@ -47,18 +42,6 @@ def test_incompatible_solver_for_sparse_input(X_y_data, solver, csc_container):
         QuantileRegressor(solver=solver).fit(X_sparse, y)
 
 
-@pytest.mark.parametrize("solver", ("highs-ds", "highs-ipm", "highs"))
-@pytest.mark.skipif(
-    sp_version >= parse_version("1.6.0"),
-    reason="Solvers are available as of scipy 1.6.0",
-)
-def test_too_new_solver_methods_raise_error(X_y_data, solver):
-    """Test that highs solver raises for scipy<1.6.0."""
-    X, y = X_y_data
-    with pytest.raises(ValueError, match="scipy>=1.6.0"):
-        QuantileRegressor(solver=solver).fit(X, y)
-
-
 @pytest.mark.parametrize(
     "quantile, alpha, intercept, coef",
     [
@@ -74,13 +57,11 @@ def test_too_new_solver_methods_raise_error(X_y_data, solver):
         [0.5, 100, 2, 0],
     ],
 )
-def test_quantile_toy_example(quantile, alpha, intercept, coef, default_solver):
+def test_quantile_toy_example(quantile, alpha, intercept, coef):
     # test how different parameters affect a small intuitive example
     X = [[0], [1], [1]]
     y = [1, 2, 11]
-    model = QuantileRegressor(
-        quantile=quantile, alpha=alpha, solver=default_solver
-    ).fit(X, y)
+    model = QuantileRegressor(quantile=quantile, alpha=alpha).fit(X, y)
     assert_allclose(model.intercept_, intercept, atol=1e-2)
     if coef is not None:
         assert_allclose(model.coef_[0], coef, atol=1e-2)
@@ -90,15 +71,13 @@ def test_quantile_toy_example(quantile, alpha, intercept, coef, default_solver):
 
 
 @pytest.mark.parametrize("fit_intercept", [True, False])
-def test_quantile_equals_huber_for_low_epsilon(fit_intercept, default_solver):
+def test_quantile_equals_huber_for_low_epsilon(fit_intercept):
     X, y = make_regression(n_samples=100, n_features=20, random_state=0, noise=1.0)
     alpha = 1e-4
     huber = HuberRegressor(
         epsilon=1 + 1e-4, alpha=alpha, fit_intercept=fit_intercept
     ).fit(X, y)
-    quant = QuantileRegressor(
-        alpha=alpha, fit_intercept=fit_intercept, solver=default_solver
-    ).fit(X, y)
+    quant = QuantileRegressor(alpha=alpha, fit_intercept=fit_intercept).fit(X, y)
     assert_allclose(huber.coef_, quant.coef_, atol=1e-1)
     if fit_intercept:
         assert huber.intercept_ == approx(quant.intercept_, abs=1e-1)
@@ -107,39 +86,31 @@ def test_quantile_equals_huber_for_low_epsilon(fit_intercept, default_solver):
 
 
 @pytest.mark.parametrize("q", [0.5, 0.9, 0.05])
-def test_quantile_estimates_calibration(q, default_solver):
+def test_quantile_estimates_calibration(q):
     # Test that model estimates percentage of points below the prediction
     X, y = make_regression(n_samples=1000, n_features=20, random_state=0, noise=1.0)
-    quant = QuantileRegressor(
-        quantile=q,
-        alpha=0,
-        solver=default_solver,
-    ).fit(X, y)
+    quant = QuantileRegressor(quantile=q, alpha=0).fit(X, y)
     assert np.mean(y < quant.predict(X)) == approx(q, abs=1e-2)
 
 
-def test_quantile_sample_weight(default_solver):
+def test_quantile_sample_weight():
     # test that with unequal sample weights we still estimate weighted fraction
     n = 1000
     X, y = make_regression(n_samples=n, n_features=5, random_state=0, noise=10.0)
     weight = np.ones(n)
     # when we increase weight of upper observations,
     # estimate of quantile should go up
     weight[y > y.mean()] = 100
-    quant = QuantileRegressor(quantile=0.5, alpha=1e-8, solver=default_solver)
+    quant = QuantileRegressor(quantile=0.5, alpha=1e-8)
     quant.fit(X, y, sample_weight=weight)
     fraction_below = np.mean(y < quant.predict(X))
     assert fraction_below > 0.5
     weighted_fraction_below = np.average(y < quant.predict(X), weights=weight)
     assert weighted_fraction_below == approx(0.5, abs=3e-2)
 
 
-@pytest.mark.skipif(
-    sp_version < parse_version("1.6.0"),
-    reason="The `highs` solver is available from the 1.6.0 scipy version",
-)
 @pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
-def test_asymmetric_error(quantile, default_solver):
+def test_asymmetric_error(quantile):
     """Test quantile regression for asymmetric distributed targets."""
     n_samples = 1000
     rng = np.random.RandomState(42)
@@ -164,7 +135,6 @@ def test_asymmetric_error(quantile, default_solver):
     model = QuantileRegressor(
         quantile=quantile,
         alpha=0,
-        solver=default_solver,
     ).fit(X, y)
     # This test can be made to pass with any solver but in the interest
     # of sparing continuous integration resources, the test is performed
@@ -199,7 +169,7 @@ def func(coef):
 
 
 @pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
-def test_equivariance(quantile, default_solver):
+def test_equivariance(quantile):
     """Test equivariace of quantile regression.
 
     See Koenker (2005) Quantile Regression, Chapter 2.2.3.
@@ -216,7 +186,7 @@ def test_equivariance(quantile, default_solver):
     )
     # make y asymmetric
     y += rng.exponential(scale=100, size=y.shape)
-    params = dict(alpha=0, solver=default_solver)
+    params = dict(alpha=0)
     model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y)
 
     # coef(q; a*y, X) = a * coef(q; y, X)
@@ -264,23 +234,17 @@ def test_linprog_failure():
 
 
 @skip_if_32bit
-@pytest.mark.skipif(
-    sp_version <= parse_version("1.6.0"),
-    reason="Solvers are available as of scipy 1.6.0",
-)
 @pytest.mark.parametrize(
     "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + COO_CONTAINERS
 )
 @pytest.mark.parametrize("solver", ["highs", "highs-ds", "highs-ipm"])
 @pytest.mark.parametrize("fit_intercept", [True, False])
-def test_sparse_input(sparse_container, solver, fit_intercept, default_solver):
+def test_sparse_input(sparse_container, solver, fit_intercept):
     """Test that sparse and dense X give same results."""
     X, y = make_regression(n_samples=100, n_features=20, random_state=1, noise=1.0)
     X_sparse = sparse_container(X)
     alpha = 1e-4
-    quant_dense = QuantileRegressor(
-        alpha=alpha, fit_intercept=fit_intercept, solver=default_solver
-    ).fit(X, y)
+    quant_dense = QuantileRegressor(alpha=alpha, fit_intercept=fit_intercept).fit(X, y)
     quant_sparse = QuantileRegressor(
         alpha=alpha, fit_intercept=fit_intercept, solver=solver
     ).fit(X_sparse, y)