@@ -23,6 +23,11 @@ def X_y_data():
    return X, y


+@pytest.fixture
+def default_solver():
+    return "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
+
+
@pytest.mark.parametrize(
    "params, err_msg",
    [
@@ -40,6 +45,10 @@ def X_y_data():
        ),
    ],
)
+@pytest.mark.filterwarnings(
+    # FIXME (1.4): remove once we change the default solver to "highs"
+    "ignore:The default solver will change from 'interior-point'"
+)
def test_init_parameters_validation(X_y_data, params, err_msg):
    """Test that invalid init parameters raise errors."""
    X, y = X_y_data
@@ -85,11 +94,13 @@ def test_too_new_solver_methods_raise_error(X_y_data, solver):
        [0.5, 100, 2, 0],
    ],
)
-def test_quantile_toy_example(quantile, alpha, intercept, coef):
+def test_quantile_toy_example(quantile, alpha, intercept, coef, default_solver):
    # test how different parameters affect a small intuitive example
    X = [[0], [1], [1]]
    y = [1, 2, 11]
-    model = QuantileRegressor(quantile=quantile, alpha=alpha).fit(X, y)
+    model = QuantileRegressor(
+        quantile=quantile, alpha=alpha, solver=default_solver
+    ).fit(X, y)
    assert_allclose(model.intercept_, intercept, atol=1e-2)
    if coef is not None:
        assert_allclose(model.coef_[0], coef, atol=1e-2)
@@ -99,13 +110,15 @@ def test_quantile_toy_example(quantile, alpha, intercept, coef):


@pytest.mark.parametrize("fit_intercept", [True, False])
-def test_quantile_equals_huber_for_low_epsilon(fit_intercept):
+def test_quantile_equals_huber_for_low_epsilon(fit_intercept, default_solver):
    X, y = make_regression(n_samples=100, n_features=20, random_state=0, noise=1.0)
    alpha = 1e-4
    huber = HuberRegressor(
        epsilon=1 + 1e-4, alpha=alpha, fit_intercept=fit_intercept
    ).fit(X, y)
-    quant = QuantileRegressor(alpha=alpha, fit_intercept=fit_intercept).fit(X, y)
+    quant = QuantileRegressor(
+        alpha=alpha, fit_intercept=fit_intercept, solver=default_solver
+    ).fit(X, y)
    assert_allclose(huber.coef_, quant.coef_, atol=1e-1)
    if fit_intercept:
        assert huber.intercept_ == approx(quant.intercept_, abs=1e-1)
@@ -114,26 +127,26 @@ def test_quantile_equals_huber_for_low_epsilon(fit_intercept):


@pytest.mark.parametrize("q", [0.5, 0.9, 0.05])
-def test_quantile_estimates_calibration(q):
+def test_quantile_estimates_calibration(q, default_solver):
    # Test that model estimates percentage of points below the prediction
    X, y = make_regression(n_samples=1000, n_features=20, random_state=0, noise=1.0)
    quant = QuantileRegressor(
        quantile=q,
        alpha=0,
-        solver_options={"lstsq": False},
+        solver=default_solver,
    ).fit(X, y)
    assert np.mean(y < quant.predict(X)) == approx(q, abs=1e-2)


-def test_quantile_sample_weight():
+def test_quantile_sample_weight(default_solver):
    # test that with unequal sample weights we still estimate weighted fraction
    n = 1000
    X, y = make_regression(n_samples=n, n_features=5, random_state=0, noise=10.0)
    weight = np.ones(n)
    # when we increase weight of upper observations,
    # estimate of quantile should go up
    weight[y > y.mean()] = 100
-    quant = QuantileRegressor(quantile=0.5, alpha=1e-8, solver_options={"lstsq": False})
+    quant = QuantileRegressor(quantile=0.5, alpha=1e-8, solver=default_solver)
    quant.fit(X, y, sample_weight=weight)
    fraction_below = np.mean(y < quant.predict(X))
    assert fraction_below > 0.5
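The two tests above rely on a defining property of quantile regression: at level q, roughly a fraction q of the training targets falls below the fitted predictions. A minimal self-contained sketch of that check (not part of the diff; it assumes a scikit-learn version that ships QuantileRegressor and SciPy >= 1.6.0 for the "highs" solver):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import QuantileRegressor

X, y = make_regression(n_samples=500, n_features=5, random_state=0, noise=1.0)
# With no regularization (alpha=0), roughly 80% of training targets should
# fall below the predictions of the 0.8-quantile model.
model = QuantileRegressor(quantile=0.8, alpha=0, solver="highs").fit(X, y)
print(np.mean(y < model.predict(X)))  # expected to be close to 0.8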
@@ -146,7 +159,7 @@ def test_quantile_sample_weight():
    reason="The `highs` solver is available from the 1.6.0 scipy version",
)
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
-def test_asymmetric_error(quantile):
+def test_asymmetric_error(quantile, default_solver):
    """Test quantile regression for asymmetrically distributed targets."""
    n_samples = 1000
    rng = np.random.RandomState(42)
@@ -171,7 +184,7 @@ def test_asymmetric_error(quantile):
    model = QuantileRegressor(
        quantile=quantile,
        alpha=0,
-        solver="highs",
+        solver=default_solver,
    ).fit(X, y)
    # This test can be made to pass with any solver but in the interest
    # of sparing continuous integration resources, the test is performed
@@ -206,7 +219,7 @@ def func(coef):


@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
-def test_equivariance(quantile):
+def test_equivariance(quantile, default_solver):
    """Test equivariance of quantile regression.

    See Koenker (2005) Quantile Regression, Chapter 2.2.3.
@@ -223,7 +236,7 @@ def test_equivariance(quantile):
    )
    # make y asymmetric
    y += rng.exponential(scale=100, size=y.shape)
-    params = dict(alpha=0, solver_options={"lstsq": True, "tol": 1e-10})
+    params = dict(alpha=0, solver=default_solver)
    model1 = QuantileRegressor(quantile=quantile, **params).fit(X, y)

    # coef(q; a*y, X) = a * coef(q; y, X)
@@ -252,6 +265,7 @@ def test_equivariance(quantile):
    assert_allclose(model2.coef_, np.linalg.solve(A, model1.coef_), rtol=1e-5)


+@pytest.mark.filterwarnings("ignore:`method='interior-point'` is deprecated")
def test_linprog_failure():
    """Test that linprog fails."""
    X = np.linspace(0, 10, num=10).reshape(-1, 1)
@@ -275,12 +289,14 @@ def test_linprog_failure():
)
@pytest.mark.parametrize("solver", ["highs", "highs-ds", "highs-ipm"])
@pytest.mark.parametrize("fit_intercept", [True, False])
-def test_sparse_input(sparse_format, solver, fit_intercept):
+def test_sparse_input(sparse_format, solver, fit_intercept, default_solver):
    """Test that sparse and dense X give the same results."""
    X, y = make_regression(n_samples=100, n_features=20, random_state=1, noise=1.0)
    X_sparse = sparse_format(X)
    alpha = 1e-4
-    quant_dense = QuantileRegressor(alpha=alpha, fit_intercept=fit_intercept).fit(X, y)
+    quant_dense = QuantileRegressor(
+        alpha=alpha, fit_intercept=fit_intercept, solver=default_solver
+    ).fit(X, y)
    quant_sparse = QuantileRegressor(
        alpha=alpha, fit_intercept=fit_intercept, solver=solver
    ).fit(X_sparse, y)
@@ -289,3 +305,26 @@ def test_sparse_input(sparse_format, solver, fit_intercept):
    assert quant_sparse.intercept_ == approx(quant_dense.intercept_)
    # check that we still predict the right fraction
    assert 0.45 <= np.mean(y < quant_sparse.predict(X_sparse)) <= 0.55
+
+
+# TODO (1.4): remove this test in 1.4
+def test_warning_new_default(X_y_data):
+    """Check that we warn about the new default solver."""
+    X, y = X_y_data
+    model = QuantileRegressor()
+    with pytest.warns(FutureWarning, match="The default solver will change"):
+        model.fit(X, y)
+
+
+def test_error_interior_point_future(X_y_data, monkeypatch):
+    """Check that we raise a proper error when requesting
+    `solver='interior-point'` with SciPy >= 1.11.
+    """
+    X, y = X_y_data
+    import sklearn.linear_model._quantile
+
+    with monkeypatch.context() as m:
+        m.setattr(sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0"))
+        err_msg = "Solver interior-point is not anymore available in SciPy >= 1.11.0."
+        with pytest.raises(ValueError, match=err_msg):
+            QuantileRegressor(solver="interior-point").fit(X, y)
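The last test above temporarily pins the module-level `sp_version` attribute so the SciPy >= 1.11 error path can be exercised without actually installing that version. A minimal sketch of the same monkeypatch pattern (not part of the diff; the test name is hypothetical, and `parse_version` is assumed to be importable from `sklearn.utils.fixes` as in the real module):

import sklearn.linear_model._quantile
from sklearn.utils.fixes import parse_version


def test_sp_version_pin_sketch(monkeypatch):  # hypothetical test name
    original = sklearn.linear_model._quantile.sp_version
    with monkeypatch.context() as m:
        # Inside the context, the module sees a fake SciPy 1.11.0 version.
        m.setattr(sklearn.linear_model._quantile, "sp_version", parse_version("1.11.0"))
        assert sklearn.linear_model._quantile.sp_version == parse_version("1.11.0")
    # On exit, the original module attribute is restored automatically.
    assert sklearn.linear_model._quantile.sp_version == original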
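For reference, the `default_solver` fixture added at the top of this diff gates on the installed SciPy version, since `linprog`'s "highs" methods only exist in SciPy >= 1.6.0. A standalone sketch of that gate (assuming `scipy` and `packaging` are installed; scikit-learn itself gets `sp_version` and `parse_version` from `sklearn.utils.fixes`):

import scipy
from packaging.version import parse as parse_version

# Prefer "highs" (available since SciPy 1.6.0); otherwise fall back to the
# legacy "interior-point" method, which SciPy deprecated and later removed.
sp_version = parse_version(scipy.__version__)
default_solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
print(default_solver)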