@@ -1,4 +1,4 @@
- """Testing for the VotingClassifier and VotingRegressor"""
+ """Testing for the VotingClassifier"""

import pytest
import numpy as np
@@ -11,25 +11,21 @@
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
- from sklearn.ensemble import VotingClassifier, VotingRegressor
+ from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
- from sklearn.model_selection import cross_val_score, train_test_split
+ from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
- from sklearn.dummy import DummyRegressor


- # Load datasets
+ # Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target

- boston = datasets.load_boston()
- X_r, y_r = boston.data, boston.target
-

@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
@@ -46,7 +42,7 @@ def test_estimator_init():
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
-     msg = ('Number of `estimators` and weights must be equal'
+     msg = ('Number of classifiers and weights must be equal'
           '; got 2 weights, 1 estimators')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

@@ -80,19 +76,9 @@ def test_notfitted():
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='soft')
-     ereg = VotingRegressor([('dr', DummyRegressor())])
-     msg = ("This %s instance is not fitted yet. Call \'fit\'"
+     msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
           " with appropriate arguments before using this method.")
-     assert_raise_message(NotFittedError, msg % 'VotingClassifier',
-                          eclf.predict, X)
-     assert_raise_message(NotFittedError, msg % 'VotingClassifier',
-                          eclf.predict_proba, X)
-     assert_raise_message(NotFittedError, msg % 'VotingClassifier',
-                          eclf.transform, X)
-     assert_raise_message(NotFittedError, msg % 'VotingRegressor',
-                          ereg.predict, X_r)
-     assert_raise_message(NotFittedError, msg % 'VotingRegressor',
-                          ereg.transform, X_r)
+     assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)


@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@@ -139,38 +125,6 @@ def test_weights_iris():
    assert_almost_equal(scores.mean(), 0.93, decimal=2)


- def test_weights_regressor():
-     """Check weighted average regression prediction on boston dataset."""
-     reg1 = DummyRegressor(strategy='mean')
-     reg2 = DummyRegressor(strategy='median')
-     reg3 = DummyRegressor(strategy='quantile', quantile=.2)
-     ereg = VotingRegressor([('mean', reg1), ('median', reg2),
-                             ('quantile', reg3)], weights=[1, 2, 10])
-
-     X_r_train, X_r_test, y_r_train, y_r_test = \
-         train_test_split(X_r, y_r, test_size=.25)
-
-     reg1_pred = reg1.fit(X_r_train, y_r_train).predict(X_r_test)
-     reg2_pred = reg2.fit(X_r_train, y_r_train).predict(X_r_test)
-     reg3_pred = reg3.fit(X_r_train, y_r_train).predict(X_r_test)
-     ereg_pred = ereg.fit(X_r_train, y_r_train).predict(X_r_test)
-
-     avg = np.average(np.asarray([reg1_pred, reg2_pred, reg3_pred]), axis=0,
-                      weights=[1, 2, 10])
-     assert_almost_equal(ereg_pred, avg, decimal=2)
-
-     ereg_weights_none = VotingRegressor([('mean', reg1), ('median', reg2),
-                                          ('quantile', reg3)], weights=None)
-     ereg_weights_equal = VotingRegressor([('mean', reg1), ('median', reg2),
-                                           ('quantile', reg3)],
-                                          weights=[1, 1, 1])
-     ereg_weights_none.fit(X_r_train, y_r_train)
-     ereg_weights_equal.fit(X_r_train, y_r_train)
-     ereg_none_pred = ereg_weights_none.predict(X_r_test)
-     ereg_equal_pred = ereg_weights_equal.predict(X_r_test)
-     assert_almost_equal(ereg_none_pred, ereg_equal_pred, decimal=2)
-
-

@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
@pytest.mark.filterwarnings('ignore:The default value of n_estimators')
@@ -428,7 +382,8 @@ def test_set_estimator_none():
    eclf2.set_params(voting='soft').fit(X, y)
    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
-     msg = 'All estimators are None. At least one is required!'
+     msg = ('All estimators are None. At least one is required'
+            ' to be a classifier!')
    assert_raise_message(
        ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y)
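
A minimal, self-contained sketch (not part of the diff above) of the weighted-average behaviour that the removed test_weights_regressor exercised. It reuses the VotingRegressor and DummyRegressor API shown in the diff; the synthetic data from make_regression is only an illustrative stand-in for the Boston dataset used by the test.

import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.datasets import make_regression
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import VotingRegressor

# Illustrative synthetic regression data (stand-in for the Boston dataset).
X_r, y_r = make_regression(n_samples=100, n_features=4, random_state=0)

reg1 = DummyRegressor(strategy='mean')
reg2 = DummyRegressor(strategy='median')
ereg = VotingRegressor([('mean', reg1), ('median', reg2)], weights=[1, 2])

# VotingRegressor.predict returns the weighted average of the base
# regressors' predictions, which is what the removed test asserted
# against np.average.
reg1_pred = reg1.fit(X_r, y_r).predict(X_r)
reg2_pred = reg2.fit(X_r, y_r).predict(X_r)
ereg_pred = ereg.fit(X_r, y_r).predict(X_r)

expected = np.average(np.asarray([reg1_pred, reg2_pred]), axis=0,
                      weights=[1, 2])
assert_array_almost_equal(ereg_pred, expected)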