@@ -964,7 +964,8 @@ The following example shows how to fit the majority rule classifier::
     >>> iris = datasets.load_iris()
     >>> X, y = iris.data[:, 1:3], iris.target

-    >>> clf1 = LogisticRegression(random_state=1)
+    >>> clf1 = LogisticRegression(solver='lbfgs', multi_class='multinomial',
+    ...                           random_state=1)
     >>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
     >>> clf3 = GaussianNB()
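The scoring loop in the next hunk refers to an `eclf` estimator that is defined in unchanged lines outside this diff. As a rough sketch (assuming the usual majority-rule setup rather than quoting the file), it is a hard-voting ensemble over the three classifiers above::

    >>> from sklearn.ensemble import VotingClassifier
    >>> # Hard (majority-rule) voting over clf1, clf2 and clf3 defined above;
    >>> # the exact construction lives in the unchanged lines of the document.
    >>> eclf = VotingClassifier(
    ...     estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...     voting='hard')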
@@ -973,10 +974,10 @@ The following example shows how to fit the majority rule classifier::
     >>> for clf, label in zip([clf1, clf2, clf3, eclf], ['Logistic Regression', 'Random Forest', 'naive Bayes', 'Ensemble']):
     ...     scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy')
     ...     print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
-    Accuracy: 0.90 (+/- 0.05) [Logistic Regression]
+    Accuracy: 0.95 (+/- 0.04) [Logistic Regression]
     Accuracy: 0.94 (+/- 0.04) [Random Forest]
     Accuracy: 0.91 (+/- 0.04) [naive Bayes]
-    Accuracy: 0.95 (+/- 0.05) [Ensemble]
+    Accuracy: 0.95 (+/- 0.04) [Ensemble]


 Weighted Average Probabilities (Soft Voting)
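The body of this soft-voting section falls outside the excerpt. As a hedged illustration of the idea (the estimator names reuse the first example and the weights are made up for illustration), `voting='soft'` averages the classifiers' predicted class probabilities, optionally weighted per estimator, and predicts the class with the largest average::

    >>> # Soft voting: weighted average of predict_proba across estimators,
    >>> # then argmax over the averaged probabilities.
    >>> eclf_soft = VotingClassifier(
    ...     estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...     voting='soft', weights=[2, 1, 2])
    >>> eclf_soft = eclf_soft.fit(X, y)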
@@ -1049,7 +1050,8 @@ The `VotingClassifier` can also be used together with `GridSearch` in order
 to tune the hyperparameters of the individual estimators::

     >>> from sklearn.model_selection import GridSearchCV
-    >>> clf1 = LogisticRegression(random_state=1)
+    >>> clf1 = LogisticRegression(solver='lbfgs', multi_class='multinomial',
+    ...                           random_state=1)
     >>> clf2 = RandomForestClassifier(random_state=1)
     >>> clf3 = GaussianNB()
     >>> eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft')
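The hunk stops before the grid search itself. A minimal sketch of how the tuning step typically continues, assuming an illustrative parameter grid (the `lr__C` / `rf__n_estimators` names follow the `<estimator>__<param>` convention; the values are not taken from the diff)::

    >>> # Tune hyperparameters of the individual estimators through the
    >>> # VotingClassifier using the nested '<name>__<param>' syntax.
    >>> params = {'lr__C': [1.0, 100.0], 'rf__n_estimators': [20, 200]}
    >>> grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    >>> grid = grid.fit(iris.data, iris.target)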