From c68da505bca00f0bcf471f89c8f304e733de9b43 Mon Sep 17 00:00:00 2001
From: Hanmin Qin
Date: Tue, 17 Jul 2018 19:09:38 +0800
Subject: [PATCH] EXA Get rid of warnings in plot_svm_anova.py

---
 examples/svm/plot_svm_anova.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/examples/svm/plot_svm_anova.py b/examples/svm/plot_svm_anova.py
index e223730eb82bf..45599f31f546d 100644
--- a/examples/svm/plot_svm_anova.py
+++ b/examples/svm/plot_svm_anova.py
@@ -10,17 +10,19 @@
 
 import numpy as np
 import matplotlib.pyplot as plt
-from sklearn import svm, datasets, feature_selection
+from sklearn.datasets import load_digits
+from sklearn.feature_selection import SelectPercentile, chi2
 from sklearn.model_selection import cross_val_score
 from sklearn.pipeline import Pipeline
+from sklearn.svm import SVC
+
 
 # #############################################################################
 # Import some data to play with
-digits = datasets.load_digits()
-y = digits.target
+X, y = load_digits(return_X_y=True)
 # Throw away data, to be in the curse of dimension settings
+X = X[:200]
 y = y[:200]
-X = digits.data[:200]
 n_samples = len(y)
 X = X.reshape((n_samples, -1))
 # add 200 non-informative features
@@ -30,9 +32,9 @@
 
 # Create a feature-selection transform and an instance of SVM that we
 # combine together to have an full-blown estimator
-transform = feature_selection.SelectPercentile(feature_selection.f_classif)
+transform = SelectPercentile(chi2)
 
-clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
+clf = Pipeline([('anova', transform), ('svc', SVC(gamma="auto"))])
 
 # #############################################################################
 # Plot the cross-validation score as a function of percentile of features