@@ -101,13 +101,13 @@
 n_features = 300

 # L1 data (only 5 informative features)
-X_1, y_1 = datasets.make_classification(n_samples=n_samples, n_features=n_features,
-                                        n_informative=5, random_state=1)
+X_1, y_1 = datasets.make_classification(n_samples=n_samples,
+                                        n_features=n_features, n_informative=5, random_state=1)

 # L2 data: non sparse, but less features
 y_2 = np.sign(.5 - rnd.rand(n_samples))
-X_2 = rnd.randn(n_samples, n_features/5) + y_2[:, np.newaxis]
-X_2 += 5 * rnd.randn(n_samples, n_features/5)
+X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
+X_2 += 5 * rnd.randn(n_samples, n_features / 5)

 clf_sets = [(LinearSVC(penalty='L1', loss='L2', dual=False,
                        tol=1e-3),
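A side note on the `X_2` lines in this hunk: the change is only whitespace, and `n_features / 5` is still passed to `randn` as an array dimension. Under Python 2, which this example targets, `300 / 5` is the integer 60; under Python 3, true division yields the float 60.0 and NumPy rejects non-integer dimensions. A minimal Python 3-safe sketch, with `n_samples` and the RandomState seed assumed for illustration:

    import numpy as np

    n_samples, n_features = 100, 300  # assumed values from earlier in the example
    rnd = np.random.RandomState(1)    # illustrative seed

    # Floor division keeps the dimension an int on Python 2 and 3 alike;
    # rnd.randn(n_samples, n_features / 5) raises TypeError on Python 3.
    y_2 = np.sign(.5 - rnd.rand(n_samples))
    X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
    X_2 += 5 * rnd.randn(n_samples, n_features // 5)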
@@ -140,12 +140,11 @@
         pl.subplot(2, 1, subplotnum + 1)
         pl.xlabel('C')
         pl.ylabel('CV Score')
-        grid_cs = cs * float(scaler) # scale the C's
+        grid_cs = cs * float(scaler)  # scale the C's
         pl.semilogx(grid_cs, scores, label="fraction %.2f" %
                     train_size)
         pl.title('scaling=%s, penalty=%s, loss=%s' %
                  (name, clf.penalty, clf.loss))

 pl.legend(loc="best")
 pl.show()
-
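For context, `grid_cs = cs * float(scaler)` rescales the x-axis of candidate C values before plotting the cross-validation scores, so that the scaled and unscaled grids can be compared. A rough, self-contained sketch of that step, with the `(scaler, name)` pairs and the concrete numbers assumed for illustration (the real example derives them from its own `n_samples` and `train_size`):

    import numpy as np

    cs = np.logspace(-3, 0, 10)        # hypothetical grid of C values
    n_samples, train_size = 100, 0.7   # hypothetical sample count and CV fraction

    for scaler, name in [(1, 'No scaling'),
                         (n_samples * train_size, '1/n_samples')]:
        grid_cs = cs * float(scaler)  # scale the C's, as in the hunk above
        print(name, grid_cs[0], grid_cs[-1])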