DOC examples with correct notebook style · scikit-learn/scikit-learn@58f7bc0 · GitHub


Commit 58f7bc0

DOC examples with correct notebook style
1 parent 9525461 commit 58f7bc0

4 files changed: +3, -10 lines changed


examples/cluster/plot_dict_face_patches.py

Lines changed: 2 additions & 0 deletions
@@ -34,6 +34,7 @@
 
 ###############################################################################
 # Learn the dictionary of images
+# ------------------------------
 
 print('Learning the dictionary... ')
 rng = np.random.RandomState(0)
@@ -68,6 +69,7 @@
 
 ###############################################################################
 # Plot the results
+# ----------------
 plt.figure(figsize=(4.2, 4))
 for i, patch in enumerate(kmeans.cluster_centers_):
     plt.subplot(9, 9, i + 1)
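
For reference, sphinx-gallery treats a comment block introduced by a line of roughly 20 or more # characters as a reST text cell, so the title comment needs a dashed underline to render as a section heading; the sketch below (illustrative, based only on the hunks above) shows the pattern this commit applies.

###############################################################################
# Learn the dictionary of images
# ------------------------------
# The comment block after the separator is rendered as reST in the generated
# notebook/HTML page; the dashed underline marks the line above it as a
# section heading rather than a plain comment.
print('Learning the dictionary... ')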

examples/feature_selection/plot_feature_selection.py

Lines changed: 0 additions & 4 deletions
@@ -27,7 +27,6 @@
 from sklearn import datasets, svm
 from sklearn.feature_selection import SelectPercentile, f_classif
 
-###############################################################################
 # import some data to play with
 
 # The iris dataset
@@ -40,13 +39,11 @@
 X = np.hstack((iris.data, E))
 y = iris.target
 
-###############################################################################
 plt.figure(1)
 plt.clf()
 
 X_indices = np.arange(X.shape[-1])
 
-###############################################################################
 # Univariate feature selection with F-test for feature scoring
 # We use the default selection function: the 10% most significant features
 selector = SelectPercentile(f_classif, percentile=10)
@@ -57,7 +54,6 @@
         label=r'Univariate score ($-Log(p_{value})$)', color='darkorange',
         edgecolor='black')
 
-###############################################################################
 # Compare to the weights of an SVM
 clf = svm.SVC(kernel='linear')
 clf.fit(X, y)
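
For context, the selection step referenced in these hunks can be run on its own; a minimal self-contained sketch follows (the number of added noise features and the random seed are illustrative, not taken from the example file).

import numpy as np
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectPercentile, f_classif

iris = load_iris()
rng = np.random.RandomState(0)
# Pad the 4 informative iris features with noisy ones (count is illustrative).
E = rng.uniform(0, 0.1, size=(iris.data.shape[0], 20))
X = np.hstack((iris.data, E))
y = iris.target

# Keep the 10% highest-scoring features according to the ANOVA F-test.
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
print(scores[:4])  # the real iris features score far above the noise columns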

examples/feature_selection/plot_permutation_test_for_classification.py

Lines changed: 0 additions & 2 deletions
@@ -25,7 +25,6 @@
 from sklearn import datasets
 
 
-##############################################################################
 # Loading a dataset
 iris = datasets.load_iris()
 X = iris.data
@@ -47,7 +46,6 @@
 
 print("Classification score %s (pvalue : %s)" % (score, pvalue))
 
-###############################################################################
 # View histogram of permutation scores
 plt.hist(permutation_scores, 20, label='Permutation scores',
          edgecolor='black')
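
The score, pvalue and permutation_scores referenced in this hunk match the outputs of sklearn.model_selection.permutation_test_score; a minimal sketch of such a call is shown below (the classifier, CV split and permutation count are illustrative, not copied from the example file).

from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedKFold, permutation_test_score
from sklearn.svm import SVC

iris = load_iris()
X, y = iris.data, iris.target

# Refit an SVC on label-permuted data to build a null score distribution.
score, permutation_scores, pvalue = permutation_test_score(
    SVC(kernel='linear'), X, y, scoring="accuracy",
    cv=StratifiedKFold(2), n_permutations=100)
print("Classification score %s (pvalue : %s)" % (score, pvalue))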

examples/plot_kernel_ridge_regression.py

Lines changed: 1 addition & 4 deletions
@@ -48,7 +48,6 @@
 
 rng = np.random.RandomState(0)
 
-#############################################################################
 # Generate sample data
 X = 5 * rng.rand(10000, 1)
 y = np.sin(X).ravel()
@@ -58,7 +57,6 @@
 
 X_plot = np.linspace(0, 5, 100000)[:, None]
 
-#############################################################################
 # Fit regression model
 train_size = 100
 svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
@@ -97,8 +95,7 @@
       % (X_plot.shape[0], kr_predict))
 
 
-#############################################################################
-# look at the results
+# Look at the results
 sv_ind = svr.best_estimator_.support_
 plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
             zorder=2)
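
The svr object whose best_estimator_.support_ is read in the last hunk is a GridSearchCV over SVR, and the kr_predict in the first hunk suggests a kernel ridge counterpart; a minimal sketch of that fitting step follows (parameter grids and sample sizes are illustrative, not copied from the example file).

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = 5 * rng.rand(1000, 1)
y = np.sin(X).ravel()

train_size = 100
# Grid-search C/alpha and gamma for both kernel models (grids are illustrative).
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
                   param_grid={"C": [1e0, 1e1, 1e2, 1e3],
                               "gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
                  param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
                              "gamma": np.logspace(-2, 2, 5)})
svr.fit(X[:train_size], y[:train_size])
kr.fit(X[:train_size], y[:train_size])

# Indices of the SVR support vectors, as used for the scatter plot above.
sv_ind = svr.best_estimator_.support_
print(len(sv_ind), svr.best_params_, kr.best_params_)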
