Manual fixes · rth/scikit-learn@9a63b6e

Commit 9a63b6e

Manual fixes
1 parent be7bed1 · commit 9a63b6e

File tree

12 files changed: +19 -16 lines changed
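
Every hunk below applies the same sphinx-gallery convention: a "# %%" line opens a new notebook-style cell, and the run of "#" comment lines right after it is rendered as text. A blank line ends that run, so blank lines inside a multi-paragraph text block are replaced with a bare "#", and missing "# %%" separators are added. A minimal illustrative sketch of the pattern (not taken from any one of the files):

# %%
# Title of a rendered text cell
#
# The bare "#" above keeps the comment block continuous, so this second
# paragraph still renders as text; a truly blank line would end the block
# and the following "#" lines would be treated as code comments instead.
result = 1 + 1  # regular code resumes after the comment block
print(result)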

examples/applications/plot_face_recognition.py

Lines changed: 1 addition & 1 deletion

@@ -72,7 +72,7 @@
 
 # %%
 # Split into a training set and a test set using a stratified k fold
-
+#
 # split into a training and testing set
 X_train, X_test, y_train, y_test = train_test_split(
     X, y, test_size=0.25, random_state=42)
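
The context comment mentions a stratified split while the call shown here does not pass one. For reference, a minimal sketch of an explicitly stratified split with train_test_split on hypothetical stand-in data (the stratify argument is standard scikit-learn API, not part of this commit):

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# hypothetical stand-in for the faces data
X, y = make_classification(n_samples=200, n_classes=2, random_state=0)

# stratify=y keeps the class proportions equal in the train and test splits
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y)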

examples/applications/plot_stock_market.py

Lines changed: 4 additions & 2 deletions

@@ -78,7 +78,7 @@
 
 # %%
 # Retrieve the data from Internet
-
+#
 # The data is from 2003 - 2008. This is reasonably calm: (not too long ago so
 # that we get high-tech firms, and before the 2008 crash). This kind of
 # historical data can be obtained for from APIs like the quandl.com and

@@ -164,6 +164,7 @@
 # Learn a graphical structure from the correlations
 edge_model = covariance.GraphicalLassoCV()
 
+# %%
 # standardize the time series: using correlations rather than covariance
 # is more efficient for structure recovery
 X = variation.copy().T

@@ -182,7 +183,7 @@
 # %%
 # Find a low-dimension embedding for visualization: find the best position of
 # the nodes (the stocks) on a 2D plane
-
+#
 # We use a dense eigen_solver to achieve reproducibility (arpack is
 # initiated with random vectors that we don't control). In addition, we
 # use a large number of neighbors to capture the large-scale structure.

@@ -223,6 +224,7 @@
 lc.set_linewidths(15 * values)
 ax.add_collection(lc)
 
+# %%
 # Add a label to each node. The challenge here is that we want to
 # position the labels to avoid overlap with other labels
 for index, (name, label, (x, y)) in enumerate(
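
The second hunk's context is the standardization step that the new "# %%" turns into its own cell. A minimal runnable sketch of that point, with random data standing in for the stock variations: dividing each series by its standard deviation makes the empirical covariance a correlation matrix before GraphicalLassoCV estimates the sparse structure.

import numpy as np
from sklearn import covariance

rng = np.random.RandomState(0)
variation = rng.randn(5, 60)  # hypothetical stand-in: 5 "stocks" x 60 daily variations

X = variation.copy().T
X /= X.std(axis=0)  # per-series scaling: the empirical covariance of X is now a correlation matrix

edge_model = covariance.GraphicalLassoCV()
edge_model.fit(X)
print(edge_model.covariance_.shape)  # (5, 5) regularized covariance estimate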

examples/cluster/plot_linkage_comparison.py

Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@
 
 # %%
 # Run the clustering and plot
-
+#
 # Set up cluster parameters
 plt.figure(figsize=(9 * 1.3 + 2, 14.5))
 plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
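
For context on what this example sweeps, a minimal sketch comparing the linkage strategies of AgglomerativeClustering on hypothetical two-moons data (not the example's own parameter grid):

from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
for linkage in ("ward", "complete", "average", "single"):
    labels = AgglomerativeClustering(n_clusters=2, linkage=linkage).fit_predict(X)
    print(linkage, (labels == 0).sum(), (labels == 1).sum())  # cluster sizes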

examples/cluster/plot_mean_shift.py

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@
 
 # %%
 # Compute clustering with MeanShift
-
+#
 # The following bandwidth can be automatically detected using
 bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
 
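A minimal self-contained sketch of the pattern this hunk documents (make_blobs is a hypothetical stand-in for the example's X): estimate_bandwidth picks the kernel width, then MeanShift clusters with it.

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.6, random_state=0)

bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
print("estimated clusters:", len(np.unique(ms.labels_)))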

examples/covariance/plot_covariance_estimation.py

Lines changed: 2 additions & 2 deletions

@@ -66,7 +66,7 @@
 
 # %%
 # Compute the likelihood on test data
-
+#
 # spanning a range of possible shrinkage coefficient values
 shrinkages = np.logspace(-2, 0, 30)
 negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)

@@ -80,7 +80,7 @@
 
 # %%
 # Compare different approaches to setting the parameter
-
+#
 # GridSearch for an optimal shrinkage coefficient
 tuned_parameters = [{'shrinkage': shrinkages}]
 cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
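
The two hunks' contexts together outline the example's comparison. A minimal sketch of that idea on hypothetical Gaussian data, placing the cross-validated grid search next to the closed-form Ledoit-Wolf and OAS shrinkage estimates:

import numpy as np
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X_train = rng.randn(40, 20)  # hypothetical Gaussian training data

shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), [{'shrinkage': shrinkages}])
cv.fit(X_train)

print("grid search:", cv.best_estimator_.shrinkage)
print("Ledoit-Wolf:", LedoitWolf().fit(X_train).shrinkage_)
print("OAS:        ", OAS().fit(X_train).shrinkage_)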

examples/cross_decomposition/plot_compare_cross_decomposition.py

Lines changed: 2 additions & 1 deletion

@@ -48,14 +48,15 @@
 
 # %%
 # Canonical (symmetric) PLS
-
+#
 # Transform data
 # ~~~~~~~~~~~~~~
 plsca = PLSCanonical(n_components=2)
 plsca.fit(X_train, Y_train)
 X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
 X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
 
+# %%
 # Scatter plot of scores
 # ~~~~~~~~~~~~~~~~~~~~~~
 # 1) On diagonal plot X vs Y scores on each components
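
The new "# %%" splits the transform step from the scatter plots. As a reference for the transform call in the context, a minimal sketch with hypothetical correlated blocks: PLSCanonical.transform accepts both X and Y and returns scores for each in the shared latent space.

import numpy as np
from sklearn.cross_decomposition import PLSCanonical

rng = np.random.RandomState(0)
X = rng.randn(100, 4)                        # hypothetical stand-in data
Y = X @ rng.randn(4, 4) + rng.randn(100, 4)  # second block, correlated with X

plsca = PLSCanonical(n_components=2)
plsca.fit(X, Y)
X_r, Y_r = plsca.transform(X, Y)  # scores for both blocks
print(X_r.shape, Y_r.shape)       # (100, 2) (100, 2)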

examples/exercises/plot_cv_diabetes.py

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@
 
 # %%
 # Bonus: how much can you trust the selection of alpha?
-
+#
 # To answer this question we use the LassoCV object that sets its alpha
 # parameter automatically from the data by internal cross-validation (i.e. it
 # performs cross-validation on the training data it receives).
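
The context spells out the example's trust check: refit LassoCV on different outer folds and see whether the selected alpha is stable. A minimal sketch of that idea, assuming the diabetes data the exercise uses:

from sklearn.datasets import load_diabetes
from sklearn.linear_model import LassoCV
from sklearn.model_selection import KFold

X, y = load_diabetes(return_X_y=True)
lasso_cv = LassoCV(random_state=0)

# if alpha_ varies a lot across outer folds, the selection is data-sensitive
for k, (train, test) in enumerate(KFold(3).split(X, y)):
    lasso_cv.fit(X[train], y[train])
    print(f"fold {k}: alpha={lasso_cv.alpha_:.5f}, "
          f"test score={lasso_cv.score(X[test], y[test]):.3f}")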

examples/feature_selection/plot_feature_selection.py

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@
 
 # %%
 # Import some data to play with
-
+#
 # The iris dataset
 X, y = load_iris(return_X_y=True)
 
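A minimal sketch of univariate selection on the same iris data; SelectKBest and f_classif are assumptions for illustration, the example's own selector may differ:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
selector = SelectKBest(f_classif, k=2).fit(X, y)
print(selector.scores_)        # univariate ANOVA F-scores per feature
print(selector.get_support())  # mask of the k=2 selected features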

examples/linear_model/plot_ard.py

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@
 
 # %%
 # Generating simulated data with Gaussian weights
-
+#
 # Parameters of the example
 np.random.seed(0)
 n_samples, n_features = 100, 100
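
A minimal sketch of the simulation this hunk introduces, fit with ARDRegression; the sparse-weight construction is an assumption for illustration, the example's exact setup may differ:

import numpy as np
from sklearn.linear_model import ARDRegression

np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features)
w = np.zeros(n_features)
w[:10] = np.random.randn(10)            # a few relevant Gaussian weights, rest zero
y = X @ w + 0.01 * np.random.randn(n_samples)

clf = ARDRegression().fit(X, y)          # ARD prunes the irrelevant coefficients
print((np.abs(clf.coef_) > 1e-3).sum(), "coefficients kept")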

examples/linear_model/plot_lasso_model_selection.py

Lines changed: 2 additions & 2 deletions

@@ -98,7 +98,7 @@ def plot_ic_criterion(model, name, color):
 
 # %%
 # LassoCV: coordinate descent
-
+#
 # Compute paths
 print("Computing regularization path using the coordinate descent lasso...")
 t1 = time.time()

@@ -125,7 +125,7 @@ def plot_ic_criterion(model, name, color):
 
 # %%
 # LassoLarsCV: least angle regression
-
+#
 # Compute paths
 print("Computing regularization path using the Lars lasso...")
 t1 = time.time()
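
A minimal sketch of the timing comparison these two hunks document, on the diabetes data (the dataset is an assumption; any regression data works): both estimators select alpha by cross-validation, LassoCV along a coordinate-descent grid and LassoLarsCV along the exact LARS path.

import time
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LassoCV, LassoLarsCV

X, y = load_diabetes(return_X_y=True)

for Model in (LassoCV, LassoLarsCV):
    t1 = time.time()
    model = Model(cv=5).fit(X, y)
    print(f"{Model.__name__}: alpha={model.alpha_:.5f} ({time.time() - t1:.3f}s)")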
