STY Ensures that "flake8 ." works (#20298) · scikit-learn/scikit-learn@81dde3a · GitHub

Commit 81dde3a

STY Ensures that "flake8 ." works (#20298)
1 parent 5314987 commit 81dde3a

Showing 36 changed files with 61 additions and 39 deletions.
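
Nearly all of the 36 files below change for one of a handful of flake8 rules: E302/E305 (two blank lines around top-level definitions), E265 (block comments start with "# "), E501 (maximum line length), E127/E128 (continuation-line alignment), and F541 (f-string without placeholders). As a rough, hypothetical miniature of those rules — not code taken from the commit:

# E265: a block comment starts with "# ", not "#"
def double(x):
    return 2 * x


# E305: two blank lines separate the def above from this top-level code
print("Best params:")  # F541 fix: a plain string, no placeholder-less f-string
print(double(21))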

doc/conftest.py

Lines changed: 1 addition & 2 deletions

@@ -28,7 +28,6 @@ def setup_rcv1():
 
 
 def setup_twenty_newsgroups():
-    data_home = get_data_home()
     cache_path = _pkl_filepath(get_data_home(), CACHE_NAME)
     if not exists(cache_path):
        raise SkipTest("Skipping dataset loading doctests")
@@ -47,7 +46,7 @@ def setup_loading_other_datasets():
     try:
         import pandas  # noqa
     except ImportError:
-        raise SkipTest("Skipping loading_other_datasets.rst, " "pandas not installed")
+        raise SkipTest("Skipping loading_other_datasets.rst, pandas not installed")
 
 # checks SKLEARN_SKIP_NETWORK_TESTS to see if test should run
 run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"

examples/applications/plot_face_recognition.py

Lines changed: 1 addition & 0 deletions

@@ -149,6 +149,7 @@ def title(y_pred, y_test, target_names, i):
     true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
     return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
 
+
 prediction_titles = [title(y_pred, y_test, target_names, i)
                      for i in range(y_pred.shape[0])]

examples/applications/plot_out_of_core_classification.py

Lines changed: 0 additions & 1 deletion

@@ -52,7 +52,6 @@ def _not_in_sphinx():
 # run.
 
 
-
 class ReutersParser(HTMLParser):
     """Utility class to parse a SGML file and yield documents one at a time."""

examples/applications/svm_gui.py

Lines changed: 2 additions & 0 deletions

@@ -141,6 +141,7 @@ def refit(self):
 
 class View:
     """Test docstring. """
+
     def __init__(self, root, controller):
         f = Figure()
         ax = f.add_subplot(111)
@@ -333,5 +334,6 @@ def main(argv):
     if opts.output:
         model.dump_svmlight_file(opts.output)
 
+
 if __name__ == "__main__":
     main(sys.argv)

examples/applications/wikipedia_principal_eigenvector.py

Lines changed: 1 addition & 0 deletions

@@ -216,6 +216,7 @@ def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
 
     return scores
 
+
 print("Computing principal eigenvector score using a power iteration method")
 t0 = time()
 scores = centrality_scores(X, max_iter=100)

examples/calibration/plot_calibration_curve.py

Lines changed: 3 additions & 2 deletions

@@ -81,8 +81,8 @@ def plot_calibration_curve(est, name, fig_index):
     lr = LogisticRegression(C=1.)
 
     fig = plt.figure(fig_index, figsize=(10, 10))
-    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
-    ax2 = plt.subplot2grid((3, 1), (2, 0))
+    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2, fig=fig)
+    ax2 = plt.subplot2grid((3, 1), (2, 0), fig=fig)
 
     ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
     for clf, name in [(lr, 'Logistic'),
@@ -125,6 +125,7 @@ def plot_calibration_curve(est, name, fig_index):
 
     plt.tight_layout()
 
+
 # Plot calibration curve for Gaussian Naive Bayes
 plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
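
This hunk is more than formatting: fig was assigned but never used, which flake8 reports as F841, so the handle is now passed to subplot2grid (which accepts a fig keyword in matplotlib). A minimal sketch of the fixed pattern, lifted out of the example's context:

import matplotlib.pyplot as plt

fig = plt.figure(figsize=(10, 10))
# Using the handle silences F841 and pins both axes to this figure,
# even if some other figure happens to be current.
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2, fig=fig)
ax2 = plt.subplot2grid((3, 1), (2, 0), fig=fig)
ax1.plot([0, 1], [0, 1], "k:")
plt.show()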

examples/cluster/plot_adjusted_for_chance_measures.py

Lines changed: 2 additions & 0 deletions

@@ -30,6 +30,7 @@
 from time import time
 from sklearn import metrics
 
+
 def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                              fixed_n_classes=None, n_runs=5, seed=42):
     """Compute score for 2 random uniform cluster labelings.
@@ -58,6 +59,7 @@ def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
 def ami_score(U, V):
     return metrics.adjusted_mutual_info_score(U, V)
 
+
 score_funcs = [
     metrics.adjusted_rand_score,
     metrics.v_measure_score,

examples/cluster/plot_agglomerative_clustering_metrics.py

Lines changed: 1 addition & 0 deletions

@@ -51,6 +51,7 @@
 def sqr(x):
     return np.sign(np.cos(x))
 
+
 X = list()
 y = list()
 for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):

examples/cluster/plot_digits_linkage.py

Lines changed: 6 additions & 5 deletions

@@ -35,15 +35,15 @@
 
 np.random.seed(0)
 
+
 def nudge_images(X, y):
     # Having a larger dataset shows more clearly the behavior of the
     # methods, but we multiply the size of the dataset only by 2, as the
     # cost of the hierarchical clustering methods are strongly
     # super-linear in n_samples
     shift = lambda x: ndimage.shift(x.reshape((8, 8)),
-                                  .3 * np.random.normal(size=2),
-                                  mode='constant',
-                                  ).ravel()
+                                    .3 * np.random.normal(size=2),
+                                    mode='constant').ravel()
     X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
     Y = np.concatenate([y, y], axis=0)
     return X, Y
@@ -52,7 +52,7 @@ def nudge_images(X, y):
 X, y = nudge_images(X, y)
 
 
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
 # Visualize the clustering
 def plot_clustering(X_red, labels, title=None):
     x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
@@ -71,7 +71,8 @@ def plot_clustering(X_red, labels, title=None):
     plt.axis('off')
     plt.tight_layout(rect=[0, 0.03, 1, 0.95])
 
-#----------------------------------------------------------------------
+
+# ----------------------------------------------------------------------
 # 2D embedding of the digits dataset
 print("Computing embedding")
 X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
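
A side note on the reflowed call arguments in this and later hunks: flake8's continuation-line rules (E127/E128) expect arguments on following lines to sit under the opening bracket of the call they continue. A minimal, hypothetical sketch, not taken from the commit:

def add(a, b):
    return a + b


total = add(1,
            2)  # continuation aligned under the first argument
print(total)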

examples/cluster/plot_kmeans_stability_low_dim_dense.py

Lines changed: 1 addition & 0 deletions

@@ -69,6 +69,7 @@ def make_data(random_state, n_samples_per_center, grid_size, scale):
 
 # Part 1: Quantitative evaluation of various init methods
 
+
 plt.figure()
 plots = []
 legends = []

examples/compose/plot_column_transformer_mixed_types.py

Lines changed: 1 addition & 1 deletion

@@ -180,7 +180,7 @@
 #
 grid_search.fit(X_train, y_train)
 
-print(f"Best params:")
+print("Best params:")
 print(grid_search.best_params_)
 
 # %%

examples/datasets/plot_digits_last_image.py

Lines changed: 2 additions & 2 deletions

@@ -26,10 +26,10 @@
 
 import matplotlib.pyplot as plt
 
-#Load the digits dataset
+# Load the digits dataset
 digits = datasets.load_digits()
 
-#Display the first digit
+# Display the first digit
 plt.figure(1, figsize=(3, 3))
 plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest')
 plt.show()

examples/decomposition/plot_faces_decomposition.py

Lines changed: 1 addition & 0 deletions

@@ -60,6 +60,7 @@ def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
         plt.yticks(())
     plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
 
+
 # #############################################################################
 # List of the different estimators, whether to center and transpose the
 # problem, and whether the transformer uses the clustering API.

examples/decomposition/plot_ica_blind_source_separation.py

Lines changed: 1 addition & 1 deletion

@@ -59,7 +59,7 @@
 models = [X, S, S_, H]
 names = ['Observations (mixed signal)',
          'True Sources',
-         'ICA recovered signals',
+         'ICA recovered signals',
          'PCA recovered signals']
 colors = ['red', 'steelblue', 'orange']

examples/decomposition/plot_ica_vs_pca.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ def plot_samples(S, axis_list=None):
8080
plt.xlabel('x')
8181
plt.ylabel('y')
8282

83+
8384
plt.figure()
8485
plt.subplot(2, 2, 1)
8586
plot_samples(S / S.std())

examples/decomposition/plot_image_denoising.py

Lines changed: 1 addition & 0 deletions

@@ -120,6 +120,7 @@ def show_with_diff(image, reference, title):
     plt.suptitle(title, size=16)
     plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
 
+
 show_with_diff(distorted, face, 'Distorted image')
 
 # #############################################################################

examples/decomposition/plot_pca_3d.py

Lines changed: 1 addition & 0 deletions

@@ -37,6 +37,7 @@ def pdf(x):
     return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
                   + stats.norm(scale=4 / e).pdf(x))
 
+
 y = np.random.normal(scale=0.5, size=(30000))
 x = np.random.normal(scale=0.5, size=(30000))
 z = np.random.normal(scale=0.1, size=len(x))

examples/ensemble/plot_adaboost_regression.py

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@
 regr_1 = DecisionTreeRegressor(max_depth=4)
 
 regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
-                          n_estimators=300, random_state=rng)
+                           n_estimators=300, random_state=rng)
 
 regr_1.fit(X, y)
 regr_2.fit(X, y)

examples/ensemble/plot_gradient_boosting_regularization.py

Lines changed: 3 additions & 3 deletions

@@ -41,8 +41,8 @@
 X_train, X_test = X[:2000], X[2000:]
 y_train, y_test = y[:2000], y[2000:]
 
-original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
-                   'min_samples_split': 5}
+original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4,
+                   'max_depth': None, 'random_state': 2, 'min_samples_split': 5}
 
 plt.figure()
 
@@ -70,7 +70,7 @@
         test_deviance[i] = clf.loss_(y_test, y_pred)
 
     plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
-            '-', color=color, label=label)
+             '-', color=color, label=label)
 
 plt.legend(loc='upper left')
 plt.xlabel('Boosting Iterations')

examples/feature_selection/plot_rfe_digits.py

Lines changed: 1 addition & 1 deletion

@@ -10,7 +10,7 @@
 
 See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`
 
-"""
+"""  # noqa: E501
 print(__doc__)
 
 from sklearn.svm import SVC
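
Here and in plot_species_kde.py below, the overlong line is a cross-reference or URL that cannot be wrapped, so the commit appends a targeted suppression instead of reflowing. A hypothetical sketch of how a per-line noqa works ("# noqa: E501" silences only the line-length check; every other flake8 rule still applies to that line):

DOC_URL = "https://example.com/a/very/long/reference/path/that/would/otherwise/trip/E501"  # noqa: E501
print(DOC_URL)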

examples/gaussian_process/plot_gpc_isoprobability.py

Lines changed: 1 addition & 0 deletions

@@ -33,6 +33,7 @@ def g(x):
     whether g(x) <= 0 or not)"""
     return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
 
+
 # Design of experiments
 X = np.array([[-4.61611719, -6.00099547],
               [4.10469096, 5.32782448],

examples/gaussian_process/plot_gpr_noisy_targets.py

Lines changed: 1 addition & 0 deletions

@@ -38,6 +38,7 @@ def f(x):
     """The function to predict."""
     return x * np.sin(x)
 
+
 # ----------------------------------------------------------------------
 # First the noiseless case
 X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T

examples/linear_model/plot_robust_fit.py

Lines changed: 2 additions & 1 deletion

@@ -66,7 +66,8 @@
               ('Theil-Sen', TheilSenRegressor(random_state=42)),
               ('RANSAC', RANSACRegressor(random_state=42)),
               ('HuberRegressor', HuberRegressor())]
-colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
+colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold',
+          'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
 linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
 lw = 3

examples/manifold/plot_swissroll.py

Lines changed: 2 additions & 2 deletions

@@ -18,7 +18,7 @@
 from mpl_toolkits.mplot3d import Axes3D
 Axes3D
 
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
 # Locally linear embedding of the swiss roll
 
 from sklearn import manifold, datasets
@@ -29,7 +29,7 @@
                                              n_components=2)
 print("Done. Reconstruction error: %g" % err)
 
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
 # Plot result
 
 fig = plt.figure()

examples/mixture/plot_concentration_prior.py

Lines changed: 9 additions & 8 deletions

@@ -84,6 +84,7 @@ def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
     ax1.set_ylabel('Estimated Mixtures')
     ax2.set_ylabel('Weight of each component')
 
+
 # Parameters of the dataset
 random_state, n_components, n_features = 2, 3, 2
 colors = np.array(['#0072B2', '#F0E442', '#D55E00'])
@@ -100,16 +101,16 @@ def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
 estimators = [
     ("Finite mixture with a Dirichlet distribution\nprior and "
      r"$\gamma_0=$", BayesianGaussianMixture(
-        weight_concentration_prior_type="dirichlet_distribution",
-        n_components=2 * n_components, reg_covar=0, init_params='random',
-        max_iter=1500, mean_precision_prior=.8,
-        random_state=random_state), [0.001, 1, 1000]),
+         weight_concentration_prior_type="dirichlet_distribution",
+         n_components=2 * n_components, reg_covar=0, init_params='random',
+         max_iter=1500, mean_precision_prior=.8,
+         random_state=random_state), [0.001, 1, 1000]),
     ("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
      BayesianGaussianMixture(
-        weight_concentration_prior_type="dirichlet_process",
-        n_components=2 * n_components, reg_covar=0, init_params='random',
-        max_iter=1500, mean_precision_prior=.8,
-        random_state=random_state), [1, 1000, 100000])]
+         weight_concentration_prior_type="dirichlet_process",
+         n_components=2 * n_components, reg_covar=0, init_params='random',
+         max_iter=1500, mean_precision_prior=.8,
+         random_state=random_state), [1, 1000, 100000])]
 
 # Generate data
 rng = np.random.RandomState(random_state)

examples/mixture/plot_gmm_covariances.py

Lines changed: 1 addition & 0 deletions

@@ -66,6 +66,7 @@ def make_ellipses(gmm, ax):
         ax.add_artist(ell)
     ax.set_aspect('equal', 'datalim')
 
+
 iris = datasets.load_iris()
 
 # Break up the dataset into non-overlapping training (75%) and testing

examples/model_selection/grid_search_text_feature_extraction.py

Lines changed: 1 addition & 1 deletion

@@ -71,7 +71,7 @@
     'talk.religion.misc',
 ]
 # Uncomment the following to do the analysis on all the categories
-#categories = None
+# categories = None
 
 print("Loading 20 newsgroups dataset for categories:")
 print(categories)

examples/model_selection/plot_precision_recall.py

Lines changed: 1 addition & 2 deletions

@@ -173,7 +173,6 @@
 # %%
 # The average precision score in multi-label settings
 # ....................................................
-from sklearn.metrics import precision_recall_curve
 from sklearn.metrics import average_precision_score
 
 # For each class
@@ -187,7 +186,7 @@
 
 # A "micro-average": quantifying score on all classes jointly
 precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(),
-    y_score.ravel())
+                                                                y_score.ravel())
 average_precision["micro"] = average_precision_score(Y_test, y_score,
                                                      average="micro")
 print('Average precision score, micro-averaged over all classes: {0:0.2f}'

examples/model_selection/plot_underfitting_overfitting.py

Lines changed: 1 addition & 0 deletions

@@ -33,6 +33,7 @@
 def true_fun(X):
     return np.cos(1.5 * np.pi * X)
 
+
 np.random.seed(0)
 
 n_samples = 30

examples/neighbors/plot_kde_1d.py

Lines changed: 1 addition & 0 deletions

@@ -103,6 +103,7 @@ def format_func(x, loc):
     else:
         return '%ih' % x
 
+
 for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
                             'exponential', 'linear', 'cosine']):
     axi = ax.ravel()[i]

examples/neighbors/plot_species_kde.py

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@
    <http://rob.schapire.net/papers/ecolmod.pdf>`_
    S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
    190:231-259, 2006.
-"""
+"""  # noqa: E501
 # Author: Jake Vanderplas <jakevdp@cs.washington.edu>
 #
 # License: BSD 3 clause

examples/preprocessing/plot_all_scaling.py

Lines changed: 1 addition & 0 deletions

@@ -106,6 +106,7 @@
 # plasma does not exist in matplotlib < 1.5
 cmap = getattr(cm, 'plasma_r', cm.hot_r)
 
+
 def create_axes(title, figsize=(16, 6)):
     fig = plt.figure(figsize=figsize)
     fig.suptitle(title)

examples/svm/plot_custom_kernel.py

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@
 # import some data to play with
 iris = datasets.load_iris()
 X = iris.data[:, :2]  # we only take the first two features. We could
-# avoid this ugly slicing by using a two-dim dataset
+                      # avoid this ugly slicing by using a two-dim dataset
 Y = iris.target

0 commit comments
