MNT Update to black 22.3.0 to resolve click error (#22983) · scikit-learn/scikit-learn@d4aad64 · GitHub

Commit d4aad64

MNT Update to black 22.3.0 to resolve click error (#22983)
* MNT Update to black 22.3.0 to resolve click error
* STY Update for new black version
1 parent 75a94f5 commit d4aad64
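
Background: black releases before 22.3.0 imported click's private _unicodefun module. click 8.1.0 deleted that module, so invoking black at all (for example through the pre-commit hook or the CI lint step) died with an ImportError. A minimal sketch of the failure mode — illustrative only, not code from this repository:

    # Illustrative sketch: black < 22.3.0 did the equivalent of this private
    # import, which click >= 8.1.0 no longer provides.
    try:
        from click import _unicodefun  # noqa: F401 -- removed in click 8.1.0
    except ImportError as exc:
        print(f"black < 22.3.0 fails at startup like this: {exc}")

black 22.3.0 dropped the private import, so bumping the pin fixes the error without having to pin click.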

Showing 25 changed files with 31 additions and 33 deletions.
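
Besides the version bump itself, black 22.3.0 changed one stylistic rule that accounts for nearly all of the diff below: redundant parentheses around tuple-unpacking targets in for loops are now removed. Both spellings are valid Python with identical behavior; only the formatting differs. A minimal before/after sketch with made-up data:

    pairs = [("a", 1), ("b", 2)]

    # Accepted by black 22.1.0 (parentheses left in place):
    for (key, value) in pairs:
        print(key, value)

    # Rewritten by black 22.3.0 (redundant parentheses dropped):
    for key, value in pairs:
        print(key, value)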

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ repos:
     -   id: end-of-file-fixer
     -   id: trailing-whitespace
 -   repo: https://github.com/psf/black
-    rev: 22.1.0
+    rev: 22.3.0
     hooks:
     -   id: black
 -   repo: https://gitlab.com/pycqa/flake8
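
Once the rev is bumped, contributors pick up the new hook automatically the next time pre-commit rebuilds its environments; a typical local refresh (standard pre-commit CLI usage, not commands introduced by this commit):

    pre-commit install
    pre-commit run black --all-files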

azure-pipelines.yml

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ jobs:
         versionSpec: '3.9'
     - bash: |
         # Include pytest compatibility with mypy
-        pip install pytest flake8 mypy==0.782 black==22.1.0
+        pip install pytest flake8 mypy==0.782 black==22.3.0
       displayName: Install linters
     - bash: |
         black --check --diff .
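
The same check the pipeline runs can be reproduced locally before pushing:

    pip install black==22.3.0
    black --check --diff .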

benchmarks/bench_plot_neighbors.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def barplot_neighbors(
 
     plt.figure(figsize=(8, 11))
 
-    for (sbplt, vals, quantity, build_time, query_time) in [
+    for sbplt, vals, quantity, build_time, query_time in [
         (311, Nrange, "N", N_results_build, N_results_query),
         (312, Drange, "D", D_results_build, D_results_query),
         (313, krange, "k", k_results_build, k_results_query),

benchmarks/bench_rcv1_logreg_convergence.py

Lines changed: 5 additions & 5 deletions
@@ -102,7 +102,7 @@ def bench(clfs):
 
 def plot_train_losses(clfs):
     plt.figure()
-    for (name, _, _, train_losses, _, _, durations) in clfs:
+    for name, _, _, train_losses, _, _, durations in clfs:
         plt.plot(durations, train_losses, "-o", label=name)
     plt.legend(loc=0)
     plt.xlabel("seconds")
@@ -111,7 +111,7 @@ def plot_train_losses(clfs):
 
 def plot_train_scores(clfs):
     plt.figure()
-    for (name, _, _, _, train_scores, _, durations) in clfs:
+    for name, _, _, _, train_scores, _, durations in clfs:
         plt.plot(durations, train_scores, "-o", label=name)
     plt.legend(loc=0)
     plt.xlabel("seconds")
@@ -121,7 +121,7 @@ def plot_train_scores(clfs):
 
 def plot_test_scores(clfs):
     plt.figure()
-    for (name, _, _, _, _, test_scores, durations) in clfs:
+    for name, _, _, _, _, test_scores, durations in clfs:
         plt.plot(durations, test_scores, "-o", label=name)
     plt.legend(loc=0)
     plt.xlabel("seconds")
@@ -132,13 +132,13 @@ def plot_test_scores(clfs):
 def plot_dloss(clfs):
     plt.figure()
     pobj_final = []
-    for (name, _, _, train_losses, _, _, durations) in clfs:
+    for name, _, _, train_losses, _, _, durations in clfs:
         pobj_final.append(train_losses[-1])
 
     indices = np.argsort(pobj_final)
     pobj_best = pobj_final[indices[0]]
 
-    for (name, _, _, train_losses, _, _, durations) in clfs:
+    for name, _, _, train_losses, _, _, durations in clfs:
         log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
 
         plt.plot(durations, log_pobj, "-o", label=name)

benchmarks/bench_saga.py

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ def fit_single(
     train_time = time.clock() - t0
 
     scores = []
-    for (X, y) in [(X_train, y_train), (X_test, y_test)]:
+    for X, y in [(X_train, y_train), (X_test, y_test)]:
         try:
             y_pred = lr.predict_proba(X)
         except NotImplementedError:

doc/developers/contributing.rst

Lines changed: 1 addition & 1 deletion
@@ -254,7 +254,7 @@ how to set up your git repository:
 
 .. prompt:: bash $
 
-    pip install pytest pytest-cov flake8 mypy numpydoc black==22.1.0
+    pip install pytest pytest-cov flake8 mypy numpydoc black==22.3.0
 
 .. _upstream:
 

examples/linear_model/plot_lasso_coordinate_descent_path.py

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@
 
 plt.figure(3)
 neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
-for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
+for coef_e, coef_pe, c in zip(coefs_enet, coefs_positive_enet, colors):
     l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
     l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle="--", c=c)
 

examples/mixture/plot_concentration_prior.py

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
 y = np.concatenate([np.full(samples[j], j, dtype=int) for j in range(n_components)])
 
 # Plot results in two different figures
-for (title, estimator, concentrations_prior) in estimators:
+for title, estimator, concentrations_prior in estimators:
     plt.figure(figsize=(4.7 * 3, 8))
     plt.subplots_adjust(
         bottom=0.04, top=0.90, hspace=0.05, wspace=0.05, left=0.03, right=0.99

examples/semi_supervised/plot_self_training_varying_threshold.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@
 amount_labeled = np.empty((x_values.shape[0], n_splits))
 amount_iterations = np.empty((x_values.shape[0], n_splits))
 
-for (i, threshold) in enumerate(x_values):
+for i, threshold in enumerate(x_values):
     self_training_clf = SelfTrainingClassifier(base_classifier, threshold=threshold)
 
     # We need manual cross validation so that we don't treat -1 as a separate

examples/svm/plot_rbf_parameters.py

Lines changed: 1 addition & 1 deletion
@@ -174,7 +174,7 @@ def __call__(self, value, clip=None):
 
 plt.figure(figsize=(8, 6))
 xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
-for (k, (C, gamma, clf)) in enumerate(classifiers):
+for k, (C, gamma, clf) in enumerate(classifiers):
     # evaluate decision function in a grid
     Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
     Z = Z.reshape(xx.shape)

sklearn/_min_dependencies.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
     "pytest": (PYTEST_MIN_VERSION, "tests"),
     "pytest-cov": ("2.9.0", "tests"),
     "flake8": ("3.8.2", "tests"),
-    "black": ("22.1.0", "tests"),
+    "black": ("22.3.0", "tests"),
     "mypy": ("0.770", "tests"),
     "pyamg": ("4.0.0", "tests"),
     "sphinx": ("4.0.1", "docs"),

sklearn/cluster/tests/test_hierarchical.py

Lines changed: 1 addition & 1 deletion
@@ -605,7 +605,7 @@ def test_ward_linkage_tree_return_distance():
 
     linkage_options = ["complete", "average", "single"]
     X_linkage_truth = [linkage_X_complete, linkage_X_average]
-    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
+    for linkage, X_truth in zip(linkage_options, X_linkage_truth):
         out_X_unstructured = linkage_tree(X, return_distance=True, linkage=linkage)
         out_X_structured = linkage_tree(
             X, connectivity=connectivity_X, linkage=linkage, return_distance=True

sklearn/datasets/tests/test_samples_generator.py

Lines changed: 1 addition & 1 deletion
@@ -657,7 +657,7 @@ def test_make_moons_unbalanced():
 def test_make_circles():
     factor = 0.3
 
-    for (n_samples, n_outer, n_inner) in [(7, 3, 4), (8, 4, 4)]:
+    for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]:
         # Testing odd and even case, because in the past make_circles always
         # created an even number of samples.
         X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor)

sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py

Lines changed: 2 additions & 2 deletions
@@ -19,8 +19,8 @@
 def _assert_predictor_equal(gb_1, gb_2, X):
     """Assert that two HistGBM instances are identical."""
     # Check identical nodes for each tree
-    for (pred_ith_1, pred_ith_2) in zip(gb_1._predictors, gb_2._predictors):
-        for (predictor_1, predictor_2) in zip(pred_ith_1, pred_ith_2):
+    for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors):
+        for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2):
             assert_array_equal(predictor_1.nodes, predictor_2.nodes)
 
     # Check identical predictions

sklearn/feature_extraction/tests/test_image.py

Lines changed: 1 addition & 1 deletion
@@ -317,7 +317,7 @@ def test_extract_patches_strided():
     expected_views = expected_views_1D + expected_views_2D + expected_views_3D
     last_patches = last_patch_1D + last_patch_2D + last_patch_3D
 
-    for (image_shape, patch_size, patch_step, expected_view, last_patch) in zip(
+    for image_shape, patch_size, patch_step, expected_view, last_patch in zip(
         image_shapes, patch_sizes, patch_steps, expected_views, last_patches
     ):
         image = np.arange(np.prod(image_shape)).reshape(image_shape)

sklearn/linear_model/tests/test_logistic.py

Lines changed: 1 addition & 1 deletion
@@ -1309,7 +1309,7 @@ def test_saga_vs_liblinear():
     )
     X_sparse = sparse.csr_matrix(X_sparse)
 
-    for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
+    for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)):
         for penalty in ["l1", "l2"]:
             n_samples = X.shape[0]
             # alpha=1e-3 is time consuming

sklearn/linear_model/tests/test_ridge.py

Lines changed: 1 addition & 1 deletion
@@ -189,7 +189,7 @@ def test_ridge_sample_weights():
     X = rng.randn(n_samples, n_features)
     sample_weight = 1.0 + rng.rand(n_samples)
 
-    for (alpha, intercept, solver) in param_grid:
+    for alpha, intercept, solver in param_grid:
 
         # Ridge with explicit sample_weight
         est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver, tol=1e-12)

sklearn/linear_model/tests/test_theil_sen.py

Lines changed: 1 addition & 3 deletions
@@ -111,9 +111,7 @@ def test_modweiszfeld_step_1d():
     assert_array_less(new_y, y)
     # Check that a single vector is identity
    X = np.array([1.0, 2.0, 3.0]).reshape(1, 3)
-    y = X[
-        0,
-    ]
+    y = X[0]
     new_y = _modified_weiszfeld_step(X, y)
     assert_array_equal(y, new_y)
 
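
This hunk is slightly different from the mechanical paren removals elsewhere: indexing a NumPy array with the one-element tuple (0,) selects the same row as a plain integer index, so collapsing the awkwardly wrapped y = X[0,] to y = X[0] preserves behavior exactly. A quick standalone check:

    import numpy as np

    X = np.array([1.0, 2.0, 3.0]).reshape(1, 3)
    # A one-element index tuple and a plain integer index select the same row.
    assert np.array_equal(X[(0,)], X[0])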

sklearn/metrics/cluster/tests/test_common.py

Lines changed: 1 addition & 1 deletion
@@ -190,7 +190,7 @@ def generate_formats(y):
     score_1 = metric(X, y_true)
     assert score_1 == metric(X.astype(float), y_true)
     y_true_gen = generate_formats(y_true)
-    for (y_true_fmt, fmt_name) in y_true_gen:
+    for y_true_fmt, fmt_name in y_true_gen:
         assert score_1 == metric(X, y_true_fmt)
 
 

sklearn/mixture/tests/test_gaussian_mixture.py

Lines changed: 1 addition & 1 deletion
@@ -423,7 +423,7 @@ def test_suffstat_sk_diag():
     covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
 
     ecov = EmpiricalCovariance()
-    for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
+    for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag):
         ecov.covariance_ = np.diag(np.diag(cov_full))
         cov_diag = np.diag(cov_diag)
         assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0)

sklearn/model_selection/tests/test_validation.py

Lines changed: 1 addition & 1 deletion
@@ -460,7 +460,7 @@ def check_cross_validate_single_metric(clf, X, y, scores):
         fitted_estimators,
     ) = scores
     # Test single metric evaluation when scoring is string or singleton list
-    for (return_train_score, dict_len) in ((True, 4), (False, 3)):
+    for return_train_score, dict_len in ((True, 4), (False, 3)):
         # Single metric passed as a string
         if return_train_score:
             mse_scores_dict = cross_validate(

sklearn/neighbors/tests/test_neighbors.py

Lines changed: 1 addition & 1 deletion
@@ -536,7 +536,7 @@ def test_unsupervised_radius_neighbors(
         # sort the results: this is not done automatically for
         # radius searches
         dist, ind = neigh.radius_neighbors(test, return_distance=True)
-        for (d, i, i1) in zip(dist, ind, ind1):
+        for d, i, i1 in zip(dist, ind, ind1):
             j = d.argsort()
             d[:] = d[j]
             i[:] = i[j]

sklearn/pipeline.py

Lines changed: 1 addition & 1 deletion
@@ -319,7 +319,7 @@ def _fit(self, X, y=None, **fit_params_steps):
 
         fit_transform_one_cached = memory.cache(_fit_transform_one)
 
-        for (step_idx, name, transformer) in self._iter(
+        for step_idx, name, transformer in self._iter(
             with_final=False, filter_passthrough=False
         ):
             if transformer is None or transformer == "passthrough":

sklearn/tests/test_discriminant_analysis.py

Lines changed: 2 additions & 2 deletions
@@ -515,14 +515,14 @@ def test_lda_dimension_warning(n_classes, n_features):
     ],
 )
 def test_lda_dtype_match(data_type, expected_type):
-    for (solver, shrinkage) in solver_shrinkage:
+    for solver, shrinkage in solver_shrinkage:
         clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
         clf.fit(X.astype(data_type), y.astype(data_type))
         assert clf.coef_.dtype == expected_type
 
 
 def test_lda_numeric_consistency_float32_float64():
-    for (solver, shrinkage) in solver_shrinkage:
+    for solver, shrinkage in solver_shrinkage:
         clf_32 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
         clf_32.fit(X.astype(np.float32), y.astype(np.float32))
         clf_64 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)

sklearn/utils/estimator_checks.py

Lines changed: 1 addition & 1 deletion
@@ -2128,7 +2128,7 @@ def check_classifiers_train(
     if not tags["binary_only"]:
         problems.append((X_m, y_m))
 
-    for (X, y) in problems:
+    for X, y in problems:
         classes = np.unique(y)
         n_classes = len(classes)
         n_samples, n_features = X.shape

0 commit comments