
Commit fa9f13e

MAINT: lint: enable line-length check (package-wide) (#19609)
* MAINT: `noqa` for awkward UP007 violation I tried to fix, but `mypy` complains along the lines of python/mypy#12393
* MAINT: lint: enable line-length check in `cluster`
* MAINT: lint: enable line-length check in `integrate` [lint only]
* MAINT: lint: enable line-length check in `interpolate` [lint only]
* MAINT: lint: enable line-length check in `io` [lint only]
* MAINT: lint: enable line-length check in `ndimage` [lint only]
* MAINT: lint: enable line-length check in `signal` [lint only]
* MAINT: lint: enable line-length check in `linalg` [lint only]
* MAINT: lint: enable line-length check in `spatial` [lint only]
* MAINT: lint: enable line-length check in `sparse` [lint only]
* MAINT: lint: enable line-length check in `_lib` [lint only]
* MAINT: lint: enable line-length check in `stats` [lint only]
* MAINT: lint: enable line-length check in `special` [lint only]
* MAINT: lint: enable line-length check in `optimize` [lint only]
* MAINT: lint: enable line-length check in `benchmarks` [lint only]
* MAINT: lint: enable line-length check in `tools`
* MAINT: lint: `per-file-ignores` clean-up
* MAINT: PR 19609 revisions
1 parent 8166222 commit fa9f13e
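The changes below are mechanical: they rewrap lines that exceed the linter's length limit without touching behaviour. A minimal sketch of the three idioms that recur throughout the diff (names here are illustrative only; `E501` is the line-length rule named in the commit):

def some_function(a, b, keyword_argument=None):
    # Stand-in for any call that no longer fits on one line.
    return (a, b, keyword_argument)

# 1. Continue a long call inside its parentheses.
result = some_function("first argument", "second argument",
                       keyword_argument="value")

# 2. Parenthesise a long assignment so the value moves to its own line
#    (the hash is one of the benchmark version strings from the diff).
checksum = (
    "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325"
)

# 3. As a last resort, exempt a single physical line from the check.
kept = "a line that reads better unsplit, so it is exempted instead"  # noqa: E501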


177 files changed: +3320 -1860 lines changed

Large commits hide some content by default; only a subset of the 177 changed files is shown below.


benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py

Lines changed: 2 additions & 1 deletion
@@ -551,7 +551,8 @@ class Cube(Benchmark):
        f_{\text{Cube}}(x) = 100(x_2 - x_1^3)^2 + (1 - x1)^2


-    Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,N`.
+    Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]`
+    for :math:`i=1,...,N`.

    *Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [1, 1]`

benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py

Lines changed: 2 additions & 2 deletions
@@ -403,8 +403,8 @@ class DeVilliersGlasser02(Benchmark):
    r"""
    DeVilliers-Glasser 2 objective function.

-    This class defines the DeVilliers-Glasser 2 [1]_ function global optimization problem. This
-    is a multimodal minimization problem defined as follows:
+    This class defines the DeVilliers-Glasser 2 [1]_ function global optimization
+    problem. This is a multimodal minimization problem defined as follows:

    .. math::

benchmarks/benchmarks/go_benchmark_functions/go_funcs_E.py

Lines changed: 2 additions & 1 deletion
@@ -27,7 +27,8 @@ class Easom(Benchmark):
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

-    TODO Gavana website disagrees with Jamil, etc. Gavana equation in docstring is totally wrong.
+    TODO Gavana website disagrees with Jamil, etc.
+    Gavana equation in docstring is totally wrong.
    """

    def __init__(self, dimensions=2):

benchmarks/benchmarks/go_benchmark_functions/go_funcs_L.py

Lines changed: 11 additions & 5 deletions
@@ -203,7 +203,8 @@ class Levy03(Benchmark):

    .. math::

-        f_{\text{Levy03}}(\mathbf{x}) = \sin^2(\pi y_1)+\sum_{i=1}^{n-1}(y_i-1)^2[1+10\sin^2(\pi y_{i+1})]+(y_n-1)^2
+        f_{\text{Levy03}}(\mathbf{x}) =
+        \sin^2(\pi y_1)+\sum_{i=1}^{n-1}(y_i-1)^2[1+10\sin^2(\pi y_{i+1})]+(y_n-1)^2

    Where, in this exercise:

@@ -212,7 +213,8 @@ class Levy03(Benchmark):
        y_i=1+\frac{x_i-1}{4}


-    Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
+    Here, :math:`n` represents the number of dimensions and
+    :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.

    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`

@@ -254,11 +256,15 @@ class Levy05(Benchmark):

    .. math::

-        f_{\text{Levy05}}(\mathbf{x}) = \sum_{i=1}^{5} i \cos \left[(i-1)x_1 + i \right] \times \sum_{j=1}^{5} j \cos \left[(j+1)x_2 + j \right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2
+        f_{\text{Levy05}}(\mathbf{x}) =
+        \sum_{i=1}^{5} i \cos \left[(i-1)x_1 + i \right] \times \sum_{j=1}^{5} j
+        \cos \left[(j+1)x_2 + j \right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2

-    Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.
+    Here, :math:`n` represents the number of dimensions and
+    :math:`x_i \in [-10, 10]` for :math:`i=1,...,n`.

-    *Global optimum*: :math:`f(x_i) = -176.1375779` for :math:`\mathbf{x} = [-1.30685, -1.42485]`.
+    *Global optimum*: :math:`f(x_i) = -176.1375779` for
+    :math:`\mathbf{x} = [-1.30685, -1.42485]`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
           Particle Swarm Methods: Evaluation on Some Benchmark Functions.

benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py

Lines changed: 5 additions & 3 deletions
@@ -215,7 +215,8 @@ class Problem07(Benchmark):

    .. math::

-        f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3
+        f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x
+        \\right) + \\log(x) - 0.84x + 3

    Bound constraints: :math:`x \\in [2.7, 7.5]`

@@ -571,8 +572,9 @@ class Problem18(Benchmark):

    .. math::

-        f_{\\text{Problem18}}(x) = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x \\leq 3 \\\\
-        2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}
+        f_{\\text{Problem18}}(x)
+        = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x
+        \\leq 3 \\\\ 2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}

    Bound constraints: :math:`x \\in [0, 6]`

benchmarks/benchmarks/integrate.py

Lines changed: 2 additions & 1 deletion
@@ -102,7 +102,8 @@ def setup(self):
        voidp = ctypes.cast(self.f_ctypes, ctypes.c_void_p)
        address = voidp.value
        ffi = cffi.FFI()
-        self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)", address))
+        self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)",
+                                                address))

    def time_quad_python(self):
        quad(self.f_python, 0, np.pi)
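The wrapped line above constructs a C-level integrand; for readers unfamiliar with the pattern, here is a hedged, self-contained sketch of the same technique outside the benchmark class (it assumes `cffi` is installed and uses `sin` in place of the benchmark's integrand):

import ctypes
import numpy as np
import cffi
from scipy import LowLevelCallable
from scipy.integrate import quad

# ctypes callback with the C signature quad accepts: double f(int n, double *x)
_proto = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_int,
                          ctypes.POINTER(ctypes.c_double))

@_proto
def f_ctypes(n, x):
    return np.sin(x[0])

# Re-wrap the same function pointer through cffi, as the benchmark setup does.
address = ctypes.cast(f_ctypes, ctypes.c_void_p).value
ffi = cffi.FFI()
f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)", address))

print(quad(f_cffi, 0, np.pi))  # approximately (2.0, ~2e-14)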

benchmarks/benchmarks/interpolate.py

Lines changed: 14 additions & 6 deletions
@@ -81,7 +81,8 @@ def setup(self, n_grids, method):
        self.values = self.func(self.points[:, 0], self.points[:, 1])

    def time_evaluation(self, n_grids, method):
-        interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
+        interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y),
+                             method=method)


class Interpolate1d(Benchmark):

@@ -128,7 +129,8 @@ class Rbf(Benchmark):
    param_names = ['n_samples', 'function']
    params = [
        [10, 50, 100],
-        ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
+        ['multiquadric', 'inverse', 'gaussian', 'linear',
+         'cubic', 'quintic', 'thin_plate']
    ]

    def setup(self, n_samples, function):

@@ -220,7 +222,8 @@ def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)

    def time_lsq_bivariate_spline(self, n_samples):
-        interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
+        interpolate.LSQBivariateSpline(self.x, self.y, self.z,
+                                       self.xknots.flat, self.yknots.flat)


class Interpolate(Benchmark):

@@ -376,14 +379,19 @@ class CloughTocherInterpolatorValues(interpolate.CloughTocher2DInterpolator):
    https://github.com/scipy/scipy/pull/18376 for discussion
    """
    def __init__(self, points, xi, tol=1e-6, maxiter=400, **kwargs):
-        interpolate.CloughTocher2DInterpolator.__init__(self, points, None, tol=tol, maxiter=maxiter)
+        interpolate.CloughTocher2DInterpolator.__init__(self, points, None,
+                                                        tol=tol, maxiter=maxiter)
        self.xi = None
        self._preprocess_xi(*xi)
-        self.simplices, self.c = interpolate.CloughTocher2DInterpolator._find_simplicies(self, self.xi)
+        self.simplices, self.c = (
+            interpolate.CloughTocher2DInterpolator._find_simplicies(self, self.xi)
+        )

    def _preprocess_xi(self, *args):
        if self.xi is None:
-            self.xi, self.interpolation_points_shape = interpolate.CloughTocher2DInterpolator._preprocess_xi(self, *args)
+            self.xi, self.interpolation_points_shape = (
+                interpolate.CloughTocher2DInterpolator._preprocess_xi(self, *args)
+            )
        return self.xi, self.interpolation_points_shape

    def _find_simplicies(self, xi):
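For context on what the wrapped `griddata` call in the first hunk measures, a small illustrative example (problem sizes are made up; the benchmark parametrises grid size and method):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(0)
points = rng.random((200, 2))                  # scattered sample locations
values = np.hypot(points[:, 0], points[:, 1])  # data values at those points
grid_x, grid_y = np.mgrid[0:1:50j, 0:1:50j]    # regular evaluation grid

# The call the benchmark times, wrapped the same way as in the diff above.
interpolated = interpolate.griddata(points, values, (grid_x, grid_y),
                                    method='cubic')
print(interpolated.shape)  # (50, 50)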

benchmarks/benchmarks/io_mm.py

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ def generate_dense(size):

        a = generate_{matrix_type}({size})
        mmwrite('{self.filename}', a, symmetry='general')
-        """
+        """  # noqa: E501
        time, peak_mem = run_monitored(code)
        return peak_mem / size

benchmarks/benchmarks/linalg.py

Lines changed: 18 additions & 6 deletions
@@ -85,11 +85,21 @@ def time_svd(self, size, contig, module):
        sl.svd(self.a)

    # Retain old benchmark results (remove this if changing the benchmark)
-    time_det.version = "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325"
-    time_eigvals.version = "9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907"
-    time_inv.version = "20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b"
-    time_solve.version = "1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c"
-    time_svd.version = "0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3"
+    time_det.version = (
+        "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325"
+    )
+    time_eigvals.version = (
+        "9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907"
+    )
+    time_inv.version = (
+        "20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b"
+    )
+    time_solve.version = (
+        "1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c"
+    )
+    time_svd.version = (
+        "0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3"
+    )


class Norm(Benchmark):

@@ -178,7 +188,9 @@ def time_lstsq(self, dtype, size, lapack_driver):
                 lapack_driver=lapack_driver)

    # Retain old benchmark results (remove this if changing the benchmark)
-    time_lstsq.version = "15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe"
+    time_lstsq.version = (
+        "15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe"
+    )


class SpecialMatrices(Benchmark):
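The parenthesised hash assignments above exist because airspeed velocity (asv) invalidates a benchmark's stored results when its `version` attribute changes (by default the version is derived from the benchmark's source), so the hashes are pinned to keep history across a pure formatting change. A simplified sketch of the pattern, reusing the first hash from the diff; the real benchmark is parametrised over size, memory layout and module:

import numpy as np
import scipy.linalg as sl


class Det:
    def setup(self):
        self.a = np.random.rand(100, 100)

    def time_det(self):
        sl.det(self.a)

    # Pin asv's result-history key so reformatting does not orphan old timings
    # (remove this if the benchmark itself changes).
    time_det.version = (
        "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325"
    )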

benchmarks/benchmarks/optimize.py

Lines changed: 22 additions & 11 deletions
@@ -89,13 +89,15 @@ def print_results(self):
        print("")
        print("=========================================================")
        print("Optimizer benchmark: %s" % (self.function_name))
-        print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
+        print("dimensions: %d, extra kwargs: %s" %
+              (results[0].ndim, str(self.minimizer_kwargs)))
        print("averaged over %d starting configurations" % (results[0].ntrials))
        print(" Optimizer nfail nfev njev nhev time")
        print("---------------------------------------------------------")
        for res in results:
            print("%11s | %4d | %4d | %4d | %4d | %.6g" %
-                  (res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
+                  (res.name, res.nfail, res.mean_nfev,
+                   res.mean_njev, res.mean_nhev, res.mean_time))

    def average_results(self):
        """group the results by minimizer and average over the runs"""

@@ -370,7 +372,8 @@ def run_rosenbrock_tight(self, methods=None):

    def run_simple_quadratic(self, methods=None):
        s = funcs.SimpleQuadratic()
-        # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
+        # print "checking gradient",
+        # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("simple quadratic function",
                             fun=s.fun, der=s.der, hess=s.hess)
        for i in range(10):

@@ -379,7 +382,8 @@ def run_simple_quadratic(self, methods=None):

    def run_asymmetric_quadratic(self, methods=None):
        s = funcs.AsymmetricQuadratic()
-        # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
+        # print "checking gradient",
+        # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("function sum(x**2) + x[0]",
                             fun=s.fun, der=s.der, hess=s.hess)
        for i in range(10):

@@ -401,7 +405,8 @@ def der(x):

    def run_booth(self, methods=None):
        s = funcs.Booth()
-        # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
+        # print "checking gradient",
+        # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("Booth's function",
                             fun=s.fun, der=s.der, hess=None)
        for i in range(10):

@@ -410,7 +415,8 @@ def run_booth(self, methods=None):

    def run_beale(self, methods=None):
        s = funcs.Beale()
-        # print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
+        # print "checking gradient",
+        # scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("Beale's function",
                             fun=s.fun, der=s.der, hess=None)
        for i in range(10):

@@ -419,8 +425,9 @@ def run_beale(self, methods=None):

    def run_LJ(self, methods=None):
        s = funcs.LJ()
-        # print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient,
-        #                            np.random.uniform(-2,2,3*4))
+        # print "checking gradient",
+        # scipy.optimize.check_grad(s.get_energy, s.get_gradient,
+        #                           np.random.uniform(-2,2,3*4))
        natoms = 4
        b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
                             fun=s.fun, der=s.der, hess=None)

@@ -512,7 +519,9 @@ def __init__(self):
        except (KeyError, ValueError):
            self.numtrials = 100

-        self.dump_fn = os.path.join(os.path.dirname(__file__), '..', 'global-bench-results.json')
+        self.dump_fn = os.path.join(os.path.dirname(__file__),
+                                    '..',
+                                    'global-bench-results.json',)
        self.results = {}

    def setup(self, name, ret_value, solver):

@@ -536,7 +545,8 @@ def track_all(self, name, ret_value, solver):
        # if so, then just return the ret_value
        av_results = self.results[name]
        if ret_value == 'success%':
-            return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
+            return (100 * av_results[solver]['nsuccess']
+                    / av_results[solver]['ntrials'])
        elif ret_value == '<nfev>':
            return av_results[solver]['mean_nfev']
        else:

@@ -557,7 +567,8 @@ def track_all(self, name, ret_value, solver):
        self.results[name][solver] = av_results[solver]

        if ret_value == 'success%':
-            return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
+            return (100 * av_results[solver]['nsuccess']
+                    / av_results[solver]['ntrials'])
        elif ret_value == '<nfev>':
            return av_results[solver]['mean_nfev']
        else:
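The last two hunks use the remaining common idiom: wrap a long expression in parentheses and break before the binary operator, as PEP 8 recommends. A toy version of the `success%` computation with made-up numbers:

# Shaped like the av_results structure used in track_all(); values invented.
av_results = {'DE': {'nsuccess': 87, 'ntrials': 100, 'mean_nfev': 2400.0}}
solver = 'DE'

# Parenthesise so the continuation line can start with the operator,
# matching the shape the diff gives the two return statements above.
success_rate = (100 * av_results[solver]['nsuccess']
                / av_results[solver]['ntrials'])
print(success_rate)  # 87.0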

0 commit comments
