[CI][lintrunner] bump `black` version to 24.4.2 by XuehaiPan · Pull Request #129870 · pytorch/pytorch · GitHub

[CI][lintrunner] bump black version to 24.4.2 #129870


Closed · wants to merge 22 commits
2 changes: 2 additions & 0 deletions .flake8
@@ -8,6 +8,8 @@ max-line-length = 120
# E501 is not flexible enough, we're using B950 instead
ignore =
E203,E305,E402,E501,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,
# type stub in .py files formatted by black
E704,
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
# to line this up with executable bit
EXE001,
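
Why the new E704 ignore: black 24.x collapses "dummy implementations" — bodies consisting only of `...` — onto the same line as the `def`, which pycodestyle reports as E704 ("statement on same line as def"). A minimal sketch of the behavior, using a hypothetical function name:

# black 23.12.1 kept the ellipsis body on its own line:
def overload_fn(x: int) -> int:
    ...

# black 24.4.2 formats stub-like bodies onto one line; flake8 would
# flag this as E704 unless the rule is ignored:
def overload_fn(x: int) -> int: ...
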
6 changes: 3 additions & 3 deletions .github/scripts/build_triton_wheel.py
@@ -189,9 +189,9 @@ def main() -> None:

build_triton(
device=args.device,
commit_hash=args.commit_hash
if args.commit_hash
else read_triton_pin(args.device),
commit_hash=(
args.commit_hash if args.commit_hash else read_triton_pin(args.device)
),
version=args.triton_version,
build_conda=args.build_conda,
py_version=args.py_version,
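
The rewrite above follows black 24.x's rule of parenthesizing conditional expressions that must be split across lines, instead of splitting them bare. A before/after sketch under hypothetical names (not this file's real API):

# black 23.x split the conditional argument without parentheses:
build(
    commit=args.commit
    if args.commit
    else read_pin(),
)

# black 24.x wraps the conditional in its own parentheses:
build(
    commit=(args.commit if args.commit else read_pin()),
)
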
6 changes: 3 additions & 3 deletions .github/scripts/generate_binary_build_matrix.py
@@ -391,7 +391,7 @@ def generate_wheels_matrix(
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
"package_type": package_type,
"pytorch_extra_install_requirements": (
PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version] # fmt: skip
PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version]
if os != "linux-aarch64"
else ""
),
@@ -414,7 +414,7 @@ def generate_wheels_matrix(
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
"package_type": package_type,
"pytorch_extra_install_requirements": (
PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version] # fmt: skip
PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version]
if os != "linux-aarch64"
else ""
),
@@ -463,7 +463,7 @@ def generate_wheels_matrix(
".", "_"
),
"pytorch_extra_install_requirements": (
PYTORCH_EXTRA_INSTALL_REQUIREMENTS["12.1"] # fmt: skip
PYTORCH_EXTRA_INSTALL_REQUIREMENTS["12.1"]
if os != "linux"
else ""
),
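
The deleted `# fmt: skip` markers were only needed to stop the old black from unwrapping the hand-parenthesized conditionals; black 24.x now emits that parenthesized form itself, so the directives are redundant. For reference, `# fmt: skip` asks black to leave a line exactly as written, e.g. (hypothetical code):

# black leaves the odd spacing below untouched because of the pragma:
value = some_call( a , b )  # fmt: skip
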
16 changes: 8 additions & 8 deletions .github/scripts/trymerge.py
@@ -670,7 +670,7 @@ def skip_func(idx: int, candidate: "GitHubPR") -> bool:
if not open_only or not candidate.is_closed():
return False
print(
f"Skipping {idx+1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
f"Skipping {idx + 1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
)
return True

@@ -1419,9 +1419,9 @@ def find_matching_merge_rule(
pending_checks, failed_checks, _ = categorize_checks(
checks,
required_checks,
ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD
if rule.ignore_flaky_failures
else 0,
ok_failed_checks_threshold=(
IGNORABLE_FAILED_CHECKS_THESHOLD if rule.ignore_flaky_failures else 0
),
)

# categorize_checks assumes all tests are required if required_checks is empty.
@@ -1491,7 +1491,7 @@ def checks_to_str(checks: List[Tuple[str, Optional[str]]]) -> str:


def checks_to_markdown_bullets(
checks: List[Tuple[str, Optional[str], Optional[int]]]
checks: List[Tuple[str, Optional[str], Optional[int]]],
) -> List[str]:
return [
f"- [{c[0]}]({c[1]})" if c[1] is not None else f"- {c[0]}" for c in checks[:5]
@@ -2202,9 +2202,9 @@ def merge(
checks,
required_checks
+ [x for x in checks.keys() if x not in required_checks],
ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD
if ignore_flaky_failures
else 0,
ok_failed_checks_threshold=(
IGNORABLE_FAILED_CHECKS_THESHOLD if ignore_flaky_failures else 0
),
)
# HACK until GitHub will be better about surfacing those
startup_failures = filter_checks_with_lambda(
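
Three formatter behaviors appear in this file's hunks: operator spacing is normalized inside f-string replacement fields, a parameter list that is split onto its own line now gets a trailing comma even with a single parameter, and long conditional values are parenthesized (as in the files above). A compact runnable sketch with hypothetical names:

from typing import List, Optional, Tuple

idx, n = 0, 5
msg = f"Skipping {idx+1} of {n}"    # old style: no spaces inside the braces
msg = f"Skipping {idx + 1} of {n}"  # new style: spaced like ordinary code

def render(
    checks: List[Tuple[str, Optional[str]]],  # trailing comma now added
) -> List[str]: ...
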
2 changes: 1 addition & 1 deletion .lintrunner.toml
@@ -1474,8 +1474,8 @@ init_command = [
'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}',
'--no-black-binary',
'black==23.12.1',
'black==24.4.2',
'ufmt==2.7.0',
'usort==1.0.8.post1',
'isort==5.13.2',
]
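
A usage note (standard lintrunner CLI, not something added by this PR): after the pins change, `lintrunner init` reinstalls the pinned tools — including `black==24.4.2` — and `lintrunner -a` then reformats the files you touch with the new version.
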
5 changes: 2 additions & 3 deletions benchmarks/distributed/rpc/rl/launcher.py
@@ -209,9 +209,8 @@ def main():
x_axis_variables
): # run benchmark for every x axis variable
if len(x_axis_variables) > 1:
args[
args["x_axis_name"]
] = x_axis_variable # set x axis variable for this benchmark iteration
# Set x axis variable for this benchmark iteration
args[args["x_axis_name"]] = x_axis_variable
processes = []
start_time = time.time()
for rank in range(args["world_size"]):
48 changes: 24 additions & 24 deletions benchmarks/dynamo/common.py
@@ -481,7 +481,7 @@ def print_summary_table(data, print_dataframe=False):
print(col.ljust(width), f"mean={data[col].mean():.3f}x")
elif col in ("accuracy"):
pass_rate = (data[col] == "pass").mean()
print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
print(col.ljust(width), f"pass_rate={100 * pass_rate:.2f}%")
else:
cdata = data[col]
print(
@@ -1372,9 +1372,7 @@ def load(cls, model, example_inputs, device):
example_kwargs,
)
with torch.no_grad():
so_path = torch._inductor.aot_compile(
gm, example_args, example_kwargs
) # type: ignore[arg-type]
so_path = torch._inductor.aot_compile(gm, example_args, example_kwargs) # type: ignore[arg-type]

cls.cache[key] = torch._export.aot_load(so_path, device)

@@ -1531,12 +1529,10 @@ def _generate_onnx_model_directory(
return model_path

@abc.abstractmethod
def format_pt_inputs(self, pt_inputs: Any) -> Sequence[torch.Tensor]:
...
def format_pt_inputs(self, pt_inputs: Any) -> Sequence[torch.Tensor]: ...

@abc.abstractmethod
def format_pt_outputs(self, pt_outputs: Any) -> Sequence[torch.Tensor]:
...
def format_pt_outputs(self, pt_outputs: Any) -> Sequence[torch.Tensor]: ...

def adapt_pt_inputs_to_onnx(self, pt_inputs) -> Mapping[str, np.ndarray]:
pt_inputs = self.format_pt_inputs(pt_inputs)
@@ -2194,9 +2190,9 @@ def cast_to(dtype, model, inputs):
model = model.to(dtype)

inputs = tree_map(
lambda x: x.to(dtype)
if isinstance(x, torch.Tensor) and x.is_floating_point()
else x,
lambda x: (
x.to(dtype) if isinstance(x, torch.Tensor) and x.is_floating_point() else x
),
inputs,
)
return model, inputs
@@ -2631,9 +2627,11 @@ def deepcopy_and_maybe_parallelize(self, model):
model = FSDP(
model,
use_orig_params=True,
device_id=torch.cuda.current_device()
if self.args.devices[-1] == "cuda"
else None,
device_id=(
torch.cuda.current_device()
if self.args.devices[-1] == "cuda"
else None
),
mixed_precision=mp_policy,
limit_all_gathers=True,
auto_wrap_policy=self.get_fsdp_auto_wrap_policy(self.args.only),
@@ -2698,9 +2696,11 @@ def record_status(accuracy_status, dynamo_start_stats):
self.init_optimizer(name, current_device, model_fp64.parameters())
fp64_outputs = self.run_n_iterations(model_fp64, inputs_fp64)
fp64_outputs = tree_map(
lambda x: x.to(torch.float64)
if isinstance(x, torch.Tensor) and x.is_floating_point()
else x,
lambda x: (
x.to(torch.float64)
if isinstance(x, torch.Tensor) and x.is_floating_point()
else x
),
fp64_outputs,
)
except Exception:
@@ -3102,9 +3102,9 @@ def warmup(fn, model, example_inputs, mode, niters=10):
experiment_kwargs["dynamo_peak_mem"] = dynamo_peak_mem
experiment_kwargs["dynamo_stats"] = dynamo_stats
if self.args.profile_dynamo_cache_lookup:
experiment_kwargs[
"cache_lookup_latency"
] = dynamo_cache_lookup_latency
experiment_kwargs["cache_lookup_latency"] = (
dynamo_cache_lookup_latency
)

if experiment.func is speedup_experiment_onnx:
experiment = functools.partial(
@@ -3258,9 +3258,9 @@ def warmup(fn, model, example_inputs, mode, niters=5):
experiment_kwargs["dynamo_peak_mem"] = dynamo_peak_mem
experiment_kwargs["dynamo_stats"] = dynamo_stats
if self.args.profile_dynamo_cache_lookup:
experiment_kwargs[
"cache_lookup_latency"
] = dynamo_cache_lookup_latency
experiment_kwargs["cache_lookup_latency"] = (
dynamo_cache_lookup_latency
)

if experiment.func is coverage_experiment:
ok, total = Stats.reset_counters()
@@ -4709,7 +4709,7 @@ def detect_and_mark_batch(t):
for i, name in enumerate(model_names):
current_name = name
if args.progress:
print(f"Running model {i+1}/{nmodels}", flush=True)
print(f"Running model {i + 1}/{nmodels}", flush=True)

try:
timeout = args.timeout
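
The `experiment_kwargs` hunks above show black 24.x's preference for splitting the right-hand side of an over-long assignment instead of exploding a subscripted target. A minimal sketch with hypothetical names:

metrics, measured_latency = {}, 1.23

# black 23.x split the assignment target:
metrics[
    "cache_lookup_latency"
] = measured_latency

# black 24.x parenthesizes and splits the value instead:
metrics["cache_lookup_latency"] = (
    measured_latency
)
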
1 change: 1 addition & 0 deletions benchmarks/dynamo/join_results.py
@@ -2,6 +2,7 @@
A tool to merge multiple csv files (generated by torchbench.py/etc) into a single csv file.
Performs an outer join based on the benchmark name, filling in any missing data with zeros.
"""

import argparse
import functools
import operator
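
This one-line addition repeats across the benchmark files below: black 24.x requires a blank line between a module docstring and the code that follows it. A minimal sketch:

# black 23.x accepted an import directly under the module docstring:
"""One-line module docstring."""
import argparse

# black 24.x inserts a blank line after the docstring:
"""One-line module docstring."""

import argparse
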
1 change: 1 addition & 0 deletions benchmarks/dynamo/microbenchmarks/analyze_templates.py
@@ -4,6 +4,7 @@

That file can be fed into this script to minimize the total weighted matmul time as a function of allowed templates.
"""

import json

import click
2 changes: 1 addition & 1 deletion benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py
@@ -24,7 +24,7 @@ def fn():
pass

t = min(timeit.repeat(fn, number=K, repeat=3))
print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")


if __name__ == "__main__":
2 changes: 1 addition & 1 deletion benchmarks/dynamo/microbenchmarks/overheads.py
@@ -19,7 +19,7 @@ def bench(name, fn, requires_grad):
end = time.perf_counter()

results = timeit.repeat(lambda: fn(x), number=1000, repeat=1000)
print(f"{name} {np.median(results)*1000:.1f}us (warmup={end-start:.1f}s)")
print(f"{name} {np.median(results) * 1000:.1f}us (warmup={end - start:.1f}s)")


def main():
2 changes: 1 addition & 1 deletion benchmarks/dynamo/runner.py
@@ -548,7 +548,7 @@ def env_var(name):
out_io.write(f"Number CUDA Devices: {torch.cuda.device_count()}\n")
out_io.write(f"Device Name: {torch.cuda.get_device_name(0)}\n")
out_io.write(
f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory/1e9}\n"
f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory / 1e9}\n"
)

title = "## Build Summary"
4 changes: 2 additions & 2 deletions benchmarks/dynamo/training_loss.py
@@ -193,9 +193,9 @@ def main():
print(
f"Train model on {args.epochs} epochs with backend {args.backend} and optimizer {args.optimizer}:"
)
print(f"PyTorch spent {timedelta(seconds=native_elapsed/args.epochs)} per epoch")
print(f"PyTorch spent {timedelta(seconds=native_elapsed / args.epochs)} per epoch")
print(
f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed/args.epochs)} per epoch"
f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed / args.epochs)} per epoch"
)


1 change: 1 addition & 0 deletions benchmarks/instruction_counts/applications/ci.py
@@ -1,4 +1,5 @@
"""Collect instruction counts for continuous integration."""

import argparse
import hashlib
import json
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/core/api.py
@@ -1,4 +1,5 @@
"""Key enums and structs used to handle data flow within the benchmark."""

import dataclasses
import enum
import itertools as it
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/core/expand.py
@@ -2,6 +2,7 @@

This is mostly string manipulation, with just a bit of importlib magic.
"""

import importlib.abc
import importlib.util
import itertools as it
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/core/types.py
@@ -1,4 +1,5 @@
"""Type annotations for various benchmark objects."""

from typing import Any, Dict, Optional, Tuple, Union

from core.api import AutoLabels, GroupedBenchmark, TimerArgs
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/execution/runner.py
@@ -1,4 +1,5 @@
"""Run benchmarks while handling parallelism, isolation, and fault tolerance."""

import math
import multiprocessing
import subprocess
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/execution/work.py
@@ -1,4 +1,5 @@
"""Handle the details of subprocess calls and retries for a given benchmark run."""

import dataclasses
import json
import os
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/main.py
@@ -5,6 +5,7 @@
components) in future iterations. However this allows us to exercise the
underlying benchmark generation infrastructure in the meantime.
"""

import argparse
import sys
from typing import List
1 change: 1 addition & 0 deletions benchmarks/instruction_counts/worker/main.py
@@ -15,6 +15,7 @@
Because this file only expects to run in a child context, error handling means
plumbing failures up to the caller, not raising in this process.
"""

import argparse
import dataclasses
import io
8 changes: 5 additions & 3 deletions benchmarks/operator_benchmark/pt/diag_test.py
@@ -24,9 +24,11 @@
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device)
if dim == 2
else torch.rand(M, device=device),
"input": (
torch.rand(M, N, device=device)
if dim == 2
else torch.rand(M, device=device)
),
"diagonal": diagonal,
"out": out,
"out_tensor": torch.tensor(
16 changes: 10 additions & 6 deletions benchmarks/operator_benchmark/pt/matmul_test.py
@@ -34,12 +34,16 @@
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
"input_two": torch.rand(N, K, device=device)
if trans_b
else torch.rand(K, N, device=device).t(),
"input_one": (
torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t()
),
"input_two": (
torch.rand(N, K, device=device)
if trans_b
else torch.rand(K, N, device=device).t()
),
}
self.set_module_name("matmul")

16 changes: 9 additions & 7 deletions benchmarks/sparse/dlmc/matmul_bench.py
@@ -97,13 +97,15 @@ def filter_ops(operation):
f"{OPS_MAP[operation]}(x, y)",
),
(
test_name,
device,
"scipy:" + operation,
"scipy_matmul(sx, sy)",
)
if device == "cpu"
else None,
(
test_name,
device,
"scipy:" + operation,
"scipy_matmul(sx, sy)",
)
if device == "cpu"
else None
),
],
)
)