Revert "[BE]: Enable RUFF TRY400 rule - log.exception (#153473)" · pytorch/pytorch@3443627 · GitHub

Commit 3443627

Revert "[BE]: Enable RUFF TRY400 rule - log.exception (#153473)"
This reverts commit 4f4ecc5. Reverted #153473 on behalf of https://github.com/jeanschmidt because it appears to have broken internal signals. @albanD, may I count on you to help the author merge his PR? D74837988 (see the comment on #153473)
1 parent 86c6f71 commit 3443627
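
For context: Ruff's TRY400 rule asks that errors logged inside an `except` block use `logging.exception` (which records the active traceback) instead of `logging.error` (which records only the message unless `exc_info` is passed). The snippet below is a minimal illustration of that difference, written for this note and not taken from the PyTorch codebase; it shows the pattern the original PR moved call sites toward and what this revert undoes.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


def parse_int(value: str):
    try:
        return int(value)
    except ValueError as e:
        # TRY400-preferred: ERROR-level record plus the current traceback.
        log.exception("could not parse %r", value)
        # What the revert restores at many call sites: ERROR-level record with
        # the message only; no traceback unless exc_info is passed explicitly.
        log.error("could not parse %r: %s", value, e)
        return None


parse_int("not-a-number")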

File tree: 23 files changed (+46, -51 lines)

.flake8 (+1, -3)

@@ -16,9 +16,7 @@ ignore =
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!
-    G100,G101,G200,
-    # G201 replaced by LOG400 in ruff
-    G201,
+    G100,G101,G200
     # these ignores are from flake8-simplify. please fix or ignore with commented reason
     SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
     # SIM104 is already covered by pyupgrade ruff
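
The restored `.flake8` line keeps G100/G101/G200 ignored and drops the note about G201, the flake8-logging-format check that overlaps with Ruff's TRY400 by flagging `logging.error(..., exc_info=True)` inside exception handlers. A minimal, hypothetical example of the pattern both checks target (not code from this repository):

import logging

logging.basicConfig()
log = logging.getLogger(__name__)


def risky() -> None:
    raise RuntimeError("boom")


try:
    risky()
except RuntimeError:
    # Flagged by flake8-logging-format G201: exc_info=True duplicates what
    # logging.exception already provides.
    log.error("operation failed", exc_info=True)
    # The spelling that G201 (and Ruff's TRY400) prefer:
    log.exception("operation failed")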

.github/scripts/runner_determinator.py (+3, -3)

@@ -623,9 +623,9 @@ def main() -> None:
             is_canary,
         )

-    except Exception:
-        log.exception(
-            "Failed to get issue. Defaulting to Meta runners and no experiments."
+    except Exception as e:
+        log.error(
+            f"Failed to get issue. Defaulting to Meta runners and no experiments. Exception: {e}"
         )

     set_github_output(GH_OUTPUT_KEY_LABEL_TYPE, runner_label_prefix)

benchmarks/dynamo/common.py (+3, -3)

@@ -1700,8 +1700,8 @@ def maybe_snapshot_memory(should_snapshot_memory, suffix):
                     f"{output_filename.rstrip('.csv')}_{suffix}.pickle",
                 )
             )
-        except Exception:
-            log.exception("Failed to save memory snapshot")
+        except Exception as e:
+            log.error("Failed to save memory snapshot, %s", e)

         torch.cuda.memory._record_memory_history(enabled=None)

@@ -2742,7 +2742,7 @@ def minify_model(
     try:
         shutil.move("repro.py", f"{repro_dir}/{name}_repro.py")
     except OSError:
-        log.exception("Could not find repro script for model %s", name)
+        log.error("Could not find repro script for model %s", name)
     else:
         log.info(
             "Repro script for model %s with minified graph saved to %s",

pyproject.toml (-1)

@@ -197,7 +197,6 @@ select = [
     "TC",
     "TRY002",  # ban vanilla raise (todo fix NOQAs)
     "TRY203",
-    "TRY400",  # use logging.exception
     "TRY401",  # verbose-log-message
     "UP",
     "YTT",

tools/packaging/split_wheel.py (+4, -8)

@@ -47,15 +47,11 @@ def requirements_installed() -> bool:

         return True
     except ImportError:
-        logger.error(  # noqa: TRY400
-            "Requirements not installed, run the following command to install:",
-            exc_info=False,
+        logger.error(
+            "Requirements not installed, run the following command to install:"
         )
-        logger.error(  # noqa: TRY400
-            " > %s -m pip install -r %s/requirements.txt",
-            sys.executable,
-            ROOT_PATH,
-            exc_info=False,
+        logger.error(
+            " > %s -m pip install -r %s/requirements.txt", sys.executable, ROOT_PATH
         )
         return False

torch/_dynamo/repro/after_aot.py (+1, -1)

@@ -138,7 +138,7 @@ def debug_wrapper(
                 example_inputs,
                 compiler_name,
             )
-            log.exception("CompilerError")
+            log.error("CompilerError")
             raise

         # We may run regular PyTorch compute that may trigger Dynamo, do NOT

torch/_dynamo/utils.py (+1, -1)

@@ -2148,7 +2148,7 @@ def torchscript(model, example_inputs, verbose=False):
         if verbose:
             log.exception("jit error")
         else:
-            log.error("Both torch.jit.trace and torch.jit.script failed")  # noqa: TRY400
+            log.error("Both torch.jit.trace and torch.jit.script failed")
     return None

torch/_guards.py (+1, -1)

@@ -359,7 +359,7 @@ def create(self, builder: GuardBuilderBase):
         except Exception:
             log.exception("Error while creating guard:\n%s", str(self).rstrip())
             if self.stack:
-                log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())  # noqa: TRY400
+                log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
             raise

     def is_specialized_nn_module(self):

torch/_inductor/codegen/cuda/cuda_env.py (+4, -4)

@@ -22,8 +22,8 @@ def get_cuda_arch() -> Optional[str]:
             major, minor = torch.cuda.get_device_capability(0)
             return str(major * 10 + minor)
         return str(cuda_arch)
-    except Exception:
-        log.exception("Error getting cuda arch")
+    except Exception as e:
+        log.error("Error getting cuda arch: %s", e)
         return None

@@ -35,8 +35,8 @@ def get_cuda_version() -> Optional[str]:
         if cuda_version is None:
             cuda_version = torch.version.cuda
         return cuda_version
-    except Exception:
-        log.exception("Error getting cuda version")
+    except Exception as e:
+        log.error("Error getting cuda version: %s", e)
         return None

torch/_inductor/compile_fx.py (+1, -1)

@@ -181,7 +181,7 @@ def _fx_compile_mode_default() -> tuple[FxCompileMode, bool]:
         import logging

         log = logging.getLogger(__name__)
-        log.error(  # noqa: TRY400
+        log.error(
             "Invalid value of %s for %s. Expected one of %s. Using default.",
             value,
             name,

torch/_inductor/debug.py (+5, -5)

@@ -796,13 +796,13 @@ def convert_sets_to_lists(d: dict[str, Any]) -> None:
     except Exception as e:
         # Since this is just logging code, it should never interfere with regular
         # program execution, so we use this try-except to guard against any error
-        log.error("Unexpected error in create_node_mapping: %s", e)  # noqa: TRY400
-        log.error("post_to_pre_grad_nodes_json: %s", post_to_pre_grad_nodes_json)  # noqa: TRY400
-        log.error(  # noqa: TRY400
+        log.error("Unexpected error in create_node_mapping: %s", e)
+        log.error("post_to_pre_grad_nodes_json: %s", post_to_pre_grad_nodes_json)
+        log.error(
             "triton_kernel_to_post_grad_json: %s", triton_kernel_to_post_grad_json
         )
-        log.error("pre_grad_graph_id: %s", pre_grad_graph_id)  # noqa: TRY400
-        log.error(traceback.format_exc())  # noqa: TRY400
+        log.error("pre_grad_graph_id: %s", pre_grad_graph_id)
+        log.error(traceback.format_exc())
         return empty_return

torch/_inductor/output_code.py (+1, -1)

@@ -718,7 +718,7 @@ def after_deserialization(self, constants: CompiledFxGraphConstants) -> str:
             )
             self.compiled_fn_runner = getattr(code_cache, "runner", None)
         except OSError:
-            log.exception("Failed to load artifact: %s", artifact_path)
+            log.error("Failed to load artifact: %s", artifact_path)
             raise

         return artifact_path

torch/_inductor/select_algorithm.py (+3, -3)

@@ -2238,9 +2238,9 @@ def benchmark_choices(
             try:
                 timing = cls.benchmark_choice(choice, autotune_args)
             except CUDACompileError as e:
-                log.error(  # noqa: TRY400
+                log.error(
                     "CUDA compilation error during autotuning: \n%s. \nIgnoring this choice.",
-                    e,
+                    str(e),
                 )
                 timing = float("inf")
             except NotImplementedError as e:
@@ -2253,7 +2253,7 @@ def benchmark_choices(
                 else:
                     if "illegal memory access" in msg:
                         msg += "\n\nEither error in template or triton bug.\n"
-                    log.error(  # noqa: TRY400
+                    log.error(
                         "Runtime error during autotuning: \n%s. \nIgnoring this choice.",
                         msg,
                     )

torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py (+1, -1)

@@ -60,7 +60,7 @@ def _orthogonalize_gram_schmidt(matrices, epsilon=0):
             try:
                 col /= torch.norm(col, dim=1, keepdim=True)
             except ZeroDivisionError:
-                logger.exception(
+                logger.error(
                     "The matrices to be orthogonalized has at least a column of all 0s. Please set a small value such as 1e-8 "
                     "as `orthogonalization_epsilon` in PowerSGD state."
                 )

torch/distributed/checkpoint/_async_process_executor.py (+3, -1)

@@ -235,7 +235,9 @@ def _checkpointing_subprocess(
                 f"Submitted checkpoint save request for checkpoint_id={obj.checkpoint_request_id}"  # noqa: G004
             )
     except BaseException as e:
-        logger.exception("Checkpoint background process encountered an exception")
+        logger.error(
+            f"Checkpoint background process encountered an exception: {e}"  # noqa: G004
+        )
         parent_conn.send(e)
         raise
     finally:

torch/distributed/checkpoint/logger.py (+1, -1)

@@ -90,7 +90,7 @@ def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T:
             msg_dict["event"] = "exception"
             msg_dict["error"] = f"{error}"
             msg_dict["time"] = time.time_ns()
-            _dcp_logger.error(msg_dict)  # noqa: TRY400
+            _dcp_logger.error(msg_dict)
             raise

         # end event

torch/distributed/elastic/multiprocessing/tail_log.py (+1, -1)

@@ -141,7 +141,7 @@ def stop(self) -> None:
             try:
                 f.result()
             except Exception as e:
-                logger.error(  # noqa: TRY400
+                logger.error(
                     "error in log tailor for %s%s. %s: %s",
                     self._name,
                     local_rank,

torch/distributed/pipelining/schedules.py (+3, -3)

@@ -1419,15 +1419,15 @@ def _step_microbatches(
             # do the communication
             _wait_batch_p2p(_batch_p2p(ops))
         except Exception as e:
-            logger.error(  # noqa: TRY400
+            logger.error(
                 "[Rank %s] pipeline schedule %s caught the following exception \
                  at time_step %s when running action %s",
                 self.rank,
                 self.__class__.__name__,
                 time_step,
                 action,
             )
-            logger.error(  # noqa: TRY400
+            logger.error(
                 "%s",
                 _format_pipeline_order(
                     self.pipeline_order, error_step_number=time_step
@@ -1739,7 +1739,7 @@ def _assert_unsharded(stage_idx: int):
             else:
                 raise ValueError(f"{action=} is unknown or unsupported")
         except Exception as e:
-            logger.error(  # noqa: TRY400
+            logger.error(
                 "_PipelineScheduleRuntime caught exception at step %s when running action %s. Full Schedule:",
                 time_step,
                 action,

torch/distributed/rpc/_utils.py (+1, -1)

@@ -31,7 +31,7 @@ def _group_membership_management(store, name, is_join):
         try:
             store.wait([returned])
         except RuntimeError:
-            logger.error(  # noqa: TRY400
+            logger.error(
                 "Group membership token %s timed out waiting for %s to be released.",
                 my_token,
                 returned,

torch/distributed/rpc/api.py (+2, -2)

@@ -297,7 +297,7 @@ def _barrier(worker_names):
     try:
         _all_gather(None, set(worker_names))
     except RuntimeError as ex:
-        logger.error("Failed to complete barrier, got error %s", ex)  # noqa: TRY400
+        logger.error("Failed to complete barrier, got error %s", ex)


 @_require_initialized
@@ -312,7 +312,7 @@ def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
     try:
         _all_gather(None, timeout=timeout)
     except RuntimeError as ex:
-        logger.error(  # noqa: TRY400
+        logger.error(
             "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex
         )
         raise ex

torch/export/_trace.py (+1, -1)

@@ -1135,7 +1135,7 @@ def wrapper(*args, **kwargs):
             error_type = t.__module__ + "." + t.__qualname__
             case_name = get_class_if_classified_error(e)
             if case_name is not None:
-                log.error(exportdb_error_message(case_name))  # noqa: TRY400
+                log.error(exportdb_error_message(case_name))
             log_export_usage(
                 event="export.error.classified",
                 type=error_type,

torch/fx/experimental/recording.py (+2, -2)

@@ -312,7 +312,7 @@ def retlog(r):
             if not shape_env.should_record_events or shape_env.is_recording:
                 # If ShapeEnv is disabled or already recording an event, re-raise the exception without logging.
                 raise
-            log.error(  # noqa: G201, TRY400
+            log.error(  # noqa: G201
                 "failed while running %s(*%s, **%s)",
                 name,
                 args[1:],
@@ -349,7 +349,7 @@ def replay_shape_env_events(events):
             # change after each event is replayed.
             event.run(shape_env)
         except Exception:
-            log.error("failed when running event: %s", event)  # noqa: TRY400
+            log.error("failed when running event: %s", event)
             raise

     return shape_env

torch/testing/_internal/common_distributed.py (+3, -3)

@@ -756,7 +756,7 @@ def run_test(self, test_name: str, parent_pipe) -> None:
             )
             sys.exit(TEST_SKIPS["generic"].exit_code)
         except Exception:
-            logger.error(  # noqa: TRY400
+            logger.error(
                 "Caught exception: \n%s exiting " "process %s with exit code: %s",
                 traceback.format_exc(),
                 self.rank,
@@ -791,7 +791,7 @@ def _get_timedout_process_traceback(self) -> None:
                 pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                 pipes.append((i, pipe))
             except ConnectionError as e:
-                logger.error(  # noqa: TRY400
+                logger.error(
                     "Encountered error while trying to get traceback for process %s: %s",
                     i,
                     e,
@@ -818,7 +818,7 @@ def _get_timedout_process_traceback(self) -> None:
                     "Could not retrieve traceback for timed out process: %s", rank
                 )
             except ConnectionError as e:
-                logger.error(  # noqa: TRY400
+                logger.error(
                     "Encountered error while trying to get traceback for process %s: %s",
                     rank,
                     e,

0 commit comments
