Make precompilation timeout configurable via TORCHINDUCTOR_PRECOMPILATION_TIMEOUT_SECONDS · pytorch/pytorch@c47944d · GitHub
[go: up one dir, main page]

Skip to content

Commit c47944d

Browse files
committed
Make precompilation timeout configurable via TORCHINDUCTOR_PRECOMPILATION_TIMEOUT_SECONDS environment variable.
1 parent fb85ebd commit c47944d

File tree

2 files changed

+7
-1
lines changed

2 files changed

+7
-1
lines changed

torch/_inductor/config.py

+6
Original file line number | Diff line number | Diff line change
@@ -8,6 +8,12 @@
88
from torch.utils._config_module import Config, get_tristate_env, install_config_module
99

1010

11+
precompilation_timeout_seconds: int = Config(
12+
env_name="TORCHINDUCTOR_PRECOMPILATION_TIMEOUT_SECONDS",
13+
default=60 * 60,
14+
type=int,
15+
)
16+
1117
inplace_padding = os.environ.get("TORCHINDUCTOR_INPLACE_PADDING", "1") == "1"
1218
can_inplace_pad_graph_input = False # ease testing
1319

torch/_inductor/select_algorithm.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -1821,7 +1821,7 @@ def __call__(
18211821
# arg, the function will be called instead of
18221822
# generating a random torch.Tensor for benchmarking.
18231823
input_gen_fns: Optional[dict[int, Callable[[ir.Buffer], torch.Tensor]]] = None,
1824-
precompilation_timeout_seconds: int = 60 * 60,
1824+
precompilation_timeout_seconds: int = config.precompilation_timeout_seconds,
18251825
return_multi_template=False,
18261826
):
18271827
from .codegen.cuda.cuda_kernel import CUDATemplateCaller

0 commit comments

Comments (0)