switch Bazel to common aten codegen · pytorch/pytorch@4276e7d · GitHub

Commit 4276e7d
Author: Michael Andreas Dagitses

switch Bazel to common aten codegen

We can also clean up the other rule generation. This removes support for per-operator headers for now; we don't take advantage of them internally, so it is better to keep the Bazel build compatible with our internal builds.

Differential Revision: [D36489359](https://our.internmc.facebook.com/intern/diff/D36489359/)

**NOTE FOR REVIEWERS**: This PR has internal Facebook-specific changes or comments; please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D36489359/)!

[ghstack-poisoned]
1 parent 726ede4 commit 4276e7d
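The "common aten codegen" referred to above is the shared gen_aten genrule that build.bzl defines for the internal build and that BUILD.bazel now consumes as ":gen_aten" (see the diffs below). As rough orientation only, here is a minimal sketch of what such a genrule amounts to; it reuses the flag spelling from the removed generate_aten rule, while the real command and output list are assembled in build.bzl as gen_aten_cmd and gen_aten_outs, and the single output listed here is illustrative:

    # Minimal sketch, not the actual build.bzl definition: run the shared
    # //torchgen:gen generator over the ATen YAML inputs and templates.
    genrule(
        name = "gen_aten",
        srcs = [
            "aten/src/ATen/native/native_functions.yaml",
            "aten/src/ATen/native/tags.yaml",
        ] + glob(["aten/src/ATen/templates/**"]),
        # Illustrative single output; the full list is gen_aten_outs in build.bzl.
        outs = ["aten/src/ATen/RegisterCPU.cpp"],
        # Flags mirror the removed generate_aten rule, minus --per-operator-headers.
        cmd = "$(execpath //torchgen:gen) --source-path aten/src/ATen --install_dir $(RULEDIR)/aten/src/ATen",
        tools = ["//torchgen:gen"],
    )

If the label matches the ":gen_aten" reference added to BUILD.bazel, the codegen step can be exercised on its own with "bazel build //:gen_aten".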

File tree

3 files changed: +2 −62 lines

BUILD.bazel

Lines changed: 2 additions & 19 deletions
@@ -6,7 +6,7 @@ load("//third_party:substitution.bzl", "header_template_rule")
 load("//:tools/bazel.bzl", "rules")
 load("//tools/rules:cu.bzl", "cu_library")
 load("//tools/config:defs.bzl", "if_cuda")
-load("//:aten.bzl", "intern_build_aten_ops", "generate_aten")
+load("//:aten.bzl", "intern_build_aten_ops")
 load(":build.bzl", "define_targets", "GENERATED_AUTOGRAD_CPP", "GENERATED_AUTOGRAD_PYTHON")
 load(":build_variables.bzl", "jit_core_sources", "libtorch_core_sources", "libtorch_cuda_sources", "libtorch_distributed_sources", "libtorch_extra_sources", "libtorch_nvfuser_generated_headers", "libtorch_nvfuser_runtime_sources", "libtorch_python_core_sources", "torch_cpp_srcs", "lazy_tensor_ts_sources")
 load(":ufunc_defs.bzl", "aten_ufunc_generated_cpu_sources", "aten_ufunc_generated_cpu_kernel_sources", "aten_ufunc_generated_cuda_sources")

@@ -22,16 +22,13 @@ COMMON_COPTS = [
     "-DTH_HAVE_THREAD",
     "-DUSE_FBGEMM",
     "-DUSE_DISTRIBUTED",
-    "-DAT_PER_OPERATOR_HEADERS",
     "-DATEN_THREADING=NATIVE",
     "-DNO_CUDNN_DESTROY_HANDLE",
 ] + if_cuda([
     "-DUSE_CUDA",
     "-DUSE_CUDNN",
 ])

-aten_generation_srcs = ["aten/src/ATen/native/native_functions.yaml"] + ["aten/src/ATen/native/tags.yaml"] + glob(["aten/src/ATen/templates/**"])
-
 generated_cpu_cpp = [
     "aten/src/ATen/RegisterBackendSelect.cpp",
     "aten/src/ATen/RegisterCPU.cpp",

@@ -93,20 +90,6 @@ generated_cuda_cpp = [
     "aten/src/ATen/RegisterSparseCsrCUDA.cpp",
 ]

-generate_aten(
-    name = "generated_aten_cpp",
-    srcs = aten_generation_srcs,
-    outs = (
-        generated_cpu_cpp +
-        generated_cuda_cpp +
-        aten_ufunc_generated_cpu_sources("aten/src/ATen/{}") +
-        aten_ufunc_generated_cpu_kernel_sources("aten/src/ATen/{}") +
-        aten_ufunc_generated_cuda_sources("aten/src/ATen/{}") +
-        ["aten/src/ATen/Declarations.yaml"]
-    ),
-    generator = "//torchgen:gen",
-)
-
 filegroup(
     name = "cpp_generated_code",
     data = [":generate-code"],

@@ -291,7 +274,7 @@ cc_library(
         ],
     ) + [
         ":aten_src_ATen_config",
-        ":generated_aten_cpp",
+        ":gen_aten",
     ],
     includes = [
         "aten/src",

aten.bzl

Lines changed: 0 additions & 40 deletions
@@ -1,4 +1,3 @@
-load("@bazel_skylib//lib:paths.bzl", "paths")
 load("@rules_cc//cc:defs.bzl", "cc_library")

 CPU_CAPABILITY_NAMES = ["DEFAULT", "AVX2"]

@@ -55,42 +54,3 @@ def intern_build_aten_ops(copts, deps, extra_impls):
         deps = [":ATen_CPU_" + cpu_capability for cpu_capability in CPU_CAPABILITY_NAMES],
         linkstatic = 1,
     )
-
-def generate_aten_impl(ctx):
-    # Declare the entire ATen/ops/ directory as an output
-    ops_dir = ctx.actions.declare_directory("aten/src/ATen/ops")
-    outputs = [ops_dir] + ctx.outputs.outs
-
-    install_dir = paths.dirname(ops_dir.path)
-    tool_inputs, tool_inputs_manifest = ctx.resolve_tools(tools = [ctx.attr.generator])
-    ctx.actions.run_shell(
-        outputs = outputs,
-        inputs = ctx.files.srcs,
-        command = ctx.executable.generator.path + " $@",
-        arguments = [
-            "--source-path",
-            "aten/src/ATen",
-            "--per-operator-headers",
-            "--install_dir",
-            install_dir,
-        ],
-        tools = tool_inputs,
-        input_manifests = tool_inputs_manifest,
-        use_default_shell_env = True,
-        mnemonic = "GenerateAten",
-    )
-    return [DefaultInfo(files = depset(outputs))]
-
-generate_aten = rule(
-    implementation = generate_aten_impl,
-    attrs = {
-        "generator": attr.label(
-            executable = True,
-            allow_files = True,
-            mandatory = True,
-            cfg = "exec",
-        ),
-        "outs": attr.output_list(),
-        "srcs": attr.label_list(allow_files = True),
-    },
-)

build.bzl

Lines changed: 0 additions & 3 deletions
@@ -71,9 +71,6 @@ def define_targets(rules):
         tools = ["//torchgen:gen"],
         outs = ["aten/src/ATen/" + out for out in gen_aten_outs],
         cmd = gen_aten_cmd,
-        # This currently clashes with the existing Bazel generated
-        # files.
-        tags = ["-bazel"],
     )

     rules.genrule(
