Only build IValueInterface when necessary on "[caffe2] Remove IValue … · pytorch/pytorch@f651c1c · GitHub
[go: up one dir, main page]

Skip to content

Commit f651c1c

Browse files
committed
Only build IValueInterface when necessary on "[caffe2] Remove IValue include from operator.h"
ivalue.h includes Tensor.h, so creating a compilation barrier between operator.h and ivalue.h means non-exported caffe2 ops don't need to be rebuilt when developing PyTorch. Differential Revision: [D32289812](https://our.internmc.facebook.com/intern/diff/D32289812) [ghstack-poisoned]
2 parents 7f33eb1 + 59039f5 commit f651c1c

File tree

1,106 files changed

+62255
-37612
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

1,106 files changed

+62255
-37612
lines changed

.bazelrc

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,9 @@
11
build --copt=--std=c++14
22
build --copt=-I.
3+
# Bazel does not support including its cc_library targets as system
4+
# headers. We work around this for generated code
5+
# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
6+
# system include path.
37
build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
48
build --experimental_ui_max_stdouterr_bytes=2048576
59

@@ -12,6 +16,9 @@ build:no-tty --show_progress_rate_limit 10
1216
build:gpu --define=cuda=true
1317
# define a separate build folder for faster switching between configs
1418
build:gpu --platform_suffix=-gpu
19+
# See the note on the config-less build for details about why we are
20+
# doing this. We must also do it for the "-gpu" platform suffix.
21+
build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin
1522
# rules_cuda configuration
1623
build:gpu --@rules_cuda//cuda:enable_cuda
1724
build:gpu --@rules_cuda//cuda:cuda_targets=sm_52

.circleci/README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,9 @@ A **binary configuration** is a collection of
7171
* release or nightly
7272
* releases are stable, nightlies are beta and built every night
7373
* python version
74-
* linux: 3.5m, 3.6m 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
75-
* macos: 3.6, 3.7, 3.8
76-
* windows: 3.6, 3.7, 3.8
74+
* linux: 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
75+
* macos: 3.7, 3.8
76+
* windows: 3.7, 3.8
7777
* cpu version
7878
* cpu, cuda 9.0, cuda 10.0
7979
* The supported cuda versions occasionally change
@@ -428,7 +428,7 @@ docker run \
428428
# possibly need are in .circleci/scripts/binary_populate_env.sh
429429
# You should probably always export at least these 3 variables
430430
export PACKAGE_TYPE=conda
431-
export DESIRED_PYTHON=3.6
431+
export DESIRED_PYTHON=3.7
432432
export DESIRED_CUDA=cpu
433433

434434
# Call the entrypoint
@@ -476,7 +476,7 @@ conda activate binary
476476
# possibly need are in .circleci/scripts/binary_populate_env.sh
477477
# You should probably always export at least these 3 variables
478478
export PACKAGE_TYPE=conda
479-
export DESIRED_PYTHON=3.6
479+
export DESIRED_PYTHON=3.7
480480
export DESIRED_CUDA=cpu
481481

482482
# Call the entrypoint you want

.circleci/cimodel/data/binary_build_data.py

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -31,32 +31,11 @@ def get_processor_arch_name(gpu_version):
3131
)
3232

3333
CONFIG_TREE_DATA = OrderedDict(
34-
macos=([None], OrderedDict(
35-
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
36-
conda=dimensions.STANDARD_PYTHON_VERSIONS,
37-
libtorch=[
38-
"3.7",
39-
],
40-
)),
41-
macos_arm64=([None], OrderedDict(
42-
wheel=[
43-
"3.8",
44-
"3.9",
45-
],
46-
conda=[
47-
"3.8",
48-
"3.9",
49-
],
50-
)),
5134
windows=(
5235
# Stop building Win+CU102, see https://github.com/pytorch/pytorch/issues/65648
5336
[v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS and v != "cuda102"],
5437
OrderedDict(
55-
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
5638
conda=dimensions.STANDARD_PYTHON_VERSIONS,
57-
libtorch=[
58-
"3.7",
59-
],
6039
)
6140
),
6241
)

.circleci/cimodel/data/pytorch_build_definitions.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -334,13 +334,12 @@ def instantiate_configs(only_slow_gradcheck):
334334
build_only=build_only,
335335
)
336336

337-
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
337+
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
338338
# should run on a CPU-only build that runs on all PRs.
339-
# XXX should this be updated to a more modern build? Projects are
340-
# beginning to drop python3.6
339+
# XXX should this be updated to a more modern build?
341340
if (
342341
distro_name == "xenial"
343-
and fc.find_prop("pyver") == "3.6"
342+
and fc.find_prop("pyver") == "3.7"
344343
and cuda_version is None
345344
and parallel_backend is None
346345
and not is_vulkan

.circleci/cimodel/data/simple/bazel_definitions.py

Lines changed: 0 additions & 69 deletions
This file was deleted.

.circleci/cimodel/data/simple/binary_smoketest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ def gen_tree(self):
164164
is_master_only=True,
165165
requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"],
166166
extra_props={
167-
"resource_class": "gpu.medium",
167+
"resource_class": "gpu.nvidia.small",
168168
"use_cuda_docker_runtime": miniutils.quote((str(1))),
169169
},
170170
),

.circleci/cimodel/data/simple/docker_definitions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
2626
"name": quote(f"docker-{image_name}"),
2727
"image_name": quote(image_name),
2828
})
29-
if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
29+
if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
3030
# pushing documentation on tags requires CircleCI to also
3131
# build all the dependencies on tags, including this docker image
3232
parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",

0 commit comments

Comments (0)