Update on "std/var: Return real results for complex input" · pytorch/pytorch@58c445c · GitHub

Commit 58c445c

Update on "std/var: Return real results for complex input"

Fixes gh-56627

[ghstack-poisoned]

2 parents 02595fb + 90a3322 · commit 58c445c
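For context, the PR stack this commit updates makes torch.std and torch.var return real-valued results for complex inputs (the variance of complex data is a real quantity). A minimal sketch of the intended behavior, assuming a build that includes this change:

    import torch

    x = torch.randn(4, dtype=torch.cfloat)   # complex64 input
    print(torch.var(x).dtype)                # torch.float32: real result for complex input
    print(torch.std(x).dtype)                # torch.float32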

File tree: 610 files changed, +31323 −20545 lines

.circleci/cimodel/data/binary_build_definitions.py (6 additions, 1 deletion)

@@ -27,7 +27,12 @@ def gen_build_env_parms(self):
 
     def gen_docker_image(self):
         if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
-            return miniutils.quote("pytorch/pytorch-binary-docker-image-ubuntu16.04:latest")
+            if self.gpu_version is None:
+                return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
+            else:
+                return miniutils.quote(
+                    f"pytorch/libtorch-cxx11-builder:{self.gpu_version}"
+                )
         if self.pydistro == "conda":
             if self.gpu_version is None:
                 return miniutils.quote("pytorch/conda-builder:cpu")

.circleci/cimodel/data/simple/docker_definitions.py (1 addition, 1 deletion)

@@ -14,7 +14,7 @@
     "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7",
     "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
     "pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
-    "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7",
+    "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
     "pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
     "pytorch-linux-xenial-py3-clang5-asan",
     "pytorch-linux-xenial-py3-clang7-onnx",

.circleci/cimodel/data/windows_build_definitions.py (5 additions, 5 deletions)

@@ -138,18 +138,18 @@ def render(self):
 
 WORKFLOW_DATA = [
     # VS2019 CUDA-10.1
-    WindowsJob(None, _VC2019, CudaVersion(10, 1)),
-    WindowsJob(1, _VC2019, CudaVersion(10, 1)),
-    WindowsJob(2, _VC2019, CudaVersion(10, 1)),
+    WindowsJob(None, _VC2019, CudaVersion(10, 1), master_only=True),
+    WindowsJob(1, _VC2019, CudaVersion(10, 1), master_only=True),
+    WindowsJob(2, _VC2019, CudaVersion(10, 1), master_only=True),
     WindowsJob('_azure_multi_gpu', _VC2019, CudaVersion(10, 1), multi_gpu=True, nightly_only=True),
     # VS2019 CUDA-11.1
     WindowsJob(None, _VC2019, CudaVersion(11, 1)),
     WindowsJob(1, _VC2019, CudaVersion(11, 1), master_only=True),
     WindowsJob(2, _VC2019, CudaVersion(11, 1), master_only=True),
     # VS2019 CPU-only
     WindowsJob(None, _VC2019, None),
-    WindowsJob(1, _VC2019, None, master_only=True),
-    WindowsJob(2, _VC2019, None, master_only=True),
+    WindowsJob(1, _VC2019, None),
+    WindowsJob(2, _VC2019, None),
     WindowsJob(1, _VC2019, CudaVersion(10, 1), force_on_cpu=True, master_only=True),
 ]

.circleci/config.yml (88 additions, 78 deletions)

Large diffs are not rendered by default.

.circleci/docker/build.sh (2 additions, 2 deletions)

@@ -140,8 +140,8 @@ case "$image" in
     VISION=yes
     KATEX=yes
     ;;
-  pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7)
-    CUDA_VERSION=11.2.0 # Deviating from major.minor to conform to nvidia's Docker image names
+  pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7)
+    CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
     CUDNN_VERSION=8
     ANACONDA_PYTHON_VERSION=3.6
     GCC_VERSION=7

.circleci/docker/common/install_conda.sh (2 additions, 2 deletions)

@@ -91,8 +91,8 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
     conda_install magma-cuda110 -c pytorch
   elif [[ "$CUDA_VERSION" == 11.1* ]]; then
     conda_install magma-cuda111 -c pytorch
-  elif [[ "$CUDA_VERSION" == 11.2* ]]; then
-    conda_install magma-cuda112 -c pytorch
+  elif [[ "$CUDA_VERSION" == 11.3* ]]; then
+    conda_install magma-cuda113 -c pytorch
   fi
 
   # TODO: This isn't working atm

.circleci/scripts/binary_populate_env.sh (2 additions, 0 deletions)

@@ -168,6 +168,8 @@ export CIRCLE_SHA1="$CIRCLE_SHA1"
 export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
 export CIRCLE_BRANCH="$CIRCLE_BRANCH"
 export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
+
+export USE_GOLD_LINKER=1
 # =================== The above code will be executed inside Docker container ===================
 EOL

.circleci/verbatim-sources/job-specs/job-specs-custom.yml (4 additions, 0 deletions)

@@ -209,6 +209,10 @@
           source /Users/distiller/workspace/miniconda3/bin/activate
           pip install boto3
           export PYTHONPATH="$PWD"
+
+          # Using the same IAM user to write stats to our OSS bucket
+          export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
+          export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
           python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
       when: always
     - store_test_results:

.circleci/verbatim-sources/workflows/workflows-scheduled-ci.yml (28 additions, 28 deletions)

@@ -9,28 +9,28 @@
                 - master
     jobs:
       - docker_build_job:
-          name: "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          image_name: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+          name: "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          image_name: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
      - pytorch_linux_build:
-          name: periodic_pytorch_xenial_cuda11_2_cudnn8_gcc7_build
+          name: periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_build
           requires:
-            - "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          build_environment: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
       - pytorch_linux_test:
-          name: periodic_pytorch_xenial_cuda11_2_cudnn8_gcc7_test
+          name: periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_test
           requires:
-            - periodic_pytorch_xenial_cuda11_2_cudnn8_gcc7_build
-          build_environment: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_build
+          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-test"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
           use_cuda_docker_runtime: "1"
           resource_class: gpu.medium
       - pytorch_linux_build:
-          name: periodic_libtorch_xenial_cuda11_2_cudnn8_gcc7_build
+          name: periodic_libtorch_xenial_cuda11_3_cudnn8_gcc7_build
           requires:
-            - "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          build_environment: "pytorch-libtorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          build_environment: "pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
       - pytorch_windows_build:
           build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
           cuda_version: "11.2"
@@ -71,30 +71,30 @@
   debuggable-scheduled-ci:
     jobs:
       - docker_build_job:
-          name: "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          image_name: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+          name: "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          image_name: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
           filters:
             branches:
               only:
                 - /ci-all\/.*/
                 - /release\/.*/
       - pytorch_linux_build:
-          name: pytorch_linux_xenial_cuda11_2_cudnn8_py3_gcc7_build
+          name: pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
           requires:
-            - "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          build_environment: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
           filters:
             branches:
               only:
                 - /ci-all\/.*/
                 - /release\/.*/
       - pytorch_linux_test:
-          name: pytorch_linux_xenial_cuda11_2_cudnn8_py3_gcc7_test
+          name: pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_test
           requires:
-            - pytorch_linux_xenial_cuda11_2_cudnn8_py3_gcc7_build
-          build_environment: "pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
+          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-test"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
           use_cuda_docker_runtime: "1"
           resource_class: gpu.medium
           filters:
@@ -103,11 +103,11 @@
                 - /ci-all\/.*/
                 - /release\/.*/
       - pytorch_linux_build:
-          name: pytorch_libtorch_linux_xenial_cuda11_2_cudnn8_py3_gcc7_build
+          name: pytorch_libtorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
           requires:
-            - "docker-pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
-          build_environment: "pytorch-libtorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7"
+            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
+          build_environment: "pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
+          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
           filters:
             branches:
               only:

.github/scripts/run_torchbench.py (103 additions, 0 deletions; new file)

@@ -0,0 +1,103 @@
+"""
+Generate a torchbench test report from a file containing the PR body.
+Currently, only supports running tests on specified model names
+
+Testing environment:
+- Intel Xeon 8259CL @ 2.50 GHz, 24 Cores with disabled Turbo and HT
+- Nvidia Tesla T4
+- Nvidia Driver 450.51.06
+- Python 3.7
+- CUDA 10.2
+"""
+# Known issues:
+# 1. Does not reuse the build artifact in other CI workflows
+# 2. CI jobs are serialized because there is only one worker
+import os
+import pathlib
+import argparse
+import subprocess
+
+from typing import List
+
+CUDA_VERSION = "cu102"
+PYTHON_VERSION = "3.7"
+TORCHBENCH_CONFIG_NAME = "config.yaml"
+MAGIC_PREFIX = "RUN_TORCHBENCH:"
+ABTEST_CONFIG_TEMPLATE = """# This config is automatically generated by run_torchbench.py
+start: {control}
+end: {treatment}
+threshold: 100
+direction: decrease
+timeout: 720
+tests:"""
+
+def gen_abtest_config(control: str, treatment: str, models: List[str]):
+    d = {}
+    d["control"] = control
+    d["treatment"] = treatment
+    config = ABTEST_CONFIG_TEMPLATE.format(**d)
+    if models == ["ALL"]:
+        return config + "\n"
+    for model in models:
+        config = f"{config}\n - {model}"
+    config = config + "\n"
+    return config
+
+def deploy_torchbench_config(output_dir: str, config: str):
+    # Create test dir if needed
+    pathlib.Path(output_dir).mkdir(exist_ok=True)
+    # TorchBench config file name
+    config_path = os.path.join(output_dir, TORCHBENCH_CONFIG_NAME)
+    with open(config_path, "w") as fp:
+        fp.write(config)
+
+def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> List[str]:
+    model_list = []
+    with open(prbody_file, "r") as pf:
+        lines = map(lambda x: x.strip(), pf.read().splitlines())
+        magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
+        if magic_lines:
+            # Only the first magic line will be respected.
+            model_list = list(map(lambda x: x.strip(), magic_lines[0][len(MAGIC_PREFIX):].split(",")))
+    # Shortcut: if model_list is ["ALL"], run all the tests
+    if model_list == ["ALL"]:
+        return model_list
+    # Sanity check: make sure all the user specified models exist in torchbench repository
+    benchmark_path = os.path.join(torchbench_path, "torchbenchmark", "models")
+    full_model_list = [model for model in os.listdir(benchmark_path) if os.path.isdir(os.path.join(benchmark_path, model))]
+    for m in model_list:
+        if m not in full_model_list:
+            print(f"The model {m} you specified does not exist in TorchBench suite. Please double check.")
+            return []
+    return model_list
+
+def run_torchbench(pytorch_path: str, torchbench_path: str, output_dir: str):
+    # Copy system environment so that we will not override
+    env = dict(os.environ)
+    command = ["python", "bisection.py", "--work-dir", output_dir,
+               "--pytorch-src", pytorch_path, "--torchbench-src", torchbench_path,
+               "--config", os.path.join(output_dir, "config.yaml"),
+               "--output", os.path.join(output_dir, "result.txt")]
+    subprocess.check_call(command, cwd=torchbench_path, env=env)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Run TorchBench tests based on PR')
+    parser.add_argument('--pr-num', required=True, type=str, help="The Pull Request number")
+    parser.add_argument('--pr-base-sha', required=True, type=str, help="The Pull Request base hash")
+    parser.add_argument('--pr-head-sha', required=True, type=str, help="The Pull Request head hash")
+    parser.add_argument('--pr-body', required=True, help="The file that contains body of a Pull Request")
+    parser.add_argument('--pytorch-path', required=True, type=str, help="Path to pytorch repository")
+    parser.add_argument('--torchbench-path', required=True, type=str, help="Path to TorchBench repository")
+    args = parser.parse_args()
+
+    output_dir: str = os.path.join(os.environ["HOME"], ".torchbench", "bisection", f"pr{args.pr_num}")
+    # Identify the specified models and verify the input
+    models = extract_models_from_pr(args.torchbench_path, args.pr_body)
+    if not models:
+        print("Can't parse the model filter from the pr body. Currently we only support allow-list.")
+        exit(1)
+    print(f"Ready to run TorchBench with benchmark. Result will be saved in the directory: {output_dir}.")
+    # Run TorchBench with the generated config
+    torchbench_config = gen_abtest_config(args.pr_base_sha, args.pr_head_sha, models)
+    deploy_torchbench_config(output_dir, torchbench_config)
+    run_torchbench(pytorch_path=args.pytorch_path, torchbench_path=args.torchbench_path, output_dir=output_dir)
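To make the plumbing above concrete, a hedged sketch of the data flow (PR body text, model names, and SHAs are hypothetical):

    # A PR body containing the magic line
    #     RUN_TORCHBENCH: resnet50, mobilenet_v2
    # makes extract_models_from_pr() return ["resnet50", "mobilenet_v2"];
    # gen_abtest_config("abc123", "def456", ["resnet50", "mobilenet_v2"]) then yields:
    #
    #     # This config is automatically generated by run_torchbench.py
    #     start: abc123
    #     end: def456
    #     threshold: 100
    #     direction: decrease
    #     timeout: 720
    #     tests:
    #      - resnet50
    #      - mobilenet_v2
    #
    # deploy_torchbench_config() writes this to $HOME/.torchbench/bisection/pr<PR>/config.yaml,
    # and run_torchbench() hands it to TorchBench's bisection.py.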

.github/workflows/cancel_redundant_workflows.yml (1 addition, 0 deletions)

@@ -8,6 +8,7 @@ on:
       - Lint
       - Linux CI (pytorch-linux-xenial-py3.6-gcc5.4)
       - Test tools
+      - TorchBench CI (pytorch-linux-py3.7-cu102)
       - clang-format
 jobs:
   cancel:

.github/workflows/lint.yml (1 addition, 0 deletions)

@@ -261,6 +261,7 @@ jobs:
           git submodule update --init --recursive
 
           export USE_NCCL=0
+          export USE_DEPLOY=1
           # We really only need compile_commands.json, so no need to build!
           time python setup.py --cmake-only build

.github/workflows/run_torchbench.yml (66 additions, 0 deletions; new file)

@@ -0,0 +1,66 @@
+name: TorchBench CI (pytorch-linux-py3.7-cu102)
+on:
+  pull_request:
+
+env:
+  PR_NUM: ${{ github.event.number }}
+  PR_BODY: ${{ github.event.pull_request.body }}
+  PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
+  PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
+
+jobs:
+  run-torchbench:
+    # We don't accept running on non-pytorch repos because of security concerns
+    # Only run the job when the body contains magic word "RUN_TORCHBENCH:"
+    if: ${{ github.repository_owner == 'pytorch' && contains(github.event.pull_request.body, 'RUN_TORCHBENCH:') }}
+    runs-on: [self-hosted, bm-runner]
+    # Set to 12 hours
+    timeout-minutes: 720
+    steps:
+    - name: Checkout PyTorch
+      uses: actions/checkout@v2
+      with:
+        path: pytorch
+    - name: Checkout TorchBench
+      uses: actions/checkout@v2
+      with:
+        repository: pytorch/benchmark
+        path: benchmark
+    - name: Create conda environment
+      run: |
+        conda create -y -n pr-ci python=3.7
+        # shellcheck disable=SC1091
+        . "${HOME}"/anaconda3/etc/profile.d/conda.sh
+        conda activate pr-ci
+        conda install -y numpy=1.17 requests=2.22 ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six dataclasses pillow pytest tabulate
+    - name: Update self-hosted PyTorch
+      run: |
+        pushd "${HOME}"/pytorch
+        git fetch
+        popd
+    - name: Run TorchBench
+      run: |
+        pushd "${HOME}"/pytorch
+        PR_MERGE_BASE=$(git merge-base "$PR_BASE_SHA" "$PR_HEAD_SHA")
+        popd
+        PR_BODY_FILE=/tmp/pr-body.txt
+        echo "$PR_BODY" > ${PR_BODY_FILE}
+        # shellcheck disable=SC1091
+        . "${HOME}"/anaconda3/etc/profile.d/conda.sh
+        conda activate pr-ci
+        python3 pytorch/.github/scripts/run_torchbench.py \
+          --pytorch-path "${HOME}"/pytorch \
+          --torchbench-path "${PWD}"/benchmark \
+          --pr-num "$PR_NUM" \
+          --pr-base-sha "$PR_MERGE_BASE" \
+          --pr-head-sha "$PR_HEAD_SHA" \
+          --pr-body "$PR_BODY_FILE"
+    - name: Remove conda environment and cleanup
+      run: |
+        conda env remove --name pr-ci
+        rm /tmp/pr-body.txt
+    - name: Upload artifact
+      uses: actions/upload-artifact@v2
+      with:
+        name: TorchBench result
+        path: ~/.torchbench/bisection/pr${{ github.event.number }}
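As the if: condition above encodes, the job runs only on pytorch-owned repositories and only when the PR description contains the magic word. A contributor would add a line such as RUN_TORCHBENCH: resnet50 (model name hypothetical) to the PR body; the bisection output is then uploaded as the "TorchBench result" artifact.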

.jenkins/pytorch/build.sh (2 additions, 2 deletions)

@@ -184,8 +184,8 @@ fi
 # Target only our CI GPU machine's CUDA arch to speed up the build
 export TORCH_CUDA_ARCH_LIST="5.2"
 
-# Add sm_75 support for the Linux CUDA 10.2 cuDNN 7 build
-if [[ "$BUILD_ENVIRONMENT" == *cuda10.2-cudnn7*build ]]; then
+# Add sm_75 support for the Linux CUDA 11.1 cuDNN 8 CircleCI build
+if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda11.1-cudnn8*build ]]; then
   export TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST";7.5"
 fi

0 commit comments