[Release only] Use amazon linux 2 runners for CI by atalman · Pull Request #134350 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

[Release only] Use amazon linux 2 runners for CI #134350

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Aug 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions .github/actionlint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,16 @@ self-hosted-runner:
- linux.8xlarge.nvidia.gpu
- linux.16xlarge.nvidia.gpu
- linux.g5.4xlarge.nvidia.gpu
- am2.linux.large
- am2.linux.2xlarge
- am2.linux.4xlarge
- am2.linux.12xlarge
- am2.linux.24xlarge
- am2.linux.arm64.2xlarge
- am2.linux.4xlarge.nvidia.gpu
- am2.linux.8xlarge.nvidia.gpu
- am2.linux.16xlarge.nvidia.gpu
- am2.linux.g5.4xlarge.nvidia.gpu
# Organization-wide AWS Linux Runners on Linux Foundation account
- lf.linux.large
- lf.linux.2xlarge
Expand Down
42 changes: 21 additions & 21 deletions .github/workflows/periodic.yml
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,9 @@ jobs:
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
test-matrix: |
{ include: [
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
]}
linux-focal-cuda12_1-py3_10-gcc9-test:
name: linux-focal-cuda12.1-py3.10-gcc9
Expand All @@ -68,15 +68,15 @@ jobs:
docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 1, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_4-py3_10-gcc9-test:
Expand All @@ -99,9 +99,9 @@ jobs:
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
]}

parallelnative-linux-jammy-py3_8-gcc11-test:
Expand All @@ -124,7 +124,7 @@ jobs:
cuda-arch-list: 8.6
test-matrix: |
{ include: [
{ config: "multigpu", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
{ config: "multigpu", shard: 1, num_shards: 1, runner: "am2.linux.g5.12xlarge.nvidia.gpu" },
]}
build-with-debug: false

Expand All @@ -146,11 +146,11 @@ jobs:
build-with-debug: true
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 1, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda11_8-py3_10-gcc9-debug-test:
Expand Down
80 changes: 40 additions & 40 deletions .github/workflows/pull.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,14 +43,14 @@ jobs:
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "docs_test", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "backwards_compat", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "distributed", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
{ config: "distributed", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
{ config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "docs_test", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "backwards_compat", shard: 1, num_shards: 1, runner: "am2.linux.2xlarge" },
{ config: "distributed", shard: 1, num_shards: 2, runner: "am2.linux.2xlarge" },
{ config: "distributed", shard: 2, num_shards: 2, runner: "am2.linux.2xlarge" },
]}

linux-jammy-py3_8-gcc11-test:
Expand Down Expand Up @@ -156,14 +156,14 @@ jobs:
docker-image-name: pytorch-linux-focal-py3.8-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
{ config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "crossref", shard: 1, num_shards: 2, runner: "am2.linux.2xlarge" },
{ config: "crossref", shard: 2, num_shards: 2, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
]}
linux-focal-py3_8-clang10-test:
name: linux-focal-py3.8-clang10
Expand All @@ -184,14 +184,14 @@ jobs:
docker-image-name: pytorch-linux-focal-py3.11-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
{ config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "crossref", shard: 1, num_shards: 2, runner: "am2.linux.2xlarge" },
{ config: "crossref", shard: 2, num_shards: 2, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
]}


Expand All @@ -214,12 +214,12 @@ jobs:
docker-image-name: pytorch-linux-focal-py3.12-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "am2.linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "am2.linux.2xlarge" },
]}

linux-focal-py3_12-clang10-test:
Expand All @@ -240,9 +240,9 @@ jobs:
docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
test-matrix: |
{ include: [
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
{ config: "distributed", shard: 2, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
{ config: "distributed", shard: 1, num_shards: 3, runner: "am2.linux.8xlarge.nvidia.gpu" },
{ config: "distributed", shard: 2, num_shards: 3, runner: "am2.linux.8xlarge.nvidia.gpu" },
{ config: "distributed", shard: 3, num_shards: 3, runner: "am2.linux.8xlarge.nvidia.gpu" },
]}

linux-focal-cuda11_8-py3_10-gcc9-test:
Expand All @@ -265,12 +265,12 @@ jobs:
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 1, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "am2.linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "am2.linux.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_1-py3_10-gcc9-test:
Expand Down Expand Up @@ -328,7 +328,7 @@ jobs:
docker-image-name: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base:v1.1-lite
test-matrix: |
{ include: [
{ config: "xla", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
{ config: "xla", shard: 1, num_shards: 1, runner: "am2.linux.12xlarge" },
]}

linux-focal-py3_8-clang9-xla-test:
Expand Down
12 changes: 6 additions & 6 deletions .github/workflows/slow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,12 @@ jobs:
cuda-arch-list: 8.6
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 6, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 1, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 6, num_shards: 6, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-test:
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/trunk.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,11 +43,11 @@ jobs:
cuda-arch-list: 8.6
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 1, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "am2.linux.g5.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_4-py3_10-gcc9-sm86-test:
Expand Down
Loading