Update base for Update on "[Cutlass] Implement EVT example tensor cre… · pytorch/pytorch@64d0a2c · GitHub

Commit 64d0a2c

Update base for Update on "[Cutlass] Implement EVT example tensor creation"
This PR implements a translation layer from Inductor IR to "example tensors", the expected arguments of the EVT tracer. These tensors store the name, shape, stride, and dtype of the tensor and allow an AST-based Python parser to generate the EVT C++. Updates to example tensor creation.

Previously merged:
* #150903
* #150346
* #150345
* #150344

cc voznesenskym penguinwu EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng wenzhe-nrv jiayisunx ipiszy chenyang78 kadeng muchulee8 amjames chauhang aakhundov

[ghstack-poisoned]
2 parents f658807 + 835413b commit 64d0a2c
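As a rough illustration of the description above, here is a minimal sketch of what such an "example tensor" could look like as a plain Python descriptor. The class name, the accessor methods assumed on the buffer object, and the translation helper are all hypothetical, not the actual implementation in this stack:

from dataclasses import dataclass

# Hypothetical stand-in for the "example tensor" described above: it carries only
# the metadata the EVT tracer needs (name, shape, stride, dtype), never real data.
@dataclass(frozen=True)
class ExampleTensor:
    name: str                 # buffer name taken from the Inductor IR node
    shape: tuple[int, ...]    # concrete sizes
    stride: tuple[int, ...]   # element strides describing the layout
    dtype: str                # e.g. "torch.float16"

def example_tensor_from_buffer(buf) -> ExampleTensor:
    # Illustrative translation step: pull metadata off a buffer-like object
    # (assumed to expose get_name/get_size/get_stride/get_dtype accessors)
    # so an AST-based parser can emit the corresponding EVT C++.
    return ExampleTensor(
        name=buf.get_name(),
        shape=tuple(int(s) for s in buf.get_size()),
        stride=tuple(int(s) for s in buf.get_stride()),
        dtype=str(buf.get_dtype()),
    )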

File tree

159 files changed: 4540 additions, 2329 deletions


.ci/docker/requirements-docs.txt

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 5.3.0
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git@c49afc2aff734d40813b0ca182bb49b611d7a30c#egg=pytorch_sphinx_theme2
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@a98ffecb792d50df495be401becbf5c414421423#egg=pytorch_sphinx_theme2
 
 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought is probably

.ci/pytorch/macos-build.sh

Lines changed: 8 additions & 5 deletions
@@ -34,11 +34,14 @@ if which sccache > /dev/null; then
 fi
 
 print_cmake_info
-
-# Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
-# that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
-USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
-
+if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
+  # Needed for inductor benchmarks, as lots of HF networks make `torch.distribtued` calls
+  USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel
+else
+  # Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
+  # that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
+  USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
+fi
 if which sccache > /dev/null; then
   print_sccache_stats
 fi

.ci/pytorch/macos-test.sh

Lines changed: 27 additions & 15 deletions
@@ -221,27 +221,39 @@ test_torchbench_smoketest() {
   TEST_REPORTS_DIR=$(pwd)/test/test-reports
   mkdir -p "$TEST_REPORTS_DIR"
 
-  local dtype=notset
   local device=mps
-  local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152)
+  local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder moco speech_transformer)
 
   for backend in eager inductor; do
-    touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
-    touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
-
-    echo "Launching torchbench training performance run for backend ${backend}"
-    for model in "${models[@]}"; do
-      PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
-        --performance --only "$model" --backend "$backend" --training --devices "$device" \
-        --output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
+
+    for dtype in notset float16 bfloat16; do
+      echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
+      local dtype_arg="--${dtype}"
+      if [ "$dtype" == notset ]; then
+        dtype_arg="--float32"
+      fi
+      touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
+      for model in "${models[@]}"; do
+        PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
+          --performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
+          --output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
+      done
    done
 
-    echo "Launching torchbench inference performance run for backend ${backend}"
-    for model in "${models[@]}"; do
-      PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
-        --performance --only "$model" --backend "$backend" --inference --devices "$device" \
-        --output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
+    for dtype in notset amp; do
+      echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype}"
+      touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
+      local dtype_arg="--${dtype}"
+      if [ "$dtype" == notset ]; then
+        dtype_arg="--float32"
+      fi
+      for model in "${models[@]}"; do
+        PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
+          --performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
+          --output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
+      done
    done
+
  done
 
  echo "Pytorch benchmark on mps device completed"

.github/scripts/trymerge.py

Lines changed: 1 addition & 1 deletion
@@ -434,7 +434,7 @@ def __init__(self, name: str, url: str, run_id: int, status: Optional[str]):
 RE_GHSTACK_HEAD_REF = re.compile(r"^(gh/[^/]+/[0-9]+/)head$")
 RE_GHSTACK_DESC = re.compile(r"Stack.*:\r?\n(\* [^\r\n]+\r?\n)+", re.MULTILINE)
 RE_PULL_REQUEST_RESOLVED = re.compile(
-    r"Pull Request resolved: "
+    r"(Pull Request resolved|Pull-Request-resolved): "
    r"https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>[0-9]+)",
    re.MULTILINE,
 )
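To show what the widened pattern accepts, here is a small standalone check; the regex is copied from the new line above, while the two commit-message trailers (and the PR number in them) are made-up examples for illustration:

import re

# Same pattern as the updated trymerge.py line: it now accepts both the classic
# "Pull Request resolved:" trailer and the hyphenated "Pull-Request-resolved:" form.
RE_PULL_REQUEST_RESOLVED = re.compile(
    r"(Pull Request resolved|Pull-Request-resolved): "
    r"https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>[0-9]+)",
    re.MULTILINE,
)

for trailer in (
    "Pull Request resolved: https://github.com/pytorch/pytorch/pull/150345",
    "Pull-Request-resolved: https://github.com/pytorch/pytorch/pull/150345",
):
    match = RE_PULL_REQUEST_RESOLVED.search(trailer)
    assert match is not None
    print(match.group("owner"), match.group("repo"), match.group("number"))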

.github/workflows/_bazel-build-test.yml

Lines changed: 18 additions & 0 deletions
@@ -33,6 +33,10 @@ on:
         default: "linux.large"
         description: Runner type
 
+permissions:
+  id-token: write
+  contents: read
+
 env:
   GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
 
@@ -80,6 +84,13 @@ jobs:
       - name: Setup Linux
         uses: ./.github/actions/setup-linux
 
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
+          role-session-name: gha-bazel-build
+          aws-region: us-east-1
+
       - name: Calculate docker image
         id: calculate-docker-image
         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
@@ -202,6 +213,13 @@ jobs:
         uses: ./.github/actions/chown-workspace
         if: always()
 
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_pytorch_artifacts
+          role-session-name: gha-bazel-build-upload-artifacts
+          aws-region: us-east-1
+
       - name: Upload test artifacts
         uses: ./.github/actions/upload-test-artifacts
         if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'

.github/workflows/_mac-test.yml

Lines changed: 6 additions & 0 deletions
@@ -38,6 +38,11 @@ on:
        required: false
        type: boolean
        default: true
+    secrets:
+      HUGGING_FACE_HUB_TOKEN:
+        required: false
+        description: |
+          HF Auth token to avoid rate limits when downloading models or datasets from hub
 
 jobs:
   test:
@@ -166,6 +171,7 @@ jobs:
          JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
          JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
          REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
+         HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
        run: |
          # shellcheck disable=SC1090
          set -ex

.github/workflows/_xpu-test.yml

Lines changed: 4 additions & 0 deletions
@@ -47,6 +47,10 @@ on:
        type: boolean
        default: true
 
+permissions:
+  id-token: write
+  contents: read
+
 env:
   GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}

.github/workflows/docker-builds.yml

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ jobs:
        ]
        include:
          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
-            runner: linux.arm64.2xlarge
+            runner: linux.arm64.m7g.4xlarge
          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks
            runner: linux.arm64.m7g.4xlarge
    timeout-minutes: 600

.github/workflows/inductor-perf-test-nightly-macos.yml

Lines changed: 2 additions & 2 deletions
@@ -38,7 +38,7 @@ jobs:
    uses: ./.github/workflows/_mac-build.yml
    with:
      sync-tag: macos-perf-py3-arm64-build
-      build-environment: macos-py3-arm64
+      build-environment: macos-py3-arm64-distributed
      runner-type: macos-m1-stable
      build-generates-artifacts: true
      # To match the one pre-installed in the m1 runners
@@ -54,7 +54,7 @@
    uses: ./.github/workflows/_mac-test.yml
    needs: macos-perf-py3-arm64-build
    with:
-      build-environment: macos-py3-arm64
+      build-environment: macos-py3-arm64-distributed
      # Same as the build job
      python-version: 3.9.12
      test-matrix: ${{ needs.macos-perf-py3-arm64-build.outputs.test-matrix }}

.github/workflows/inductor-unittest.yml

Lines changed: 15 additions & 15 deletions
@@ -36,11 +36,11 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      test-matrix: |
        { include: [
-          { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
-          { config: "inductor_cpp_wrapper", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_cpp_wrapper", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.12xlarge.nvidia.gpu" },
+          { config: "inductor_cpp_wrapper", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_cpp_wrapper", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit
 
@@ -65,8 +65,8 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      test-matrix: |
        { include: [
-          { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit
 
@@ -90,7 +90,7 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      test-matrix: |
        { include: [
-          { config: "inductor-halide", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+          { config: "inductor-halide", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" },
        ]}
    secrets: inherit
 
@@ -114,7 +114,7 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      test-matrix: |
        { include: [
-          { config: "inductor-triton-cpu", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+          { config: "inductor-triton-cpu", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" },
        ]}
    secrets: inherit
 
@@ -138,10 +138,10 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      test-matrix: |
        { include: [
-          { config: "inductor_amx", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "inductor_amx", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "inductor_avx2", shard: 1, num_shards: 2, runner: "linux.10xlarge.avx2" },
-          { config: "inductor_avx2", shard: 2, num_shards: 2, runner: "linux.10xlarge.avx2" },
+          { config: "inductor_amx", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "inductor_amx", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "inductor_avx2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
+          { config: "inductor_avx2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
        ]}
    secrets: inherit
 
@@ -165,8 +165,8 @@ jobs:
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
-          { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit

.github/workflows/inductor.yml

Lines changed: 13 additions & 13 deletions
@@ -53,11 +53,11 @@ jobs:
      sync-tag: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
      test-matrix: |
        { include: [
-          { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+          { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit
 
@@ -82,14 +82,14 @@ jobs:
      sync-tag: linux-jammy-cpu-py3_9-gcc11-inductor-build
      test-matrix: |
        { include: [
-          { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
-          { config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.24xl.spr-metal" },
+          { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.24xl.spr-metal" },
        ]}
    secrets: inherit

.github/workflows/linux-aarch64.yml

Lines changed: 7 additions & 7 deletions
@@ -37,13 +37,13 @@ jobs:
      runner: linux.arm64.2xlarge
      test-matrix: |
        { include: [
-          { config: "default", shard: 1, num_shards: 4, runner: "linux.arm64.2xlarge" },
-          { config: "default", shard: 2, num_shards: 4, runner: "linux.arm64.2xlarge" },
-          { config: "default", shard: 3, num_shards: 4, runner: "linux.arm64.2xlarge" },
-          { config: "default", shard: 4, num_shards: 4, runner: "linux.arm64.2xlarge" },
-          { config: "default", shard: 1, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
-          { config: "default", shard: 2, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
-          { config: "default", shard: 3, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
+          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
+          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
+          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
+          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
+          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
+          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
+          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
        ]}
    secrets: inherit

0 commit comments