Update on "[dynamo] replace `unimplemented` with `unimplemented_v2` i… · pytorch/pytorch@ca362a7 · GitHub

Commit ca362a7

Update on "[dynamo] replace unimplemented with unimplemented_v2 in variables/torch_functions.py"
This addresses part of #147913. cc voznesenskym penguinwu EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng wenzhe-nrv jiayisunx chenyang78 kadeng chauhang amjames [ghstack-poisoned]
2 parents 342540a + 7cb64ec commit ca362a7

File tree

367 files changed (+10827, -3645 lines)

  • aten/src/ATen
  • benchmarks
  • c10
  • cmake
  • docs/source
  • scripts
  • test
  • third_party
  • tools/setup_helpers
  • torch

    .ci/docker/almalinux/Dockerfile

    Lines changed: 13 additions & 15 deletions
    @@ -1,5 +1,6 @@
     ARG CUDA_VERSION=12.4
     ARG BASE_TARGET=cuda${CUDA_VERSION}
    +ARG ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete
     FROM amd64/almalinux:8 as base

     ENV LC_ALL en_US.UTF-8
    @@ -8,10 +9,6 @@ ENV LANGUAGE en_US.UTF-8

     ARG DEVTOOLSET_VERSION=11

    -ENV LC_ALL en_US.UTF-8
    -ENV LANG en_US.UTF-8
    -ENV LANGUAGE en_US.UTF-8
    -
     RUN yum -y update
     RUN yum -y install epel-release
     RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
    @@ -41,7 +38,7 @@ RUN bash ./install_conda.sh && rm install_conda.sh

     # Install CUDA
     FROM base as cuda
    -ARG CUDA_VERSION=12.4
    +ARG CUDA_VERSION=12.6
     RUN rm -rf /usr/local/cuda-*
     ADD ./common/install_cuda.sh install_cuda.sh
     COPY ./common/install_nccl.sh install_nccl.sh
    @@ -57,28 +54,29 @@ FROM cuda as cuda11.8
     RUN bash ./install_cuda.sh 11.8
     ENV DESIRED_CUDA=11.8

    -FROM cuda as cuda12.1
    -RUN bash ./install_cuda.sh 12.1
    -ENV DESIRED_CUDA=12.1
    -
    -FROM cuda as cuda12.4
    -RUN bash ./install_cuda.sh 12.4
    -ENV DESIRED_CUDA=12.4
    -
     FROM cuda as cuda12.6
     RUN bash ./install_cuda.sh 12.6
     ENV DESIRED_CUDA=12.6

    +FROM cuda as cuda12.8
    +RUN bash ./install_cuda.sh 12.8
    +ENV DESIRED_CUDA=12.8
    +
    +FROM ${ROCM_IMAGE} as rocm
    +ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
    +ADD ./common/install_mkl.sh install_mkl.sh
    +RUN bash ./install_mkl.sh && rm install_mkl.sh
    +ENV MKLROOT /opt/intel
    +
     # Install MNIST test data
     FROM base as mnist
     ADD ./common/install_mnist.sh install_mnist.sh
     RUN bash ./install_mnist.sh

     FROM base as all_cuda
     COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
    -COPY --from=cuda12.1 /usr/local/cuda-12.1 /usr/local/cuda-12.1
    -COPY --from=cuda12.4 /usr/local/cuda-12.4 /usr/local/cuda-12.4
     COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
    +COPY --from=cuda12.4 /usr/local/cuda-12.8 /usr/local/cuda-12.8

     # Final step
     FROM ${BASE_TARGET} as final
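
    With this change, the same Dockerfile serves both CUDA and ROCm: BASE_TARGET selects which stage becomes final, and for ROCm the base image itself is swapped in through ROCM_IMAGE rather than a CUDA_VERSION. A minimal sketch of a manual invocation, assuming it is run from the repository root; the image tag and build context below are illustrative, not taken from this commit:

        # Build the ROCm flavor of the almalinux image by hand (illustrative values).
        # --target final resolves FROM ${BASE_TARGET}; BASE_TARGET=rocm picks the
        # stage that starts FROM ${ROCM_IMAGE} instead of a CUDA stage.
        docker build \
            --target final \
            --build-arg "BASE_TARGET=rocm" \
            --build-arg "ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete" \
            --build-arg "DEVTOOLSET_VERSION=11" \
            -t almalinux-builder:rocm6.3 \
            -f .ci/docker/almalinux/Dockerfile \
            .ci/docker/almalinux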

    .ci/docker/almalinux/build.sh

    Lines changed: 11 additions & 1 deletion
    @@ -15,9 +15,16 @@ fi
     DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')

     CUDA_VERSION=""
    +ROCM_VERSION=""
    +EXTRA_BUILD_ARGS=""
     if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
         # extract cuda version from image name and tag. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
         CUDA_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
    +    EXTRA_BUILD_ARGS="--build-arg CUDA_VERSION=${CUDA_VERSION}"
    +elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
    +    # extract rocm version from image name and tag. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
    +    ROCM_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
    +    EXTRA_BUILD_ARGS="--build-arg ROCM_IMAGE=rocm/dev-almalinux-8:${ROCM_VERSION}-complete"
     fi

     case ${DOCKER_TAG_PREFIX} in
    @@ -27,6 +34,9 @@ case ${DOCKER_TAG_PREFIX} in
         cuda*)
             BASE_TARGET=cuda${CUDA_VERSION}
             ;;
    +    rocm*)
    +        BASE_TARGET=rocm
    +        ;;
         *)
             echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}"
             exit 1
    @@ -47,8 +57,8 @@ docker build \
         --target final \
         --progress plain \
         --build-arg "BASE_TARGET=${BASE_TARGET}" \
    -    --build-arg "CUDA_VERSION=${CUDA_VERSION}" \
         --build-arg "DEVTOOLSET_VERSION=11" \
    +    ${EXTRA_BUILD_ARGS} \
         -t ${tmp_tag} \
         $@ \
         -f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \
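
    The EXTRA_BUILD_ARGS indirection lets the single docker build invocation at the bottom serve both tag families; only the version-specific --build-arg changes. A quick standalone check of the awk-based tag parsing, with sample tags that are illustrative rather than taken from CI:

        # Illustrative check of the version extraction used above:
        echo "cuda12.8" | awk -F'cuda' '{print $2}'   # prints: 12.8
        echo "rocm6.3"  | awk -F'rocm' '{print $2}'   # prints: 6.3
        # For a rocm tag the script would then pass, e.g.:
        #   --build-arg ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete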

    .ci/docker/common/install_cuda.sh

    Lines changed: 47 additions & 60 deletions
    @@ -2,27 +2,50 @@

     set -ex

    -CUDNN_VERSION=9.5.1.17
    +arch_path=''
    +targetarch=${TARGETARCH:-$(uname -m)}
    +if [ ${targetarch} = 'amd64' ] || [ "${targetarch}" = 'x86_64' ]; then
    +    arch_path='x86_64'
    +else
    +    arch_path='sbsa'
    +fi
    +
    +function install_cuda {
    +    version=$1
    +    runfile=$2
    +    major_minor=${version%.*}
    +    rm -rf /usr/local/cuda-${major_minor} /usr/local/cuda
    +    if [[ ${arch_path} == 'sbsa' ]]; then
    +        runfile="${runfile}_sbsa"
    +    fi
    +    runfile="${runfile}.run"
    +    wget -q https://developer.download.nvidia.com/compute/cuda/${version}/local_installers/${runfile} -O ${runfile}
    +    chmod +x ${runfile}
    +    ./${runfile} --toolkit --silent
    +    rm -f ${runfile}
    +    rm -f /usr/local/cuda && ln -s /usr/local/cuda-${major_minor} /usr/local/cuda
    +}
    +
    +function install_cudnn {
    +    cuda_major_version=$1
    +    cudnn_version=$2
    +    mkdir tmp_cudnn && cd tmp_cudnn
    +    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    +    filepath="cudnn-linux-${arch_path}-${cudnn_version}_cuda${cuda_major_version}-archive"
    +    wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-${arch_path}/${filepath}.tar.xz
    +    tar xf ${filepath}.tar.xz
    +    cp -a ${filepath}/include/* /usr/local/cuda/include/
    +    cp -a ${filepath}/lib/* /usr/local/cuda/lib64/
    +    cd ..
    +    rm -rf tmp_cudnn
    +}

     function install_118 {
         CUDNN_VERSION=9.1.0.70
         echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.4.0"
    -    rm -rf /usr/local/cuda-11.8 /usr/local/cuda
    -    # install CUDA 11.8.0 in the same container
    -    wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
    -    chmod +x cuda_11.8.0_520.61.05_linux.run
    -    ./cuda_11.8.0_520.61.05_linux.run --toolkit --silent
    -    rm -f cuda_11.8.0_520.61.05_linux.run
    -    rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.8 /usr/local/cuda
    -
    -    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    -    mkdir tmp_cudnn && cd tmp_cudnn
    -    wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz
    -    tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/include/* /usr/local/cuda/include/
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/lib/* /usr/local/cuda/lib64/
    -    cd ..
    -    rm -rf tmp_cudnn
    +    install_cuda 11.8.0 cuda_11.8.0_520.61.05_linux
    +
    +    install_cudnn 11 $CUDNN_VERSION

         CUDA_VERSION=11.8 bash install_nccl.sh

    @@ -34,22 +57,9 @@ function install_118 {
     function install_124 {
         CUDNN_VERSION=9.1.0.70
         echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.2"
    -    rm -rf /usr/local/cuda-12.4 /usr/local/cuda
    -    # install CUDA 12.4.1 in the same container
    -    wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run
    -    chmod +x cuda_12.4.1_550.54.15_linux.run
    -    ./cuda_12.4.1_550.54.15_linux.run --toolkit --silent
    -    rm -f cuda_12.4.1_550.54.15_linux.run
    -    rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda
    +    install_cuda 12.4.1 cuda_12.4.1_550.54.15_linux

    -    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    -    mkdir tmp_cudnn && cd tmp_cudnn
    -    wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
    -    cd ..
    -    rm -rf tmp_cudnn
    +    install_cudnn 12 $CUDNN_VERSION

         CUDA_VERSION=12.4 bash install_nccl.sh

    @@ -59,23 +69,11 @@ function install_124 {
     }

     function install_126 {
    +    CUDNN_VERSION=9.5.1.17
         echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
    -    rm -rf /usr/local/cuda-12.6 /usr/local/cuda
    -    # install CUDA 12.6.3 in the same container
    -    wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux.run
    -    chmod +x cuda_12.6.3_560.35.05_linux.run
    -    ./cuda_12.6.3_560.35.05_linux.run --toolkit --silent
    -    rm -f cuda_12.6.3_560.35.05_linux.run
    -    rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.6 /usr/local/cuda
    +    install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux

    -    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    -    mkdir tmp_cudnn && cd tmp_cudnn
    -    wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
    -    cd ..
    -    rm -rf tmp_cudnn
    +    install_cudnn 12 $CUDNN_VERSION

         CUDA_VERSION=12.6 bash install_nccl.sh

    @@ -186,22 +184,11 @@ function prune_126 {
     function install_128 {
         CUDNN_VERSION=9.8.0.87
         echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
    -    rm -rf /usr/local/cuda-12.8 /usr/local/cuda
         # install CUDA 12.8.0 in the same container
    -    wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux.run
    -    chmod +x cuda_12.8.0_570.86.10_linux.run
    -    ./cuda_12.8.0_570.86.10_linux.run --toolkit --silent
    -    rm -f cuda_12.8.0_570.86.10_linux.run
    -    rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.8 /usr/local/cuda
    +    install_cuda 12.8.0 cuda_12.8.0_570.86.10_linux

         # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    -    mkdir tmp_cudnn && cd tmp_cudnn
    -    wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
    -    cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
    -    cd ..
    -    rm -rf tmp_cudnn
    +    install_cudnn 12 $CUDNN_VERSION

         CUDA_VERSION=12.8 bash install_nccl.sh
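
    After this refactor each per-version function reduces to three calls: install_cuda for the toolkit runfile (the helper appends _sbsa and .run based on arch_path), install_cudnn for the matching cuDNN archive, and the existing install_nccl.sh step. As a hedged sketch of how a future version entry might reuse the helpers; the version numbers, cuDNN version, and runfile name below are placeholders, not values from this commit:

        # Hypothetical new entry reusing the helpers introduced above
        # (X/Y/Z and the driver build in the runfile name are placeholders).
        function install_12X {
            CUDNN_VERSION=9.Y.Z
            echo "Installing CUDA 12.X and cuDNN ${CUDNN_VERSION} and NCCL"
            install_cuda 12.X.0 cuda_12.X.0_NNN.NN.NN_linux   # helper picks x86_64 vs sbsa runfile
            install_cudnn 12 $CUDNN_VERSION                   # fetches cudnn-linux-${arch_path}-... archive
            CUDA_VERSION=12.X bash install_nccl.sh
        }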

    .ci/docker/common/install_cuda_aarch64.sh

    Lines changed: 0 additions & 44 deletions
    This file was deleted.

    .ci/docker/common/install_inductor_benchmark_deps.sh

    Lines changed: 0 additions & 7 deletions
    @@ -14,13 +14,6 @@ function install_timm() {
         local commit
         commit=$(get_pinned_commit timm)

    -    # TODO (huydhn): There is no torchvision release on 3.13 when I write this, so
    -    # I'm using nightly here instead. We just need to package to be able to install
    -    # TIMM. Removing this once vision has a release on 3.13
    -    if [[ "${ANACONDA_PYTHON_VERSION}" == "3.13" ]]; then
    -        pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu124
    -    fi
    -
         pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
         # Clean up
         conda_run pip uninstall -y cmake torch torchvision triton

    0 commit comments