Remove cuda 11.1 references (#73514)

Summary:
Fixes: https://github.com/pytorch/pytorch/issues/73377

We've migrated to CUDA 11.3 as the default toolkit in 1.9, so it's time to stop the CUDA 11.1 builds (especially given the forward-compatibility guarantee across CUDA 11.x drivers).
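For reference, the driver/toolkit pairing can be checked on a given machine; a minimal sketch, assuming nvidia-smi and a PyTorch install are present:

# Driver version reported by the NVIDIA driver
nvidia-smi --query-gpu=driver_version --format=csv,noheader
# CUDA toolkit version the installed PyTorch build was compiled against
python -c "import torch; print(torch.version.cuda, torch.cuda.is_available())"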

Hence we are removing CUDA 11.1 support. We should also clean up the old CUDA-related code in the builder and pytorch repos, which makes the scripts a little cleaner.

We have code that references CUDA 9.2, 10.1, 11.0, 11.1, and 11.2, and none of these versions is currently in use.
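A rough way to locate those stale references in a checkout (a sketch only, the paths are illustrative and not the exact cleanup procedure used here):

# Grep the working tree for retired CUDA version strings
grep -rnE 'cu(92|101|110|111|112)|(9\.2|10\.1|11\.0|11\.1|11\.2)' \
    .circleci .github scripts cmake || true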

Pull Request resolved: https://github.com/pytorch/pytorch/pull/73514

Reviewed By: janeyx99

Differential Revision: D34551989

Pulled By: atalman

fbshipit-source-id: 9ceaaa9b25ad49689986f4b29a26d20370d9d011
(cherry picked from commit fe109c62daf429e9053c03f6e374568ba23cd041)
Author: Andrey Talman
Date: 2022-03-01 08:29:41 -08:00
Committed by: PyTorch MergeBot
Parent: 1cf6b34c0e
Commit: 197764b35d

7 changed files with 6 additions and 42 deletions

View File

@@ -122,17 +122,6 @@ case "$image" in
VISION=yes
KATEX=yes
;;
pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7)
CUDA_VERSION=11.1
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.7
CMAKE_VERSION=3.10.3
GCC_VERSION=7
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
;;
pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7)
CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
CUDNN_VERSION=8
@@ -233,16 +222,6 @@ case "$image" in
DB=yes
VISION=yes
;;
pytorch-linux-bionic-cuda11.0-cudnn8-py3.7-gcc9)
CUDA_VERSION=11.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.7
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
ROCM_VERSION=3.9
;;
pytorch-linux-bionic-rocm4.3.1-py3.7)
ANACONDA_PYTHON_VERSION=3.7
GCC_VERSION=9
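With the cuda11.1 and cuda11.0 entries gone, only the remaining image names are valid inputs to this script. A hedged usage sketch, assuming the image name is passed as the script's argument (as the case "$image" dispatch suggests; the exact CLI may differ):

# Hypothetical invocation of the CI docker build script
./build.sh pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7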

View File

@@ -91,11 +91,6 @@ if [[ ${DESIRED_CUDA} == "cpu" ]]; then
USE_GOLD_LINKER="ON"
fi
USE_WHOLE_CUDNN="OFF"
# Link whole cuDNN for CUDA-11.1 to include fp16 fast kernels
if [[ "$(uname)" == "Linux" && "${DESIRED_CUDA}" == "cu111" ]]; then
USE_WHOLE_CUDNN="ON"
fi
# Default to nightly, since that's where this normally uploads to
PIP_UPLOAD_FOLDER='nightly/'
@@ -184,7 +179,6 @@ export DOCKER_IMAGE="$DOCKER_IMAGE"
export USE_GOLD_LINKER="${USE_GOLD_LINKER}"
export USE_GLOO_WITH_OPENSSL="ON"
export USE_WHOLE_CUDNN="${USE_WHOLE_CUDNN}"
# =================== The above code will be executed inside Docker container ===================
EOL

View File

@@ -3,7 +3,7 @@
set -xeuo pipefail
PYTORCH_DOCKER_TAG=$(git describe --tags --always)-devel
CUDA_VERSION=11.1
CUDA_VERSION=11.3
# Build PyTorch nightly docker
make -f docker.Makefile \

View File

@@ -201,9 +201,6 @@ cmake_dependent_option(
cmake_dependent_option(
BUILD_NVFUSER_BENCHMARK "Build C++ binaries for nvfuser benchmarks" ON
"USE_CUDA;BUILD_TEST" OFF)
cmake_dependent_option(
USE_WHOLE_CUDNN "Use whole-library linking for cuDNN" OFF
"USE_STATIC_CUDNN" OFF)
cmake_dependent_option(
USE_EXPERIMENTAL_CUDNN_V8_API "Use experimental cuDNN v8 API" OFF
"USE_CUDNN" OFF)

View File

@@ -51,7 +51,7 @@ RUN --mount=type=cache,target=/opt/ccache \
FROM conda as conda-installs
ARG PYTHON_VERSION=3.8
ARG CUDA_VERSION=11.1
ARG CUDA_VERSION=11.3
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch-nightly
ENV CONDA_OVERRIDE_CUDA=${CUDA_VERSION}
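CONDA_OVERRIDE_CUDA tells conda which __cuda virtual-package version to assume when no GPU is visible at build time, as in a docker build. A minimal sketch of the equivalent manual install, with the channel names taken from the ARGs above:

# Force conda's __cuda virtual package to 11.3 so CUDA-enabled packages resolve
# even though no GPU is present during the image build
CONDA_OVERRIDE_CUDA=11.3 conda install -y -c pytorch-nightly -c nvidia pytorch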

View File

@@ -318,15 +318,9 @@ if(CAFFE2_USE_CUDNN)
TARGET caffe2::cudnn-private PROPERTY INTERFACE_INCLUDE_DIRECTORIES
${CUDNN_INCLUDE_PATH})
if(CUDNN_STATIC AND NOT WIN32)
if(USE_WHOLE_CUDNN)
set_property(
TARGET caffe2::cudnn-private PROPERTY INTERFACE_LINK_LIBRARIES
"-Wl,--whole-archive,\"${CUDNN_LIBRARY_PATH}\" -Wl,--no-whole-archive")
else()
set_property(
TARGET caffe2::cudnn-private PROPERTY INTERFACE_LINK_LIBRARIES
${CUDNN_LIBRARY_PATH})
endif()
set_property(
TARGET caffe2::cudnn-private PROPERTY INTERFACE_LINK_LIBRARIES
${CUDNN_LIBRARY_PATH})
set_property(
TARGET caffe2::cudnn-private APPEND PROPERTY INTERFACE_LINK_LIBRARIES
"${CUDA_TOOLKIT_ROOT_DIR}/lib64/libculibos.a" dl)

View File

@@ -8,7 +8,7 @@ $(warning WARNING: No docker user found using results from whoami)
DOCKER_ORG = $(shell whoami)
endif
CUDA_VERSION = 11.1
CUDA_VERSION = 11.3
CUDNN_VERSION = 8
BASE_RUNTIME = ubuntu:18.04
BASE_DEVEL = nvidia/cuda:$(CUDA_VERSION)-cudnn$(CUDNN_VERSION)-devel-ubuntu18.04
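To confirm that an image built from this Makefile now reports the 11.3 toolkit, a hedged check (the image tag is a placeholder for whatever docker.Makefile produced):

# <image:tag> is a placeholder; requires the NVIDIA container runtime
docker run --gpus all --rm <image:tag> \
    python -c "import torch; print(torch.version.cuda)"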