Mirror of https://github.com/pytorch/pytorch.git
Avoid configuring ROCm if USE_CUDA is on. (#26910)
Summary: Move the resolution of the conflict between `USE_CUDA` and `USE_ROCM` into CMake, so that:
- `USE_CUDA=ON` with CUDA found and `USE_ROCM=ON` with ROCm found --> fatal error
- Either `USE_CUDA=ON` with CUDA found or `USE_ROCM=ON` with ROCm found --> the corresponding GPU support is enabled
- Otherwise --> no GPU support

Pull Request resolved: https://github.com/pytorch/pytorch/pull/26910
Differential Revision: D17738652
Pulled By: ezyang
fbshipit-source-id: 8e07cc7e922e0abda24a6518119c28952276064e
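The decision table above, expressed as a minimal Python sketch (not code from this PR; the function name resolve_gpu_backend and its return values are invented for illustration):

def resolve_gpu_backend(use_cuda, cuda_found, use_rocm, rocm_found):
    # A backend counts only if it was both requested and detected.
    cuda_on = use_cuda and cuda_found
    rocm_on = use_rocm and rocm_found
    if cuda_on and rocm_on:
        # Mirrors the new CMake FATAL_ERROR: the two backends are mutually exclusive.
        raise RuntimeError("Both CUDA and ROCm are enabled and found; "
                           "turn one off with USE_CUDA=OFF or USE_ROCM=OFF.")
    if cuda_on:
        return 'cuda'
    if rocm_on:
        return 'rocm'
    # Neither backend is both requested and found: build without GPU support.
    return 'cpu'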
Committed by: Facebook Github Bot
Parent: 5b5f398dd4
Commit: 8fbefa06f6
@@ -55,11 +55,15 @@ add_subdirectory(src/THNN)
 IF(USE_ROCM)
   include(LoadHIP)
   if (NOT PYTORCH_FOUND_HIP)
-    MESSAGE(FATAL_ERROR
-      "Could not find HIP installation")
+    set(USE_ROCM OFF)
   endif()
 ENDIF()
 
+# Both CUDA and ROCM are enabled and found. Report an error.
+if(USE_CUDA AND USE_ROCM)
+  message(FATAL_ERROR "Both CUDA and ROCm are enabled and found. PyTorch can only be built with either of them. Please turn one off by using either USE_CUDA=OFF or USE_ROCM=OFF.")
+endif()
+
 IF(MSVC)
   # we want to respect the standard, and we are bored of those **** .
   ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE=1)
@@ -5,7 +5,7 @@ import ctypes.util
 from subprocess import Popen, PIPE
 
 from . import which
-from .env import IS_WINDOWS, IS_LINUX, IS_DARWIN, check_env_flag, check_negative_env_flag
+from .env import IS_WINDOWS, IS_LINUX, IS_DARWIN, check_negative_env_flag
 
 LINUX_HOME = '/usr/local/cuda'
 WINDOWS_HOME = glob.glob('C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
@@ -53,7 +53,7 @@ def find_cuda_version(cuda_home):
     if len(candidates) > 0:
         return candidates[0]
 
-if check_negative_env_flag('USE_CUDA') or check_env_flag('USE_ROCM'):
+if check_negative_env_flag('USE_CUDA'):
     USE_CUDA = False
     CUDA_HOME = None
     CUDA_VERSION = None
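With the `or check_env_flag('USE_ROCM')` clause removed, setting `USE_ROCM` no longer silently turns off CUDA detection on the Python side; the conflict is instead reported once, by the new CMake check above. For context, a minimal sketch of how a "negative" env-flag check of this kind typically behaves, assuming the usual convention for these flags (the exact set of accepted strings in the .env helper module may differ):

import os

def check_negative_env_flag(name, default=''):
    # True when the variable is explicitly set to an "off" value, e.g. USE_CUDA=0.
    return os.getenv(name, default).upper() in ['OFF', '0', 'NO', 'FALSE', 'N']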