Add ability to enable/disable MIOpen at runtime (#33118)

Summary:
1. Set `torch._C.has_cudnn` to `True` for ROCm
2. Make MIOpen invocations respect the value of `cudnn_enabled` / `at::globalContext().userEnabledCuDNN()` (see the usage sketch after this list)
3. `torch/backends/cudnn/__init__.py`: Add HIP-specific changes (use the "hide whitespace changes" option to view a simpler diff)
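
For orientation (not part of the diff), here is a minimal sketch of the runtime switch this change adds; it uses only the existing `torch.backends.cudnn` flag, which a ROCm build now applies to MIOpen as well:

    import torch

    # On a ROCm build, torch.backends.cudnn is backed by MIOpen, so this flag
    # now toggles MIOpen dispatch at runtime as well.
    torch.backends.cudnn.enabled = False   # globally disable cuDNN/MIOpen paths
    torch.backends.cudnn.enabled = True    # re-enable them

    # Per summary item 1, torch._C.has_cudnn is now True on ROCm builds too.
    print(torch.backends.cudnn.is_available())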
Pull Request resolved: https://github.com/pytorch/pytorch/pull/33118

Differential Revision: D19977719

Pulled By: bddppq

fbshipit-source-id: 64d4dd1d78afcf96201360d85b8be5950f96dfad
Author: Jithun Nair
Date: 2020-02-20 10:45:28 -08:00
Committed by: Facebook Github Bot
Parent: 01e1de8220
Commit: 718c538ff9

5 changed files with 21 additions and 5 deletions

@@ -205,6 +205,7 @@ auto ConvParams::use_miopen(const at::Tensor& input, bool bias_defined) const ->
&& input.dim() <= MIOPEN_DIM_MAX
&& !(groups > 1 && is_dilated()) // MIOpen currently does not support dilation with groups of size > 1
&& !(input.scalar_type() == at::kBFloat16 && bias_defined) // MIOpen currently doesn't support bias with bfloat16
+ && cudnn_enabled
;
}

@@ -493,6 +493,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
&& ((running_mean.defined() && running_var.defined())
|| (!running_mean.defined() && !running_var.defined() && training))
&& detail::getCUDAHooks().compiledWithMIOpen()
+ && cudnn_enabled
);
if (use_miopen) {

@@ -16,7 +16,8 @@ bool use_miopen(const at::Tensor& input, const double dropout_state) {
bool is_miopen_acceptable = (input.scalar_type() == at::kFloat) &&
(detail::getCUDAHooks().compiledWithMIOpen()) &&
(input.is_cuda()) &&
- (dropout_state == 0.0);
+ (dropout_state == 0.0) &&
+ (at::globalContext().userEnabledCuDNN());
return is_miopen_acceptable;
}
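
Taken together with the convolution and batch-norm hunks above, this RNN check means a ROCm build can now skip the MIOpen paths at runtime. A hedged illustration (the module, shapes, and variable names below are made up for the example):

    import torch
    import torch.nn as nn

    # Illustrative only: on a ROCm build, the "cuda" device maps to HIP.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    conv = nn.Conv2d(3, 8, kernel_size=3).to(device)
    x = torch.randn(1, 3, 32, 32, device=device)

    torch.backends.cudnn.enabled = False   # the use_miopen() checks above now fail
    y_fallback = conv(x)                   # generic (non-MIOpen) kernel

    torch.backends.cudnn.enabled = True    # MIOpen dispatch is allowed again
    y_miopen = conv(x)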

@@ -12,7 +12,7 @@ from torch.backends import ContextProp, PropModule, __allow_nonbracketed_mutatio
#
# torch.backends.cudnn.enabled = False
#
- # to globally disable CuDNN
+ # to globally disable CuDNN/MIOpen
lib = None
__cudnn_version = None
@@ -72,6 +72,13 @@ def _libcudnn():
raise RuntimeError(
'cuDNN version incompatibility: PyTorch was compiled against {} '
'but linked against {}'.format(compile_version, __cudnn_version))
+ elif hasattr(lib, 'miopenGetVersion'):
+     miopen_major = ctypes.c_size_t()
+     miopen_minor = ctypes.c_size_t()
+     miopen_patch = ctypes.c_size_t()
+     # miopen version is MAJOR*1000000 + MINOR*1000 + PATCH
+     lib.miopenGetVersion(ctypes.byref(miopen_major), ctypes.byref(miopen_minor), ctypes.byref(miopen_patch))
+     __cudnn_version = miopen_major.value * 1000000 + miopen_minor.value * 1000 + miopen_patch.value
else:
lib = None
return lib
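
For reference, the packed value computed above is what `torch.backends.cudnn.version()` returns on a ROCm build, so it can be unpacked into MIOpen components. A small sketch, assuming the MIOpen branch above was taken:

    import torch

    # Assumes a ROCm build where _libcudnn() found miopenGetVersion, so
    # version() returns MAJOR*1000000 + MINOR*1000 + PATCH. (cuDNN packs its
    # version differently, so this decode is MIOpen-specific.)
    v = torch.backends.cudnn.version()
    if v is not None:
        major, rest = divmod(v, 1000000)
        minor, patch = divmod(rest, 1000)
        print("MIOpen {}.{}.{}".format(major, minor, patch))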
@@ -102,11 +109,11 @@ def is_acceptable(tensor):
return False
if not is_available():
warnings.warn(
- "PyTorch was compiled without cuDNN support. To use cuDNN, rebuild "
+ "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild "
"PyTorch making sure the library is visible to the build system.")
return False
if _libcudnn() is None:
- warnings.warn('cuDNN library not found. Check your {libpath}'.format(
+ warnings.warn('cuDNN/MIOpen library not found. Check your {libpath}'.format(
libpath={
'darwin': 'DYLD_LIBRARY_PATH',
'win32': 'PATH'

@@ -450,6 +450,12 @@ PyObject *THPModule_setBenchmarkCuDNN(PyObject *_unused, PyObject *arg)
{
THPUtils_assert(PyBool_Check(arg), "set_benchmark_cudnn expects a bool, "
"but got %s", THPUtils_typename(arg));
+ #ifdef __HIP_PLATFORM_HCC__
+ if (arg == Py_False) {
+   TORCH_WARN_ONCE("Disabling benchmark mode for MIOpen is NOT supported. Overriding value to True");
+   arg = Py_True;
+ }
+ #endif
at::globalContext().setBenchmarkCuDNN(arg == Py_True);
Py_RETURN_NONE;
}
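
From Python, the HIP guard above should turn an attempt to disable benchmark mode into a no-op plus a one-time warning. A hedged sketch of that behavior, assuming a ROCm (HIP) build:

    import torch

    # On ROCm the C++ code above warns once and forces the value back to True;
    # on a CUDA build the assignment takes effect as usual.
    torch.backends.cudnn.benchmark = False
    print(torch.backends.cudnn.benchmark)  # expected: True on ROCm, False on CUDA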
@@ -731,7 +737,7 @@ PyObject* initModule() {
return PyModule_AddObject(module, name, v) == 0;
};
- #ifdef USE_CUDNN
+ #if defined(USE_CUDNN) || defined(__HIP_PLATFORM_HCC__)
PyObject *has_cudnn = Py_True;
#else
PyObject *has_cudnn = Py_False;