pytorch/caffe2/python/_import_c_extension.py
Peter Yeh 54db14e390 HIP Operators Generator--> HipOpG (#9322)
Summary:
The goal of this PR is to add infrastructure to convert (hipify) CUDA ops into [HIP](https://github.com/ROCm-Developer-Tools/HIP) ops at **compile** time.

Note that HIP ops, which are portable C++ code, can run on both AMD and NVIDIA platforms.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9322

Differential Revision: D8884707

Pulled By: bddppq

fbshipit-source-id: dabc6319546002c308c10528238e6684f7aef0f8
2018-07-19 00:26:06 -07:00
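As a rough illustration of what "hipify" means here: at its core it is a compile-time, source-to-source substitution of CUDA API identifiers with their HIP equivalents. The sketch below is not the HipOpG generator added by this PR; the CUDA_TO_HIP table and the hipify_source helper are illustrative stand-ins that show only the kind of textual mapping involved, using a tiny subset of the real identifier mapping.

# Illustrative sketch only -- not the HipOpG generator from this PR.
import re

# Tiny illustrative subset of the CUDA -> HIP identifier mapping.
CUDA_TO_HIP = {
    'cudaMalloc': 'hipMalloc',
    'cudaFree': 'hipFree',
    'cudaMemcpy': 'hipMemcpy',
    'cudaStream_t': 'hipStream_t',
    'cudaGetLastError': 'hipGetLastError',
}

_PATTERN = re.compile(r'\b(' + '|'.join(map(re.escape, CUDA_TO_HIP)) + r')\b')


def hipify_source(cuda_source):
    """Return the source text with CUDA identifiers replaced by HIP ones."""
    return _PATTERN.sub(lambda m: CUDA_TO_HIP[m.group(1)], cuda_source)


if __name__ == '__main__':
    print(hipify_source('cudaMalloc(&ptr, size); cudaFree(ptr);'))
    # -> hipMalloc(&ptr, size); hipFree(ptr);

Because the translated sources are plain C++ against the HIP runtime, the resulting ops can be compiled for either an AMD or an NVIDIA backend, which is the portability point made in the summary above.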


## @package _import_c_extension
# Module caffe2.python._import_c_extension
import atexit
import logging
import sys
from caffe2.python import extension_loader

# We will first try to load the gpu-enabled caffe2. If it fails, we will then
# attempt to load the cpu version. The cpu backend is the minimum required, so
# if that still fails, we will exit loud.
with extension_loader.DlopenGuard():
    has_hip_support = False
    has_gpu_support = False

    try:
        from caffe2.python.caffe2_pybind11_state_gpu import *  # noqa
        if num_cuda_devices():  # noqa
            has_gpu_support = True
    except ImportError as gpu_e:
        logging.info('Failed to import cuda module: {}'.format(gpu_e))
        try:
            from caffe2.python.caffe2_pybind11_state_hip import *  # noqa
            if num_hip_devices():
                has_hip_support = True
                logging.info('This caffe2 python run has AMD GPU support!')
        except ImportError as hip_e:
            logging.info('Failed to import AMD hip module: {}'.format(hip_e))
            logging.warning(
                'This caffe2 python run does not have GPU support. '
                'Will run in CPU only mode.')
            try:
                from caffe2.python.caffe2_pybind11_state import *  # noqa
            except ImportError as cpu_e:
                logging.critical(
                    'Cannot load caffe2.python. Error: {0}'.format(str(cpu_e)))
                sys.exit(1)

# libcaffe2_python contains a global Workspace that we need to properly delete
# when exiting. Otherwise, cudart will cause segfaults sometimes.
atexit.register(on_module_exit)  # noqa


# Add functionalities for the TensorCPU interface.
def _TensorCPU_shape(self):
    return tuple(self._shape)


def _TensorCPU_reshape(self, shape):
    return self._reshape(list(shape))


TensorCPU.shape = property(_TensorCPU_shape)  # noqa
TensorCPU.reshape = _TensorCPU_reshape  # noqa
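
For context, a minimal usage sketch follows. It assumes the common Caffe2 pattern of reading these flags through caffe2.python.workspace, which re-exports this module via a star import; the sketch is illustrative and not part of the file above.

# Usage sketch (assumed typical consumption through caffe2.python.workspace).
from caffe2.python import workspace

if workspace.has_hip_support:
    print('Running with AMD (HIP) GPU support')
elif workspace.has_gpu_support:
    print('Running with CUDA GPU support')
else:
    print('Running in CPU-only mode')

The nested try/except above implements the fallback order described in the file's leading comment: CUDA build first, then the HIP build, and finally the CPU-only extension, exiting with an error only if none of the three pybind11 extensions can be loaded.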