Revert D31984694: [pytorch][PR] make TORCH_(CUDABLAS|CUSOLVER)_CHECK usable in custom extensions

Test Plan: revert-hammer

Differential Revision: D31984694 (d4493b27ee)

Original commit changeset: 0035ecd13980

fbshipit-source-id: c85689007719c9e4a930b0a8a32d481a501d3c14
Author: Mike Ruberry
Date: 2021-10-30 03:49:44 -07:00
Committed by: Facebook GitHub Bot
Parent: 4a2bbc619d
Commit: aa16de517d

7 changed files with 4 additions and 72 deletions

aten/src/ATen/cuda/CUDABlas.cpp

@@ -5,7 +5,6 @@
 #include <ATen/cuda/CUDABlas.h>
 #include <ATen/cuda/Exceptions.h>
 #include <c10/util/irange.h>
-#include <c10/macros/Export.h>
 
 #define CUDABLAS_POSINT_CHECK(FD, X) \
   TORCH_CHECK( \
@@ -97,7 +96,7 @@ namespace at {
 namespace cuda {
 namespace blas {
 
-C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error) {
+const char* _cublasGetErrorEnum(cublasStatus_t error) {
   if (error == CUBLAS_STATUS_SUCCESS) {
     return "CUBLAS_STATUS_SUCCESS";
   }
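
For context, C10_EXPORT (pulled in through the removed c10/macros/Export.h include) is the macro that marks a symbol as visible outside the torch shared libraries. A simplified paraphrase of its definition is below; the real header also handles the dllimport side, static builds, and C10_HIDDEN:

// Simplified paraphrase of C10_EXPORT from c10/macros/Export.h.
// The actual header also distinguishes dllimport/dllexport and
// defines C10_HIDDEN; this sketch keeps only the export case.
#ifdef _WIN32
#define C10_EXPORT __declspec(dllexport)
#else
#define C10_EXPORT __attribute__((__visibility__("default")))
#endif

Removing the qualifier therefore hides _cublasGetErrorEnum from out-of-tree code again, which is the effect of this revert.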

aten/src/ATen/cuda/CUDASolver.cpp

@@ -2,7 +2,6 @@
 #include <ATen/NativeFunctions.h>
 #include <ATen/cuda/CUDASolver.h>
 #include <c10/cuda/CUDACachingAllocator.h>
-#include <c10/macros/Export.h>
 
 #ifdef CUDART_VERSION
@@ -10,7 +9,7 @@ namespace at {
 namespace cuda {
 namespace solver {
 
-C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status) {
+const char* cusolverGetErrorMessage(cusolverStatus_t status) {
   switch (status) {
     case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCES";
     case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED";

aten/src/ATen/cuda/Exceptions.h

@@ -2,7 +2,6 @@
 #include <cublas_v2.h>
 #include <cusparse.h>
-#include <c10/macros/Export.h>
 
 #ifdef CUDART_VERSION
 #include <cusolver_common.h>
@@ -40,7 +39,7 @@ class CuDNNError : public c10::Error {
 } while (0)
 
 namespace at { namespace cuda { namespace blas {
-C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error);
+const char* _cublasGetErrorEnum(cublasStatus_t error);
 }}} // namespace at::cuda::blas
 
 #define TORCH_CUDABLAS_CHECK(EXPR) \
@@ -67,7 +66,7 @@ const char *cusparseGetErrorString(cusparseStatus_t status);
 
 #ifdef CUDART_VERSION
 namespace at { namespace cuda { namespace solver {
-C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status);
+const char* cusolverGetErrorMessage(cusolverStatus_t status);
 }}} // namespace at::cuda::solver
 
 #define TORCH_CUSOLVER_CHECK(EXPR) \
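
Why the export mattered: TORCH_CUDABLAS_CHECK is a macro, so its body is compiled into the caller's translation unit, and the reference to at::cuda::blas::_cublasGetErrorEnum is resolved against the torch CUDA library at link time. Its definition, truncated in the hunk above, read roughly as follows (paraphrased):

#define TORCH_CUDABLAS_CHECK(EXPR)                              \
  do {                                                          \
    cublasStatus_t __err = EXPR;                                \
    TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS,                 \
                "CUDA error: ",                                 \
                at::cuda::blas::_cublasGetErrorEnum(__err),     \
                " when calling `" #EXPR "`");                   \
  } while (0)

TORCH_CUSOLVER_CHECK works the same way via cusolverGetErrorMessage. With the C10_EXPORT qualifiers reverted, a custom extension that expands either macro can fail to link against these helpers, most visibly on Windows, where symbols are not exported by default.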

test/cpp_extensions/cublas_extension.cpp

@@ -1,17 +0,0 @@
-#include <iostream>
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-
-#include <cublas_v2.h>
-
-torch::Tensor noop_cublas_function(torch::Tensor x) {
-  cublasHandle_t handle;
-  TORCH_CUDABLAS_CHECK(cublasCreate(&handle));
-  TORCH_CUDABLAS_CHECK(cublasDestroy(handle));
-  return x;
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-  m.def("noop_cublas_function", &noop_cublas_function, "a cublas function");
-}
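
The deleted file above doubled as a minimal reproducer for the export requirement: it does no real cuBLAS work, but merely expanding TORCH_CUDABLAS_CHECK makes the extension reference at::cuda::blas::_cublasGetErrorEnum from outside the torch libraries. With this revert applied, building such an extension is expected to fail again at link time on platforms where symbols are hidden by default. The cusolver file below is the analogous smoke test for TORCH_CUSOLVER_CHECK.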

test/cpp_extensions/cusolver_extension.cpp

@@ -1,17 +0,0 @@
-#include <torch/extension.h>
-
-#include <ATen/cuda/CUDAContext.h>
-
-#include <cusolverDn.h>
-
-torch::Tensor noop_cusolver_function(torch::Tensor x) {
-  cusolverDnHandle_t handle;
-  TORCH_CUSOLVER_CHECK(cusolverDnCreate(&handle));
-  TORCH_CUSOLVER_CHECK(cusolverDnDestroy(handle));
-  return x;
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-  m.def("noop_cusolver_function", &noop_cusolver_function, "a cusolver function");
-}

test/cpp_extensions/setup.py

@@ -48,19 +48,6 @@ if torch.cuda.is_available() and (CUDA_HOME is not None or ROCM_HOME is not None):
                             'nvcc': ['-O2']})
     ext_modules.append(extension)
 
-if torch.cuda.is_available() and CUDA_HOME is not None:
-    cublas_extension = CUDAExtension(
-        name='torch_test_cpp_extension.cublas_extension',
-        sources=['cublas_extension.cpp']
-    )
-    ext_modules.append(cublas_extension)
-
-    cusolver_extension = CUDAExtension(
-        name='torch_test_cpp_extension.cusolver_extension',
-        sources=['cusolver_extension.cpp']
-    )
-    ext_modules.append(cusolver_extension)
-
 setup(
     name='torch_test_cpp_extension',
     packages=['torch_test_cpp_extension'],

test/test_cpp_extensions_aot.py

@@ -82,24 +82,6 @@ class TestCppExtensionAOT(common.TestCase):
         # 2 * sigmoid(0) = 2 * 0.5 = 1
         self.assertEqual(z, torch.ones_like(z))
 
-    @common.skipIfRocm
-    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
-    def test_cublas_extension(self):
-        from torch_test_cpp_extension import cublas_extension
-
-        x = torch.zeros(100, device="cuda", dtype=torch.float32)
-        z = cublas_extension.noop_cublas_function(x)
-        self.assertEqual(z, x)
-
-    @common.skipIfRocm
-    @unittest.skipIf(not TEST_CUDA, "CUDA not found")
-    def test_cusolver_extension(self):
-        from torch_test_cpp_extension import cusolver_extension
-
-        x = torch.zeros(100, device="cuda", dtype=torch.float32)
-        z = cusolver_extension.noop_cusolver_function(x)
-        self.assertEqual(z, x)
-
     @unittest.skipIf(IS_WINDOWS, "Not available on Windows")
     def test_no_python_abi_suffix_sets_the_correct_library_name(self):
         # For this test, run_test.py will call `python setup.py install` in the