Revert "[CUDA][cuBLAS] Add fp16 accumulate option to cuBLAS/cuBLASLt (#144441)"

This reverts commit de945d78da9198e58df7c19c53b737d0f987ddff.

Reverted https://github.com/pytorch/pytorch/pull/144441 on behalf of https://github.com/izaitsevfb due to unused variables again :( ([comment](https://github.com/pytorch/pytorch/pull/144441#issuecomment-2611182461))
PyTorch MergeBot
2025-01-23 22:59:25 +00:00
parent 42f4fda2eb
commit dad9bc3461
9 changed files with 13 additions and 192 deletions

@@ -1133,29 +1133,6 @@ static PyObject* THPModule_allowBF16ReductionCuBLAS(
  Py_RETURN_FALSE;
}

static PyObject* THPModule_setAllowFP16AccumulationCuBLAS(
    PyObject* _unused,
    PyObject* arg) {
  HANDLE_TH_ERRORS
  TORCH_CHECK(
      PyBool_Check(arg),
      "set_allow_fp16_accumulation_cublas expects a bool, "
      "but got ",
      THPUtils_typename(arg));
  at::globalContext().setAllowFP16AccumulationCuBLAS(arg == Py_True);
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject* THPModule_allowFP16AccumulationCuBLAS(
    PyObject* _unused,
    PyObject* noargs) {
  if (at::globalContext().allowFP16AccumulationCuBLAS()) {
    Py_RETURN_TRUE;
  }
  Py_RETURN_FALSE;
}

static PyObject* THPModule_setAllowFP16ReductionCPU(
    PyObject* _unused,
    PyObject* arg) {
@@ -1597,14 +1574,6 @@ static std::initializer_list<PyMethodDef> TorchMethods = {
     THPModule_setAllowBF16ReductionCuBLAS,
     METH_O,
     nullptr},
    {"_get_cublas_allow_fp16_accumulation",
     THPModule_allowFP16AccumulationCuBLAS,
     METH_NOARGS,
     nullptr},
    {"_set_cublas_allow_fp16_accumulation",
     THPModule_setAllowFP16AccumulationCuBLAS,
     METH_O,
     nullptr},
    {"_get_cpu_allow_fp16_reduced_precision_reduction",
     THPModule_allowFP16ReductionCPU,
     METH_NOARGS,
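
For context, the method-table entries removed above exposed the accumulation toggle on torch._C. A minimal Python sketch of how the reverted bindings would have been exercised, assuming only the private torch._C surface shown in this diff (any public torch.backends wrapper added by the original PR is not shown here):

import torch

# Hypothetical usage of the bindings removed by this revert; names are taken
# from the TorchMethods table above. The setter expects a Python bool
# (PyBool_Check in the diff) and the getter returns the current flag.
torch._C._set_cublas_allow_fp16_accumulation(True)   # opt in to fp16 accumulation
assert torch._C._get_cublas_allow_fp16_accumulation()

a = torch.randn(128, 128, device="cuda", dtype=torch.float16)
b = torch.randn(128, 128, device="cuda", dtype=torch.float16)
c = a @ b  # cuBLAS GEMM; with the flag set, accumulation may stay in fp16

torch._C._set_cublas_allow_fp16_accumulation(False)  # restore the default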