Mirror of https://github.com/pytorch/pytorch.git — synced 2025-10-20 21:14:14 +08:00
Revert "[ATen][CUDA][CUBLAS] cublasLtMatmul increase workspace_size (#120925)"
This reverts commit 3239f86a3df133b5977d988324639e0de7af8749. Reverted https://github.com/pytorch/pytorch/pull/120925 on behalf of https://github.com/malfet due to Breaks internal tests, likely due to the increased memory requirements ([comment](https://github.com/pytorch/pytorch/pull/120925#issuecomment-1983875400))
This commit is contained in:
@ -183,22 +183,13 @@ uint32_t _getAlignment(uintptr_t address) {
|
||||
|
||||
static size_t _parseChosenWorkspaceSize() {
|
||||
const char * val = getenv("CUBLASLT_WORKSPACE_SIZE");
|
||||
size_t workspace_size = 1024;
|
||||
#ifdef USE_ROCM
|
||||
if (!val) {
|
||||
// accept either env var
|
||||
val = getenv("HIPBLASLT_WORKSPACE_SIZE");
|
||||
}
|
||||
#else
|
||||
cudaDeviceProp* p = at::cuda::getDeviceProperties(c10::cuda::current_device());
|
||||
// Keep workspace_size = 1024 for small Ampere GPUs
|
||||
// See https://github.com/pytorch/pytorch/pull/120925#issuecomment-1977556485
|
||||
if (p->major == 8 && p->totalGlobalMem / 1073741824 >= 24) {
|
||||
workspace_size = 4096;
|
||||
} else if (p->major >= 9) {
|
||||
workspace_size = 32768;
|
||||
}
|
||||
#endif
|
||||
size_t workspace_size = 1024; /* default size in KiB according to #73328 */
|
||||
if (val) {
|
||||
try {
|
||||
workspace_size = std::stoi(val);
|
||||
|
Reference in New Issue
Block a user