Revert "Generalize torch._C._set_allocator_settings to be generic (#156175)"

This reverts commit d3ce45012ed42cd1e13d5048b046b781f0feabe0.

Reverted https://github.com/pytorch/pytorch/pull/156175 on behalf of https://github.com/guangyey due to a static initialization order issue impacting the downstream repo ([comment](https://github.com/pytorch/pytorch/pull/150312#issuecomment-3142035444))
PyTorch MergeBot
2025-08-01 03:24:54 +00:00
parent c964204829
commit cb9b74872b
9 changed files with 34 additions and 26 deletions

@@ -4484,28 +4484,28 @@ class TestCudaMallocAsync(TestCase):
         with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("foo:1,bar:2")
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "garbage_collection_threshold:1.2"
             )
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("max_split_size_mb:2")
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("release_lock_on_cudamalloc:none")
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_use_cuda_host_register:none"
             )
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_num_register_threads:none"
             )
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_num_register_threads:1024"
            )
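
A usage sketch of the behavior these tests pin down after the revert (assuming a CUDA-enabled PyTorch build): malformed or out-of-range options passed to the private torch.cuda.memory._set_allocator_settings helper are rejected with RuntimeError rather than ValueError.

import torch

# Minimal sketch, assuming a CUDA-enabled PyTorch build. After this revert,
# invalid allocator settings raise RuntimeError again (the reverted change
# had switched these failures to ValueError).
for bad_setting in (
    "foo:1,bar:2",                       # unrecognized option name
    "garbage_collection_threshold:1.2",  # threshold must lie strictly between 0 and 1
    "pinned_num_register_threads:none",  # value is not an integer
):
    try:
        torch.cuda.memory._set_allocator_settings(bad_setting)
    except RuntimeError as exc:
        print(f"rejected as expected: {exc}")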