Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00.
Revert "Generalize torch._C._set_allocator_settings to be generic (#156175)"
This reverts commit d3ce45012ed42cd1e13d5048b046b781f0feabe0. Reverted https://github.com/pytorch/pytorch/pull/156175 on behalf of https://github.com/guangyey due to Static initialization order issue impact the downstream repo ([comment](https://github.com/pytorch/pytorch/pull/150312#issuecomment-3142035444))
This commit is contained in:
@@ -4484,28 +4484,28 @@ class TestCudaMallocAsync(TestCase):
         with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("foo:1,bar:2")

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "garbage_collection_threshold:1.2"
             )

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("max_split_size_mb:2")

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("release_lock_on_cudamalloc:none")

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_use_cuda_host_register:none"
             )

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_num_register_threads:none"
             )

-        with self.assertRaises(ValueError):
+        with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings(
                 "pinned_num_register_threads:1024"
             )
Reference in New Issue
Block a user