Revert "Update round size with 1 division behavior (#162203)"
This reverts commit 12d2ef557f6e127100267c31a31572d8ab5cc788. Reverted https://github.com/pytorch/pytorch/pull/162203 on behalf of https://github.com/izaitsevfb due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/162203#issuecomment-3398622898))
@@ -2502,8 +2502,6 @@ class DeviceCachingAllocator {
     auto divisions = CUDAAllocatorConfig::roundup_power2_divisions(size);
     if (divisions > 1 && size > (kMinBlockSize * divisions)) {
       return roundup_power2_next_division(size, divisions);
-    } else if (divisions == 1) {
-      return llvm::PowerOf2Ceil(size);
     } else {
       return kMinBlockSize * ((size + kMinBlockSize - 1) / kMinBlockSize);
     }
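For context, the round_size() branch above can be sketched in Python as follows. This is a minimal illustration rather than the allocator's implementation: kMinBlockSize is the CUDA caching allocator's 512-byte minimum granularity, and _next_division() is a hypothetical stand-in for roundup_power2_next_division(), which rounds a size up to the next of `divisions` equally spaced steps between the surrounding powers of two. The divisions == 1 branch (power-of-2 ceiling via llvm::PowerOf2Ceil) is exactly what this revert removes.

kMinBlockSize = 512  # the allocator rounds every request to a multiple of 512 bytes


def _next_division(size: int, divisions: int) -> int:
    # Illustrative stand-in for roundup_power2_next_division(): split the
    # power-of-two interval containing `size` into `divisions` steps and
    # round up to the next step boundary.
    power2_floor = 1 << (size.bit_length() - 1)
    step = power2_floor // divisions
    if step == 0:
        return power2_floor << 1
    return -(-size // step) * step  # ceil(size / step) * step


def round_size(size: int, divisions: int) -> int:
    # Mirrors the C++ branch shown in the hunk above.
    if divisions > 1 and size > kMinBlockSize * divisions:
        return _next_division(size, divisions)
    elif divisions == 1:
        # llvm::PowerOf2Ceil equivalent -- the behavior removed by this revert.
        return 1 << (size - 1).bit_length()
    else:
        return kMinBlockSize * ((size + kMinBlockSize - 1) // kMinBlockSize)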
@@ -4564,21 +4564,6 @@ class TestCudaMallocAsync(TestCase):
         reg_mem = torch.cuda.memory_stats()[key_allocated]
         self.assertEqual(reg_mem - start_mem, nbytes)
-
-        # Test division==1 case.
-        torch.cuda.memory.empty_cache()
-        div1_start_mem = torch.cuda.memory_stats()[key_allocated]
-        div1_start_requested = torch.cuda.memory_stats()[key_requested]
-        torch.cuda.memory._set_allocator_settings("roundup_power2_divisions:1")
-        torch.rand(nelems, device="cuda")
-        div1_end_mem = torch.cuda.memory_stats()[key_allocated]
-        div1_end_requested = torch.cuda.memory_stats()[key_requested]
-
-        self.assertEqual(div1_start_mem - start_mem, nbytes)
-        if not TEST_CUDAMALLOCASYNC:
-            # not supported with the cudaMallocAsync backend
-            self.assertEqual(div1_end_mem - div1_start_mem, power2_div(nbytes, 1))
-        self.assertEqual(div1_end_requested - div1_start_requested, nbytes)
 
         with self.assertRaises(RuntimeError):
             torch.cuda.memory._set_allocator_settings("foo:1,bar:2")
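For a concrete sense of what the removed division==1 assertions checked, here is a worked example using the round_size() sketch above; the 84 MiB request size is purely illustrative, not a value taken from the test.

size = 84 * 1024 * 1024  # illustrative request size (88080384 bytes)

print(round_size(size, divisions=1))  # 134217728 -> next power of two (128 MiB)
print(round_size(size, divisions=4))  # 100663296 -> next quarter-step above 64 MiB (96 MiB)
print(round_size(size, divisions=0))  # 88080384  -> already a multiple of kMinBlockSize, unchanged

With the revert applied, divisions == 1 no longer hits the power-of-two ceiling and instead falls through to the kMinBlockSize rounding in the else branch.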