Revert "Support for expandable segments with cuda graph trees (#128068)"

This reverts commit fdc83610f272610ce50d1a6f5b6354f2df1baabb.

Reverted https://github.com/pytorch/pytorch/pull/128068 on behalf of https://github.com/janeyx99 for breaking ROCm tests on trunk; the tests likely need to be qualified with @onlyCUDA ([comment](https://github.com/pytorch/pytorch/pull/128068#issuecomment-2223672381))
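As a sketch of the fix the revert message suggests (not taken from the PR; the class and test names below are hypothetical, while onlyCUDA, instantiate_device_type_tests, TestCase, and run_tests are real helpers from torch.testing._internal):

# Hedged sketch only: the class and test names are made up for illustration.
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    onlyCUDA,
)
from torch.testing._internal.common_utils import TestCase, run_tests

class TestExpandableSegmentsGraphTrees(TestCase):  # hypothetical class name
    @onlyCUDA  # instantiate this test only for the CUDA device type
    def test_graph_trees(self, device):
        ...  # body elided; the point is the @onlyCUDA qualifier

instantiate_device_type_tests(TestExpandableSegmentsGraphTrees, globals())

if __name__ == "__main__":
    run_tests()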
PyTorch MergeBot
2024-07-11 18:58:13 +00:00
parent 1cae60a87e
commit 578388bed8
7 changed files with 16 additions and 196 deletions

torch/testing/_internal/common_utils.py

@@ -1360,21 +1360,6 @@ TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and (  # noqa: F821
     (torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3)
 )
 
-def allocator_option_enabled_fn(allocator_config, _, option):
-    if allocator_config is None:
-        return False
-    allocator_config = allocator_config.split(',') if ',' in allocator_config else [allocator_config]
-    mapping = dict([var.split(':') for var in allocator_config])
-
-    if option in mapping and mapping[option] == 'True':
-        return True
-    else:
-        return False
-
-TestEnvironment.def_flag("EXPANDABLE_SEGMENTS",
-                         env_var="PYTORCH_CUDA_ALLOC_CONF",
-                         enabled_fn=functools.partial(allocator_option_enabled_fn, option='expandable_segments'))
-
 if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ:
     num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2"))
     gb_available = torch.cuda.mem_get_info()[1] / 2 ** 30
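For context on what the removed hunk did: allocator_option_enabled_fn parses the comma-separated PYTORCH_CUDA_ALLOC_CONF string into option:value pairs, and the EXPANDABLE_SEGMENTS flag is enabled only when expandable_segments maps to the string 'True'. A usage sketch with illustrative values (the function and its signature come from the hunk above):

# Illustrative config string; max_split_size_mb is another real
# PYTORCH_CUDA_ALLOC_CONF option, shown here only as a second key.
conf = "expandable_segments:True,max_split_size_mb:128"

allocator_option_enabled_fn(conf, None, "expandable_segments")  # -> True
allocator_option_enabled_fn(conf, None, "max_split_size_mb")    # -> False ('128' != 'True')
allocator_option_enabled_fn(None, None, "expandable_segments")  # -> False (env var unset)

# The flag was meant to be driven from the environment, e.g.:
#   PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True python test/test_cuda.py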