mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
Revert "Support for expandable segments with cuda graph trees (#128068)"
This reverts commit fdc83610f272610ce50d1a6f5b6354f2df1baabb. Reverted https://github.com/pytorch/pytorch/pull/128068 on behalf of https://github.com/janeyx99 due to Reverting for breaking ROCm tests on trunk, I think the tests need to be qualified with @onlyCUDA ([comment](https://github.com/pytorch/pytorch/pull/128068#issuecomment-2223672381))
This commit is contained in:
@ -1360,21 +1360,6 @@ TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and ( # noqa: F821
|
||||
(torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3)
|
||||
)
|
||||
|
||||
def allocator_option_enabled_fn(allocator_config, _, option):
    """Return True iff *option* is set to the string 'True' in *allocator_config*.

    Args:
        allocator_config: value of an allocator env var such as
            PYTORCH_CUDA_ALLOC_CONF — a comma-separated list of
            'key:value' pairs (e.g. "expandable_segments:True,max_split_size_mb:128"),
            or None when the variable is unset.
        _: unused (kept for the TestEnvironment.def_flag enabled_fn signature).
        option: the key to look up.

    Returns:
        bool: True only when the option is present and its value is exactly 'True'.

    Raises:
        ValueError: if any entry does not contain exactly one ':' separator
        (same behavior as the original dict(var.split(':')) construction).
    """
    if allocator_config is None:
        return False
    # str.split(',') already yields [allocator_config] when there is no comma,
    # so the original "',' in allocator_config" special case is unnecessary.
    mapping = dict(entry.split(':') for entry in allocator_config.split(','))
    # .get avoids the double lookup of "option in mapping and mapping[option]".
    return mapping.get(option) == 'True'
|
||||
|
||||
# Register the EXPANDABLE_SEGMENTS test flag: enabled when the
# PYTORCH_CUDA_ALLOC_CONF env var contains "expandable_segments:True".
TestEnvironment.def_flag(
    "EXPANDABLE_SEGMENTS",
    env_var="PYTORCH_CUDA_ALLOC_CONF",
    enabled_fn=functools.partial(
        allocator_option_enabled_fn, option='expandable_segments'
    ),
)
|
||||
|
||||
if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ:
|
||||
num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2"))
|
||||
gb_available = torch.cuda.mem_get_info()[1] / 2 ** 30
|
||||
|
Reference in New Issue
Block a user