Files
pytorch/test/test_cuda_expandable_segments.py
Xuehai Pan 45411d1fc9 Use absolute path path.resolve() -> path.absolute() (#129409)
Changes:

1. Always use an explicit `.absolute()` call: `Path(__file__)` -> `Path(__file__).absolute()`
2. Replace `path.resolve()` with `path.absolute()` if the code is resolving the PyTorch repo root directory.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129409
Approved by: https://github.com/albanD
2025-01-03 20:03:40 +00:00

41 lines
1.0 KiB
Python

# Owner(s): ["module: cuda"]
# Re-run the CUDA test suites, but with the caching allocator configured to
# use expandable segments instead of the default allocation strategy.
import sys
from pathlib import Path

from test_cuda import (  # noqa: F401
    TestBlockStateAbsorption,
    TestCuda,
    TestCudaMallocAsync,
)

import torch
from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM

# Repo root is one directory above this test file; it is temporarily placed on
# sys.path only so that `tools.stats` (which lives at the repo root) imports.
REPO_ROOT = Path(__file__).absolute().parents[1]

sys.path.insert(0, str(REPO_ROOT))
from tools.stats.import_test_stats import get_disabled_tests  # noqa: E402

# Make sure to remove REPO_ROOT after import is done
sys.path.remove(str(REPO_ROOT))

if __name__ == "__main__":
    # Expandable segments are only exercised on platforms that support them:
    # requires CUDA, and is skipped on Jetson, Windows, and ROCm builds.
    if (
        torch.cuda.is_available()
        and not IS_JETSON
        and not IS_WINDOWS
        and not TEST_WITH_ROCM
    ):
        get_disabled_tests(".")

        torch.cuda.memory._set_allocator_settings("expandable_segments:True")

        # Monkey-patch the imported test classes so they report that
        # expandable segments are active (each takes `self`, hence `lambda _`).
        TestCuda.expandable_segments = lambda _: True
        TestBlockStateAbsorption.expandable_segments = lambda _: True

        run_tests()