mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Changes: 1. Always explicit `.absolute()`: `Path(__file__)` -> `Path(__file__).absolute()` 2. Replace `path.resolve()` with `path.absolute()` if the code is resolving the PyTorch repo root directory. Pull Request resolved: https://github.com/pytorch/pytorch/pull/129409 Approved by: https://github.com/albanD
41 lines
1.0 KiB
Python
# Owner(s): ["module: cuda"]

# Re-run the CUDA test suite, but with the caching allocator configured to use
# expandable segments. The test classes themselves come from test_cuda; this
# file only flips the allocator setting and an `expandable_segments` flag on
# the imported classes before handing control to the normal test runner.

import sys
from pathlib import Path

# Imported so that their tests are collected and executed by run_tests()
# below; the names are otherwise "unused", hence the noqa.
from test_cuda import (  # noqa: F401
    TestBlockStateAbsorption,
    TestCuda,
    TestCudaMallocAsync,
)

import torch
from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM


# This file lives one level below the repo root; add the root to sys.path
# temporarily so the `tools` package can be imported.
REPO_ROOT = Path(__file__).absolute().parents[1]
sys.path.insert(0, str(REPO_ROOT))

from tools.stats.import_test_stats import get_disabled_tests

# Make sure to remove REPO_ROOT after import is done
sys.path.remove(str(REPO_ROOT))

if __name__ == "__main__":
    # Expandable segments are only exercised on Linux CUDA builds: skipped on
    # Jetson, Windows, and ROCm, and when no CUDA device is available.
    if (
        torch.cuda.is_available()
        and not IS_JETSON
        and not IS_WINDOWS
        and not TEST_WITH_ROCM
    ):
        # Fetch the disabled-tests list so known-flaky tests are skipped.
        get_disabled_tests(".")

        # Switch the CUDA caching allocator to expandable segments and tell
        # the relevant test classes that this mode is active so they can
        # adjust their expectations.
        torch.cuda.memory._set_allocator_settings("expandable_segments:True")
        TestCuda.expandable_segments = lambda _: True
        TestBlockStateAbsorption.expandable_segments = lambda _: True

        run_tests()