skip various unit tests for Jetson (#122531)

Skip the multiprocessing, CUDA expandable segments, memory-efficient attention, and flash attention tests on Jetson due to hanging / SIGKILL issues observed in NVIDIA internal testing.
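
For context, a minimal sketch of the skip pattern this change relies on, with a hypothetical test name and body; the real skips guard the existing multiprocessing, expandable segments, and attention tests rather than this snippet:

```python
import unittest

from torch.testing._internal.common_cuda import IS_JETSON
from torch.testing._internal.common_utils import run_tests, TestCase


class ExampleCudaTests(TestCase):
    # Hypothetical placeholder test; the real skipped tests exercise
    # multiprocessing, expandable segments, and the attention kernels.
    @unittest.skipIf(IS_JETSON, "hangs / gets SIGKILLed on Jetson")
    def test_example_cuda_path(self):
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    run_tests()
```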

Pull Request resolved: https://github.com/pytorch/pytorch/pull/122531
Approved by: https://github.com/eqy, https://github.com/malfet
Author: Fuzzkatt
Date: 2024-04-16 01:26:22 +00:00
Committed by: PyTorch MergeBot
Parent: aaad0554b4
Commit: 1cf62e86a4
4 changed files with 16 additions and 5 deletions

@@ -14,6 +14,7 @@ import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_cuda import IS_JETSON
from torch.testing._internal.common_utils import (
    IS_MACOS,
    IS_WINDOWS,
@@ -36,12 +37,15 @@ load_tests = load_tests
TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir("/dev/shm")
MAX_WAITING_TIME_IN_SECONDS = 30
TEST_CUDA_IPC = (
    torch.cuda.is_available()
    and sys.platform != "darwin"
    and sys.platform != "win32"
    and not IS_JETSON
    and not TEST_WITH_ROCM
)  # https://github.com/pytorch/pytorch/issues/90940
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
if TEST_CUDA_IPC:
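
A usage note (assumed pattern, not part of this diff): many tests in this file are gated on TEST_CUDA_IPC rather than carrying their own Jetson check, so folding `and not IS_JETSON` into the flag's definition disables that whole family of tests on Jetson at once, roughly like:

```python
import unittest

from torch.testing._internal.common_utils import run_tests, TestCase

# Stand-in for the flag computed in the hunk above; after this change it
# evaluates to False on Jetson.
TEST_CUDA_IPC = False


@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC unavailable or disabled (e.g. on Jetson)")
class TestCudaIpcExample(TestCase):
    def test_share_cuda_tensor(self):
        # Hypothetical body; the real tests pass CUDA tensors between processes.
        pass


if __name__ == "__main__":
    run_tests()
```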