Enable UFMT on test/test_cuda*.py (#124352)

Part of: #123062

Ran lintrunner on:
- test/test_cuda.py
- test/test_cuda_expandable_segments.py
- test/test_cuda_multigpu.py
- test/test_cuda_nvml_based_avail.py
- test/test_cuda_primary_ctx.py
- test/test_cuda_sanitizer.py
- test/test_cuda_trace.py

Detail:

```bash
$ lintrunner -a --take UFMT --all-files
ok No lint issues.
Successfully applied all patches.
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124352
Approved by: https://github.com/ezyang
Committed by: PyTorch MergeBot
Parent: 977dc5593a
Commit: d5182bb75b
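For reference, the UFMT lint that lintrunner applies here wraps the ufmt tool, which runs µsort (import sorting) and black (code formatting) in a single pass; both kinds of change are visible in the diff below. A minimal sketch of invoking ufmt directly on one of the touched files, assuming the ufmt package is installed (`ufmt_paths` and the `Result` fields are taken from ufmt's published API, not from this PR):

```python
# Minimal sketch: apply usort + black to one file via ufmt's Python API.
# Assumes `pip install ufmt`; the API names are ufmt's, not part of this PR.
from pathlib import Path

from ufmt import ufmt_paths

# dry_run=True reports what would change without rewriting anything on disk
for result in ufmt_paths([Path("test/test_cuda_primary_ctx.py")], dry_run=True):
    status = "would be reformatted" if result.changed else "already clean"
    print(f"{result.path}: {status}")
```

In CI the supported entry point remains the `lintrunner -a --take UFMT` invocation shown above; this is only a shortcut for a single file.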
The diff to test/test_cuda_primary_ctx.py:

```diff
@@ -1,15 +1,21 @@
 # Owner(s): ["module: cuda"]
 
-import torch
-from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan, NoTest
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
 import sys
 import unittest
 
+import torch
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+from torch.testing._internal.common_utils import (
+    NoTest,
+    run_tests,
+    skipIfRocmVersionLessThan,
+    TestCase,
+)
+
 # NOTE: this needs to be run in a brand new process
 
 if not TEST_CUDA:
-    print('CUDA not available, skipping tests', file=sys.stderr)
+    print("CUDA not available, skipping tests", file=sys.stderr)
     TestCase = NoTest  # noqa: F811
 
 
@@ -18,17 +24,21 @@ class TestCudaPrimaryCtx(TestCase):
     CTX_ALREADY_CREATED_ERR_MSG = (
         "Tests defined in test_cuda_primary_ctx.py must be run in a process "
         "where CUDA contexts are never created. Use either run_test.py or add "
-        "--subprocess to run each test in a different subprocess.")
+        "--subprocess to run each test in a different subprocess."
+    )
 
     @skipIfRocmVersionLessThan((4, 4, 21504))
     def setUp(self):
         for device in range(torch.cuda.device_count()):
             # Ensure context has not been created beforehand
-            self.assertFalse(torch._C._cuda_hasPrimaryContext(device), TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)
+            self.assertFalse(
+                torch._C._cuda_hasPrimaryContext(device),
+                TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG,
+            )
 
     @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
     def test_str_repr(self):
-        x = torch.randn(1, device='cuda:1')
+        x = torch.randn(1, device="cuda:1")
 
         # We should have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -43,13 +53,13 @@ class TestCudaPrimaryCtx(TestCase):
 
     @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
     def test_copy(self):
-        x = torch.randn(1, device='cuda:1')
+        x = torch.randn(1, device="cuda:1")
 
         # We should have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-        y = torch.randn(1, device='cpu')
+        y = torch.randn(1, device="cpu")
         y.copy_(x)
 
         # We should still have only created context on 'cuda:1'
@@ -58,7 +68,7 @@ class TestCudaPrimaryCtx(TestCase):
 
     @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
     def test_pin_memory(self):
-        x = torch.randn(1, device='cuda:1')
+        x = torch.randn(1, device="cuda:1")
 
         # We should have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -70,7 +80,7 @@ class TestCudaPrimaryCtx(TestCase):
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-        x = torch.randn(3, device='cpu').pin_memory()
+        x = torch.randn(3, device="cpu").pin_memory()
 
         # We should still have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -82,19 +92,19 @@ class TestCudaPrimaryCtx(TestCase):
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-        x = torch.randn(3, device='cpu', pin_memory=True)
+        x = torch.randn(3, device="cpu", pin_memory=True)
 
         # We should still have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-        x = torch.zeros(3, device='cpu', pin_memory=True)
+        x = torch.zeros(3, device="cpu", pin_memory=True)
 
         # We should still have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-        x = torch.empty(3, device='cpu', pin_memory=True)
+        x = torch.empty(3, device="cpu", pin_memory=True)
 
         # We should still have only created context on 'cuda:1'
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -106,5 +116,6 @@ class TestCudaPrimaryCtx(TestCase):
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     run_tests()
```
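Worth spelling out, since every test in this file leans on it: the NOTE and CTX_ALREADY_CREATED_ERR_MSG above encode the invariant that importing torch creates no CUDA primary context, that the first CUDA operation creates one only on the device it touches, and that host-side work such as pinned allocations must not spill a context onto other devices. A minimal sketch of that invariant, assuming a brand new process with CUDA available and at least two GPUs (it uses the same private torch._C._cuda_hasPrimaryContext helper the tests use):

```python
# Minimal sketch of the invariant behind test_cuda_primary_ctx.py.
# Assumption: run in a brand new Python process with >= 2 CUDA devices.
import torch

# Importing torch must not have created any primary context yet.
assert not any(
    torch._C._cuda_hasPrimaryContext(d) for d in range(torch.cuda.device_count())
), "run this in a brand new process"

# The first CUDA allocation creates a primary context, but only on 'cuda:1'.
x = torch.randn(1, device="cuda:1")
assert torch._C._cuda_hasPrimaryContext(1)
assert not torch._C._cuda_hasPrimaryContext(0)

# Pinned host allocations must not create a context on device 0 either.
y = torch.empty(3, device="cpu", pin_memory=True)
assert not torch._C._cuda_hasPrimaryContext(0)
```

This is why the error message insists on run_test.py or --subprocess: any CUDA work done earlier in the same process would create contexts before setUp runs and invalidate every assertion above.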