Enable UFMT on test_decomp.py, test_expanded_weights.py and some files (#125117)
Part of: #123062

Ran lintrunner on:
- test/test_decomp.py
- test/test_deploy.py
- test/test_determination.py
- test/test_dlpack.py
- test/test_dynamic_shapes.py
- test/test_expanded_weights.py

Detail:
```bash
$ lintrunner -a --take UFMT --all-files
ok No lint issues.
Successfully applied all patches.
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125117
Approved by: https://github.com/jansel
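For reference, the UFMT lint that `lintrunner --take UFMT` applies combines import sorting (usort) with black formatting; that is what produces the exploded import lists and decorator calls in the diff below. A minimal sketch of reproducing the black half locally through its Python API, assuming the `black` package is installed (the decorator and helper names come from the diff and are deliberately left undefined, since black only parses the source):

```python
# Hedged sketch: run black programmatically on a snippet taken from the diff.
# black does not execute the code, so the undefined names are fine here.
import black

SRC = '''\
class TestTorchDlPack:
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
    def test_dlpack_capsule_conversion(self, device, dtype):
        x = make_tensor((5,), dtype=dtype, device=device)
'''

# The over-long decorator gets wrapped; the exact layout black picks depends on
# indentation and its 88-character default line length.
print(black.format_str(SRC, mode=black.Mode()))
```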
Committed by: PyTorch MergeBot
Parent: 48b6c8dbc3
Commit: c165a8e71d
test/test_dlpack.py

@@ -2,11 +2,16 @@
 
 import torch
 from torch.testing import make_tensor
-from torch.testing._internal.common_utils import TestCase, run_tests, IS_JETSON
 from torch.testing._internal.common_device_type import (
-    instantiate_device_type_tests, onlyCUDA, dtypes, skipMeta, skipCUDAIfRocm,
-    onlyNativeDeviceTypes)
+    dtypes,
+    instantiate_device_type_tests,
+    onlyCUDA,
+    onlyNativeDeviceTypes,
+    skipCUDAIfRocm,
+    skipMeta,
+)
 from torch.testing._internal.common_dtype import all_types_and_complex_and
+from torch.testing._internal.common_utils import IS_JETSON, run_tests, TestCase
 from torch.utils.dlpack import from_dlpack, to_dlpack
 
 
@@ -15,7 +20,16 @@ class TestTorchDlPack(TestCase):
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
+    @dtypes(
+        *all_types_and_complex_and(
+            torch.half,
+            torch.bfloat16,
+            torch.bool,
+            torch.uint16,
+            torch.uint32,
+            torch.uint64,
+        )
+    )
     def test_dlpack_capsule_conversion(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         z = from_dlpack(to_dlpack(x))
@@ -23,7 +37,16 @@ class TestTorchDlPack(TestCase):
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
+    @dtypes(
+        *all_types_and_complex_and(
+            torch.half,
+            torch.bfloat16,
+            torch.bool,
+            torch.uint16,
+            torch.uint32,
+            torch.uint64,
+        )
+    )
     def test_dlpack_protocol_conversion(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         z = from_dlpack(x)
@@ -62,7 +85,16 @@ class TestTorchDlPack(TestCase):
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
+    @dtypes(
+        *all_types_and_complex_and(
+            torch.half,
+            torch.bfloat16,
+            torch.bool,
+            torch.uint16,
+            torch.uint32,
+            torch.uint64,
+        )
+    )
     def test_from_dlpack(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         y = torch.from_dlpack(x)
@@ -70,7 +102,16 @@ class TestTorchDlPack(TestCase):
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
+    @dtypes(
+        *all_types_and_complex_and(
+            torch.half,
+            torch.bfloat16,
+            torch.bool,
+            torch.uint16,
+            torch.uint32,
+            torch.uint64,
+        )
+    )
     def test_from_dlpack_noncontinguous(self, device, dtype):
         x = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)
 
@@ -113,7 +154,16 @@ class TestTorchDlPack(TestCase):
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool, torch.uint16, torch.uint32, torch.uint64))
+    @dtypes(
+        *all_types_and_complex_and(
+            torch.half,
+            torch.bfloat16,
+            torch.bool,
+            torch.uint16,
+            torch.uint32,
+            torch.uint64,
+        )
+    )
     def test_from_dlpack_dtype(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         y = torch.from_dlpack(x)
@@ -204,5 +254,5 @@ class TestTorchDlPack(TestCase):
 
 instantiate_device_type_tests(TestTorchDlPack, globals())
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     run_tests()
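The hunks above are formatting-only; the behavior under test is unchanged. For orientation, a minimal standalone sketch of the DLPack round-trip these tests exercise (mirroring `test_dlpack_capsule_conversion` and `test_from_dlpack`; the shape and dtype here are arbitrary, and a plain CPU build of PyTorch is assumed):

```python
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

x = torch.arange(5, dtype=torch.float32)

# Legacy capsule path: export to a DLPack capsule, then re-import it.
z = from_dlpack(to_dlpack(x))
assert z.dtype == x.dtype and torch.equal(z, x)

# Protocol path: torch tensors implement __dlpack__, so no explicit capsule is needed.
y = torch.from_dlpack(x)
assert torch.equal(y, x)
```

In the test file itself, `instantiate_device_type_tests(TestTorchDlPack, globals())` generates per-device test classes and `@dtypes(...)` parametrizes each test over the listed dtypes; the reformatting leaves that machinery untouched.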