mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
This PR is part of a series attempting to re-submit https://github.com/pytorch/pytorch/pull/134592 as smaller PRs. In jit tests: - Add and use a common raise_on_run_directly method for when a user runs a test file directly which should not be run this way. Print the file which the user should have run. - Raise a RuntimeError on tests which have been disabled (not run) Pull Request resolved: https://github.com/pytorch/pytorch/pull/154725 Approved by: https://github.com/clee2000
78 lines · 2.9 KiB · Python
# Owner(s): ["oncall: jit"]
|
|
|
|
import os
|
|
import sys
|
|
|
|
import torch
|
|
|
|
|
|
# Make the helper files in test/ importable
|
|
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
|
sys.path.append(pytorch_test_dir)
|
|
from torch.testing._internal.common_utils import raise_on_run_directly
|
|
from torch.testing._internal.jit_utils import JitTestCase
|
|
|
|
|
|
class TestTensorCreationOps(JitTestCase):
|
|
"""
|
|
A suite of tests for ops that create tensors.
|
|
"""
|
|
|
|
def test_randperm_default_dtype(self):
|
|
def randperm(x: int):
|
|
perm = torch.randperm(x)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert perm.dtype == torch.int64
|
|
|
|
self.checkScript(randperm, (3,))
|
|
|
|
def test_randperm_specifed_dtype(self):
|
|
def randperm(x: int):
|
|
perm = torch.randperm(x, dtype=torch.float)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert perm.dtype == torch.float
|
|
|
|
self.checkScript(randperm, (3,))
|
|
|
|
def test_triu_indices_default_dtype(self):
|
|
def triu_indices(rows: int, cols: int):
|
|
indices = torch.triu_indices(rows, cols)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert indices.dtype == torch.int64
|
|
|
|
self.checkScript(triu_indices, (3, 3))
|
|
|
|
def test_triu_indices_specified_dtype(self):
|
|
def triu_indices(rows: int, cols: int):
|
|
indices = torch.triu_indices(rows, cols, dtype=torch.int32)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert indices.dtype == torch.int32
|
|
|
|
self.checkScript(triu_indices, (3, 3))
|
|
|
|
def test_tril_indices_default_dtype(self):
|
|
def tril_indices(rows: int, cols: int):
|
|
indices = torch.tril_indices(rows, cols)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert indices.dtype == torch.int64
|
|
|
|
self.checkScript(tril_indices, (3, 3))
|
|
|
|
def test_tril_indices_specified_dtype(self):
|
|
def tril_indices(rows: int, cols: int):
|
|
indices = torch.tril_indices(rows, cols, dtype=torch.int32)
|
|
# Have to perform assertion here because TorchScript returns dtypes
|
|
# as integers, which are not comparable against eager torch.dtype.
|
|
assert indices.dtype == torch.int32
|
|
|
|
self.checkScript(tril_indices, (3, 3))
|
|
|
|
|
|
if __name__ == "__main__":
    # This file is collected by test/test_jit.py; running it directly would
    # bypass the JIT test harness, so fail fast and point at the right entry.
    raise_on_run_directly("test/test_jit.py")