Add torch.version.xpu (#139466)

# Motivation
We add a new attribute `torch.version.xpu` to facilitate problem diagnosis and version control.

# Additional Context
It is aligned with `torch.version.cuda` and `torch.version.hip`.
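
For illustration, a minimal usage sketch; the version strings in the comments are hypothetical examples, not values from a real build:

```python
# Minimal usage sketch: torch.version.xpu mirrors torch.version.cuda and
# torch.version.hip, and is None on builds without XPU support.
import torch

print(torch.version.cuda)  # e.g. "12.4" on a CUDA build, otherwise None
print(torch.version.hip)   # e.g. "6.1" on a ROCm build, otherwise None
print(torch.version.xpu)   # e.g. "20250001" on an XPU build, otherwise None
```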

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139466
Approved by: https://github.com/EikanWang, https://github.com/ezyang, https://github.com/atalman, https://github.com/malfet
ghstack dependencies: #139258
Author:    Yu, Guangye
Date:      2024-11-09 10:35:54 +00:00
Committer: PyTorch MergeBot
Parent:    8051ee802c
Commit:    052b67e2b4
3 changed files with 24 additions and 0 deletions


```diff
@@ -16,6 +16,8 @@ from torch.testing._internal.common_device_type import (
 )
 from torch.testing._internal.common_methods_invocations import ops_and_refs
 from torch.testing._internal.common_utils import (
+    find_library_location,
+    IS_LINUX,
     NoTest,
     run_tests,
     suppress_warnings,
@@ -428,6 +430,24 @@ print(torch.xpu.device_count())
         for arch in arch_list:
             self.assertTrue(arch in flags)
 
+    def test_torch_version_xpu(self):
+        self.assertEqual(len(torch.version.xpu), 8)
+        compiler_version = int(torch.version.xpu)
+        self.assertGreater(compiler_version, 20230000)
+        if IS_LINUX:
+            library = find_library_location("libtorch_xpu.so")
+            cmd = f"ldd {library} | grep libsycl"
+            results = subprocess.check_output(cmd, shell=True).strip().split(b"\n")
+            # There should be only one libsycl.so or libsycl-preview.so
+            self.assertEqual(len(results), 1)
+            for result in results:
+                if b"libsycl.so" in result:
+                    self.assertGreaterEqual(compiler_version, 20250000)
+                elif b"libsycl-preview.so" in result:
+                    self.assertLess(compiler_version, 20250000)
+                else:
+                    self.fail("Unexpected libsycl library")
+
 
 instantiate_device_type_tests(TestXpu, globals(), only_for="xpu", allow_xpu=True)
 
```
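
The test above relies on `torch.version.xpu` being an eight-digit, date-like SYCL compiler version (greater than 20230000). A minimal sketch of how a caller might interpret it, assuming the leading four digits encode the release year (an assumption not stated in the diff):

```python
# Sketch only: interpret the eight-digit XPU compiler version exposed by
# torch.version.xpu. The year/remainder split is an assumption for illustration.
import torch

if torch.version.xpu is not None:
    compiler_version = int(torch.version.xpu)  # e.g. 20250001 (hypothetical)
    release_year = compiler_version // 10000   # assumed leading-year encoding
    # The test above uses 20250000 as the cutoff between builds that link
    # libsycl-preview.so (older toolchains) and libsycl.so (newer toolchains).
    links_stable_sycl = compiler_version >= 20250000
    print(release_year, links_stable_sycl)
```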


```diff
@@ -76,12 +76,14 @@ if __name__ == "__main__":
     )
     parser.add_argument("--cuda-version", "--cuda_version", type=str)
     parser.add_argument("--hip-version", "--hip_version", type=str)
+    parser.add_argument("--xpu-version", "--xpu_version", type=str)
     args = parser.parse_args()
 
     assert args.is_debug is not None
     args.cuda_version = None if args.cuda_version == "" else args.cuda_version
     args.hip_version = None if args.hip_version == "" else args.hip_version
+    args.xpu_version = None if args.xpu_version == "" else args.xpu_version
 
     pytorch_root = Path(__file__).parent.parent
     version_path = pytorch_root / "torch" / "version.py"
@@ -104,3 +106,4 @@ if __name__ == "__main__":
         f.write(f"cuda: Optional[str] = {repr(args.cuda_version)}\n")
         f.write(f"git_version = {repr(sha)}\n")
         f.write(f"hip: Optional[str] = {repr(args.hip_version)}\n")
+        f.write(f"xpu: Optional[str] = {repr(args.xpu_version)}\n")
```
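
With this change, the generated `torch/version.py` gains an `xpu` field alongside `cuda` and `hip`. An illustrative excerpt of the generated file follows; all values below are hypothetical placeholders, not taken from a real build:

```python
# Hypothetical excerpt of a generated torch/version.py; real values depend on
# the build (git SHA, toolkit versions) and are None for components not built.
from typing import Optional

cuda: Optional[str] = None
git_version = "0123456789abcdef0123456789abcdef01234567"
hip: Optional[str] = None
xpu: Optional[str] = "20250001"
```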


```diff
@@ -473,6 +473,7 @@ add_custom_target(
   --is-debug=${TORCH_VERSION_DEBUG}
   --cuda-version=${CUDA_VERSION}
   --hip-version=${HIP_VERSION}
+  --xpu-version=${SYCL_COMPILER_VERSION}
   BYPRODUCTS ${TORCH_SRC_DIR}/version.py
   COMMENT "Regenerating version file..."
   WORKING_DIRECTORY ${TORCH_ROOT}
```
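
A sketch of what this custom target effectively runs at build time. The generator script path and all argument values are assumptions for illustration; the file names are not shown in this commit view:

```python
# Sketch only: roughly what the CMake custom target above invokes to regenerate
# torch/version.py. Script path and argument values are hypothetical.
import subprocess

subprocess.check_call(
    [
        "python",
        "tools/generate_torch_version.py",  # assumed location of the generator
        "--is-debug=false",                 # ${TORCH_VERSION_DEBUG}
        "--cuda-version=",                  # empty -> torch.version.cuda is None
        "--hip-version=",                   # empty -> torch.version.hip is None
        "--xpu-version=20250001",           # ${SYCL_COMPILER_VERSION}, hypothetical
    ],
    cwd="/path/to/pytorch",                 # ${TORCH_ROOT}
)
```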