Torch device backend autoload fix (#145611)

Calling `_import_device_backends()` before `_as_tensor_fullprec` is defined causes an import failure if an external backend imports a module that uses `torch._as_tensor_fullprec` while it is being loaded. The fix moves the autoload call to the end of the module so that every function a backend may access is already defined.
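To make the failure mode concrete, here is a minimal sketch of a hypothetical out-of-tree backend module; the module and variable names are invented for illustration and are not part of the PR. Device backend autoload imports such a module while `import torch` is still executing, so a module-level reference to `torch._as_tensor_fullprec` raises `AttributeError` if autoload runs before that helper is defined:

```python
# Hypothetical backend module loaded by torch's device backend autoload
# while `import torch` is still running (illustrative sketch only).
import torch

# Module-level use of the private helper. Before this fix, autoload could
# import this module before torch/__init__.py defined _as_tensor_fullprec,
# so the whole `import torch` failed with:
#   AttributeError: module 'torch' has no attribute '_as_tensor_fullprec'
_DEFAULT_SCALE = torch._as_tensor_fullprec(1.0)


def _autoload():
    # Hook invoked by the autoload machinery after the module is imported.
    pass
```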

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145611
Approved by: https://github.com/albanD
Authored by: Mwiza Kunda on 2025-01-31 19:27:38 +00:00
Committed by: PyTorch MergeBot
Parent: 18380836eb
Commit: 6a0138fcc1


```diff
@@ -2779,10 +2779,6 @@ def _is_device_backend_autoload_enabled() -> builtins.bool:
     return os.getenv("TORCH_DEVICE_BACKEND_AUTOLOAD", "1") == "1"
 
 
-if _is_device_backend_autoload_enabled():
-    _import_device_backends()
-
-
 def _as_tensor_fullprec(t):
     """
     Like torch.as_tensor, but when given Python data types it will keep
@@ -2795,3 +2791,10 @@ def _as_tensor_fullprec(t):
         return torch.as_tensor(t, dtype=torch.int64)
     else:
         return torch.as_tensor(t)
+
+
+# `_import_device_backends` should be kept at the end to ensure
+# all the other functions in this module that may be accessed by
+# an autoloaded backend are defined
+if _is_device_backend_autoload_enabled():
+    _import_device_backends()
```
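For background, the autoload machinery discovers external backends through Python entry points and honours the `TORCH_DEVICE_BACKEND_AUTOLOAD` environment variable shown in the diff. The following is a rough sketch of that flow under stated assumptions, not the actual PyTorch implementation: the `torch.backends` entry-point group name is taken from the autoload documentation, and the error handling is simplified.

```python
import logging
import os
from importlib.metadata import entry_points  # selection API needs Python 3.10+

log = logging.getLogger(__name__)


def _is_device_backend_autoload_enabled() -> bool:
    # Autoload is on by default; export TORCH_DEVICE_BACKEND_AUTOLOAD=0 to skip it.
    return os.getenv("TORCH_DEVICE_BACKEND_AUTOLOAD", "1") == "1"


def _import_device_backends():
    # Sketch only: iterate over backends registered under the assumed
    # `torch.backends` entry-point group, import each backend module, and
    # call the hook the entry point refers to.
    for backend in entry_points(group="torch.backends"):
        try:
            hook = backend.load()  # imports the backend package
            hook()                 # lets the backend register itself
        except Exception as exc:
            log.warning("Skipping device backend %r: %s", backend.name, exc)
```

With this ordering fix, by the time each backend's hook runs, the rest of the module, including `_as_tensor_fullprec`, is already defined; users who want to opt out of autoloading entirely can set `TORCH_DEVICE_BACKEND_AUTOLOAD=0` before importing torch.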