[ONNX] Remove legacy Dort (#158258)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158258
Approved by: https://github.com/justinchuby, https://github.com/malfet
Author: Ti-Tai Wang
Date: 2025-07-15 15:37:12 +00:00
Committed by: PyTorch MergeBot
Parent: 7afb834f93
Commit: 5606c516fd
7 changed files with 30 additions and 1314 deletions


@@ -87,7 +87,6 @@ also be interested in reading our [development wiki](https://github.com/pytorch/
 onnx_dynamo
 onnx_ops
 onnx_verification
-onnx_dynamo_onnxruntime_backend
 onnx_torchscript
 ```


@ -1,11 +0,0 @@
# ONNX Backend for TorchDynamo
For a quick overview of `torch.compiler`, see {ref}`torch.compiler_overview`.
```{warning}
The ONNX backend for torch.compile is a rapidly evolving beta technology.
```
```{eval-rst}
.. autofunction:: torch.onnx.is_onnxrt_backend_supported
```
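The page deleted above was the user-facing documentation for DORT's torch.compile integration. For context, the documented flow looked roughly like the following historical sketch (reconstructed from the deleted page and the backends table below; both `is_onnxrt_backend_supported` and the `onnxrt` backend string are removed by this commit, so the snippet no longer runs on current builds):

```python
import torch

# Historical sketch, valid only before this commit: gate use of the
# DORT backend on the support check the deleted page documented.
if torch.onnx.is_onnxrt_backend_supported():
    model = torch.nn.Linear(4, 4)
    compiled = torch.compile(model, backend="onnxrt")
    print(compiled(torch.randn(2, 4)))
else:
    print("onnxrt dependencies missing; see the removed backend module below")
```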


@@ -56,8 +56,6 @@ Some of the most commonly used backends include:
      - CUDA graphs with AOT Autograd. `Read more <https://github.com/pytorch/torchdynamo/pull/757>`__
    * - ``torch.compile(m, backend="ipex")``
      - Uses IPEX on CPU. `Read more <https://github.com/intel/intel-extension-for-pytorch>`__
-   * - ``torch.compile(m, backend="onnxrt")``
-     - Uses ONNX Runtime for training on CPU/GPU. :doc:`Read more <onnx_dynamo_onnxruntime_backend>`
 ```

 **Inference-only backends**
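With the table row gone, `onnxrt` also disappears from the runtime backend registry, since the placeholder module below no longer calls `register_backend`. The standard way to confirm which backends remain registered:

```python
import torch._dynamo

# After this commit, "onnxrt" should be absent from this list because
# the placeholder backend module registers nothing.
print(torch._dynamo.list_backends())
```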


@@ -8,7 +8,6 @@ import torch._dynamo
 import torch._dynamo.backends
 import torch._dynamo.test_case
 from torch._dynamo.backends.debugging import ExplainWithBackend
-from torch._dynamo.backends.onnxrt import has_onnxruntime
 from torch._dynamo.backends.tvm import has_tvm
 from torch._dynamo.testing import same
 from torch.fx._lazy_graph_module import _force_skip_lazy_graph_module
@@ -138,10 +137,6 @@ class TestOptimizations(torch._dynamo.test_case.TestCase):
     def test_aot_cudagraphs(self, device):
         self._check_backend_works("cudagraphs", device)
 
-    @unittest.skipIf(not has_onnxruntime(), "requires onnxruntime")
-    def test_onnxrt(self, device):
-        self._check_backend_works("onnxrt", device)
-
     @unittest.skipIf(not has_tvm(), "requires tvm")
     def test_tvm(self, device):
         self._check_backend_works("tvm", device)
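The deleted test used the same availability-gating idiom as the surviving `test_tvm`: probe for the backend's dependencies and skip, rather than fail, when they are absent. A self-contained sketch of the idiom (the probe and test names here are hypothetical):

```python
import importlib.util
import unittest


def has_backend_deps() -> bool:
    # Hypothetical probe in the spirit of has_tvm() and the removed
    # has_onnxruntime(): True only if the required package is importable.
    return importlib.util.find_spec("tvm") is not None


class TestBackendGating(unittest.TestCase):
    @unittest.skipIf(not has_backend_deps(), "requires tvm")
    def test_backend(self):
        # Stand-in for self._check_backend_works("tvm", device).
        self.assertTrue(has_backend_deps())


if __name__ == "__main__":
    unittest.main()
```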


@@ -4,35 +4,38 @@
 # to the right people, please tag related GitHub issues with `module: onnx`.
 #
 # Maintainers' Github IDs: wschin, xadupre
-from torch.onnx._internal.onnxruntime import (
-    is_onnxrt_backend_supported,
-    torch_compile_backend,
-)
+# from torch.onnx._internal.onnxruntime import (
+#     is_onnxrt_backend_supported,
+#     torch_compile_backend,
+# )
 
-from .registry import register_backend
+# from .registry import register_backend
 
-def has_onnxruntime():
-    # FIXME: update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
-    return is_onnxrt_backend_supported()
+"""
+Placeholder for onnxruntime backend for dynamo
+"""
+
+# def has_onnxruntime():
+#     # FIXME: update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
+#     return is_onnxrt_backend_supported()
 
-if is_onnxrt_backend_supported():
-    register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
-else:
-
-    def information_displaying_backend(*args, **kwargs):
-        raise ImportError(
-            "onnxrt is not registered as a backend. "
-            "Please make sure all dependencies such as "
-            "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
-            "Suggested procedure to fix dependency problem:\n"
-            " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
-            " (2) Open a new python terminal.\n"
-            " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
-            " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
-            " (5) If it returns `False`, please execute the package importing section in "
-            "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
-        )
-
-    register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
+# if is_onnxrt_backend_supported():
+#     register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
+# else:
+#     def information_displaying_backend(*args, **kwargs):
+#         raise ImportError(
+#             "onnxrt is not registered as a backend. "
+#             "Please make sure all dependencies such as "
+#             "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
+#             "Suggested procedure to fix dependency problem:\n"
+#             " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
+#             " (2) Open a new python terminal.\n"
+#             " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
+#             " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
+#             " (5) If it returns `False`, please execute the package importing section in "
+#             "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
+#         )
+# register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
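Before being commented out, the fallback above turned a missing-dependency situation into an actionable ImportError. The recipe in that message boils down to attempting each import in turn; a standalone sketch (the package list comes from the removed message; note that the `onnxruntime-training` wheel imports as the `onnxruntime` module):

```python
# Try each dependency the removed error message named and report
# which one fails to import.
for name in ("numpy", "onnx", "onnxscript", "onnxruntime"):
    try:
        __import__(name)
        print(f"{name}: OK")
    except ImportError as exc:
        print(f"{name}: MISSING ({exc})")
```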


@@ -38,8 +38,6 @@ __all__ = [
     "OnnxExporterError",
     "ONNXProgram",
     "enable_fake_mode",
-    # DORT / torch.compile
-    "is_onnxrt_backend_supported",
 ]
 
 from typing import Any, Callable, TYPE_CHECKING
@@ -51,12 +49,6 @@ from torch._C._onnx import OperatorExportTypes, TensorProtoDataType, TrainingMode
 
 from ._internal._exporter_legacy import enable_fake_mode
 from ._internal.exporter._onnx_program import ONNXProgram
-from ._internal.onnxruntime import (
-    is_onnxrt_backend_supported,
-    OrtBackend as _OrtBackend,
-    OrtBackendOptions as _OrtBackendOptions,
-    OrtExecutionProvider as _OrtExecutionProvider,
-)
 from ._type_utils import JitScalarType
 from .errors import OnnxExporterError
 from .utils import (
@@ -98,10 +90,7 @@ if TYPE_CHECKING:
 JitScalarType.__module__ = "torch.onnx"
 ONNXProgram.__module__ = "torch.onnx"
 OnnxExporterError.__module__ = "torch.onnx"
-_OrtBackend.__module__ = "torch.onnx"
-_OrtBackendOptions.__module__ = "torch.onnx"
 enable_fake_mode.__module__ = "torch.onnx"
-is_onnxrt_backend_supported.__module__ = "torch.onnx"
 
 producer_name = "pytorch"
 producer_version = _C_onnx.PRODUCER_VERSION
File diff suppressed because it is too large.