[ONNX] Set default opset to 20 (#158802)

Bump the default opset to 20, which is a newer opset and the maximum the TorchScript exporter supports.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/158802
Approved by: https://github.com/titaiwangms
Justin Chu
2025-07-22 19:55:01 +00:00
committed by PyTorch MergeBot
parent c917c63282
commit 767791943d
3 changed files with 6 additions and 5 deletions
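For context, a minimal sketch of the user-facing effect (the GeluModel below is a stand-in mirroring the updated test, not code from this commit): calling torch.onnx.export with dynamo=True and no opset_version now targets opset 20, while passing opset_version explicitly still pins an older opset.

    import torch

    class GeluModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.gelu(x, approximate="tanh")

    # No opset_version given: the exporter now defaults to opset 20.
    onnx_program = torch.onnx.export(GeluModel(), (torch.randn(1, 3, 4, 4),), dynamo=True)

    # Pin opset_version to keep targeting an older opset, as the updated test does.
    onnx_program_op18 = torch.onnx.export(
        GeluModel(), (torch.randn(1, 3, 4, 4),), opset_version=18, dynamo=True
    )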

@@ -281,10 +281,11 @@ class TestExportAPIDynamo(common_utils.TestCase):
                 # Use GELU activation function
                 return torch.nn.functional.gelu(input, approximate="tanh")

-        input = torch.randn(1, 3, 4, 4)
+        input = (torch.randn(1, 3, 4, 4),)
         onnx_program_op18 = torch.onnx.export(
             GeluModel(),
             input,
+            opset_version=18,
             dynamo=True,
         )
         all_nodes_op18 = [n.op_type for n in onnx_program_op18.model.graph]
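To check what a pinned opset actually produced, the exported program can be inspected the same way the test does; a small sketch with a hypothetical stand-in model, and with ONNXProgram.model_proto assumed to be available for reading the declared opset imports:

    import torch

    model = torch.nn.Linear(4, 2)  # hypothetical stand-in model
    onnx_program = torch.onnx.export(
        model, (torch.randn(1, 4),), opset_version=18, dynamo=True
    )

    # List the produced op types by iterating the graph, as the test above does.
    print([n.op_type for n in onnx_program.model.graph])

    # Read the declared opset imports from the serialized proto
    # (assumes ONNXProgram.model_proto returns an onnx.ModelProto).
    print({o.domain: o.version for o in onnx_program.model_proto.opset_import})
    # The default ONNX domain ("") should map to 18 here.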

@@ -6,7 +6,7 @@ ONNX_BASE_OPSET = 9
 ONNX_MIN_OPSET = 7
 ONNX_MAX_OPSET = 23
 ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET = 20
-ONNX_DEFAULT_OPSET = 18
+ONNX_DEFAULT_OPSET = 20
 ONNX_CONSTANT_FOLDING_MIN_OPSET = 9
 PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
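These constants live in torch.onnx._constants (imported as onnx_constants in the hunks below) and can be inspected directly; a quick check of the values as of this commit:

    from torch.onnx import _constants

    # The default now matches the TorchScript exporter's maximum opset.
    print(_constants.ONNX_DEFAULT_OPSET)                   # 20
    print(_constants.ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET)  # 20
    print(_constants.ONNX_MIN_OPSET, _constants.ONNX_MAX_OPSET)  # 7 23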

@@ -10,9 +10,9 @@ from collections.abc import Mapping, Sequence
 from typing import Any, Callable, TYPE_CHECKING

 import torch
+from torch.onnx import _constants as onnx_constants
 from torch.onnx._internal._lazy_import import onnxscript_apis, onnxscript_ir as ir
 from torch.onnx._internal.exporter import (
-    _constants,
     _core,
     _dynamic_shapes,
     _onnx_program,
@@ -50,7 +50,7 @@ def export_compat(
     verbose: bool | None = None,
     input_names: Sequence[str] | None = None,
     output_names: Sequence[str] | None = None,
-    opset_version: int | None = _constants.TORCHLIB_OPSET,
+    opset_version: int | None = onnx_constants.ONNX_DEFAULT_OPSET,
     custom_translation_table: dict[Callable, Callable | Sequence[Callable]]
     | None = None,
     dynamic_axes: Mapping[str, Mapping[int, str]]
@@ -70,7 +70,7 @@ def export_compat(
     legacy_export_kwargs: dict[str, Any] | None = None,
 ) -> _onnx_program.ONNXProgram:
     if opset_version is None:
-        opset_version = _constants.TORCHLIB_OPSET
+        opset_version = onnx_constants.ONNX_DEFAULT_OPSET
     if isinstance(model, torch.export.ExportedProgram):
         # We know the model is already exported program, so the args, kwargs, and dynamic_shapes