[dynamo] rename unimplemented_v2 -> unimplemented (#167150)

Also force the new `unimplemented` / old `unimplemented_v2` to explicitly specify the `gb_type`, `context`, `explanation`, and `hints` args.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/167150
Approved by: https://github.com/mlazos, https://github.com/zou3519

Committed by: PyTorch MergeBot
Parent: ba5ffa2dca
Commit: a2f109dcc3
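For orientation before the diff: a minimal sketch of what a call site looks like after this rename, assuming a PyTorch build that already contains this change. It relies only on what the patch below shows (the keyword-only `unimplemented` signature in `torch/_dynamo/exc.py`); the function name `handle_unsupported_case` and the `gb_type`/`context`/`explanation`/`hints` strings are illustrative placeholders, not real graph-break registry entries.

```python
from torch._dynamo.exc import unimplemented


def handle_unsupported_case(value):
    # After this PR every argument is keyword-only; a positional call raises TypeError.
    unimplemented(
        gb_type="Example graph break",   # illustrative only, not a real registry entry
        context=f"value: {value!r}",     # short, dynamic context string
        explanation="Minimal sketch of a Dynamo graph-break call site after the rename.",
        hints=["Illustrative hint; real call sites may pull hints from torch._dynamo.graph_break_hints."],
    )
```

Because the signature starts with a bare `*`, a positional call such as `unimplemented("foo", "bar", ...)` now fails immediately with a `TypeError`, which is the point of forcing every argument to be spelled out by keyword.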
@@ -1050,7 +1050,7 @@ Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especiall
msg = re.sub(r"line (\d+)", "line N", msg)
msg = re.sub(
r"""(?s)Traceback \(most recent call last\):.*
File "exc.py", line N, in unimplemented_v2
File "exc.py", line N, in unimplemented
raise Unsupported\(msg\)""",
"<Internal traceback>\n",
msg,
@@ -115,7 +115,7 @@ def extract_info_from_keyword(source: str, kw: ast.keyword) -> Any:
return clean_string(param_source)
def find_unimplemented_v2_calls(
def find_unimplemented_calls(
path: str, dynamo_dir: Optional[str] = None
) -> list[dict[str, Any]]:
results = []

@@ -135,15 +135,15 @@ def find_unimplemented_v2_calls(
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
if node.name in (
"unimplemented_v2",
"unimplemented_v2_with_warning",
"unimplemented",
"unimplemented_with_warning",
):
continue
if (
isinstance(node, ast.Call)
and isinstance(node.func, ast.Name)
and node.func.id
in ("unimplemented_v2", "unimplemented_v2_with_warning")
in ("unimplemented", "unimplemented_with_warning")
):
info: dict[str, Any] = {
"gb_type": None,

@@ -180,7 +180,7 @@ def find_unimplemented_v2_calls(
def create_registry(dynamo_dir: str, registry_path: str) -> None:
calls = find_unimplemented_v2_calls(dynamo_dir)
calls = find_unimplemented_calls(dynamo_dir)
registry = {}
gb_types = {}

@@ -224,7 +224,7 @@ def main() -> None:
"--dynamo_dir",
type=str,
default=default_dynamo_dir,
help="Directory to search for unimplemented_v2 calls.",
help="Directory to search for unimplemented calls.",
)
parser.add_argument(
@@ -15,7 +15,7 @@ sys.path.insert(0, str(REPO_ROOT))
from tools.dynamo.gb_id_mapping import (
find_unimplemented_v2_calls,
find_unimplemented_calls,
load_registry,
next_gb_id,
)

@@ -50,7 +50,7 @@ def _collect_all_calls(
gb_type_calls: dict[str, list[tuple[dict[str, Any], Path]]] = {}
for py_file in dynamo_dir.rglob("*.py"):
for call in find_unimplemented_v2_calls(py_file, dynamo_dir):
for call in find_unimplemented_calls(py_file, dynamo_dir):
gb_type = call["gb_type"]
if gb_type not in gb_type_calls:
gb_type_calls[gb_type] = []
@@ -27,10 +27,10 @@ class TestGraphBreakRegistryLinter(unittest.TestCase):
json.dump({}, f)
self.callsite_file = self.test_data_dir / "callsite_test.py"
callsite_content = """from torch._dynamo.exc import unimplemented_v2
callsite_content = """from torch._dynamo.exc import unimplemented
def test(self):
unimplemented_v2(
unimplemented(
gb_type="testing",
context="testing",
explanation="testing",

@@ -101,9 +101,9 @@ def test(self):
with open(self.registry_path, "w") as f:
json.dump(registry_data, f, indent=2)
renamed_callsite_content = """from torch._dynamo.exc import unimplemented_v2
renamed_callsite_content = """from torch._dynamo.exc import unimplemented
def test(self):
unimplemented_v2(gb_type="renamed_testing", context="testing", explanation="testing", hints=["testing"])
unimplemented(gb_type="renamed_testing", context="testing", explanation="testing", hints=["testing"])
"""
with open(self.callsite_file, "w") as f:
f.write(renamed_callsite_content)

@@ -168,9 +168,9 @@ def test(self):
with open(self.registry_path, "w") as f:
json.dump(registry_data, f, indent=2)
updated_callsite_content = """from torch._dynamo.exc import unimplemented_v2
updated_callsite_content = """from torch._dynamo.exc import unimplemented
def test(self):
unimplemented_v2(gb_type="testing", context="new_context", explanation="new_explanation", hints=["new_hint"])
unimplemented(gb_type="testing", context="new_context", explanation="new_explanation", hints=["new_hint"])
"""
with open(self.callsite_file, "w") as f:
f.write(updated_callsite_content)

@@ -255,9 +255,9 @@ def test(self):
with open(self.registry_path, "w") as f:
json.dump(registry_data, f, indent=2)
new_callsite_content = """from torch._dynamo.exc import unimplemented_v2
new_callsite_content = """from torch._dynamo.exc import unimplemented
def test(self):
unimplemented_v2(
unimplemented(
gb_type="completely_new_testing",
context="completely_new_context",
explanation="completely_new_explanation",

@@ -330,11 +330,11 @@ def test(self):
init_py.touch()
dynamic_hints_callsite = """from torch._dynamo.exc import unimplemented_v2
dynamic_hints_callsite = """from torch._dynamo.exc import unimplemented
from torch._dynamo import graph_break_hints
def test(self):
unimplemented_v2(
unimplemented(
gb_type="testing_with_graph_break_hints",
context="testing_with_graph_break_hints",
explanation="testing_with_graph_break_hints",
@@ -38,7 +38,7 @@ from .bytecode_transformation import (
create_rot_n,
Instruction,
)
from .exc import IncorrectUsage, unimplemented_v2
from .exc import IncorrectUsage, unimplemented
from .source import AttrSource, ChainedSource, DictGetItemSource, Source
from .utils import is_safe_constant, rot_n_helper
from .variables.base import ValueMutationExisting, VariableTracker

@@ -215,7 +215,7 @@ class PyCodegen:
try:
self.call_reconstruct(source)
except NotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="Reconstruction failure: source.reconstruct not implemented",
context=str(source),
explanation=f"Dynamo has no bytecode reconstruction implemented for {type(source)} variable {source}.",

@@ -359,7 +359,7 @@ class PyCodegen:
try:
self.call_reconstruct(value)
except NotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="Reconstruction failure",
context=str(value),
explanation=f"Dynamo has no bytecode reconstruction implemented for sourceless variable {value}.",
@@ -47,7 +47,7 @@ from torch._dynamo.variables.base import VariableTracker
from torch._subclasses.fake_tensor import FakeTensor
from torch.fx.experimental.symbolic_shapes import free_symbols
from .exc import unimplemented_v2
from .exc import unimplemented
from .variables import CellVariable
from .variables.constant import ConstantVariable
from .variables.tensor import SymNodeVariable

@@ -193,7 +193,7 @@ class ComptimeContext:
"""
Manually trigger a graph break
"""
unimplemented_v2(
unimplemented(
gb_type="ComptimeContext graph break",
context=msg,
explanation=f"Manually triggered ComptimeContext graph break with message {msg}.",
@@ -114,7 +114,7 @@ from .exc import (
SkipCodeRecursiveException,
TorchRuntimeError,
UncapturedHigherOrderOpError,
unimplemented_v2,
unimplemented,
Unsupported,
)
from .graph_bytecode_inputs import reset_user_object_tracking

@@ -646,7 +646,7 @@ class ConvertFrameAssert:
return ConvertFrameReturn()
if is_generator(code):
unimplemented_v2(
unimplemented(
gb_type="Attempt to trace generator",
context="",
explanation="Generators cannot be compiled directly with `torch.compile`.",

@@ -1241,7 +1241,7 @@ def compile_frame( # type: ignore[return]
# We now have a new "last attempt", reset the clock
last_attempt_start_time = time.time()
if attempt > 100:
unimplemented_v2(
unimplemented(
gb_type="Excessive RestartAnalysis() calls",
context="",
explanation="Dynamo attempted to trace the same frame 100+ times. "

@@ -1576,7 +1576,7 @@ def _compile(
raise RecompileLimitExceeded(f"{limit_type} reached")
else:
# do not recursively skip frames
unimplemented_v2(
unimplemented(
gb_type="Dynamo cache limit exceeded",
context=f"Limit type: {limit_type}",
explanation="Dynamo attempted to recompile the code object too many times, "
@@ -450,9 +450,10 @@ exceptions_allowed_to_be_fallback = (
)
def unimplemented_v2_with_warning(
def unimplemented_with_warning(
e: Exception,
code: types.CodeType,
*,
gb_type: str,
context: str,
explanation: str,

@@ -475,7 +476,16 @@ def unimplemented_v2_with_warning(
payload_fn=lambda: graph_break_msg,
)
graph_breaks_log.debug("%s", graph_break_msg)
unimplemented_v2(gb_type, context, explanation, hints, from_exc=e, log_warning=True)
_unimplemented = unimplemented
# to prevent a graph break registry entry
_unimplemented(
gb_type=gb_type,
context=context,
explanation=explanation,
hints=hints,
from_exc=e,
log_warning=True,
)
def format_graph_break_message(

@@ -553,13 +563,12 @@ def get_gbid_documentation_link(gb_type: str) -> Optional[str]:
_NOTHING = object()
# TODO replace old unimplemented later
def unimplemented_v2(
def unimplemented(
*,
gb_type: str,
context: str,
explanation: str,
hints: list[str],
*,
from_exc: Any = _NOTHING,
log_warning: bool = False,
) -> NoReturn:
@ -59,9 +59,9 @@ def register_graph_created_object(
|
||||
try:
|
||||
index_to_external_object_weakref[index] = weakref.ref(example_value)
|
||||
except TypeError as e:
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to make weakref to graph-created external object",
|
||||
context=f"user_object: {example_value}",
|
||||
explanation="Object does not allow us to make a weakref to it",
|
||||
@ -79,9 +79,9 @@ def register_user_object(value: Any, source: Source) -> int:
|
||||
try:
|
||||
index_to_external_object_weakref[index] = weakref.ref(value)
|
||||
except TypeError as e:
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to make weakref to User Object",
|
||||
context=f"user_object: {value}",
|
||||
explanation="Object does not allow us to make a weakref to it",
|
||||
|
||||
@ -2284,7 +2284,7 @@ class GuardBuilder(GuardBuilderBase):
|
||||
# If guard_nn_modules is true, we will guard on the right set of guards
|
||||
self._guard_on_attribute(guard, "training", GuardBuilder.CONSTANT_MATCH) # type: ignore[arg-type]
|
||||
else:
|
||||
exc.unimplemented_v2(
|
||||
exc.unimplemented(
|
||||
gb_type="Attempted to guard on uninitialized nn.Module",
|
||||
context="",
|
||||
explanation="Attempted to setup an NN_MODULE guard on uninitialized "
|
||||
|
||||
@ -98,8 +98,8 @@ from .exc import (
|
||||
BackendCompilerFailed,
|
||||
exceptions_allowed_to_be_fallback,
|
||||
SkipFrame,
|
||||
unimplemented_v2,
|
||||
unimplemented_v2_with_warning,
|
||||
unimplemented,
|
||||
unimplemented_with_warning,
|
||||
)
|
||||
from .graph_bytecode_inputs import has_user_objects, index_to_bytecode_constructor
|
||||
from .graph_deduplication import apply_graph_deduplication
|
||||
@ -762,7 +762,7 @@ class OutputGraph(OutputGraphCommon):
|
||||
def get_backward_state_proxy(self) -> torch.fx.Proxy:
|
||||
if self.backward_state_proxy is None:
|
||||
if self.export:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="backward_state does not support export",
|
||||
context="",
|
||||
explanation="Compiled autograd doesn't work with `torch.export`.",
|
||||
@ -2403,7 +2403,7 @@ class OutputGraph(OutputGraphCommon):
|
||||
raise BackendCompilerFailed(
|
||||
self.compiler_fn, e, inspect.currentframe()
|
||||
).with_traceback(e.__traceback__) from None
|
||||
unimplemented_v2_with_warning(
|
||||
unimplemented_with_warning(
|
||||
e,
|
||||
self.root_tx.f_code,
|
||||
gb_type="Backend compiler exception",
|
||||
@ -2806,7 +2806,7 @@ def check_pt2_compliant_op(
|
||||
def encountered_non_compliant_op(target: torch._ops.OpOverload, msg: str) -> None:
|
||||
output_graph.non_compliant_ops.add(target)
|
||||
if config.only_allow_pt2_compliant_ops:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered non-PT2-compliant op",
|
||||
context="",
|
||||
explanation=msg + " " + err_epilogue,
|
||||
@ -2848,7 +2848,7 @@ def check_pt2_compliant_op(
|
||||
target._qualified_op_name, *args, **kwargs
|
||||
)
|
||||
except RuntimeError as e:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Error when attempting to resolve op packet",
|
||||
context="",
|
||||
explanation=str(e),
|
||||
@ -3147,7 +3147,7 @@ class SubgraphTracer(fx.Tracer):
|
||||
elif kind == "call_module":
|
||||
if self.parent is not None:
|
||||
# TODO can remove once inline_inbuilt_nn_modules is always True
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Invoking an nn.Module inside a higher order operator",
|
||||
context=f"Higher order op name: {self.source_target}",
|
||||
explanation="This is not supported.",
|
||||
@ -3181,7 +3181,7 @@ class SubgraphTracer(fx.Tracer):
|
||||
elif kind == "call_module":
|
||||
if self.parent is not None:
|
||||
# TODO can remove once inline_inbuilt_nn_modules is always True
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Invoking an nn.Module inside a HigherOrderOperator",
|
||||
context="",
|
||||
explanation="This is not supported.",
|
||||
|
||||
@ -41,7 +41,7 @@ from .bytecode_transformation import (
|
||||
create_instruction,
|
||||
)
|
||||
from .codegen import PyCodegen
|
||||
from .exc import SideEffectsError, unimplemented_v2
|
||||
from .exc import SideEffectsError, unimplemented
|
||||
from .source import GlobalSource, LocalCellSource, Source, TempLocalSource
|
||||
from .utils import is_frozen_dataclass, nn_module_new, object_new
|
||||
from .variables.base import (
|
||||
@ -261,7 +261,7 @@ class SideEffects:
|
||||
assert item.mutation_type is not None
|
||||
if not is_side_effect_safe(item.mutation_type):
|
||||
# TODO plumb HOP information here
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="HigherOrderOperator: Mutating a variable not in the current scope (SideEffects)",
|
||||
context="",
|
||||
explanation="This is not supported.",
|
||||
@ -289,7 +289,7 @@ class SideEffects:
|
||||
assert self.is_attribute_mutation(item)
|
||||
result = self.store_attr_mutations[item][name]
|
||||
if not deleted_ok and isinstance(result, variables.DeletedVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to read a deleted variable",
|
||||
context=f"item: {item}, name: {name}",
|
||||
explanation="",
|
||||
@ -299,7 +299,7 @@ class SideEffects:
|
||||
|
||||
def store_cell(self, cellvar: VariableTracker, value: VariableTracker) -> None:
|
||||
if cellvar.is_immutable():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Write to immutable cell",
|
||||
context=f"cellvar: {cellvar}, value: {value}",
|
||||
explanation="Dynamo doesn't support writing to immutable/sourceless cell variables.",
|
||||
@ -315,7 +315,7 @@ class SideEffects:
|
||||
return self.load_attr(cellvar, "cell_contents", check=False)
|
||||
if cellvar.pre_existing_contents:
|
||||
return cellvar.pre_existing_contents
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Read uninitialized cell",
|
||||
context=str(cellvar),
|
||||
explanation="Attempted to read a cell variable that has not been populated yet.",
|
||||
@ -731,7 +731,7 @@ class SideEffects:
|
||||
cg.clear_tos()
|
||||
var.source = TempLocalSource(cg.tempvars[var])
|
||||
elif isinstance(var, variables.AutogradFunctionContextVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="AutogradFunctionContextVariable escaped Dynamo-traced region",
|
||||
context="",
|
||||
explanation="We cannot reconstruct a torch.autograd.Function's context object.",
|
||||
@ -889,7 +889,7 @@ class SideEffects:
|
||||
isinstance(var.maxlen, variables.ConstantVariable)
|
||||
and var.maxlen.value is None
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Side effect on existing deque with limited maxlen",
|
||||
context="",
|
||||
explanation="This is not supported.",
|
||||
|
||||
@ -97,7 +97,7 @@ from .exc import (
|
||||
get_stack_above_dynamo,
|
||||
ResumePrologueTracingError,
|
||||
StepUnsupported,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
Unsupported,
|
||||
)
|
||||
from .funcname_cache import get_funcname
|
||||
@ -657,7 +657,7 @@ def generic_jump(
|
||||
elif self.should_compile_partial_graph():
|
||||
jump_graph_break(self, inst, value)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Data-dependent assertion failed (cannot compile partial graph)",
|
||||
context=f"value: {value}",
|
||||
explanation="Dynamo has determined when encountering a data-dependent assert failure "
|
||||
@ -696,7 +696,7 @@ def generic_jump(
|
||||
|
||||
result = torch.fx.experimental.symbolic_shapes.expect_true(sym_expr)
|
||||
if not result:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Assertion failed on symbolic shapes",
|
||||
context=str(sym_expr),
|
||||
explanation="",
|
||||
@ -772,7 +772,7 @@ def generic_jump(
|
||||
self.push(value)
|
||||
self.jump(inst)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Data-dependent branching with non-constant __bool__",
|
||||
context=f"method: {x}, result: {result}",
|
||||
explanation="Attempted to perform data-dependent branching on a user-defined "
|
||||
@ -825,7 +825,7 @@ def generic_jump(
|
||||
self.push(value)
|
||||
self.jump(inst)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Data-dependent branching",
|
||||
context=f"attempted to jump with {value}",
|
||||
explanation=_explanation,
|
||||
@ -859,7 +859,7 @@ def break_graph_if_unsupported(
|
||||
# We don't support graph break under GenericContextWrappingVariable,
|
||||
# If there is, we roll back to the checkpoint and fall back.
|
||||
excp.remove_from_stats()
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Graph break under GenericContextWrappingVariable",
|
||||
context=f"Active generic context managers: {self.active_generic_context_managers}",
|
||||
explanation="Attempted to graph break in an active context manager(s) that doesn't support graph breaking.",
|
||||
@ -983,7 +983,7 @@ class BytecodeDispatchTableMeta(type):
|
||||
super().__init__(name, bases, dct) # type: ignore[misc]
|
||||
|
||||
def _missing(opname: str, *args: Any) -> None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Missing bytecode handler",
|
||||
context=f"{opname} with args {args}",
|
||||
explanation=f"Dynamo does not know how to handle the bytecode instruction `{opname}`.",
|
||||
@ -1337,7 +1337,7 @@ class InstructionTranslatorBase(
|
||||
or self.is_tracing_resume_prologue
|
||||
):
|
||||
if isinstance(e, StepUnsupported):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="cannot resume from torch._dynamo.step_unsupported()",
|
||||
context="",
|
||||
explanation="traced torch._dynamo.step_unsupported(), but Dynamo is instructed "
|
||||
@ -1352,7 +1352,7 @@ class InstructionTranslatorBase(
|
||||
if self.current_speculation is None:
|
||||
log.debug("empty checkpoint - cannot resume from graph break")
|
||||
if isinstance(e, StepUnsupported):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch._dynamo.step_unsupported() with empty checkpoint",
|
||||
context="",
|
||||
explanation="traced torch._dynamo.step_unsupported(), but there is no checkpoint "
|
||||
@ -1709,7 +1709,7 @@ class InstructionTranslatorBase(
|
||||
new_name = name.replace(".", "implicit")
|
||||
self.push(self.symbolic_locals[new_name])
|
||||
except KeyError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to read undefined local variable (implicit)",
|
||||
context=f"LOAD_FAST {name}",
|
||||
explanation=f"Could not find an implicit local variable with name `{name}`",
|
||||
@ -1719,7 +1719,7 @@ class InstructionTranslatorBase(
|
||||
],
|
||||
)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to read undefined local variable",
|
||||
context=f"LOAD_FAST {name}",
|
||||
explanation=f"Could not find a local variable with name `{name}`",
|
||||
@ -1824,7 +1824,7 @@ class InstructionTranslatorBase(
|
||||
source, self.symbolic_globals[name]
|
||||
)
|
||||
if isinstance(value, RemovableHandleVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Storing Tensor hook handle in globals",
|
||||
context=name,
|
||||
explanation="This is not supported.",
|
||||
@ -1920,7 +1920,7 @@ class InstructionTranslatorBase(
|
||||
globals=self.f_globals,
|
||||
)
|
||||
except ImportError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Import failure",
|
||||
context=f"module_name: {module_name}, fromlist: {fromlist}, level={level}",
|
||||
explanation="Failure when attempting to import.",
|
||||
@ -1951,7 +1951,7 @@ class InstructionTranslatorBase(
|
||||
# pyrefly: ignore [unbound-name]
|
||||
self.push(PythonModuleVariable(value, source=source))
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Bad import result",
|
||||
# pyrefly: ignore [unbound-name]
|
||||
context=typestr(value),
|
||||
@ -2092,7 +2092,7 @@ class InstructionTranslatorBase(
|
||||
if self._isinstance_exception(val):
|
||||
observed_exception_type = exc.get_dynamo_observed_exception(val.exc_type) # type: ignore[attr-defined, union-attr]
|
||||
raise observed_exception_type(f"raised exception {val}")
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to raise exception",
|
||||
context=str(exc),
|
||||
explanation="Attempted to raise a non-Exception type/value.",
|
||||
@ -2132,7 +2132,7 @@ class InstructionTranslatorBase(
|
||||
tos = self.stack[-1]
|
||||
assert isinstance(tos, ExceptionVariable)
|
||||
if tos.exc_type is StopIteration:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="CLEANUP_THROW with StopIteration",
|
||||
context="",
|
||||
explanation="Received StopIteration when handling generator.throw/close. This is not supported.",
|
||||
@ -2218,7 +2218,7 @@ class InstructionTranslatorBase(
|
||||
curr_exc = self.exn_vt_stack.get_current_exception()
|
||||
dynamo_exc = exc.get_dynamo_observed_exception(curr_exc.python_type())
|
||||
assert isinstance(raised_exception, dynamo_exc) # sanity check
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Observed exception",
|
||||
context=f"raised exception {curr_exc.python_type_name()}({curr_exc.args})", # type: ignore[union-attr]
|
||||
explanation=observed_exn_gb_explanation,
|
||||
@ -2273,7 +2273,7 @@ class InstructionTranslatorBase(
|
||||
# instruction translator.
|
||||
self.stack.clear()
|
||||
if type(self) is InstructionTranslator:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Observed exception (EXCEPT_HANDLER)",
|
||||
context=str(raised_exception),
|
||||
explanation=observed_exn_gb_explanation
|
||||
@ -2411,7 +2411,7 @@ class InstructionTranslatorBase(
|
||||
UserDefinedExceptionObjectVariable,
|
||||
),
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Exception with bad expected type",
|
||||
context=str(expected_exc_types),
|
||||
explanation=f"`except ...` has unsupported type {expected_exc_types}.",
|
||||
@ -2420,7 +2420,7 @@ class InstructionTranslatorBase(
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
if not self._isinstance_exception(exc_instance):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Caught non-Exception value",
|
||||
context=str(exc_instance),
|
||||
explanation=f"Except expects to receive an object of Exception type but received {exc_instance}.",
|
||||
@ -2443,7 +2443,7 @@ class InstructionTranslatorBase(
|
||||
UserDefinedExceptionClassVariable,
|
||||
),
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Exception with non-type expectation",
|
||||
context=str(expected_type),
|
||||
explanation=f"`except ...` expects a non-type: {expected_type}.",
|
||||
@ -2498,7 +2498,7 @@ class InstructionTranslatorBase(
|
||||
kwargsvars = ConstDictVariable({})
|
||||
argsvars = self.pop()
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Variadic function call with bad flags",
|
||||
context=f"flags: {inst.argval}",
|
||||
explanation=f"Attempted to call a variadic function (CALL_FUNCTION_EX) with bad flags {inst.argval}",
|
||||
@ -2536,7 +2536,7 @@ class InstructionTranslatorBase(
|
||||
kwargsvars,
|
||||
ConstDictVariable,
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Variadic function call with bad args/kwargs type",
|
||||
# pyrefly: ignore [unbound-name]
|
||||
context=f"args type: {typestr(argsvars)}, kwargs type: {typestr(kwargsvars)}",
|
||||
@ -2652,7 +2652,7 @@ class InstructionTranslatorBase(
|
||||
|
||||
def store_attr_graph_break(self, inst: Instruction) -> None:
|
||||
if not self.should_compile_partial_graph():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Should not compile partial graph (STORE_ATTR)",
|
||||
context="",
|
||||
explanation="Dynamo has determined when encountering an unsupported "
|
||||
@ -3236,7 +3236,7 @@ class InstructionTranslatorBase(
|
||||
|
||||
def BUILD_SET(self, inst: Instruction) -> None:
|
||||
if config.inject_BUILD_SET_unimplemented_TESTING_ONLY:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="missing BUILD_SET handler",
|
||||
context="",
|
||||
explanation="Missing BUILD_SET bytecode handler (for testing purposes).",
|
||||
@ -3253,7 +3253,7 @@ class InstructionTranslatorBase(
|
||||
try:
|
||||
items.extend(seq.force_unpack_var_sequence(self))
|
||||
except NotImplementedError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to unpack object for BUILD_LIST_UNPACK",
|
||||
context=str(seq),
|
||||
explanation=f"{seq} cannot be unpacked into a list for the BUILD_LIST_UNPACK "
|
||||
@ -3391,7 +3391,7 @@ class InstructionTranslatorBase(
|
||||
elif seq.has_force_unpack_var_sequence(self):
|
||||
val = seq.force_unpack_var_sequence(self)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to unpack object for UNPACK_SEQUENCE",
|
||||
context=str(seq),
|
||||
explanation=f"{seq} cannot be unpacked into a list for the UNPACK_SEQUENCE bytecode "
|
||||
@ -3400,7 +3400,7 @@ class InstructionTranslatorBase(
|
||||
)
|
||||
# pyrefly: ignore [unbound-name]
|
||||
if len(val) != inst.argval:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Length mismatch when unpacking object for UNPACK_SEQUENCE",
|
||||
# pyrefly: ignore [unbound-name]
|
||||
context=f"expected length: {inst.argval}, actual: {len(val)}",
|
||||
@ -3429,7 +3429,7 @@ class InstructionTranslatorBase(
|
||||
for item in reversed(vals_prefix):
|
||||
self.push(item)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to unpack object for UNPACK_EX",
|
||||
context=str(seq),
|
||||
explanation=f"{seq} cannot be unpacked into a list for the UNPACK_EX bytecode.",
|
||||
@ -3439,7 +3439,7 @@ class InstructionTranslatorBase(
|
||||
@break_graph_if_unsupported(push=0)
|
||||
def graph_break_on_leaf_function(self, inst: Instruction) -> None:
|
||||
if self.is_leaf_tracer:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Forced graph break on leaf function",
|
||||
context="",
|
||||
explanation="Forced graph break for nested graph break testing purposes",
|
||||
@ -3545,7 +3545,7 @@ class InstructionTranslatorBase(
|
||||
format_string_parts.append(part.format_string)
|
||||
args.extend(part.sym_args)
|
||||
if set(kwargs.keys()) & set(part.sym_kwargs.keys()):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="BUILD_STRING key conflict",
|
||||
context=f"format_string_parts: {format_string_parts}, kwargs: {kwargs}, part.sym_kwargs: {part.sym_kwargs}",
|
||||
explanation="Failed to build format string due to key conflict",
|
||||
@ -3553,7 +3553,7 @@ class InstructionTranslatorBase(
|
||||
)
|
||||
kwargs.update(part.sym_kwargs)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="BUILD_STRING type error",
|
||||
context=str(part),
|
||||
explanation="Format string part type is not correct - expected constant or format string.",
|
||||
@ -3867,7 +3867,7 @@ class InstructionTranslatorBase(
|
||||
|
||||
@staticmethod
|
||||
def unsupported_ctx_graph_break(ctx: VariableTracker) -> NoReturn:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported context manager",
|
||||
context=f"Attempted SETUP_WITH/BEFORE_WITH/LOAD_SPECIAL on {ctx}",
|
||||
explanation=f"Dynamo does not know how to enter a `{ctx.python_type_name()}` context manager.",
|
||||
@ -3930,7 +3930,7 @@ class InstructionTranslatorBase(
|
||||
|
||||
def LOAD_FAST_CHECK(self, inst: Instruction) -> None:
|
||||
if istype(self.symbolic_locals.get(inst.argval, None), NullVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="LOAD_FAST_CHECK on uninitialized variable",
|
||||
context=inst.argval,
|
||||
explanation=f"Attempted to load uninitialized local variable {inst.argval}",
|
||||
@ -3964,7 +3964,7 @@ class InstructionTranslatorBase(
|
||||
# INTRINSIC_LIST_TO_TUPLE
|
||||
self.push(TupleVariable(self.pop().force_unpack_var_sequence(self)))
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Missing CALL_INTRINSIC_1 handler",
|
||||
context=f"CALL_INTRINSIC_1 operand: {inst.argval}",
|
||||
explanation=f"No handler implemented for CALL_INTRINSIC_1 {inst.argval} instruction.",
|
||||
@ -4561,7 +4561,7 @@ class InstructionTranslator(InstructionTranslatorBase):
|
||||
# if it reaches here, it means Dynamo failed to inline a functorch function
|
||||
f"- torch.func.{name}(fn) requires the function to be inlined by dynamo"
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported functorch tracing attempt",
|
||||
context="",
|
||||
explanation=msg,
|
||||
@ -4669,7 +4669,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
@staticmethod
|
||||
def check_inlineable(func: Any) -> trace_rules.SkipResult:
|
||||
if func.has_self():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Inline attempt with __self__",
|
||||
context=str(func),
|
||||
explanation="Attempted to inline a function with the `__self__` attribute. "
|
||||
@ -4683,7 +4683,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
msg = inspect.getattr_static(
|
||||
func.get_function(), "_torchdynamo_disable_msg", None
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Skip inlining `torch.compiler.disable()`d function",
|
||||
context=str(func.get_function()),
|
||||
explanation=f"Skip inlining function {func.get_function()} since it was wrapped "
|
||||
@ -4719,7 +4719,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
"More graph breaks may occur as a result of attempting to trace into the function.",
|
||||
"Please file an issue to PyTorch.",
|
||||
]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to inline function marked as skipped",
|
||||
context=f"qualname: {fn_qualname}, name: {func.get_name()}, "
|
||||
f"filename: `{func.get_filename()}`, skip reason: {result.reason}",
|
||||
@ -4761,7 +4761,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
|
||||
if result is None:
|
||||
if isinstance(func, SkipFunctionVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to inline function marked as skipped (SkipFunctionVariable)",
|
||||
context=f"Attempted to inline a SkipFunctionVariable {func}",
|
||||
explanation=(
|
||||
@ -4792,7 +4792,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
|
||||
for v in itertools.chain(sub_locals.values()):
|
||||
if not isinstance(v, VariableTracker):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered unconverted argument when attempting to inline",
|
||||
context=f"func: {func}, arg: {v}",
|
||||
explanation="An argument to an inlined function was not successfully converted to a VariableTracker.",
|
||||
@ -4802,7 +4802,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
if code.co_name in ("__setitem__", "__setattr__") and not (
|
||||
args and isinstance(args[0], variables.UserDefinedObjectVariable)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported __setitem__/__setattr__ inline attempt",
|
||||
context=f"code name: {code.co_name}, args: {args}",
|
||||
explanation=f"Attempted to inline {code.co_name} where first argument (self) is not a user-defined object.",
|
||||
@ -5025,7 +5025,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
) -> list[Instruction]:
|
||||
if config.nested_graph_breaks:
|
||||
return super().create_call_resume_at(inst, all_stack_locals_metadata)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Graph break in inlined function",
|
||||
context="",
|
||||
explanation="Graph breaks in an inlined call are not supported.",
|
||||
@ -5106,7 +5106,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
|
||||
else:
|
||||
value = self.pop()
|
||||
if isinstance(value, RemovableHandleVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Storing Tensor hook handle in globals (inline call)",
|
||||
context=inst.argval,
|
||||
explanation="This is not supported.",
|
||||
@ -5173,7 +5173,7 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
|
||||
# lifted the `unimplemented("generator")` in frame conversion. This codepath handles
|
||||
# subgenerator and lines up with this line in Python 3.10
|
||||
# https://github.com/python/cpython/blob/3.10/Python/ceval.c#L2599
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unreachable sub-generator code",
|
||||
context="",
|
||||
explanation="Should only be encountered while implementing generator support.",
|
||||
@ -5231,14 +5231,14 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
|
||||
# lifted the `unimplemented("generator")` in frame conversion. This codepath handles
|
||||
# subgenerator and lines up with this line in Python 3.11
|
||||
# https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2597
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unreachable sub-generator code",
|
||||
context="",
|
||||
explanation="Should only be encountered while implementing generator support.",
|
||||
hints=[],
|
||||
)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="SEND with bad type",
|
||||
context=f"TOS type: {typestr(tos)}",
|
||||
explanation=f"Attempted to SEND with unsupported type {typestr(tos)}.",
|
||||
|
||||
@ -1251,10 +1251,10 @@ def proxy_args_kwargs(args: Any, kwargs: Any) -> tuple[tuple[Any, ...], dict[str
|
||||
proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()}
|
||||
return proxy_args, proxy_kwargs
|
||||
except NotImplementedError as e:
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
from .variables.base import typestr
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to convert args/kwargs to proxy",
|
||||
context=f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}",
|
||||
explanation="Missing `as_proxy()` implementation for some arg/kwarg.",
|
||||
@ -2756,9 +2756,9 @@ def _get_fake_tensor(vt: VariableTracker) -> Any:
|
||||
fake_tensor = vt.as_proxy().node.meta.get("example_value")
|
||||
if not is_fake(fake_tensor):
|
||||
from . import graph_break_hints
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Cannot check Tensor object identity without its fake value",
|
||||
context=str(fake_tensor),
|
||||
explanation="TensorVariable is missing a fake example_value.",
|
||||
@ -2929,11 +2929,11 @@ def wrap_fake_exception(fn: Callable[[], Any]) -> Any:
|
||||
try:
|
||||
return fn()
|
||||
except UnsupportedFakeTensorException as e:
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
msg = f"Encountered exception ({e.reason}) during fake tensor propagation."
|
||||
log.warning(msg)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Fake tensor propagation exception",
|
||||
context=str(e.reason),
|
||||
explanation=msg,
|
||||
@ -3326,11 +3326,11 @@ def extract_fake_example_value(node: torch.fx.Node, required: bool = True) -> An
|
||||
if "example_value" in node.meta and is_fake(node.meta["example_value"]):
|
||||
return node.meta["example_value"]
|
||||
elif required:
|
||||
from torch._dynamo.exc import unimplemented_v2
|
||||
from torch._dynamo.exc import unimplemented
|
||||
|
||||
from . import graph_break_hints
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Missing FakeTensor example value",
|
||||
context=str(node),
|
||||
explanation=f"`FakeTensor` example value was required for {node} but not available.",
|
||||
@ -3385,7 +3385,7 @@ def get_fake_value(
|
||||
|
||||
from .exc import (
|
||||
TorchRuntimeError,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
Unsupported,
|
||||
UserError,
|
||||
UserErrorType,
|
||||
@ -3479,7 +3479,7 @@ def get_fake_value(
|
||||
"Consider wrapping the operator into a PyTorch-understood custom operator "
|
||||
"(see https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html)",
|
||||
]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Data dependent operator",
|
||||
context=str(cause.func),
|
||||
explanation=f"Operator `{cause.func}` has a non-Tensor output "
|
||||
@ -3490,7 +3490,7 @@ def get_fake_value(
|
||||
cause, torch._subclasses.fake_tensor.DynamicOutputShapeException
|
||||
):
|
||||
if not torch._dynamo.config.capture_dynamic_output_shape_ops:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Dynamic shape operator",
|
||||
context=str(cause.func),
|
||||
explanation=f"Operator `{cause.func}`'s output shape depends on input Tensor data.",
|
||||
@ -3500,7 +3500,7 @@ def get_fake_value(
|
||||
],
|
||||
)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Dynamic shape operator (no meta kernel)",
|
||||
context=str(cause.func),
|
||||
explanation=f"Operator `{cause.func}` does not have a meta kernel that supports dynamic output shapes",
|
||||
@ -3524,7 +3524,7 @@ def get_fake_value(
|
||||
f"module `{module}` and you may need to `import {module}`"
|
||||
f"({ctx}), otherwise "
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Operator does not support running with fake tensors",
|
||||
context=f"unsupported operator: {cause.func}",
|
||||
explanation="",
|
||||
@ -3545,7 +3545,7 @@ def get_fake_value(
|
||||
elif isinstance(cause, ValueRangeError):
|
||||
raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e
|
||||
elif isinstance(cause, TypeError) and "argument" in str(cause):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="TypeError when making fake tensor call",
|
||||
context=f"TypeError {node.target}: {cause}",
|
||||
explanation="",
|
||||
@ -3623,9 +3623,9 @@ def run_node(
|
||||
return node.target(*args, **kwargs) # type: ignore[operator]
|
||||
elif op == "call_method":
|
||||
if not hasattr(args[0], node.target): # type: ignore[arg-type]
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Missing attribute when running call_method node",
|
||||
context="",
|
||||
explanation=make_error_message("attribute not defined"),
|
||||
@ -3643,7 +3643,7 @@ def run_node(
|
||||
|
||||
except (NotImplementedError, UnsupportedFakeTensorException) as e:
|
||||
# NB: mimic how wrap_fake_exception does it
|
||||
from .exc import unimplemented_v2
|
||||
from .exc import unimplemented
|
||||
|
||||
hints = []
|
||||
if isinstance(e, NotImplementedError):
|
||||
@ -3651,7 +3651,7 @@ def run_node(
|
||||
"If the op is a PyTorch op, please file an issue to PyTorch.",
|
||||
]
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="NotImplementedError/UnsupportedFakeTensorException when running FX node",
|
||||
context="",
|
||||
explanation=make_error_message(e),
|
||||
|
||||
@ -23,7 +23,7 @@ from torch.fx.proxy import Node
|
||||
|
||||
from .. import graph_break_hints, variables
|
||||
from ..current_scope_id import current_scope_id
|
||||
from ..exc import raise_observed_exception, unimplemented_v2
|
||||
from ..exc import raise_observed_exception, unimplemented
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
from ..source import AttrSource, Source
|
||||
from ..utils import cmp_name_to_op_mapping, istype
|
||||
@ -90,7 +90,7 @@ class MutationType:
|
||||
elif typ is SourceType.New:
|
||||
self.scope = current_scope_id()
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported SourceType",
|
||||
context=f"MutationType.__init__ {self} {typ}",
|
||||
explanation=f"Dynamo does not support the type `{typ}`",
|
||||
@ -349,7 +349,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
try:
|
||||
return self.as_python_constant()
|
||||
except NotImplementedError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Not a Python constant",
|
||||
context=f"guard_as_python_constant {self}",
|
||||
explanation=f"Failed to convert {self} into a Python constant.",
|
||||
@ -444,7 +444,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
fn(v)
|
||||
|
||||
def inspect_parameter_names(self) -> list[str]:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported inspect call",
|
||||
context=f"inspect_parameter_names {self}",
|
||||
explanation=f"Dynamo does not know how to trace the function `{self.debug_repr()}`",
|
||||
@ -452,7 +452,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
)
|
||||
|
||||
def call_obj_hasattr(self, tx: Any, name: str) -> "VariableTracker":
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported hasattr call",
|
||||
context=f"call_obj_hasattr {self} {name}",
|
||||
explanation=f"Dynamo does not know how to trace the function `{self.debug_repr()}`",
|
||||
@ -468,7 +468,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
args: Sequence["VariableTracker"],
|
||||
kwargs: dict[str, "VariableTracker"],
|
||||
) -> "VariableTracker":
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported function call",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation=f"Dynamo does not know how to trace the function `{self.debug_repr()}`",
|
||||
@ -514,7 +514,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
or tx.output.side_effects.has_pending_mutation(self)
|
||||
or tx.output.side_effects.has_pending_mutation(other)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Builtin `operator.*` comparison with constant `self` failed",
|
||||
context=f"call_method {self} {name} {args} {kwargs}",
|
||||
explanation=f"Failed to compare {self} with {other}, "
|
||||
@ -560,7 +560,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
"(2) fix any graph breaks in the function above the comprehension, (3) wrap the comprehension in a "
|
||||
"function, or (4) use Python 3.12+."
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported method call",
|
||||
context=f"call_method {self} {name} {args} {kwargs}",
|
||||
explanation=f"Dynamo does not know how to trace method `{name}` of class `{self.python_type_name()}`",
|
||||
@ -583,7 +583,7 @@ class VariableTracker(metaclass=VariableTrackerMeta):
|
||||
return True
|
||||
|
||||
def next_variable(self, tx: Any) -> "VariableTracker":
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported next() call",
|
||||
context=f"next({self})",
|
||||
explanation=f"Dynamo does not know how to trace calling `next()` on variable `{self}`.",
|
||||
|
||||
@ -87,7 +87,7 @@ from torch.utils.weak import TensorWeakRef
|
||||
|
||||
from .. import config, graph_break_hints, mutation_guard, replay_record, trace_rules
|
||||
from ..device_interface import get_registered_device_interfaces
|
||||
from ..exc import InternalTorchDynamoError, raise_observed_exception, unimplemented_v2
|
||||
from ..exc import InternalTorchDynamoError, raise_observed_exception, unimplemented
|
||||
from ..guards import GuardBuilder, install_guard, make_dupe_guard
|
||||
from ..pgo import (
|
||||
auto_dynamic,
|
||||
@ -567,7 +567,7 @@ class VariableBuilder:
|
||||
# Our current infra requires the hook to be registered and removed in
|
||||
# the same frame. So graph break.
|
||||
# Related test - PYTORCH_TEST_WITH_DYNAMO=1 python test/test_autograd.py -k TestAutograd.test_hooks
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to represent unregistered RemovableHandle",
|
||||
context="",
|
||||
explanation="Dynamo attempted to build a representation of a torch.utils.hooks.RemovableHandle, "
|
||||
@ -589,7 +589,7 @@ class VariableBuilder:
|
||||
all_const = all(ConstantVariable.is_literal(k) for k in value)
|
||||
|
||||
if not all_const:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="non-const keys in mappingproxy",
|
||||
context=f"non-const keys: {[k for k in value.keys() if not ConstantVariable.is_literal(k)]}", # noqa: SIM118
|
||||
explanation="Dynamo expects mappingproxy keys to be constants.",
|
||||
@ -807,7 +807,7 @@ class VariableBuilder:
|
||||
return var
|
||||
elif istype(value, set):
|
||||
if any(isinstance(x, torch.Tensor) for x in value):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap a set with tensors",
|
||||
context="Python set containing torch.Tensor elements",
|
||||
explanation=(
|
||||
@ -888,7 +888,7 @@ class VariableBuilder:
|
||||
keywords_source = AttrSource(self.get_source(), "keywords")
|
||||
for k, v in value.keywords.items():
|
||||
if not ConstantVariable.is_literal(k):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="functools.partial() with non-literal keyword",
|
||||
context=f"non-literal keyword: {k}",
|
||||
explanation="functools.partial() expects literal/string keywords",
|
||||
@ -1039,7 +1039,7 @@ class VariableBuilder:
|
||||
return self.wrap_unspecialized_primitive(value)
|
||||
elif isinstance(value, HigherOrderOperator):
|
||||
if value is torch._higher_order_ops.invoke_subgraph:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap torch._higher_order_ops.invoke_subgraph",
|
||||
context="",
|
||||
explanation="Directly using invoke_subgraph is not supported. Use nested_compile_region",
|
||||
@ -1202,7 +1202,7 @@ class VariableBuilder:
|
||||
# this is automatically done by evaluating the guards once but this
|
||||
# will cause data-dependent error when we evaluate the outer unbacked symints.
|
||||
# The test case that triggers this graph break is test_cond_unbacked_symint_closure
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap unbacked SymInt",
|
||||
context="",
|
||||
explanation="Unbacked SymInt input is not supported yet.",
|
||||
@ -1616,7 +1616,7 @@ class VariableBuilder:
|
||||
)
|
||||
return DictKeySetVariable(items, source=self.source)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="non-const keys in dict_keys",
|
||||
context=f"non-const keys: {[k for k in value if not ConstantVariable.is_literal(k)]}",
|
||||
explanation="Dynamo expects dict_keys keys to be constants.",
|
||||
@ -1665,7 +1665,7 @@ class VariableBuilder:
|
||||
def wrap_listlike(self, value: Union[tuple, list, odict_values, NamedTuple]):
|
||||
for item in value:
|
||||
if item is value:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="list elements are pointing to the list itself",
|
||||
context="",
|
||||
explanation="Dynamo does not support lists whose items reference to itself",
|
||||
@ -1834,7 +1834,7 @@ class VariableBuilder:
|
||||
from ..eval_frame import OptimizedModule
|
||||
|
||||
if len(value.__dict__) == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Uninitialized nn.Module",
|
||||
context=typestr(value),
|
||||
explanation=f"Attempted to trace an uninitialized nn.Module of type {typestr(value)}.",
|
||||
@ -1866,7 +1866,7 @@ class VariableBuilder:
|
||||
isinstance(value, (torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM))
|
||||
and not config.allow_rnn
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap RNN, GRU, or LSTM",
|
||||
context=str(value),
|
||||
explanation="Dynamo does not support RNN, GRU, or LSTM.",
|
||||
@ -1880,7 +1880,7 @@ class VariableBuilder:
|
||||
# we can't do this assert inside FSDP constructor,
|
||||
# since we don't know yet whether dynamo will be used
|
||||
if not getattr(value, "_fsdp_use_orig_params", False):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="FSDP with use_orig_params=False",
|
||||
context="",
|
||||
explanation="Dynamo only supports FSDP with use_orig_params=True",
|
||||
@ -2145,7 +2145,7 @@ class VariableBuilder:
|
||||
and value.is_nested
|
||||
and not isinstance(value, torch.nested._internal.nested_tensor.NestedTensor)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap strided NestedTensor",
|
||||
context="",
|
||||
explanation="torch.compile does not support strided NestedTensor",
|
||||
@ -2161,7 +2161,7 @@ class VariableBuilder:
|
||||
# A hot fix for sparse tensors + torch.compile. Support for
|
||||
# export + sparsity is being added but we need to create
|
||||
# SPARSE_TENSOR_GUARDS for guards to work properly.
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap sparse Tensor",
|
||||
context="",
|
||||
explanation="torch.compile does not support sparse Tensors",
|
||||
@ -2173,7 +2173,7 @@ class VariableBuilder:
|
||||
and safe_grad(value) is not None
|
||||
and value.dtype != safe_grad(value).dtype
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="dtype mismatch between tensor and its gradient",
|
||||
context=f"tensor dtype: {value.dtype}; grad dtype: {safe_grad(value).dtype}",
|
||||
explanation="Inconsistent dtype between tensor and its gradient. "
|
||||
@ -2294,7 +2294,7 @@ class VariableBuilder:
|
||||
tensor_value = clone_preserve_strides(tensor_value)
|
||||
except NotImplementedError as e:
|
||||
# failed to convert to tensor, graph break
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="failed to convert numpy.ndarray to Tensor",
|
||||
context=str(value),
|
||||
explanation="Exception encountered when attempting to convert numpy.ndarray to Tensor",
|
||||
@ -2673,7 +2673,7 @@ def _dataclasses_fields_lambda(obj):
|
||||
if isinstance(obj, UserDefinedObjectVariable):
|
||||
value = obj.value
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="dataclass fields failure",
|
||||
context=f"obj: {obj}; variable type: {type(obj)}",
|
||||
explanation=f"Dataclass fields handling fails for {obj}. Expected it to be a user-defined object.",
|
||||
@ -2901,7 +2901,7 @@ def handle_traced_output(example_value, tx, proxy, options, subclass_type, targe
|
||||
if is_sparse_any(example_value) and (
|
||||
not tx.export or not config.capture_sparse_compute
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap sparse Tensor with VariableTracker",
|
||||
context=str(example_value),
|
||||
explanation="torch.compile does not support sparse Tensors with VariableTracker",
|
||||
@ -3108,7 +3108,7 @@ def handle_traced_output(example_value, tx, proxy, options, subclass_type, targe
|
||||
set_example_value(proxy.node, example_value)
|
||||
return ConstantVariable.create(example_value, **options)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.* op returned non-Tensor",
|
||||
context=f"example_value type: {typestr(example_value)}; op: {proxy.node.op}; target: {proxy.node.target}",
|
||||
explanation="torch.* ops that return a non-Tensor cannot be traced into the Dynamo FX graph output",
|
||||
@ -3308,7 +3308,7 @@ def _automatic_dynamic(
|
||||
if e.is_nested and not isinstance(
|
||||
e, torch.nested._internal.nested_tensor.NestedTensor
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered strided NestedTensor in automatic dynamic dim determination",
|
||||
context="",
|
||||
explanation="torch.compile does not support strided NestedTensor",
|
||||
@ -3770,7 +3770,7 @@ class SourcelessBuilder:
|
||||
):
|
||||
proxy = tx.output.bound_symbols[value.node.expr]
|
||||
return SymNodeVariable.create(tx, proxy)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unexpected type in sourceless builder",
|
||||
context=f"{value_type.__module__}.{value_type.__qualname__}",
|
||||
explanation=f"SourcelessBuilder.create does not know how to wrap {value_type}",
|
||||
|
||||
@ -45,7 +45,7 @@ from ..exc import (
|
||||
ObservedAttributeError,
|
||||
ObservedUserStopIteration,
|
||||
raise_observed_exception,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
Unsupported,
|
||||
UserError,
|
||||
UserErrorType,
|
||||
@ -1034,7 +1034,7 @@ class BuiltinVariable(VariableTracker):
|
||||
and isinstance(x.value, str)
|
||||
for x in args
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="assert with non-string message",
|
||||
context=str(args),
|
||||
explanation="Dynamo only supports asserts with string messages",
|
||||
@ -1104,7 +1104,7 @@ class BuiltinVariable(VariableTracker):
|
||||
self_handler,
|
||||
e,
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="invalid call to builtin op handler",
|
||||
context=f"invalid args to {self_handler}: {args} {kwargs}",
|
||||
explanation=f"Encountered TypeError when trying to handle op {fn.__name__}",
|
||||
@ -1145,7 +1145,7 @@ class BuiltinVariable(VariableTracker):
|
||||
args=list(map(ConstantVariable.create, exc.args)),
|
||||
)
|
||||
except AsPythonConstantNotImplementedError as exc:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="constant fold exception",
|
||||
context=f"attempted to run function {fn} with arguments {args}",
|
||||
explanation="Encountered exception when attempting to constant fold.",
|
||||
@ -1172,7 +1172,7 @@ class BuiltinVariable(VariableTracker):
|
||||
},
|
||||
)
|
||||
except AsPythonConstantNotImplementedError as exc:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="constant fold exception",
|
||||
context=f"attempted to run function {fn} with arguments {args}",
|
||||
explanation="Encountered exception when attempting to constant fold.",
|
||||
@ -1191,9 +1191,9 @@ class BuiltinVariable(VariableTracker):
|
||||
|
||||
handlers.append(constant_fold_handler)
|
||||
|
||||
def call_unimplemented_v2(args: Sequence[VariableTracker]) -> None:
|
||||
def call_unimplemented(args: Sequence[VariableTracker]) -> None:
|
||||
real_arg_types = [arg.python_type_name() for arg in args]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to trace builtin operator",
|
||||
context=f"builtin {fn.__name__} {arg_types} {has_kwargs}",
|
||||
explanation=f"Dynamo does not know how to trace builtin operator `{fn.__name__}` "
|
||||
@ -1208,7 +1208,7 @@ class BuiltinVariable(VariableTracker):
|
||||
)
|
||||
|
||||
if len(handlers) == 0:
|
||||
return lambda tx, args, kwargs: call_unimplemented_v2(args)
|
||||
return lambda tx, args, kwargs: call_unimplemented(args)
|
||||
elif len(handlers) == 1:
|
||||
(handler,) = handlers
|
||||
|
||||
@ -1220,7 +1220,7 @@ class BuiltinVariable(VariableTracker):
|
||||
rv = handler(tx, args, kwargs)
|
||||
if rv:
|
||||
return rv
|
||||
call_unimplemented_v2(args)
|
||||
call_unimplemented(args)
|
||||
return rv
|
||||
|
||||
else:
|
||||
@ -1235,14 +1235,14 @@ class BuiltinVariable(VariableTracker):
|
||||
rv = fn(tx, args, kwargs)
|
||||
if rv:
|
||||
return rv
|
||||
call_unimplemented_v2(args)
|
||||
call_unimplemented(args)
|
||||
return rv
|
||||
|
||||
return builtin_dispatch
|
||||
|
||||
def call_vars(self, tx: "InstructionTranslator", *args: Any) -> VariableTracker:
|
||||
if len(args) == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unimplemented builtin op vars() with no arguments",
|
||||
context=f"vars: {self} {args}",
|
||||
explanation=f"Dynamo does not know how to trace builtin operator {self.fn} with no arguments",
|
||||
@ -1394,7 +1394,7 @@ class BuiltinVariable(VariableTracker):
|
||||
return wrap_fx_proxy(tx, proxy)
|
||||
|
||||
except NotImplementedError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unimplemented builtin op on tensor arguments",
|
||||
context=f"partial tensor op: {self} {args} {kwargs}",
|
||||
explanation=f"Dynamo does not know how to trace builtin operator {self.fn} with tensor arguments",
|
||||
@ -1622,7 +1622,7 @@ class BuiltinVariable(VariableTracker):
|
||||
# account for __repr__ functions when __str__ is absent
|
||||
str_method = arg.value.__repr__
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="failed to call str() on user defined object",
|
||||
context=str(arg),
|
||||
explanation="User defined object has no __str__ or __repr__ method",
|
||||
@ -1639,7 +1639,7 @@ class BuiltinVariable(VariableTracker):
|
||||
return None
|
||||
# pyrefly: ignore [unbound-name]
|
||||
elif is_wrapper_or_member_descriptor(str_method):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to a str() method implemented in C/C++",
|
||||
context="",
|
||||
explanation=f"{type(arg.value)} has a C/C++ based str method. This is not supported.",
|
||||
@ -1819,7 +1819,7 @@ class BuiltinVariable(VariableTracker):
|
||||
self, tx: "InstructionTranslator", arg: VariableTracker
|
||||
) -> VariableTracker:
|
||||
if isinstance(arg, variables.TensorVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported index(Tensor)",
|
||||
context="",
|
||||
explanation="Dynamo does not support tracing builtin index() on a Tensor",
|
||||
@ -2044,7 +2044,7 @@ class BuiltinVariable(VariableTracker):
|
||||
if len(args) == 2:
|
||||
return args[1]
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="bad args to builtin cast()",
|
||||
context=f"got args {args} {kwargs}",
|
||||
explanation="Dynamo expects exactly 2 args to builtin cast().",
|
||||
@ -2103,7 +2103,7 @@ class BuiltinVariable(VariableTracker):
|
||||
**kwargs: VariableTracker,
|
||||
) -> VariableTracker:
|
||||
if user_cls not in {dict, OrderedDict, defaultdict}:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported dict type for fromkeys()",
|
||||
context=f"{user_cls.__name__}.fromkeys(): {args} {kwargs}",
|
||||
explanation=f"Failed to call {user_cls.__name__}.fromkeys() because "
|
||||
@ -2167,7 +2167,7 @@ class BuiltinVariable(VariableTracker):
|
||||
mutation_type=ValueMutationNew(),
|
||||
)
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="failed to call dict.fromkeys()",
|
||||
context=f"{user_cls.__name__}.fromkeys(): {args} {kwargs}",
|
||||
explanation=f"Failed to call {user_cls.__name__}.fromkeys() because "
|
||||
@ -2301,7 +2301,7 @@ class BuiltinVariable(VariableTracker):
|
||||
try:
|
||||
arg_type = arg.python_type()
|
||||
except NotImplementedError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="builtin isinstance() cannot determine type of argument",
|
||||
context=f"isinstance({arg}, {isinstance_type_var})",
|
||||
explanation=f"Dynamo doesn't have a rule to determine the type of argument {arg}",
|
||||
@ -2344,7 +2344,7 @@ class BuiltinVariable(VariableTracker):
|
||||
if isinstance(arg, variables.UserDefinedObjectVariable) and isinstance(
|
||||
arg.value, types.MemberDescriptorType
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="isinstance() called on user defined object with C extensions",
|
||||
context=f"isinstance({arg}, {isinstance_type})",
|
||||
explanation="User-defined object with C extensions can have torch.Tensor "
|
||||
@ -2412,7 +2412,7 @@ class BuiltinVariable(VariableTracker):
|
||||
left_ty_py = left_ty.as_python_constant()
|
||||
right_ty_py = right_ty.as_python_constant()
|
||||
except NotImplementedError:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="issubclass() with non-constant arguments",
|
||||
context=f"issubclass({left_ty}, {right_ty})",
|
||||
explanation="issubclass() with non-constant arguments not supported.",
|
||||
@ -2505,7 +2505,7 @@ class BuiltinVariable(VariableTracker):
|
||||
default: VariableTracker | None = None,
|
||||
) -> VariableTracker | None:
|
||||
if not name_var.is_python_constant():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="getattr() with non-constant name argument",
|
||||
context=f"getattr({obj}, {name_var}, {default})",
|
||||
explanation="getattr() with non-constant name argument is not supported",
|
||||
@ -2533,7 +2533,7 @@ class BuiltinVariable(VariableTracker):
|
||||
and obj.is_state_mutated
|
||||
and tx.output.side_effects.has_pending_mutation(obj)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="getattr() on nn.Module with pending mutation",
|
||||
context=f"getattr({obj}, {name}, {default})",
|
||||
explanation="Intentionally graph breaking on getattr() on a nn.Module "
|
||||
@ -2598,7 +2598,7 @@ class BuiltinVariable(VariableTracker):
|
||||
"assertWarns",
|
||||
)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to trace unittest method",
|
||||
context=f"function: unittest.TestCase.{name}",
|
||||
explanation=f"Dynamo does not know how to trace unittest method `{name}` ",
|
||||
@ -2614,7 +2614,7 @@ class BuiltinVariable(VariableTracker):
|
||||
and is_sparse_any(fake_val)
|
||||
and (not tx.export or not config.capture_sparse_compute)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to wrap sparse Tensor",
|
||||
context="",
|
||||
explanation="torch.compile does not support sparse Tensors",
|
||||
@ -2691,7 +2691,7 @@ class BuiltinVariable(VariableTracker):
|
||||
# Some special handling for tensor attributes.
|
||||
if name == "requires_grad":
|
||||
# TODO(voz): Make it work properly
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="setattr() on Tensor.requires_grad",
|
||||
context=f"setattr({obj}, {name}, {val})",
|
||||
explanation="setattr() on Tensor.requires_grad not supported. "
|
||||
@ -2703,7 +2703,7 @@ class BuiltinVariable(VariableTracker):
|
||||
# See comments on `test_set_data_on_scoped_tensor` for plans
|
||||
# to support this.
|
||||
if obj.source is None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to mutate tensor data attribute",
|
||||
context=f"setattr({obj}, {name}, {val})",
|
||||
explanation="Dyanmo only supports mutating `.data`"
|
||||
@ -2714,7 +2714,7 @@ class BuiltinVariable(VariableTracker):
|
||||
],
|
||||
)
|
||||
elif obj.dtype != val.dtype: # type: ignore[attr-defined]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to mutate tensor data attribute to different dtype",
|
||||
context=f"setattr({obj}, {name}, {val})",
|
||||
explanation="Dyanmo only supports mutating `.data`"
|
||||
@ -2780,7 +2780,7 @@ class BuiltinVariable(VariableTracker):
|
||||
# Attribute like `torch.Tensor.real` has special setters we
|
||||
# don't yet support; it's not as simple adding an entry to
|
||||
# the side effect mapping.
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to set tensor attribute",
|
||||
context=f"setattr({obj}, {name}, {val})",
|
||||
explanation="Dyanmo doesn't support setting these tensor attributes",
|
||||
@ -2940,7 +2940,7 @@ class BuiltinVariable(VariableTracker):
|
||||
elif istype(args[0], variables.FunctoolsPartialVariable):
|
||||
return variables.ConstantVariable.create(id(args[0].fake_value))
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="id() with unsupported args",
|
||||
context=str(args),
|
||||
explanation=f"Dynamo doesn't know how to trace id() call with args {args}",
|
||||
@ -2954,7 +2954,7 @@ class BuiltinVariable(VariableTracker):
|
||||
def call_deepcopy(
|
||||
self, tx: "InstructionTranslator", x: VariableTracker
|
||||
) -> VariableTracker:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="copy.deepcopy()",
|
||||
context=f"copy.deepcopy({x})",
|
||||
explanation="Dynamo does not support copy.deepcopy()",
|
||||
@ -2985,7 +2985,7 @@ class BuiltinVariable(VariableTracker):
|
||||
return ConstantVariable.create(not is_result)
|
||||
|
||||
if op not in supported_tensor_comparison_op_values:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported Tensor comparison op",
|
||||
context=f"{op.__name__}({left}, {right})",
|
||||
explanation=f"Dynamo does not support the comparison op {op.__name__} "
|
||||
@ -3002,7 +3002,7 @@ class BuiltinVariable(VariableTracker):
|
||||
torch.broadcast_shapes(left.size, right.size)
|
||||
except RuntimeError:
|
||||
# not broadcastable, can't be compared
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="failed to broadcast when attempting Tensor comparison op",
|
||||
context=f"{op.__name__}({left}, {right})",
|
||||
explanation=f"Dynamo was unable to broad cast the arguments {left}, {right} "
|
||||
@ -3027,7 +3027,7 @@ class BuiltinVariable(VariableTracker):
|
||||
op = self.fn
|
||||
|
||||
if op not in supported_tensor_comparison_op_values:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported SymNode comparison op",
|
||||
context=f"{op.__name__}({left}, {right})",
|
||||
explanation=f"Dynamo does not support the comparison op {op.__name__} "
|
||||
|
||||
@ -14,7 +14,7 @@ import torch
|
||||
from torch._dynamo.source import AttrSource, GetItemSource
|
||||
|
||||
from .. import graph_break_hints, variables
|
||||
from ..exc import raise_observed_exception, unimplemented_v2
|
||||
from ..exc import raise_observed_exception, unimplemented
|
||||
from ..utils import (
|
||||
cmp_name_to_op_mapping,
|
||||
common_constant_types,
|
||||
@ -292,7 +292,7 @@ class EnumVariable(VariableTracker):
|
||||
for member in list(cls_type):
|
||||
if member.value == value_vt.as_python_constant():
|
||||
return cls(member, **options)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Failed to construct Enum variable",
|
||||
context=f"value: {value_vt}, allowed enum values: {list(cls_type)}",
|
||||
explanation="Attempted to construct an Enum value that is non-constant (e.g. int, string) "
|
||||
|
||||
@ -34,7 +34,7 @@ from ..bytecode_transformation import (
|
||||
create_instruction,
|
||||
create_setup_with,
|
||||
)
|
||||
from ..exc import unimplemented_v2
|
||||
from ..exc import unimplemented
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
from ..source import AttrSource, GlobalStateSource
|
||||
from ..utils import _get_error_on_graph_break, _set_error_on_graph_break
|
||||
@ -1089,7 +1089,7 @@ class ProfilerContextVariable(ContextWrappingVariable):
|
||||
return "nullcontext"
|
||||
|
||||
def reconstruct(self, cg: "PyCodegen") -> None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.profiler object escaped from compiled region",
|
||||
context=str(self),
|
||||
explanation="Dynamo doesn't support compiling a region that returns a torch.profiler context manager.",
|
||||
@ -1161,7 +1161,7 @@ class PreserveVersionContextVariable(ContextWrappingVariable):
|
||||
).call_function(tx, [self.tensors, self.prev_versions], {})
|
||||
|
||||
def reconstruct(self, codegen: "PyCodegen") -> None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.autograd._unsafe_preserve_version_counter escaped from compiled region",
|
||||
context=str(self),
|
||||
explanation=(
|
||||
@ -1376,7 +1376,7 @@ class FxTracebackAnnotateVariable(ContextWrappingVariable):
|
||||
return "annotate"
|
||||
|
||||
def reconstruct_type(self, codegen: "PyCodegen") -> None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.fx.traceback.annotate escaped from compiled region",
|
||||
context=str(self),
|
||||
explanation="Dynamo doesn't support graph break on torch.fx.traceback.annotate.",
|
||||
@ -1467,7 +1467,7 @@ class WithEnterFunctionVariable(VariableTracker):
|
||||
type_str = f"{self.ctx.module_name()}.{self.ctx.fn_name()}"
|
||||
except NotImplementedError:
|
||||
type_str = str(type(self.ctx))
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to reconstruct context manager's __enter__ method",
|
||||
context=str(self.ctx),
|
||||
explanation=f"Attempted to reconstruct context manager {type_str} while tracing `with ...:`",
|
||||
|
||||
@ -30,7 +30,7 @@ from torch._subclasses.fake_tensor import is_fake
|
||||
|
||||
from .. import graph_break_hints, polyfills, variables
|
||||
from ..bytecode_transformation import create_call_function, create_instruction
|
||||
from ..exc import raise_observed_exception, unimplemented_v2
|
||||
from ..exc import raise_observed_exception, unimplemented
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
from ..source import is_constant_source, is_from_local_source
|
||||
from ..utils import (
|
||||
@ -377,7 +377,7 @@ class ConstDictVariable(VariableTracker):
|
||||
key = ConstDictVariable._HashableTracker(arg)
|
||||
if key not in self.items:
|
||||
msg = f"Dictionary key {arg.value} not found during tracing" # type: ignore[attr-defined]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="key not found in dict",
|
||||
context=f"Key {arg.value}", # type: ignore[attr-defined]
|
||||
explanation=msg,
|
||||
@ -819,7 +819,7 @@ class ConstDictVariable(VariableTracker):
|
||||
return ConstantVariable.create(False)
|
||||
|
||||
msg = f"hasattr on {self.user_cls} is not supported"
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported hasattr operation",
|
||||
context=f"Class {self.user_cls}",
|
||||
explanation=msg,
|
||||
@ -854,7 +854,7 @@ class MappingProxyVariable(VariableTracker):
|
||||
f"Preexisting MappingProxyVariable (source: {self.source}) cannot be reconstructed "
|
||||
"because the connection to the original dict will be lost."
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="mapping proxy cannot be reconstructed",
|
||||
context=f"Source: {self.source}",
|
||||
explanation=msg,
|
||||
@ -892,7 +892,7 @@ class MappingProxyVariable(VariableTracker):
|
||||
"are trying to access a proxy object."
|
||||
)
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="mapping proxy affected by dictionary mutation",
|
||||
context=f"Source: {self.source}, Dict mutation detected",
|
||||
explanation=msg,
|
||||
|
||||
@ -29,7 +29,7 @@ from torch.fx.experimental._backward_state import BackwardState
|
||||
from .. import compiled_autograd, variables
|
||||
from .._trace_wrapped_higher_order_op import trace_wrapped
|
||||
from ..bytecode_transformation import create_call_function
|
||||
from ..exc import unimplemented_v2
|
||||
from ..exc import unimplemented
|
||||
from ..external_utils import call_module_hooks_from_backward_state
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
from ..source import AttrSource
|
||||
@ -57,7 +57,7 @@ class DistributedVariable(VariableTracker):
|
||||
def __init__(self, value: Any, **kwargs: Any) -> None:
|
||||
super().__init__(**kwargs)
|
||||
if not DistributedVariable.is_available():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.distributed package is not available!",
|
||||
context="",
|
||||
explanation="The PyTorch package doesn't include torch.distributed when building from source.",
|
||||
@ -212,7 +212,7 @@ class PlacementVariable(DistributedVariable):
|
||||
try:
|
||||
value_type = type(self.value)
|
||||
if inspect.getattr_static(value_type, "__getattr__", None) is not None:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Placement with custom __getattr__ not supported",
|
||||
context=f"{value_type.__name__} with custom __getattr__",
|
||||
explanation="Dynamo does not support Placement types with custom __getattr__ methods",
|
||||
@ -394,7 +394,7 @@ class BackwardHookVariable(VariableTracker):
|
||||
user_pre_hooks: VariableTracker,
|
||||
) -> "BackwardHookVariable":
|
||||
if not compiled_autograd.compiled_autograd_enabled:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Module-level backwards hooks require compiled autograd.",
|
||||
context="",
|
||||
explanation="",
|
||||
|
||||
@ -51,7 +51,7 @@ from ..exc import (
|
||||
raise_observed_exception,
|
||||
SkipFrame,
|
||||
StepUnsupported,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
Unsupported,
|
||||
)
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
@ -422,7 +422,7 @@ class UserFunctionVariable(BaseUserFunctionVariable):
|
||||
# TODO putting this here to avoid duplication, because we could hit this
|
||||
# from several paths (e.g., SuperVariable or `var_getattr`s).
|
||||
if not isinstance(fn, (types.FunctionType, torch.jit.ScriptFunction)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="can't handle functions not implemented in python ",
|
||||
context=f"{fn}",
|
||||
explanation="Dynamo can only handle functions defined in python",
|
||||
@ -583,7 +583,7 @@ class UserFunctionVariable(BaseUserFunctionVariable):
|
||||
if not isinstance(fn_var, BaseUserFunctionVariable):
|
||||
typ = fn_var.python_type()
|
||||
msg = f"`nonstrict_trace` expects a callable, but got value of type <{typ.__name__}>"
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="TypeError from user code",
|
||||
context=f"call_function({self.value}, {args}, {kwargs})", # type: ignore[attr-defined]
|
||||
explanation=msg,
|
||||
@ -595,7 +595,7 @@ class UserFunctionVariable(BaseUserFunctionVariable):
|
||||
if not isinstance(fn_var, UserFunctionVariable):
|
||||
fn_name = fn_var.get_name()
|
||||
msg = f"Applying `nonstrict_trace` to function <{fn_name}>; however, `nonstrict_trace` currently requires the function to be defined outside `torch.compile` region." # noqa: B950
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Limitation of `nonstrict_trace",
|
||||
context=f"{self}",
|
||||
explanation=msg,
|
||||
@ -1066,7 +1066,7 @@ class LocalGeneratorFunctionVariable(BaseUserFunctionVariable):
|
||||
kwargs: dict[str, VariableTracker],
|
||||
) -> VariableTracker:
|
||||
if not is_generator(self.vt.get_code()): # type: ignore[attr-defined]
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="non-generator contextlib.contextmanager",
|
||||
context=str(self.vt.get_code()), # type: ignore[attr-defined]
|
||||
explanation="Cannot compile function decorated with `@contextlib.contextmanager` that is not a generator"
|
||||
@ -1617,7 +1617,7 @@ class SkipFunctionVariable(VariableTracker):
|
||||
) -> VariableTracker:
|
||||
if inspect.getattr_static(self.value, "_torchdynamo_disable", False):
|
||||
msg = inspect.getattr_static(self.value, "_torchdynamo_disable_msg", None)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Skip calling `torch.compiler.disable()`d function",
|
||||
context=str(self.value),
|
||||
explanation=f"Skip calling function `{self.value}` since it was wrapped "
|
||||
@ -1630,7 +1630,7 @@ class SkipFunctionVariable(VariableTracker):
|
||||
graph_break_msg = kwargs.get("msg")
|
||||
if graph_break_msg:
|
||||
graph_break_msg = graph_break_msg.as_python_constant()
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Call to `torch._dynamo.graph_break()`",
|
||||
context=f"Called `torch._dynamo.graph_break()` with args `{args}`, kwargs `{kwargs}`",
|
||||
explanation=f"User-inserted graph break. Message: {graph_break_msg}",
|
||||
@ -1724,7 +1724,7 @@ class SkipFunctionVariable(VariableTracker):
|
||||
)
|
||||
hints = []
|
||||
reason = self.reason if self.reason else "<missing reason>"
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Attempted to call function marked as skipped",
|
||||
context=f"module: {module_name}, qualname: {qualname}, skip reason: {reason}",
|
||||
explanation=explanation,
|
||||
@ -1950,7 +1950,7 @@ class CollectiveFunctionRewriteVariable(UserFunctionVariable):
|
||||
args = ()
|
||||
|
||||
if "async_op" in kwargs and kwargs["async_op"].as_python_constant():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="async_op=True for distributed collectives",
|
||||
context=f"{self.fn}, {args=}, {kwargs=}",
|
||||
explanation=f"`torch.compile` doesn't support `async_op=True for {self.fn}",
|
||||
@ -1990,7 +1990,7 @@ class FunctoolsWrapsVariable(UserFunctionVariable):
|
||||
def wraps(fn: Any) -> VariableTracker:
|
||||
if isinstance(fn, variables.NestedUserFunctionVariable):
|
||||
return fn.clone(wrapped_fn=args[0])
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="functools.wraps",
|
||||
context=f"{fn}",
|
||||
explanation="`torch.compile` can't trace `functools.wraps` on functions defined outside the compile region",
|
||||
@ -2032,7 +2032,7 @@ class CollectionsNamedTupleFunction(UserFunctionVariable):
|
||||
value,
|
||||
mutation_type=ValueMutationNew(),
|
||||
)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="namedtuple construction",
|
||||
context=f"{args=}, {kwargs=}",
|
||||
explanation="`torch.compile` only support certain input types for namedtuple",
|
||||
@ -2338,7 +2338,7 @@ class DynamoTritonHOPifier(TritonHOPifier):
|
||||
if isinstance(grid, BaseListVariable):
|
||||
return grid.as_proxy()
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported grid type for triton hop check_grid",
|
||||
context=f"grid type = {type(grid)}",
|
||||
explanation="`torch.compile` only supports list-like grid for check_grid",
|
||||
|
||||
@ -50,7 +50,7 @@ from .. import graph_break_hints, variables
|
||||
from ..exc import (
|
||||
ObservedException,
|
||||
UncapturedHigherOrderOpError,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
Unsupported,
|
||||
)
|
||||
from ..source import AttrSource, DictGetItemSource
|
||||
@ -161,7 +161,7 @@ def check_meta_consistency_vt(
|
||||
elif isinstance(var, ConstantVariable):
|
||||
return var.as_python_constant()
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="cannot unwrap variable for check_meta_consistency",
|
||||
context=str(var),
|
||||
explanation=f"Expected {var} to be TensorVariable, SymNodeVariable, or ConstantVariable",
|
||||
@ -313,7 +313,7 @@ def _check_all_tensorvariable(args):
|
||||
from . import TensorVariable
|
||||
|
||||
if not all(type(a.realize()) is TensorVariable for a in args):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="HOP: non torch.Tensor leaf",
|
||||
context=f"args types: {[type(a.realize()) for a in args]}",
|
||||
explanation="Expected all leaves to be of torch.Tensor type.",
|
||||
@ -328,7 +328,7 @@ def _check_supported_callable_arg(
|
||||
BuiltinVariable(callable).call_function(tx, [func_var], {}).as_python_constant()
|
||||
)
|
||||
if not is_callable:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="HOP: non-callable variable",
|
||||
context=f"arg name: {arg_name}, func_var type: {str(func_var)}",
|
||||
explanation=f"{arg_name} should be a callable but is of type {str(func_var)}.",
|
||||
@ -359,7 +359,7 @@ def _call_while_loop(
|
||||
args.append(v)
|
||||
|
||||
if kwargs or len(args) != 4:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.while_loop: improper args/kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"torch.while_loop expects 4 positional arguments (got {len(args)}) "
|
||||
@ -379,7 +379,7 @@ def _call_while_loop(
|
||||
|
||||
# additional_inputs input check
|
||||
if not isinstance(additional_inputs, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.while_loop: improper additional_inputs",
|
||||
context=str(additional_inputs),
|
||||
explanation=f"Expected additional_inputs to be a list/tuple but got {additional_inputs.python_type()}",
|
||||
@ -484,7 +484,7 @@ def _call_while_loop(
|
||||
cond_r.proxy.node.meta["example_value"], include_contiguity=False
|
||||
)
|
||||
if cond_r_meta.dtype != torch.bool or cond_r_meta.shape != torch.Size([]):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.while_loop: unsupported cond_fn return type",
|
||||
context=str(cond_r),
|
||||
explanation=f"Expected cond_fn to return a scalar tensor or a bool but got {cond_r_meta.shape}.",
|
||||
@ -496,7 +496,7 @@ def _call_while_loop(
|
||||
# short-circuiting while_loop when cond_fn returns a constant such as 0, 1 True or False
|
||||
pred = cond_r.as_python_constant()
|
||||
if pred:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.while_loop: infinite loop detected",
|
||||
context=str(cond_r),
|
||||
explanation=f"Infinite loop detected because while_loop's cond_fn always returns the same value {pred}.",
|
||||
@ -811,7 +811,7 @@ def validate_args_and_maybe_create_graph_inputs(
|
||||
# If `a` cannot be put into a graph
|
||||
else:
|
||||
# HOPs work much better if they use speculate_subgraph(set_subgraph_inputs="automatic").
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="HOP body taking non-Tensor as input",
|
||||
context=str(sub_args),
|
||||
explanation=f"{description} with body that accepts non-Tensors as input. "
|
||||
@ -974,7 +974,7 @@ def speculate_subgraph(
|
||||
|
||||
# See NOTE [Temporary argument `set_subgraph_inputs`]
|
||||
if sub_kwargs and set_subgraph_inputs != "automatic":
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="invalid set_subgraph_inputs and sub_kwargs settings",
|
||||
context=f"set_subgraph_inputs: {set_subgraph_inputs}, sub_kwargs: {sub_kwargs}",
|
||||
explanation="`sub_kwargs` cannot be used when `set_subgraph_inputs` is not set to 'automatic'.",
|
||||
@ -1190,7 +1190,7 @@ def speculate_subgraph(
|
||||
mutation_info = subtracer.has_input_mutation()
|
||||
if mutation_info.has_mutation:
|
||||
context = f"{mutation_info.msg} in\n {graph}"
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered input mutation during higher order op tracing",
|
||||
context=context,
|
||||
explanation=f"Higher order ops do not support input mutation. Found in {source_target.name()}",
|
||||
@ -1204,7 +1204,7 @@ def speculate_subgraph(
|
||||
aliasing_info = subtracer.has_aliasing()
|
||||
if aliasing_info.has_aliasing:
|
||||
context = f"{aliasing_info.msg} in\n {graph}"
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered aliasing during higher order op tracing",
|
||||
context=context,
|
||||
explanation=f"Higher order ops do not support aliasing. Found in {source_target.name()}",
|
||||
@ -1269,7 +1269,7 @@ class TorchHigherOrderOperatorVariable(VariableTracker):
|
||||
|
||||
if isinstance(value, BaseHOP):
|
||||
return BaseHOPVariable(value, source, **kwargs)
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported HigherOrderOperator",
|
||||
context=str(value),
|
||||
explanation=f"Unable to create higher order operator variable for {value.__name__}.",
|
||||
@ -1297,7 +1297,7 @@ class TorchHigherOrderOperatorVariable(VariableTracker):
|
||||
args: Sequence[VariableTracker],
|
||||
kwargs: dict[str, VariableTracker],
|
||||
) -> VariableTracker:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="unsupported HigherOrderOperator function call",
|
||||
context=str(self.value),
|
||||
explanation=f"Unable to trace calling higher order operator variable for {self.value.__name__}.",
|
||||
@ -1357,7 +1357,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
# TODO(voz): Support fake tensor dispatch for recursive
|
||||
# ops - see torch/dispatch/_dispatcher.py
|
||||
if len(args) != 4 or kwargs:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: improper args/kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"torch.cond expects 4 positional arguments (got {len(args)}) "
|
||||
@ -1383,7 +1383,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# predicate
|
||||
if type(pred) not in (ConstantVariable, TensorVariable, SymNodeVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: improper predicate",
|
||||
context=str(pred),
|
||||
explanation="Expected `pred` to be a bool or a boolean tensor with a single item "
|
||||
@ -1395,7 +1395,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# operands
|
||||
if not isinstance(operands, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: improper operands",
|
||||
context=str(operands),
|
||||
explanation="Expected `operands` to be a list/tuple "
|
||||
@ -1409,7 +1409,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
if not only_consist_of(
|
||||
operands, (TensorVariable, ConstantVariable, SymNodeVariable)
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: improper operands contents",
|
||||
context=str(operands),
|
||||
explanation="Expected `operands` to be a list/tuple of pytrees that only consists of tensor leaves.",
|
||||
@ -1463,7 +1463,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
tx.fake_mode.epoch += 1
|
||||
|
||||
if not only_consist_of(ret_val, (TensorVariable, ConstantVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: unsupported branch return type",
|
||||
context=str(ret_val),
|
||||
explanation="Expected branches to return a possibly nested pytree of tensors or constant ints.",
|
||||
@ -1473,7 +1473,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
for ret in ret_val.unpack_var_sequence(tx):
|
||||
if isinstance(ret, ConstantVariable) and ret.python_type() is not int:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: unsupported branch return type (constant non-int)",
|
||||
context=str(ret_val),
|
||||
explanation="Constants returned from branches must be ints.",
|
||||
@ -1499,7 +1499,7 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
).as_python_constant()
|
||||
# 3.14: NotImplemented cannot be converted to bool
|
||||
if same_spec is not NotImplemented and not same_spec:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.cond: differing branch outputs",
|
||||
context=f"true_spec: {true_spec.treespec}, false_spec: {false_spec.treespec}, same_spec: {same_spec}",
|
||||
explanation="Expected branches to return the same pytree structure.",
|
||||
@ -1602,7 +1602,7 @@ def validate_subgraph_output_types(output: VariableTracker):
|
||||
isinstance(out, ConstantVariable) and out.python_type() in (int, bool)
|
||||
):
|
||||
continue
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="HOP body output unsupported",
|
||||
context=f"non-tensor outputs: {non_tensor_output}",
|
||||
explanation="HigherOrderOperator body's output must consist of tensors or ints/bools only "
|
||||
@ -1671,7 +1671,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
# This is the standard case when the user calls the frontend
|
||||
# and the frontend invokes dynamo
|
||||
if len(args) != 2:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: improper args",
|
||||
context=f"args: {args}",
|
||||
explanation=f"torch.associative_scan expects 2 positional arguments (got {len(args)}) "
|
||||
@ -1697,7 +1697,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# xs input check
|
||||
if not isinstance(xs, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: improper xs",
|
||||
context=str(xs),
|
||||
explanation=f"Expected xs to be a list/tuple but got {xs.python_type()}",
|
||||
@ -1710,7 +1710,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# additional_inputs input check
|
||||
if not isinstance(additional_inputs, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: improper additional_inputs",
|
||||
context=str(additional_inputs),
|
||||
explanation=f"Expected additional_inputs to be a list/tuple but got {additional_inputs.python_type()}",
|
||||
@ -1723,7 +1723,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
scan_length = get_fake_value(xs_vars[0].as_proxy().node, tx).size()[0]
|
||||
if scan_length == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: zero-sized tensor",
|
||||
context=str(xs_vars[0]),
|
||||
explanation="associative_scan() operator doesn't support zero-sized tensors during tracing.",
|
||||
@ -1776,7 +1776,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# Check whether the combine_fn returns one child tree for the output.
|
||||
if _combine_treespec.as_python_constant().num_leaves < 1:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: combine_fn improper number of leaves",
|
||||
context=str(_combine_treespec.as_python_constant()),
|
||||
explanation="combine_fn needs to produce one pytree for the output "
|
||||
@ -1795,7 +1795,7 @@ class AssociativeScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
) or not _make_inlined(tx, pytree.TreeSpec.__eq__)(
|
||||
xs_treespec, _combine_treespec
|
||||
).as_python_constant():
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.associative_scan: mismatched input/output tree structure",
|
||||
context=f"xs: {xs_treespec.as_python_constant()}, output: {_combine_treespec.as_python_constant()}",
|
||||
explanation="The tree structure of the xs and the outs of the combine_fn are are expected to be identical, but got "
|
||||
@ -1907,7 +1907,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
variables.FunctoolsPartialVariable,
|
||||
),
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: improper combine_fn",
|
||||
context=str(combine_fn_var),
|
||||
explanation="Expected combine_fn to be wrapped as functools.partial in scan user-facing api "
|
||||
@ -1948,7 +1948,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
# xs input check
|
||||
if not isinstance(xs, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: improper xs",
|
||||
context=str(xs),
|
||||
explanation=f"Expected xs to be a list/tuple but got {xs.python_type()}",
|
||||
@ -1958,7 +1958,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
# init input check
|
||||
if not isinstance(init, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: improper init",
|
||||
context=str(init),
|
||||
explanation=f"Expected init to be a list/tuple with at least one element but got {init.python_type()}",
|
||||
@ -1968,7 +1968,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
|
||||
if len(init_vars) == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: no init leaves",
|
||||
context="",
|
||||
explanation="Expected init leaves.",
|
||||
@ -1979,7 +1979,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
# additional_inputs input check
|
||||
if not isinstance(additional_inputs, (ListVariable, TupleVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: improper additional_inputs",
|
||||
context=str(additional_inputs),
|
||||
explanation=f"Expected additional_inputs to be a list/tuple but got {additional_inputs.python_type()}",
|
||||
@ -1990,7 +1990,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
# scan_length check
|
||||
scan_length = get_fake_value(xs_vars[0].as_proxy().node, tx).size()[0]
|
||||
if scan_length == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: zero-sized tensor",
|
||||
context=str(xs_vars[0]),
|
||||
explanation="associative_scan() operator doesn't support zero-sized tensors during tracing.",
|
||||
@ -2047,7 +2047,7 @@ class ScanHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
else:
|
||||
if len(combine_result_vars) != 2:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.scan: improper combine_fn number of returns",
|
||||
context=str(combine_result_vars),
|
||||
explanation=f"Expect combine_fn to return a tuple (next_carry, y) but got {combine_result_vars}.",
|
||||
@ -2143,7 +2143,7 @@ class MapHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
|
||||
|
||||
if len(kwargs) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.map: kwargs not supported",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"torch.map expects no keyword arguments (got {len(kwargs)})",
|
||||
@ -2163,7 +2163,7 @@ class MapHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
sample_shape = get_fake_value(unpacked_xs[0].as_proxy().node, tx).size()
|
||||
|
||||
if len(sample_shape) < 1 or sample_shape[0] == 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.map: improper inputs",
|
||||
context=str(sample_shape),
|
||||
explanation="torch.map doesn't support scalar or non-zero sized tensors during tracing.",
|
||||
@ -2257,7 +2257,7 @@ class ExecutorchCallDelegateHigherOrderVariable(TorchHigherOrderOperatorVariable
|
||||
# executorch_call_delegate sits at a higher level than dynamo, but
|
||||
# there's no real solution to this issue yet.
|
||||
if len(kwargs) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="executorch_call_delegate: kwargs not supported",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"executorch_call_delegate expects no keyword arguments (got {len(kwargs)})",
|
||||
@ -2317,7 +2317,7 @@ class FunctionalCallVariable(FunctorchHigherOrderVariable):
|
||||
self, tx, args: list[VariableTracker], kwargs: dict[str, VariableTracker]
|
||||
) -> VariableTracker:
|
||||
if not torch._dynamo.config.inline_inbuilt_nn_modules:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="torch.func.functional_call capture is disabled",
|
||||
context="",
|
||||
explanation="torch.func.functional_call capture is disabled",
|
||||
@ -2427,7 +2427,7 @@ class WrapHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
) = self.create_wrapped_node(tx, args[0], args[1:], kwargs, "wrap")
|
||||
|
||||
if len(p_kwargs) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="WrapHigherOrderVariable: kwargs unexpected",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation="kwargs should have been flattened into lifted args.",
|
||||
@ -2468,7 +2468,7 @@ class WrapWithSetGradEnabledHigherOrderVariable(TorchHigherOrderOperatorVariable
|
||||
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
|
||||
|
||||
if kwargs:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_set_grad_enabled: unexpected kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"wrap_with_set_grad_enabled expects no keyword arguments (got {len(kwargs)}).",
|
||||
@ -2480,7 +2480,7 @@ class WrapWithSetGradEnabledHigherOrderVariable(TorchHigherOrderOperatorVariable
|
||||
grad_enabled, fn_var, *rest_args = args
|
||||
|
||||
if not isinstance(grad_enabled, ConstantVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_set_grad_enabled: non-constant grad_enabled",
|
||||
context=str(grad_enabled),
|
||||
explanation="wrap_with_set_grad_enabled expects grad_enabled argument to be a constant.",
|
||||
@ -2508,7 +2508,7 @@ class WrapWithSetGradEnabledHigherOrderVariable(TorchHigherOrderOperatorVariable
|
||||
)
|
||||
|
||||
if len(body_lifted_freevars) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_set_grad_enabled: unexpected freevars",
|
||||
context=str(body_lifted_freevars),
|
||||
explanation="wrap_with_set_grad_enabled expects no freevars.",
|
||||
@ -2555,7 +2555,7 @@ class WrapWithAutocastHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
|
||||
|
||||
if kwargs:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_autocast: unexpected kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"wrap_with_autocast expects no keyword arguments (got {len(kwargs)}).",
|
||||
@ -2568,7 +2568,7 @@ class WrapWithAutocastHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
|
||||
for arg in [device_type, dtype, enabled, cache_enabled]:
|
||||
if not isinstance(arg, ConstantVariable):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_autocast: expected constant arg",
|
||||
context=str(args),
|
||||
explanation="wrap_with_autocast expects device_type, dtype, enabled, "
|
||||
@ -2602,7 +2602,7 @@ class WrapWithAutocastHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
|
||||
if len(body_lifted_freevars) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="wrap_with_autocast: unexpected freevars",
|
||||
context=str(body_lifted_freevars),
|
||||
explanation="wrap_with_autocast expects no freevars.",
|
||||
@ -2652,7 +2652,7 @@ class HintsWrapperHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
or len(kwargs) != 1
|
||||
or "hints" not in kwargs
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="hints_wrapper: improper args/kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"hints_wrapper expects 3 positional arguments (got {len(args)}) "
|
||||
@ -2718,7 +2718,7 @@ class OutDtypeHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
from .builder import wrap_fx_proxy
|
||||
|
||||
if len(kwargs) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="out_dtype: unexpected kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"out_dtype expects no keyword arguments (got {len(kwargs)}).",
|
||||
@ -2764,7 +2764,7 @@ class StrictModeHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
# TODO (tmanlaibaatar) support pytree here
|
||||
for arg in unpacked_sequence:
|
||||
if isinstance(arg, (ListVariable, TupleVariable, ConstDictVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="strict_mode: improper args",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation="strict_mode higher order op expects flat inputs (list/tuple/dict)",
|
||||
@ -2774,7 +2774,7 @@ class StrictModeHigherOrderVariable(TorchHigherOrderOperatorVariable):
|
||||
)
|
||||
|
||||
if kwargs:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="strict_mode: unexpected kwargs",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation=f"strict_mode higher order op expects no keyword arguments (got {len(kwargs)}).",
|
||||
@ -3301,7 +3301,7 @@ class AutogradFunctionApplyVariable(VariableTracker):
|
||||
)
|
||||
fwd_args = [fwd_fn.obj, ctx, *args]
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="autograd.Function.apply: non-function or method forward",
|
||||
context=str(self.fwd_graph),
|
||||
explanation="Expected forward function to be a function or method.",
|
||||
@ -3326,7 +3326,7 @@ class AutogradFunctionApplyVariable(VariableTracker):
|
||||
"_materialize_non_diff_grads"
|
||||
in tx.output.side_effects.store_attr_mutations[ctx]
|
||||
):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="autograd.Function.apply: _materialize_non_diff_grads mutation",
|
||||
context="",
|
||||
explanation="Mutations to autograd.Function.ctx._materialize_non_diff_grads are not supported.",
|
||||
@ -3361,7 +3361,7 @@ class AutogradFunctionApplyVariable(VariableTracker):
|
||||
)
|
||||
bwd_args = [bwd_fn.obj, *bwd_args]
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="autograd.Function.apply: non-function or method backward",
|
||||
context=str(self.bwd_graph),
|
||||
explanation="Expected backward function to be a function or method.",
|
||||
@ -3417,7 +3417,7 @@ class AutogradFunctionApplyVariable(VariableTracker):
|
||||
UserDefinedClassVariable(self.bwd_graph.__class__),
|
||||
)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="autograd.Function.apply: non-function or method backward (2)",
|
||||
context=str(self.bwd_graph),
|
||||
explanation="Expected backward function to be a function or method.",
|
||||
@ -3708,7 +3708,7 @@ class InvokeSubgraphHigherOrderVariable(WrapHigherOrderVariable):
|
||||
# using the saved attr name.
|
||||
|
||||
if not isinstance(fn_vt, (UnspecializedNNModuleVariable, UserFunctionVariable)):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Encountered non user function variable during invoke_subgraph HOP tracing",
|
||||
context=str(fn_vt),
|
||||
explanation="invoke_subgraph does not support non user function variable",
|
||||
@ -3780,7 +3780,7 @@ class InvokeSubgraphHigherOrderVariable(WrapHigherOrderVariable):
|
||||
) = self.create_wrapped_node(tx, args[0], args[1:], kwargs, "invoke_subgraph")
|
||||
|
||||
if len(p_kwargs) > 0:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="invoke_subgraph: kwargs unexpected",
|
||||
context=f"args: {args}, kwargs: {kwargs}",
|
||||
explanation="kwargs should have been flattened into lifted args.",
|
||||
|
||||
@ -28,7 +28,7 @@ from ..exc import (
|
||||
handle_observed_exception,
|
||||
ObservedUserStopIteration,
|
||||
raise_observed_exception,
|
||||
unimplemented_v2,
|
||||
unimplemented,
|
||||
UserError,
|
||||
)
|
||||
from .base import ValueMutationNew, VariableTracker
|
||||
@ -64,7 +64,7 @@ class ItertoolsVariable(VariableTracker):
|
||||
|
||||
if self.value is itertools.product:
|
||||
if any(kw != "repeat" for kw in kwargs):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported kwargs for itertools.product",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation=f"Expected kwargs: 'repeat', but got "
|
||||
@ -104,7 +104,7 @@ class ItertoolsVariable(VariableTracker):
|
||||
)
|
||||
elif self.value is itertools.groupby:
|
||||
if any(kw != "key" for kw in kwargs):
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported kwargs for itertools.groupby",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation=f"Expected kwargs: 'key', but got "
|
||||
@ -118,7 +118,7 @@ class ItertoolsVariable(VariableTracker):
|
||||
elif isinstance(key, variables.ConstantVariable):
|
||||
return key.as_python_constant()
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported key type for itertools.groupby",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation="Dynamo does not know how to trace "
|
||||
@ -130,7 +130,7 @@ class ItertoolsVariable(VariableTracker):
|
||||
if len(args) == 1 and args[0].has_unpack_var_sequence(tx):
|
||||
seq = args[0].unpack_var_sequence(tx)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported arguments for itertools.groupby",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation="Dynamo does not know how to trace "
|
||||
@ -175,7 +175,7 @@ class ItertoolsVariable(VariableTracker):
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unexpected failure during itertools.groupby() iteration",
|
||||
context=f"call_function {self} {args} {kwargs}",
|
||||
explanation="Unexpected failure in invoking function during groupby",
|
||||
@ -227,7 +227,7 @@ class IteratorVariable(VariableTracker):
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unimplemented next() call",
|
||||
context=f"next({self})",
|
||||
explanation="This abstract method must be implemented",
|
||||
|
||||
@ -30,7 +30,7 @@ from ..bytecode_transformation import (
|
||||
create_instruction,
|
||||
create_rot_n,
|
||||
)
|
||||
from ..exc import raise_observed_exception, unimplemented_v2
|
||||
from ..exc import raise_observed_exception, unimplemented
|
||||
from ..source import AttrSource, NamedTupleFieldsSource
|
||||
from ..utils import (
|
||||
cmp_name_to_op_mapping,
|
||||
@ -162,7 +162,7 @@ class BaseListVariable(VariableTracker):
|
||||
if value.constant is not None and value.constant.numel() == 1:
|
||||
value = variables.ConstantVariable.create(value.constant.item())
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Indexing list with non-scalar tensor",
|
||||
context=f"call_method {self} {name} {args} {kwargs}",
|
||||
explanation=(
|
||||
@ -878,7 +878,7 @@ class ListVariable(CommonListMethodsVariable):
|
||||
except NotImplementedError:
|
||||
python_type = "unknown"
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="sort with non-constant keys",
|
||||
context=str(first_non_constant_key),
|
||||
explanation=(
|
||||
@ -1607,7 +1607,7 @@ class SliceVariable(VariableTracker):
|
||||
return variables.GetAttrVariable(self, name)
|
||||
fields = ["start", "stop", "step"]
|
||||
if name not in fields:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported attribute for slice() object",
|
||||
context=f"var_getattr {self} {name}",
|
||||
explanation=f"Expected attribute to be one of {','.join(fields)} "
|
||||
|
||||
@ -39,7 +39,7 @@ from ..bytecode_transformation import (
|
||||
create_instruction,
|
||||
)
|
||||
from ..create_parameter_op import do_not_convert_to_tracable_parameter
|
||||
from ..exc import raise_observed_exception, unimplemented_v2
|
||||
from ..exc import raise_observed_exception, unimplemented
|
||||
from ..guards import GuardBuilder, install_guard
|
||||
from ..mutation_guard import unpatched_nn_module_init
|
||||
from ..source import (
|
||||
@ -108,7 +108,7 @@ class SuperVariable(VariableTracker):
|
||||
|
||||
def _resolved_getattr_and_source(self, tx: "InstructionTranslator", name):
|
||||
if not self.objvar:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="1-arg super not implemented",
|
||||
context="",
|
||||
explanation=f"Dynamo failed to trace attribute `{name}` accessed "
|
||||
@ -159,7 +159,7 @@ class SuperVariable(VariableTracker):
|
||||
)
|
||||
return resolved_getattr, source
|
||||
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unable to resolve super getattr",
|
||||
context="",
|
||||
explanation=f"Dynamo failed to trace attribute `{name}` accessed "
|
||||
@ -220,7 +220,7 @@ class SuperVariable(VariableTracker):
|
||||
)
|
||||
return fn_vt.call_function(tx, [self.objvar] + args, kwargs)
|
||||
else:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Unsupported super().__init__() call",
|
||||
context=f"call_method {self} {name} {args} {kwargs}",
|
||||
explanation="Dynamo encountered a super().__init__() call "
|
||||
@ -290,7 +290,7 @@ class SuperVariable(VariableTracker):
|
||||
try:
|
||||
attr = attr.as_python_constant()
|
||||
except NotImplementedError as exc:
|
||||
unimplemented_v2(
|
||||
unimplemented(
|
||||
gb_type="Non-constant attribute given to `super().__delattr__()`",
|
||||
context=f"call_method {self} {name}",
|
||||
explanation="Dynamo requires the attribute name passed to "
|
||||
@ -301,7 +301,7 @@ class SuperVariable(VariableTracker):
|
||||
from_exc=exc,
|
||||
)
|
||||
if not tx.output.side_effects.is_attribute_mutation(self.objvar):
unimplemented_v2(
unimplemented(
gb_type="Attempted super().__delattr__() on an object without mutation tracking",
context=f"call_method {self} {name}",
explanation="Dynamo needs to track mutations on an object "
@ -392,7 +392,7 @@ class SuperVariable(VariableTracker):
fn_var = VariableTracker.build(tx, inner_fn, source)
return fn_var.call_function(tx, [self.objvar] + args, kwargs)

unimplemented_v2(
unimplemented(
gb_type="Attempted to call a super() attribute that is "
"not a function or method",
context=f"call_method {self} {name}",
@ -414,7 +414,7 @@ class ExceptionVariable(VariableTracker):
self.exc_type = exc_type
self.args = args
if init_kwargs:
unimplemented_v2(
unimplemented(
gb_type="Keyword args passed to exception constructor",
context=f"{self} with kwargs {init_kwargs}",
explanation="Dynamo does not know how to handle keyword args passed to an exception constructor",
@ -495,7 +495,7 @@ class ExceptionVariable(VariableTracker):
if isinstance(val, ConstantVariable) and val.value is None:
self.__traceback__ = val
else:
unimplemented_v2(
unimplemented(
gb_type="Set Exception object `__traceback__` attribute to not-`None`",
context=f"call_setattr {self} {name}",
explanation="Dynamo does not support setting the attribute "
@ -507,7 +507,7 @@ class ExceptionVariable(VariableTracker):
],
)
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported attribute assignment on Exception object",
context=f"call_setattr {self} {name}",
explanation="Dynamo does not support setting the attribute "
@ -567,7 +567,7 @@ class DelayGraphBreakVariable(UnknownVariable):
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
unimplemented_v2(
unimplemented(
gb_type="Unsupported function call (delayed)",
context=f"source: {self.source}",
explanation="Dynamo determined that a graph break should occur "
@ -722,7 +722,7 @@ class AutogradFunctionVariable(VariableTracker):

vjp_fn = self.fn_cls.vjp # type: ignore[attr-defined]
if vjp_fn is not torch.autograd.Function.vjp:
unimplemented_v2(
unimplemented(
gb_type="Unsupported custom vjp",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo does not support tracing "
@ -737,7 +737,7 @@ class AutogradFunctionVariable(VariableTracker):

jvp_fn = self.fn_cls.jvp # type: ignore[attr-defined]
if jvp_fn is not torch.autograd.Function.jvp:
unimplemented_v2(
unimplemented(
gb_type="Unsupported custom jvp",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo does not support tracing "
@ -798,7 +798,7 @@ class AutogradFunctionVariable(VariableTracker):
source=source,
).call_function(tx, args, kwargs)
else:
unimplemented_v2(
unimplemented(
gb_type="Non-function or method in subclass of torch.autograd.Function",
context=f"call_apply {self} {args} {kwargs}",
explanation="Dynamo requires the `forward` attribute of a "
@ -873,7 +873,7 @@ class AutogradFunctionVariable(VariableTracker):
obj.__func__, self, source=source
).call_function(tx, args, kwargs)
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported autograd.Function method",
context=f"call_method {self} {name}",
explanation="Dynamo does not support calling the method "
@ -943,7 +943,7 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):

def as_proxy(self):
if self.proxy is None:
unimplemented_v2(
unimplemented(
gb_type="proxy not set",
context=f"as_proxy {self}",
explanation="Dynamo requires the autograd.Function context "
@ -968,7 +968,7 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):
return variables.ConstantVariable.create(None)

if name != "save_for_backward":
unimplemented_v2(
unimplemented(
gb_type="Unsupported autograd.Function context method",
context=f"call_method {self} {name}",
explanation="Dynamo does not support calling the method "
@ -978,7 +978,7 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):
hints=[*graph_break_hints.SUPPORTABLE],
)
if self.saved_tensors is None:
unimplemented_v2(
unimplemented(
gb_type="Unsupported autograd.Function context `save_for_backward`",
context=f"call_method {self} {name}",
explanation="Dynamo requires the `saved_tensors` attribute "
@ -1057,7 +1057,7 @@ class AutogradEngineVariable(UserDefinedObjectVariable):
kwargs,
)
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported torch._C._ImperativeEngine.queue_callback()",
context=f"call_method {self} {name}",
explanation="queue_callback() is only supported when "
@ -1065,7 +1065,7 @@ class AutogradEngineVariable(UserDefinedObjectVariable):
hints=[],
)
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported torch._C._ImperativeEngine method",
context=f"call_method {self} {name}",
explanation="Dynamo only supports the `queue_callback` method "
@ -1283,7 +1283,7 @@ class MethodWrapperVariable(VariableTracker):
except AsPythonConstantNotImplementedError:
pass

unimplemented_v2(
unimplemented(
gb_type="unsupported type.__dict__['__annotations__'].__get__ call",
context=f"call_function {self}, args: {args}, kwargs: {kwargs}",
explanation="`torch.compile` only supports calling type.__dict__['__annotations__'].__get__ "
@ -1382,7 +1382,7 @@ class TypingVariable(VariableTracker):
if name == "__getitem__" and len(args) == 1:
new_typing = self.value[args[0].as_python_constant()]
return TypingVariable(new_typing)
unimplemented_v2(
unimplemented(
gb_type="unsupported method call on `typing` variable",
context=f"typing variable: {self.value}, method name: {name}, args: {args}, kwargs: {kwargs}",
explanation=f"`torch.compile` does not support method call `{name}` on `typing` variable f{self.value}.",
@ -1501,7 +1501,7 @@ class NumpyVariable(VariableTracker):
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
if not config.trace_numpy:
unimplemented_v2(
unimplemented(
gb_type="attempted to trace numpy function with config.trace_numpy=False",
context=f"numpy function: {self.value}, args: {args}, kwargs: {kwargs}",
explanation=f"Attempted to trace numpy function {self.value} "
@ -1516,7 +1516,7 @@ class NumpyVariable(VariableTracker):

func = get_np_to_tnp_map().get(self.value)
if func is None:
unimplemented_v2(
unimplemented(
gb_type="attempted to trace numpy function unsupported by PyTorch",
context=f"numpy function: {self.value}, args: {args}, kwargs: {kwargs} (corresponding torch function: {func})",
explanation=f"Can't find numpy numpy function {self.value} in torch._numpy.",
@ -1537,7 +1537,7 @@ class NumpyVariable(VariableTracker):
)
)
except AsPythonConstantNotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="numpy function that produces a const collection type encountered non-const arguments",
context=f"numpy function: {self.value}, args: {args}, kwargs: {kwargs} (corresponding torch function: {func})",
explanation=f"numpy function {self.value} that produces a const collection type "
@ -1552,7 +1552,7 @@ class NumpyVariable(VariableTracker):
func.__module__ == "torch._numpy.random"
and config.use_numpy_random_stream
):
unimplemented_v2(
unimplemented(
gb_type="attempted to trace torch._numpy.random function with config.use_numpy_random_stream=True",
context=f"numpy function: {self.value}, args: {args}, kwargs: {kwargs} (corresponding torch function: {func})",
explanation=f"Attempted to trace {self.value} when `torch._dynamo.config.use_numpy_random_stream` "
@ -1591,7 +1591,7 @@ class NumpyVariable(VariableTracker):
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
unimplemented_v2(
unimplemented(
gb_type="attempted to trace numpy.* function as a method",
context=f"numpy function: {self.value}, args: {args}, kwargs: {kwargs}",
explanation="Tracing numpy.* functions as methods is not supported.",
@ -1623,7 +1623,7 @@ class NullVariable(VariableTracker):

def reconstruct(self, codegen: "PyCodegen"):
if sys.version_info < (3, 11):
unimplemented_v2(
unimplemented(
gb_type="cannot reconstruct NullVariable in Python < 3.11",
context="",
explanation="Attempted to generate PUSH_NULL instruction in Python < 3.11; "
@ -1712,7 +1712,7 @@ class DebuggingVariable(VariableTracker):
return

if not self.can_reorder_logs(self.value, args, kwargs):
unimplemented_v2(
unimplemented(
gb_type="attempted to reorder a debugging function that can't actually be reordered",
context=f"fn: {self.value}, args: {args}, kwargs: {kwargs}",
explanation="`torch.compile` can only reorder functions where the arguments "
@ -1771,7 +1771,7 @@ class LoggingLoggerVariable(VariableTracker):
function = getattr(method, "__func__", None)
if {method, function}.intersection(torch._dynamo.config.ignore_logger_methods):
return variables.ConstantVariable.create(None)
unimplemented_v2(
unimplemented(
gb_type="logging.Logger method not supported for non-export cases",
context=f"method: {self.value}.{name}, args: {args}, kwargs: {kwargs}",
explanation="logging.Logger methods are not supported for non-export cases.",
@ -1814,7 +1814,7 @@ class ConstantLikeVariable(VariableTracker):
cargs = [x.as_python_constant() for x in args]
ckwargs = {k: v.as_python_constant() for k, v in kwargs.items()}
except NotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="constant-like method call with non-constant args",
context=f"{self._error_prefix}.{name}(*{args}, **{kwargs})",
explanation=f"Attempted to call {self._error_prefix}.{name} with non-constant args.",
@ -1830,7 +1830,7 @@ class ConstantLikeVariable(VariableTracker):
if isinstance(result, re.Match):
return ConstantRegexMatchVariable(result)

unimplemented_v2(
unimplemented(
gb_type="constant-like method call with unsupported return type",
context=f"{self._error_prefix}.{name}(*{args}, **{kwargs}) returned {result}",
explanation=f"Attempted to call {self._error_prefix}.{name}, got unsupported return value {result}.",
@ -1901,7 +1901,7 @@ class RandomClassVariable(VariableTracker):

def call_function(self, tx: "InstructionTranslator", args, kwargs):
if len(args) > 1 or kwargs:
unimplemented_v2(
unimplemented(
gb_type="random.Random() with improper arguments",
context=f"args: {args}, kwargs: {kwargs}",
explanation="random.Random() with > 1 arg or with kwargs is not supported.",

@ -36,7 +36,7 @@ import torch.nn
from .. import graph_break_hints, trace_rules, variables
from ..exc import (
raise_observed_exception,
unimplemented_v2,
unimplemented,
UnspecializeRestartAnalysis,
Unsupported,
)
@ -263,7 +263,7 @@ class NNModuleVariable(VariableTracker):
base = tx.output.get_submodule(self.module_key)

if object_has_getattribute(base):
unimplemented_v2(
unimplemented(
gb_type="Custom __getattribute__ in nn.Module dict key check",
context=f"has_key_in_generic_dict {self} {key}",
explanation="Dynamo does not support checking key existence "
@ -285,7 +285,7 @@ class NNModuleVariable(VariableTracker):
def _custom_getattr_fallback(self, base, tx, name, obj_source):
"""Check for a __getattr__ and handle it specially if it is implemented"""
if object_has_getattribute(base):
unimplemented_v2(
unimplemented(
gb_type="Custom __getattribute__ in nn.Module attribute access",
context=f"var_getattr {self} {name}",
explanation="Dynamo does not support checking key existence "
@ -302,7 +302,7 @@ class NNModuleVariable(VariableTracker):
return None

if not isinstance(getattr_fn, types.FunctionType):
unimplemented_v2(
unimplemented(
gb_type="torch.nn.Module with a non-function custom __getattr__",
context=f"var_getattr {self} {name}",
explanation=(
@ -336,7 +336,7 @@ class NNModuleVariable(VariableTracker):
all_class_attribute_names.update(x.__dict__.keys())

if not self.source:
unimplemented_v2(
unimplemented(
gb_type="getattr with no source",
context=f"var_getattr {self} {name}",
explanation="Dynamo does not know how to access an attribute "
@ -423,7 +423,7 @@ class NNModuleVariable(VariableTracker):
# Support possibly common cases of class members
return VariableTracker.build(tx, subobj, NNModuleSource(source))
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported nn.Module attribute type",
context=f"nn.Module subclass: {typestr(base)}, name: {name}, attribute type: {typestr(subobj)}",
explanation=f"Dynamo does not support tracing nn.Module attributes of type `{typestr(subobj)}`",
@ -644,7 +644,7 @@ class NNModuleVariable(VariableTracker):
if not all(
x.is_python_constant() for x in itertools.chain(args, kwargs.values())
):
unimplemented_v2(
unimplemented(
gb_type="non-const argument in nn.Module method",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support calling "
@ -830,7 +830,7 @@ class NNModuleVariable(VariableTracker):
isinstance(args[0], variables.ConstantVariable)
and isinstance(args[0].as_python_constant(), (str, int))
):
unimplemented_v2(
unimplemented(
gb_type="Invalid or non-const argument in nn.Module __getitem__",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support calling "
@ -893,7 +893,7 @@ class NNModuleVariable(VariableTracker):
elif args[0].is_python_constant():
key = args[0].as_python_constant()
else:
unimplemented_v2(
unimplemented(
gb_type="Unsupported key type for nn.Module.__getitem__",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support getitem on "
@ -1136,7 +1136,7 @@ class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
hasattr(method, "__code__")
and id(method.__code__) in self._nn_module_method_ids()
):
unimplemented_v2(
unimplemented(
gb_type="UnspecializedNNModuleVariable missing method",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation=f"Dynamo does not support tracing method {name} of nn.Module {self.value}",

@ -28,7 +28,7 @@ from torch._guards import Source
from torch.fx.proxy import Proxy

from .. import graph_break_hints
from ..exc import unimplemented_v2, UnsafeScriptObjectError, Unsupported
from ..exc import unimplemented, UnsafeScriptObjectError, Unsupported
from .base import VariableTracker
from .user_defined import UserDefinedObjectVariable

@ -87,7 +87,7 @@ class TorchScriptObjectVariable(UserDefinedObjectVariable):

method = getattr(self.value, name, None)
if method is None:
unimplemented_v2(
unimplemented(
gb_type="FakeScriptObject missing method implementation",
context=f"value={self.value}, method={name}",
explanation=f"TorchScript object {self.value} doesn't define the method {name}.",
@ -98,7 +98,7 @@ class TorchScriptObjectVariable(UserDefinedObjectVariable):
)

if not callable(method):
unimplemented_v2(
unimplemented(
gb_type="Attempted to access non-callable attribute of TorchScript object",
context=f"value={self.value}, method={name}",
explanation="Attribute accesses of TorchScript objects to non-callable attributes are not supported.",
@ -128,7 +128,7 @@ class TorchScriptObjectVariable(UserDefinedObjectVariable):
args: Iterable[Any],
kwargs: dict[str, Any],
) -> VariableTracker:
unimplemented_v2(
unimplemented(
gb_type="Weird method call on TorchScript object",
context=f"value={self.value}, method={name}",
explanation=(

@ -9,7 +9,7 @@ from torch.fx import has_side_effect, Proxy

from .. import graph_break_hints
from ..bytecode_transformation import create_call_function
from ..exc import TYPE_CHECKING, unimplemented_v2
from ..exc import TYPE_CHECKING, unimplemented
from ..graph_bytecode_inputs import get_external_object_by_index
from .base import VariableTracker
from .constant import ConstantVariable
@ -389,7 +389,7 @@ class EventVariable(VariableTracker):
method_name = (
f"{type(self.value).__module__}.{type(self.value).__qualname__}.{name}"
)
unimplemented_v2(
unimplemented(
gb_type="Unsupported event method",
context=str(name),
explanation=f"Dynamo doesn't support tracing the {method_name} method. "

@ -46,7 +46,7 @@ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
from .. import config, graph_break_hints, variables
from .._trace_wrapped_higher_order_op import trace_wrapped
from ..exc import (
unimplemented_v2,
unimplemented,
UnknownPropertiesDuringBackwardTrace,
UserError,
UserErrorType,
@ -390,7 +390,7 @@ class TensorVariable(VariableTracker):
return ConstantVariable.create(self.is_nested)

def method_attr_retain_grad(self, tx):
unimplemented_v2(
unimplemented(
gb_type="Tensor.retain_grad() with AOTDispatcher",
context=f"var_getattr {self} retain_grad",
explanation="`Tensor.retain_grad()` does not work with AOTDispatcher.",
@ -404,7 +404,7 @@ class TensorVariable(VariableTracker):

def method_attr_grad_fn(self, tx):
if self.has_grad_fn:
unimplemented_v2(
unimplemented(
gb_type="Tensor with grad_fn()",
context=f"var_getattr {self} grad_fn",
explanation="Dynamo does not support tracing tensors with a grad_fn directly.",
@ -451,7 +451,7 @@ class TensorVariable(VariableTracker):
def var_getattr(self, tx: "InstructionTranslator", name):
if self.is_strict_mode(tx):
if name in self._strict_mode_banned_ops():
unimplemented_v2(
unimplemented(
gb_type="Strict mode banned op",
context=f"var_getattr {self} {name}",
explanation=f"Getattr invocation '{name}' in strict mode is not supported.",
@ -541,7 +541,7 @@ class TensorVariable(VariableTracker):

def call_id(self, tx):
if not self.source:
unimplemented_v2(
unimplemented(
gb_type="Unsupported call_id() without source",
context=f"call_id {self}",
explanation="call_id() not supported for sourceless TensorVariable.",
@ -553,7 +553,7 @@ class TensorVariable(VariableTracker):
try:
_input_associated_real_value = eval(self.source.name(), scope)
except Exception as exc:
unimplemented_v2(
unimplemented(
gb_type="Error getting associated real value",
context=f"call_id {self}",
explanation="Dynamo encountered an error while trying to "
@ -563,7 +563,7 @@ class TensorVariable(VariableTracker):
)

if _input_associated_real_value is None:
unimplemented_v2(
unimplemented(
gb_type="call_id() without associated real value",
context=f"call_id {self}",
explanation="Dynamo could not find an associated real value for the tensor.",
@ -639,7 +639,7 @@ class TensorVariable(VariableTracker):
from .torch_function import can_dispatch_torch_function, dispatch_torch_function

if self.is_strict_mode(tx) and name in self._strict_mode_banned_ops():
unimplemented_v2(
unimplemented(
gb_type="Illegal method invocation in strict mode",
context=f"call_method {self} {name} {args} {kwargs}",
explanation="Dynamo currently does not support this method "
@ -683,7 +683,7 @@ class TensorVariable(VariableTracker):
# discussions in #151432 for more details.
# We graph break for now since this use case is uncommon.
if name == "random_":
unimplemented_v2(
unimplemented(
gb_type="Tensor.random_ op",
context=f"Tensor.{name}({args=}, {kwargs=})",
explanation="This is currently not supported.",
@ -693,7 +693,7 @@ class TensorVariable(VariableTracker):
],
)
elif name == "uniform_" and "from" in kwargs:
unimplemented_v2(
unimplemented(
gb_type="Tensor.uniform_ op called with `from` keyword",
context=f"Tensor.{name}({args=}, {kwargs=})",
explanation="This is currently not supported.",
@ -713,7 +713,7 @@ class TensorVariable(VariableTracker):
if result:
return result
except TypeError as e:
unimplemented_v2(
unimplemented(
gb_type="Unhandled args for method",
context=f"call_method {self} {name} {args} {kwargs}",
explanation="Dynamo encountered an error while calling "
@ -804,7 +804,7 @@ class TensorVariable(VariableTracker):

def method_is_inference(self):
if config.fake_tensor_disable_inference_mode:
unimplemented_v2(
unimplemented(
gb_type="Encountered tensor.is_inference() during tracing",
context="",
explanation="tensor.is_inference() is not supported",
@ -890,7 +890,7 @@ class TensorVariable(VariableTracker):
object(), var, mutation_type_cls=AttributeMutationNew
)
return var
unimplemented_v2(
unimplemented(
gb_type="Argument of `as_subclass` must be a non-dispatcher-style tensor subclass",
context=f"{self}.as_subclass({cls})",
explanation="Currently not supported",
@ -910,7 +910,7 @@ class TensorVariable(VariableTracker):

def method_numpy(self, *, force=False):
if not config.trace_numpy:
unimplemented_v2(
unimplemented(
gb_type="Tensor.numpy() with trace_numpy=False",
context=f"call_method {self} numpy",
explanation="`Tensor.numpy()` was called, but the `trace_numpy` "
@ -921,7 +921,7 @@ class TensorVariable(VariableTracker):
],
)
if not np:
unimplemented_v2(
unimplemented(
gb_type="Tensor.numpy() without NumPy installed",
context=f"call_method {self} numpy",
explanation="`Tensor.numpy()` was called, but the NumPy library "
@ -970,7 +970,7 @@ class TensorVariable(VariableTracker):
torch.int32,
torch.int64,
]:
unimplemented_v2(
unimplemented(
gb_type="Tensor.tolist() with non-integer tensor",
context=f"call_method {self} to_list",
explanation="Dynamo currently does not support tracing "
@ -997,7 +997,7 @@ class TensorVariable(VariableTracker):
return VariableTracker.build(tx, out)

def method_backward(self, *args, **kwargs):
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.backward() call",
context=f"call_method {self} backward {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.backward()`.",
@ -1014,7 +1014,7 @@ class TensorVariable(VariableTracker):
# We enable capture_scalar_outputs when full_graph=True by default.
if not tx.one_graph and not config.capture_scalar_outputs:
self._warn_capture_scalar_outputs()
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.item() call with capture_scalar_outputs=False",
context=f"call_method {self} item {args} {kwargs}",
explanation="Dynamo does not support tracing `Tensor.item()` "
@ -1147,7 +1147,7 @@ class TensorVariable(VariableTracker):
return ConstantVariable.create(None)

def method_resize_(self, *args, **kwargs):
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.resize_() call",
context=f"call_method {self} resize_ {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.resize_()`.",
@ -1155,7 +1155,7 @@ class TensorVariable(VariableTracker):
)

def method_resize_as_(self, *args, **kwargs):
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.resize_as_() call",
context=f"call_method {self} resize_as_ {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.resize_as_()`.",
@ -1163,7 +1163,7 @@ class TensorVariable(VariableTracker):
)

def method_sparse_resize_(self, *args, **kwargs):
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.sparse_resize_() call",
context=f"call_method {self} sparse_resize_ {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.sparse_resize_()`.",
@ -1171,7 +1171,7 @@ class TensorVariable(VariableTracker):
)

def method_sparse_resize_and_clear_(self, *args, **kwargs):
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.sparse_resize_and_clear_() call",
context=f"call_method {self} sparse_resize_and_clear_ {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.sparse_resize_and_clear_()`.",
@ -1186,7 +1186,7 @@ class TensorVariable(VariableTracker):
# overload and is used by FSDP.
# graph-breaking on aten::set_source_Tensor_storage_offset for now,
# unless we find that we need to make it work.
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.set_() call",
context=f"call_method {self} set_ {args} {kwargs}",
explanation="Dynamo currently does not support tracing `Tensor.set_()` "
@ -1318,7 +1318,7 @@ class TensorVariable(VariableTracker):
# would have no recourse - their forward traces just fine, but will fail at backwards unless
# compiled_autograd is enabled. If compiled_autograd fails (there are a lot of failures today)
# then they have nothing they can do except disable compile.
unimplemented_v2(
unimplemented(
gb_type="Compilation of intermediate hooks requires compiled autograd",
context=f"var_getattr {self} {name}",
explanation="Dynamo must be in compiled_autograd to register hooks.",
@ -1368,7 +1368,7 @@ class TensorVariable(VariableTracker):
requires_grad = requires_grad.as_python_constant()

if self.as_proxy().node.meta["example_value"].requires_grad != requires_grad:
unimplemented_v2(
unimplemented(
gb_type="Unsupported Tensor.requires_grad_() call",
context=f"call_method {self} requires_grad_",
explanation="Dynamo does not support changes to a Tensor's "
@ -1560,14 +1560,14 @@ class NumpyNdarrayVariable(TensorVariable):
return ConstantVariable.create(int(r))
return insert_into_graph()
elif name in ["base", "flags", "dtype"]:
unimplemented_v2(
unimplemented(
gb_type="Unsupported ndarray attribute access",
context=f"var_getattr {self} {name}",
explanation=f"Dynamo currently does not support tracing `ndarray.{name}`.",
hints=[],
)
elif name == "__version__":
unimplemented_v2(
unimplemented(
gb_type="Unsupported ndarray.__version__ access",
context=f"var_getattr {self} {name}",
explanation=f"Dynamo currently does not support tracing `ndarray.{name}`.",
@ -1591,7 +1591,7 @@ class NumpyNdarrayVariable(TensorVariable):
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from ..exc import unimplemented_v2
from ..exc import unimplemented
from ..utils import numpy_method_wrapper

args, kwargs = self.patch_args(name, args, kwargs)
@ -1611,7 +1611,7 @@ class NumpyNdarrayVariable(TensorVariable):
isinstance(dtype_arg, BuiltinVariable) and dtype_arg.fn is object
)
if is_object_str or is_object_type:
unimplemented_v2(
unimplemented(
gb_type="ndarray.astype(object)",
context=f"call_method {self} {name} {args} {kwargs}",
explanation=(
@ -1625,7 +1625,7 @@ class NumpyNdarrayVariable(TensorVariable):
# delegate back to TensorVariable
return super().call_method(tx, name, args, kwargs)
if name in ("tostring", "tobytes", "__delattr__"):
unimplemented_v2(
unimplemented(
gb_type="Unsupported ndarray method call",
context=f"call_method {self} {name} {args} {kwargs}",
explanation=f"`ndarray.{name}()` is not modelled in `torch._numpy`.",
@ -1713,7 +1713,7 @@ class TensorSubclassVariable(UserDefinedClassVariable):
tx, data, self.value, self.source
)
else:
unimplemented_v2(
unimplemented(
gb_type="Calling subclass default constructor with more than tensor argument",
context=f"{self.value}(args={args}, kwargs={kwargs})",
explanation="Currently not supported",

@ -52,7 +52,7 @@ from ..create_parameter_op import (
tracable_create_parameter,
)
from ..device_interface import get_registered_device_interfaces
from ..exc import raise_observed_exception, unimplemented_v2
from ..exc import raise_observed_exception, unimplemented
from ..guards import GuardBuilder, install_guard
from ..source import (
AttrSource,
@ -605,7 +605,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):

@register(torch.is_inference_mode_enabled)
def handle_is_inference_mode_enabled(self, tx: "InstructionTranslator"):
unimplemented_v2(
unimplemented(
gb_type="Encountered torch.is_inference_mode_enabled during tracing",
context="",
explanation="torch.is_inference_mode_enabled() is not supported",
@ -654,7 +654,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
# torch.compile is a no-op in dynamo
return args[0]

unimplemented_v2(
unimplemented(
gb_type="torch.compile call with > 1 args",
context=f"args={args}, kwargs={kwargs}",
explanation="Attempted to call `torch.compile` with > 1 args. Dynamo does not support this.",
@ -690,7 +690,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
):
# pyrefly: ignore [missing-attribute]
if warn_only and warn_only.as_python_constant():
unimplemented_v2(
unimplemented(
gb_type="Attempted to use torch.use_deterministic_algorithms(warn_only=True)",
context=f"mode={mode}, warn_only={warn_only}",
explanation="Dynamo does not support this.",
@ -749,7 +749,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
@register(torch.from_numpy)
def handle_from_numpy(self, tx: "InstructionTranslator", *args):
if not config.trace_numpy:
unimplemented_v2(
unimplemented(
gb_type="call `torch.from_numpy` with `torch._dynamo.config.trace_numpy=False`",
context=f"trace_numpy={config.trace_numpy}",
explanation=(
@ -761,7 +761,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
],
)
if not np:
unimplemented_v2(
unimplemented(
gb_type="`torch.from_numpy` with NumPy unavailable",
context="",
explanation="Attempted to call `torch.numpy` but NumPy could not be imported.",
@ -982,7 +982,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
from .lists import BaseListVariable

if layout and layout.as_python_constant() == torch.strided:
unimplemented_v2(
unimplemented(
gb_type="Attempted to use strided NestedTensor",
context=f"layout={layout}",
explanation="Dynamo does not support this.",
@ -992,7 +992,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
],
)
if not isinstance(tensor_list, BaseListVariable):
unimplemented_v2(
unimplemented(
gb_type="Attempted to use `nested_tensor` with non-list input",
context=f"tensor_list={tensor_list}",
explanation="Dynamo does not support this.",
@ -1009,7 +1009,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
and args[1].is_python_constant()
and args[1].as_python_constant() == -1
):
unimplemented_v2(
unimplemented(
gb_type="Attempted to use `torch.nn.functional.one_hot` with data-dependent output shape",
context=f"args={args}, kwargs={kwargs}",
explanation="Dynamo does not support this.",
@ -1187,7 +1187,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
):
assert not args and not kwargs
if not tx.symbolic_torch_function_state.mode_stack:
unimplemented_v2(
unimplemented(
gb_type="Attempted to pop from empty torch function mode stack",
context="",
explanation="Called `torch._C._pop_torch_function_stack` when torch function mode stack is empty.",
@ -1236,7 +1236,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
@register(torch.get_device_module.__wrapped__)
def handle_get_device_module(self, tx, *args, **kwargs):
if len(args) + len(kwargs) > 1 or (kwargs and "device" not in kwargs):
unimplemented_v2(
unimplemented(
gb_type="improper torch.get_device_module arguments",
context=f"args={args}, kwargs={kwargs}",
explanation="torch.get_device_module accepts 1 optional argument `device`",
@ -1253,7 +1253,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
device = None
module = torch.get_device_module(device)
except Exception as e:
unimplemented_v2(
unimplemented(
gb_type="bad device argument to torch.get_device_module",
context=f"args={args}, kwargs={kwargs}",
explanation="Expected valid string/torch.device argument ('cpu', 'cuda', etc.)",
@ -1278,7 +1278,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
@register(torch.accelerator.current_stream, torch.cuda.current_stream)
def handle_current_stream(self, tx: "InstructionTranslator", *args, **kwargs):
if len(args) + len(kwargs) > 1 or (kwargs and "device" not in kwargs):
unimplemented_v2(
unimplemented(
gb_type="unsupported arguments to torch.accelerator.current_stream",
context=f"args={args}, kwargs={kwargs}",
explanation="torch.accelerator.current_stream accepts one optional argument `device`",
@ -1296,7 +1296,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):

return tx.symbolic_stream_state.cur_stream(device)
except Exception as e:
unimplemented_v2(
unimplemented(
gb_type="bad device argument to torch.accelerator.current_stream",
context=f"args={args}, kwargs={kwargs}",
explanation="Expected valid string/torch.device argument ('cpu', 'cuda', etc.)",
@ -1360,7 +1360,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
not isinstance(message_vt, NestedUserFunctionVariable)
or message_vt.has_closure()
):
unimplemented_v2(
unimplemented(
gb_type="Can't extract message from torch._check()",
context=str(message_vt),
explanation=(
@ -1446,7 +1446,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
arg_type = flat_arg_vt.python_type()
if not is_graphable_type(arg_type):
type_name = flat_arg_vt.python_type().__qualname__
unimplemented_v2(
unimplemented(
gb_type="Invalid input type for nonstrict_trace-ed function",
context=f"Encountered input of type <{type_name}>.",
explanation=(
@ -1480,7 +1480,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
import torch.utils._pytree as pytree

if pytree.is_constant_class(typ):
unimplemented_v2(
unimplemented(
gb_type="Input marked with `pytree.register_constant` constructed in the `torch.compile` region",
context=f"Input={input_spec_vt}, offending type <{type_name}>.",
explanation=(
@ -1495,7 +1495,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
from_exc=e,
)
else:
unimplemented_v2(
unimplemented(
gb_type="Invalid use of pytree_flatten with nonstrict_trace-ed function",
context=f"Input={input_spec_vt}, offending type <{type_name}>.",
explanation=(
@ -1560,7 +1560,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
# From `flat_apply` assert on output type.
torch._dynamo.exc.TorchRuntimeError,
):
unimplemented_v2(
unimplemented(
gb_type="Unsupported output type for nonstrict_trace-ed function",
context=f"Function: {fn.__name__}",
explanation=(
@ -1612,7 +1612,7 @@ class TorchInGraphFunctionVariable(BaseTorchVariable):
and torch.Tag.inplace_view
in getattr(fn, fn.overloads()[0]).tags
):
unimplemented_v2(
unimplemented(
gb_type="Inplace op on input tensor",
context="",
explanation=f"Attempted to trace an inplace view op on input tensor {typestr(self.value)}.",
@ -1647,7 +1647,7 @@ To support this behavior, we need to allow const-propping tensors that store sym
For now, dynamo will explicitly graph break when it encounters user code with this behavior.
"""
log.warning(msg)
unimplemented_v2(
unimplemented(
gb_type="Attempted to call torch in-graph function on only torch.SymInt arguments",
context=f"fn={self.value}, args={args}, kwargs={kwargs}",
explanation=(
@ -1715,7 +1715,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
and "requires_grad" in kwargs
and kwargs["requires_grad"].as_python_constant()
):
unimplemented_v2(
unimplemented(
gb_type="Attempted to use tensor creation function with requires_grad=True",
context=f"fn={self.value}, args={args}, kwargs={kwargs}",
explanation="Dynamo does not support this.",
@ -1755,7 +1755,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
if saved_out_shape != fake_out.shape:
# It's hard to get out variants with resizing on graph inputs work
# properly across dynamo/aot/inductor, just fall back.
unimplemented_v2(
unimplemented(
gb_type="Shape mismatch with out= list of tensor variants",
context=f"fn={self.value}, args={args}, kwargs={kwargs}",
explanation=(
@ -1769,7 +1769,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
if not torch._prims_common.is_contiguous(fake_out):
# It's difficult to handle strides correctly in functionalization
# when calling an out= op with a non-contiguous out argument
unimplemented_v2(
unimplemented(
gb_type="Attempted to call op with non-contiguous `out=` list of tensors",
context=f"self.value={self.value}, args={args}, kwargs={kwargs}",
explanation="Dynamo does not support this.",
@ -1784,7 +1784,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
if saved_out_shapes != fake_out.shape:
# It's hard to get out variants with resizing on graph inputs work
# properly across dynamo/aot/inductor, just fall back.
unimplemented_v2(
unimplemented(
gb_type="Shape mismatch with out= tensor variant",
context=f"fn={self.value}, args={args}, kwargs={kwargs}",
explanation=(
@ -1798,7 +1798,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
if not torch._prims_common.is_contiguous(fake_out):
# It's difficult to handle strides correctly in functionalization
# when calling an out= op with a non-contiguous out argument
unimplemented_v2(
unimplemented(
gb_type="Attempted to call op with non-contiguous `out=` tensor",
context=f"self.value={self.value}, args={args}, kwargs={kwargs}",
explanation="Dynamo does not support this.",
@ -1829,7 +1829,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
torch.nn.modules.utils._ntuple(count)(value.as_python_constant()),
)
else:
unimplemented_v2(
unimplemented(
gb_type="Attempted to use `torch.nn.modules.utils._ntuple` with unsupported argument type",
context=f"value={value}",
explanation="Dynamo does not support this.",
@ -1847,7 +1847,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
def call_nn_parameter(cls, tx, data=None, requires_grad=True):
"""A call to torch.nn.Parameter() gets lifted to before the graph"""
if tx.export:
unimplemented_v2(
unimplemented(
gb_type="Attempted to use `torch.nn.Parameter()` with export",
context="",
explanation="Dynamo does not support this.",
@ -1861,7 +1861,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
try:
requires_grad = requires_grad.as_python_constant()
except NotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="non-constant `requires_grad` argument to `torch.nn.Parameter`",
context=f"requires_grad={requires_grad}",
explanation="Dynamo does not support this.",
@ -1872,7 +1872,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
)

if not isinstance(data, variables.TensorVariable):
unimplemented_v2(
unimplemented(
gb_type="`torch.nn.Parameter()` with unsupported data type",
context=f"data={data}",
explanation="Called `torch.nn.Parameter()` with non-Tensor argument.",
@ -1889,7 +1889,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th

if config.graph_break_on_nn_param_ctor:
# Need user to manually move since we cannot
unimplemented_v2(
unimplemented(
gb_type="Attempted to use `torch.nn.Parameter()` constructor with Dynamo",
context="",
explanation="Dynamo does not support this",
@ -1906,7 +1906,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
TensorWithTFOverrideVariable,
# pyrefly: ignore [missing-attribute]
) or is_traceable_wrapper_subclass_type(data.class_type):
unimplemented_v2(
unimplemented(
gb_type="Attempted to use torch.nn.Parameter constructor with tensor subclass",
context=str(data),
explanation="Dynamo does not support this.",
@ -1916,7 +1916,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
)

if not can_convert_to_tracable_parameter():
unimplemented_v2(
unimplemented(
gb_type="`torch.nn.Parameter`: cannot convert to traceable tracable",
context="",
explanation="convert_tracable_parameter is set to False.",
@ -1934,7 +1934,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th
# pyrefly: ignore [missing-attribute]
device = data.var_getattr(tx, "device").as_python_constant()
except NotImplementedError as e:
unimplemented_v2(
unimplemented(
gb_type="`torch.nn.Parameter` with non-constant Tensor attributes",
context=f"data={data}",
explanation="Dynamo does not support this.",
@ -2000,7 +2000,7 @@ For now, dynamo will explicitly graph break when it encounters user code with th

data_node = data.as_proxy().node
if data_node.op not in ("placeholder", "get_attr"):
unimplemented_v2(
unimplemented(
gb_type="Unexpected type of data placeholder op for parameter construction",
context=f"data_node.op={data_node.op}",
explanation="Data node op should be placeholder or get_attr.",

@ -44,7 +44,7 @@ from torch.overrides import (
from torch.utils._device import DeviceContext

from .. import graph_break_hints
from ..exc import unimplemented_v2
from ..exc import unimplemented
from ..guards import GuardBuilder, install_guard
from ..polyfills import NoEnterTorchFunctionMode
from ..source import AttrSource, GlobalSource, TorchFunctionModeStackSource, TypeSource
@ -558,7 +558,7 @@ def dispatch_torch_function(
if not (isinstance(res, ConstantVariable) and res.value is NotImplemented):
return res

unimplemented_v2(
unimplemented(
gb_type="All __torch_function__ overrides returned NotImplemented due to TypeError from user code",
context=f"{fn=}, {args=}, {kwargs=}",
explanation=f"All __torch_function__ overrides for for function {fn} returned NotImplemented",
@ -626,7 +626,7 @@ class TensorWithTFOverrideVariable(TensorVariable):
# I think only `_base` is breaking because we aren't modelling view
# relationship perfectly in some scenarios.
if name in banned_attrs:
unimplemented_v2(
unimplemented(
gb_type="Unsupported tensor subclass attribute access",
context=f"{name}",
explanation="`torch.compile` currently can't trace this",
@ -686,7 +686,7 @@ class TensorWithTFOverrideVariable(TensorVariable):
)

elif attr_is_overridden:
unimplemented_v2(
unimplemented(
gb_type="Unsupported tensor subclass overridden attribute access",
context=f"{name}",
explanation="`torch.compile` only support tracing certain types of overridden tensor subclass attributes",
@ -734,7 +734,7 @@ class TensorWithTFOverrideVariable(TensorVariable):
import torch

if _is_attr_overridden(tx, self, name):
unimplemented_v2(
unimplemented(
gb_type="Tensor subclass overridden method call",
context=f"{name}",
explanation="`torch.compile` currently can't trace this",

@ -56,7 +56,7 @@ from ..exc import (
ObservedTypeError,
ObservedUserStopIteration,
raise_observed_exception,
unimplemented_v2,
unimplemented,
)
from ..graph_bytecode_inputs import get_external_object_by_index
from ..guards import GuardBuilder, install_guard
@ -459,7 +459,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
args[1:],
)
elif name == "__setattr__" and self.ban_mutation:
unimplemented_v2(
unimplemented(
gb_type="Class attribute mutation when the __dict__ was already materialized",
context=str(self.value),
explanation="Dyanmo does not support tracing mutations on a class when its __dict__ is materialized",
@ -515,7 +515,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
)
elif is_typeddict(self.value):
if self.value.__optional_keys__:
unimplemented_v2(
unimplemented(
gb_type="TypedDict with optional keys",
context=str(self.value),
explanation="Dyanmo does not support tracing TypedDict with optional keys",
@ -534,7 +534,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
try:
bound_args = inspect.signature(deque_signature).bind(*args, **kwargs)
except TypeError as e:
unimplemented_v2(
unimplemented(
gb_type="collections.deque() with bad arguments",
context=f"args={args}, kwargs={kwargs}",
explanation="Detected call to collections.deque() with bad arguments.",
@ -549,7 +549,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
if not bound_args.arguments["iterable"].has_force_unpack_var_sequence(
tx
):
unimplemented_v2(
unimplemented(
gb_type="collections.deque() with bad iterable argument",
context=f"args={args}, kwargs={kwargs}",
explanation="Call to collections.deque() has an iterable argument that Dynamo cannot "
@ -578,7 +578,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
return variables.WeakRefVariable(args[0], callback)
elif self.value is functools.partial:
if not args:
unimplemented_v2(
unimplemented(
gb_type="missing args to functools.partial",
context="",
explanation="functools.partial requires at least one argument",
@ -636,7 +636,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
):
# We are not changing the behavior of Dynamo as these function were
# already ignored on trace_rules.py before #136033 landed
unimplemented_v2(
unimplemented(
gb_type="unsupported contextlib.* API",
context=f"{self.value}",
explanation=f"{self.value} not supported. This may be due to its use of "
@ -651,7 +651,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
args[0], (BaseUserFunctionVariable, TorchCtxManagerClassVariable)
):
if not torch._dynamo.config.enable_trace_contextlib:
unimplemented_v2(
unimplemented(
gb_type="attempted to trace contextlib.contextmanager",
context=f"args={args}",
explanation="Tracing contextlib.contextmanager is disabled.",
@ -1115,7 +1115,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
if torch._dynamo.config.enable_faithful_generator_behavior and isinstance(
self.value, types.GeneratorType
):
unimplemented_v2(
unimplemented(
gb_type="call_method on generator",
context=f"object={self.value}, method={name}, args={args}, kwargs={kwargs}",
explanation="Detected a method call to a user-defined generator object. "
@ -1154,7 +1154,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
try:
name = name.as_python_constant()
except NotImplementedError:
unimplemented_v2(
unimplemented(
gb_type="non-const setattr name on user-defined object",
context=f"object={self}, name={name}, value={value}",
explanation="Detected a call to `setattr` of a user-defined object with a non-constant name.",
@ -1280,7 +1280,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
).call_function(tx, [var], kwargs)

if self.source is None:
unimplemented_v2(
unimplemented(
gb_type="attempted to call sourceless user-defined object as a method",
context=f"object={self.value}, function={func}, args={args}, kwargs={kwargs}",
explanation="Dynamo does not support this.",
@ -1410,7 +1410,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
)
return out_source

unimplemented_v2(
unimplemented(
gb_type="could not find name in object's mro",
context=f"name={name}, object type={type(self.value)}, mro={type(self.value).__mro__}",
explanation=f"Could not find name `{name}` in mro {type(self.value).__mro__}",
@ -1506,7 +1506,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
return out

elif getattr_fn is not None:
unimplemented_v2(
unimplemented(
gb_type="User-defined object with non-function __getattr__",
context=f"object={self.value}, name={name}, getattr_fn={getattr_fn}",
explanation=f"Found a non-function __getattr__ {getattr_fn} from a user-defined object {self.value} "
@ -1632,7 +1632,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
if isinstance(subobj, types.MethodType):
if dynamic_subobj.__self__ is not self.value:
if not isinstance(dynamic_subobj.__func__, types.FunctionType):
unimplemented_v2(
unimplemented(
gb_type="User-defined object method with non-function __func__",
context=f"object={self.value}, name={name}, method={dynamic_subobj}, "
f"method.__self__={dynamic_subobj.__self__}, method.__func__={dynamic_subobj.__func__}",

@ -1413,10 +1413,10 @@ class MetaConverter(Generic[_TensorT]):
# TODO: Handle this better in Dynamo?
# There are checks there now, but this can still be triggered by a dense
# tensor graph input that is a view of a strided NT.
from torch._dynamo.exc import unimplemented_v2
from torch._dynamo.exc import unimplemented

# NOTE this graph break will NOT be present in Dynamo's graph break registry
unimplemented_v2(
unimplemented(
gb_type="attempted to apply meta conversion to strided nested tensor",
context=str(t),
explanation="This is not supported.",
@ -1454,9 +1454,9 @@ class MetaConverter(Generic[_TensorT]):
r = self._backward_error(r)
elif t.is_functorch_wrapped:
if t.is_view:
from torch._dynamo.exc import unimplemented_v2
from torch._dynamo.exc import unimplemented

unimplemented_v2(
unimplemented(
gb_type="attempted to apply meta conversion to view functorch tensor",
context=str(t),
explanation="This is not supported.",
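The hunks above are one mechanical rename; every call site keeps the keyword-only shape shown throughout the diff. A minimal sketch of that call form, for orientation only (the wrapper `_graph_break_example` and the literal strings are illustrative and not part of this commit):

```python
from torch._dynamo import graph_break_hints
from torch._dynamo.exc import unimplemented  # formerly unimplemented_v2


def _graph_break_example(context: str) -> None:
    # Illustrative only: raises Unsupported with the structured fields
    # (gb_type, context, explanation, hints) used by the call sites above.
    unimplemented(
        gb_type="Example graph break",
        context=context,
        explanation="Dynamo does not support tracing this construct.",
        hints=[*graph_break_hints.SUPPORTABLE],
    )
```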