dynamo configs to torch.compiler (#163517)
Moving some dynamo configs to torch.compiler

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163517
Approved by: https://github.com/williamwen42, https://github.com/anijain2305
Co-authored-by: Svetlana Karslioglu <svekars@meta.com>
committed by: PyTorch MergeBot
parent: bbb902c8dd
commit: c467e59cb0
@@ -20,6 +20,21 @@ from torch.utils._config_module import Config, install_config_module
__all__ = [
    "job_id",
    "dynamic_shapes",
    "assume_static_by_default",
    "automatic_dynamic_shapes",
    "recompile_limit",
    "accumulated_recompile_limit",
    "verbose",
    "capture_scalar_outputs",
    "capture_dynamic_output_shape_ops",
    "log_file_name",
    "fail_on_recompile_limit_hit",
    "allow_unspec_int_on_nn_module",
    "skip_tensor_guards_with_matching_dict_tags",
    "enable_cpp_symbolic_shape_guards",
    "wrap_top_frame",
    "reorderable_logging_functions",
]
@@ -121,4 +136,145 @@ any cudagraph.
"""


# Cross-cutting configuration options that affect the entire compilation pipeline

dynamic_shapes: bool = Config(alias="torch._dynamo.config.dynamic_shapes")
"""
Controls whether the compilation pipeline supports dynamic tensor shapes.
When enabled, the compiler can handle tensors with varying dimensions across
different invocations. This is a cross-cutting setting that affects shape
inference, guard generation, and code generation across the entire compilation
stack.
"""

assume_static_by_default: bool = Config(
    alias="torch._dynamo.config.assume_static_by_default"
)
"""
When enabled, all tensor dimensions are assumed to be static unless explicitly
marked as dynamic or detected as changing. This compilation-wide behavior affects
how the entire stack handles shape specialization and can improve performance
for static workloads.
"""

automatic_dynamic_shapes: bool = Config(
    alias="torch._dynamo.config.automatic_dynamic_shapes"
)
"""
Enables automatic detection and handling of dynamic shapes. When a tensor's
shape changes between compilations, the system automatically marks those
dimensions as dynamic rather than requiring manual specification. This
cross-cutting optimization improves the user experience by reducing recompilations.
"""

recompile_limit: int = Config(alias="torch._dynamo.config.recompile_limit")
"""
Maximum number of recompilations allowed for a single function before falling
back to eager execution. This compilation performance control prevents excessive
recompilation overhead that can degrade overall performance.
"""

accumulated_recompile_limit: int = Config(
    alias="torch._dynamo.config.accumulated_recompile_limit"
)
"""
Global limit on total recompilations across all compiled functions to prevent
runaway recompilation scenarios. This safeguard protects against compilation
performance issues that could affect the entire program.
"""

verbose: bool = Config(alias="torch._dynamo.config.verbose")
"""
Enables verbose debugging output for Dynamo. When enabled, provides detailed
information about Dynamo's compilation decisions, optimizations, and potential
issues.
"""


# TorchDynamo-specific configuration options

capture_scalar_outputs: bool = Config(
    alias="torch._dynamo.config.capture_scalar_outputs"
)
"""
Controls whether TorchDynamo captures operations that return scalar values (like .item())
into the FX graph. When disabled, these operations cause graph breaks. This is a
TorchDynamo-specific tracing behavior that affects how the tracer handles
scalar-returning operations.
"""

capture_dynamic_output_shape_ops: bool = Config(
    alias="torch._dynamo.config.capture_dynamic_output_shape_ops"
)
"""
Controls whether TorchDynamo captures operations with dynamic output shapes (like
nonzero, unique) into the FX graph. When disabled, these operations cause graph breaks.
This is a TorchDynamo-specific setting for handling operations with unpredictable
output shapes during tracing.
"""

log_file_name: Optional[str] = Config(alias="torch._dynamo.config.log_file_name")
"""
Specifies a file path for TorchDynamo-specific logging output. When set, internal
TorchDynamo debug information is written to this file rather than stdout. This is
useful for debugging TorchDynamo's internal tracing behavior.
"""

fail_on_recompile_limit_hit: bool = Config(
    alias="torch._dynamo.config.fail_on_recompile_limit_hit"
)
"""
Raises a hard error when recompile limits are exceeded instead of falling back
to eager execution. This is useful for detecting excessive recompilation in
performance-critical deployments where you want to ensure compilation overhead
is kept under control.
"""

allow_unspec_int_on_nn_module: bool = Config(
    alias="torch._dynamo.config.allow_unspec_int_on_nn_module"
)
"""
Allows integer attributes of nn.Module instances to be unspecialized through
the dynamic shape mechanism. By default, TorchDynamo specializes on all integer
module attributes, but this can cause excessive recompilation when integers
like step counters change frequently.
"""

skip_tensor_guards_with_matching_dict_tags: bool = Config(
    alias="torch._dynamo.config.skip_tensor_guards_with_matching_dict_tags"
)
"""
Optimizes guard generation by treating tensors as immutable when they are
dictionary values with consistent dictionary tags across invocations. This
reduces guard overhead for tensors stored in persistent data structures.
"""

enable_cpp_symbolic_shape_guards: bool = Config(
    alias="torch._dynamo.config.enable_cpp_symbolic_shape_guards"
)
"""
Uses C++ implementation for symbolic shape guard evaluation to improve performance.
The C++ guard manager can significantly speed up guard checking for symbolic shapes
in shape-polymorphic compilations.
"""

wrap_top_frame: bool = Config(alias="torch._dynamo.config.wrap_top_frame")
"""
Wraps the top-level decorated function/module in a frame wrapper to ensure
nn.Module hooks are compiled within the same frame as the main function. This
improves compilation coverage for models that rely on hooks.
"""

reorderable_logging_functions: set = Config(
    alias="torch._dynamo.config.reorderable_logging_functions"
)
"""
A set of logging functions that can be reordered to execute after the compiled
portion of the graph, allowing larger graphs to be captured. Functions in this
set will have their execution deferred to avoid graph breaks, though this may
affect the timing of log output. In particular, mutated values will not be logged
at the right time, leading to incorrect logging.
"""


install_config_module(sys.modules[__name__])
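
A minimal usage sketch of the aliased settings added above, assuming the module is importable as torch.compiler.config and that each Config(alias=...) entry delegates reads and writes to the matching torch._dynamo.config option; the values chosen below are illustrative only:

import torch
import torch.compiler.config

# Assumed: assignments go through the alias, so torch._dynamo.config sees them too.
torch.compiler.config.capture_scalar_outputs = True
torch.compiler.config.recompile_limit = 16

@torch.compile
def f(x):
    # With capture_scalar_outputs enabled, .item() can be traced into the
    # FX graph instead of forcing a graph break.
    return x.sum().item()

print(f(torch.randn(4)))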
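
The recompile-limit settings work together: recompile_limit caps per-function recompilations, and fail_on_recompile_limit_hit turns the usual eager fallback into a hard error. A hedged sketch (the exact exception type is not specified in this diff, so the handler below is deliberately generic):

import torch
import torch.compiler.config

torch.compiler.config.recompile_limit = 2
torch.compiler.config.fail_on_recompile_limit_hit = True

@torch.compile(dynamic=False)  # force static shapes so every new size recompiles
def double(x):
    return x * 2

try:
    for n in range(1, 6):
        double(torch.randn(n))  # sizes 1..5 each trigger a compilation
except Exception as err:
    # With fail_on_recompile_limit_hit=True, exceeding the limit raises here
    # instead of silently falling back to eager execution.
    print(f"recompile limit hit: {err}")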
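
reorderable_logging_functions is the only set-valued option in the group; the intended pattern is to register logging callables so Dynamo can defer them past the compiled region rather than graph-breaking at each call. A sketch under the same aliasing assumption (print and logger.info are used purely as example registrations):

import logging

import torch
import torch.compiler.config

logger = logging.getLogger(__name__)

# Assumed: the alias exposes the same set object as
# torch._dynamo.config.reorderable_logging_functions.
torch.compiler.config.reorderable_logging_functions.add(print)
torch.compiler.config.reorderable_logging_functions.add(logger.info)

@torch.compile
def step(x):
    print("running step")  # deferred until after the compiled graph runs
    return torch.relu(x) + 1

step(torch.randn(8))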