[dynamo] Added cuda and triton versions to dynamo_compile (#141290)

Opening another PR since #141140 was reverted.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/141290
Approved by: https://github.com/masnesral
This commit is contained in:
Jovian Anthony Jaison
2024-11-22 20:04:40 +00:00
committed by PyTorch MergeBot
parent 2a6eaa2e6f
commit 45d62d6fc5
2 changed files with 12 additions and 2 deletions

View File

@@ -201,6 +201,8 @@ class TestDynamoTimed(TestCase):
e.co_filename = None
e.co_firstlineno = None
e.inductor_config = None
e.cuda_version = None
e.triton_version = None
# First event is for the forward. Formatting makes reading diffs
# much easier.
@@ -223,6 +225,7 @@ class TestDynamoTimed(TestCase):
'config_inline_inbuilt_nn_modules': False,
'config_suppress_errors': False,
'cuda_synchronize_time_us': None,
'cuda_version': None,
'distributed_ephemeral_timeout_us': None,
'duration_us': 0,
'dynamo_compile_time_before_restart_us': 0,
@@ -272,7 +275,8 @@ class TestDynamoTimed(TestCase):
'start_time_us': 100,
'structured_logging_overhead_s': 0.0,
'structured_logging_overhead_us': 0,
'triton_compile_time_us': None}""", # noqa: B950
'triton_compile_time_us': None,
'triton_version': None}""", # noqa: B950
)
# Second event is for the backward
@@ -295,6 +299,7 @@ class TestDynamoTimed(TestCase):
'config_inline_inbuilt_nn_modules': None,
'config_suppress_errors': None,
'cuda_synchronize_time_us': None,
'cuda_version': None,
'distributed_ephemeral_timeout_us': None,
'duration_us': 0,
'dynamo_compile_time_before_restart_us': None,
@@ -344,7 +349,8 @@ class TestDynamoTimed(TestCase):
'start_time_us': 100,
'structured_logging_overhead_s': None,
'structured_logging_overhead_us': 0,
'triton_compile_time_us': None}""", # noqa: B950
'triton_compile_time_us': None,
'triton_version': None}""", # noqa: B950
)

View File

@@ -870,6 +870,8 @@ class CompilationMetrics:
inductor_fx_remote_cache_backend_type: Optional[str] = None
inductor_fx_remote_cache_hit_keys: Optional[str] = None
inductor_fx_remote_cache_miss_keys: Optional[str] = None
cuda_version: Optional[str] = None
triton_version: Optional[str] = None
feature_usage: Optional[dict[str, bool]] = None
@@ -987,6 +989,8 @@ def record_compilation_metrics(metrics: Dict[str, Any]):
common_metrics = {
"inductor_config": _scrubbed_inductor_config_for_logging(),
"cuda_version": torch.version.cuda,
"triton_version": triton.__version__ if has_triton() else "",
"inductor_fx_remote_cache_hit_keys": _convert_collection_to_str(
"inductor_fx_remote_cache_hit_keys"
),