Thread deterministic config vars to subproc compilation (#165729)

# Summary

TIL (after way too much insanity) that we do not serialize the full set of config values for subprocess compilation.

I found this while working on Flex-attention determinism: https://github.com/meta-pytorch/attention-gym/pull/168

It might be worth auditing whether any more config values need to be threaded through.
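For context, here is a minimal, self-contained sketch (hypothetical names, not the actual inductor machinery) of why a config value has to be threaded through the serialized kernel metadata: the compile worker is a separate process, so an in-memory override of `torch._inductor.config` made in the parent is invisible there unless it is explicitly carried along in `inductor_meta`.

```python
# Minimal sketch (hypothetical names) of the parent/worker config mismatch.
import multiprocessing as mp

CONFIG = {"deterministic": False}  # stand-in for torch._inductor.config defaults


def compile_kernel(inductor_meta):
    # Runs in the spawned worker: module-level CONFIG here is a fresh copy
    # with default values, so only what arrived in inductor_meta reflects
    # the parent's overrides.
    return inductor_meta.get("deterministic", CONFIG["deterministic"])


if __name__ == "__main__":
    CONFIG["deterministic"] = True  # parent-side override, never seen by the worker

    with mp.get_context("spawn").Pool(1) as pool:
        print(pool.apply(compile_kernel, ({},)))                       # False: flag not threaded
        print(pool.apply(compile_kernel, ({"deterministic": True},)))  # True: flag threaded
```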

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165729
Approved by: https://github.com/shunting314, https://github.com/eellison
Author: drisspg
Date: 2025-10-17 20:26:45 +00:00
Committed by: PyTorch MergeBot
Parent: 543ddbf44c
Commit: de3da77cf7
2 changed files with 2 additions and 1 deletion

@@ -4762,6 +4762,7 @@ class TritonKernel(SIMDKernel[TritonCSEVariable]):
             "spill_threshold": config.triton.spill_threshold,
             "store_cubin": config.triton.store_cubin,
             "deterministic": config.deterministic,
+            "force_filter_reduction_configs": config.test_configs.force_filter_reduction_configs,
         }
         if config.write_are_deterministic_algorithms_enabled:

@@ -2962,7 +2962,7 @@ def filter_reduction_configs_for_determinism(
     def _do_filter_due_to_inductor_config():
         return (
             inductor_meta.get("deterministic", False)
-            or torch._inductor.config.test_configs.force_filter_reduction_configs
+            or inductor_meta.get("force_filter_reduction_configs", False)
         ) or inductor_meta.get("are_deterministic_algorithms_enabled")

     if not _do_filter_due_to_inductor_config() or len(configs) == 1:
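For completeness, a hedged usage sketch (requires a CUDA build of PyTorch with Triton available) of how the flags involved here are typically set on the caller side; after this change they reach `filter_reduction_configs_for_determinism` through `inductor_meta` even when the kernel is compiled in a worker subprocess.

```python
import torch
import torch._inductor.config as inductor_config

# Any one of these should now trigger reduction-config filtering in the
# compiled kernel, since the values travel via inductor_meta (see hunks above).
inductor_config.deterministic = True
# torch.use_deterministic_algorithms(True)
# inductor_config.test_configs.force_filter_reduction_configs = True


@torch.compile
def row_sums(x):
    return x.sum(dim=-1)


print(row_sums(torch.randn(1024, 1024, device="cuda")))
```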