Thread deterministic config vars to subproc compilation (#165729)
# Summary

TIL (after way too much insanity) that we do not serialize the full set of config values for subprocess compilation. I found this while working on Flex-attention determinism: https://github.com/meta-pytorch/attention-gym/pull/168

It might be worth auditing whether any more configs need to be threaded through.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165729
Approved by: https://github.com/shunting314, https://github.com/eellison
Committed by: PyTorch MergeBot
Parent: 543ddbf44c
Commit: de3da77cf7
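As context for the fix, here is a minimal standalone sketch (plain Python, not PyTorch internals; all names hypothetical) of the failure mode the summary describes: a spawn-style worker process re-imports modules fresh, so an in-memory config mutation made in the parent is invisible to the child unless it is explicitly serialized and passed along, which is what threading values through `inductor_meta` accomplishes.

```python
# Illustration only: CONFIG stands in for a module-level config object
# such as torch._inductor.config; none of this is PyTorch code.
import multiprocessing as mp

CONFIG = {"deterministic": False}  # module-level default

def worker(threaded_meta):
    # The child re-imported this module, so it sees the default value,
    # not the parent's mutation:
    print("global config in child:", CONFIG["deterministic"])         # False
    # The value the parent explicitly threaded through survives:
    print("threaded meta in child:", threaded_meta["deterministic"])  # True

if __name__ == "__main__":
    CONFIG["deterministic"] = True  # parent-side mutation; child never sees it
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=worker,
                    args=({"deterministic": CONFIG["deterministic"]},))
    p.start()
    p.join()
```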
```diff
@@ -4762,6 +4762,7 @@ class TritonKernel(SIMDKernel[TritonCSEVariable]):
             "spill_threshold": config.triton.spill_threshold,
             "store_cubin": config.triton.store_cubin,
             "deterministic": config.deterministic,
+            "force_filter_reduction_configs": config.test_configs.force_filter_reduction_configs,
         }

         if config.write_are_deterministic_algorithms_enabled:
```
```diff
@@ -2962,7 +2962,7 @@ def filter_reduction_configs_for_determinism(
     def _do_filter_due_to_inductor_config():
         return (
             inductor_meta.get("deterministic", False)
-            or torch._inductor.config.test_configs.force_filter_reduction_configs
+            or inductor_meta.get("force_filter_reduction_configs", False)
         ) or inductor_meta.get("are_deterministic_algorithms_enabled")

     if not _do_filter_due_to_inductor_config() or len(configs) == 1:
```
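Read on its own, the predicate after this change consults only the serialized `inductor_meta`, so it evaluates identically in the parent process and in a compile subprocess. A hedged restatement of that behavior (illustration, not the real implementation):

```python
# Standalone restatement of the filtering condition, for illustration only.
def should_filter_reduction_configs(inductor_meta: dict) -> bool:
    return bool(
        inductor_meta.get("deterministic", False)
        or inductor_meta.get("force_filter_reduction_configs", False)
        or inductor_meta.get("are_deterministic_algorithms_enabled")
    )

# Any one of the threaded flags triggers filtering; an empty meta does not.
assert should_filter_reduction_configs({"deterministic": True})
assert should_filter_reduction_configs({"force_filter_reduction_configs": True})
assert not should_filter_reduction_configs({})
```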