Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-21 05:34:18 +08:00)
Subprocess compile (#146134)
Add a mode to `fx_codegen_and_compile()` to compile in a separate process. This prepares for async compile, where we'll compile and run eager in parallel (and also be able to move the compile phase to a remote computer). Added a test which runs the test_torchinductor tests with subprocess compiling turned on. Pull Request resolved: https://github.com/pytorch/pytorch/pull/146134 Approved by: https://github.com/jamesjwu
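As a rough illustration of the idea only (not this PR's actual implementation, which threads the out-of-process mode through `fx_codegen_and_compile()` inside Inductor), the shape is: serialize the compile job, run it in a worker process, and rebuild the artifact in the parent. All names below (`compile_in_subprocess`, `_compile_worker`) are made up for this sketch:

```python
import marshal
from concurrent.futures import ProcessPoolExecutor


def _compile_worker(source: str) -> bytes:
    # Runs in the child process: compile the source and ship the code
    # object back as bytes (marshal handles code objects; pickle does not).
    code = compile(source, "<generated>", "exec")
    return marshal.dumps(code)


def compile_in_subprocess(source: str, fn_name: str):
    # Parent side: submit the job, block on the result, rebuild the function.
    with ProcessPoolExecutor(max_workers=1) as pool:
        payload = pool.submit(_compile_worker, source).result()
    scope: dict = {}
    exec(marshal.loads(payload), scope)
    return scope[fn_name]


if __name__ == "__main__":
    fn = compile_in_subprocess("def add_one(x):\n    return x + 1\n", "add_one")
    assert fn(41) == 42
```

An async-compile variant, as the commit message anticipates, would hold on to the `Future` and let eager execution proceed until the compiled artifact is first needed.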
commit 07f876e960
parent 8f361c808b
committed by PyTorch MergeBot
@@ -2031,20 +2031,24 @@ class DeterministicGuard:
         self.warn_only = warn_only
         self.fill_uninitialized_memory = fill_uninitialized_memory
 
-    def __enter__(self):
-        self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
-        self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
-        self.fill_uninitialized_memory_restore = torch.utils.deterministic.fill_uninitialized_memory  # type: ignore[attr-defined]
-        torch.use_deterministic_algorithms(
-            self.deterministic,
-            warn_only=self.warn_only)
-        torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory  # type: ignore[attr-defined]
+    @classmethod
+    def _current_state(cls):
+        return cls(
+            torch.are_deterministic_algorithms_enabled(),
+            warn_only=torch.is_deterministic_algorithms_warn_only_enabled(),
+            fill_uninitialized_memory=torch.utils.deterministic.fill_uninitialized_memory,  # type: ignore[attr-defined]
+        )
+
+    def _update(self):
+        torch.use_deterministic_algorithms(self.deterministic, warn_only=self.warn_only)
+        torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory  # type: ignore[attr-defined]
+
+    def __enter__(self):
+        self._restore = self._current_state()
+        self._update()
 
     def __exit__(self, exception_type, exception_value, traceback):
-        torch.use_deterministic_algorithms(
-            self.deterministic_restore,
-            warn_only=self.warn_only_restore)
-        torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory_restore  # type: ignore[attr-defined]
+        self._restore._update()
 
 
 class AlwaysWarnTypedStorageRemoval:
     def __init__(self, always_warn):
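The net effect of the hunk above: `_current_state()` snapshots the global determinism flags into a fresh guard instance, `_update()` applies a guard's flags globally, and `__exit__` now restores by replaying the snapshot, so save and restore share one code path. Usage is unchanged; a minimal sketch (assuming a CPU-only setup where deterministic mode needs no extra environment flags, and noting that `common_utils` is an internal test utility):

```python
import torch
from torch.testing._internal.common_utils import DeterministicGuard

# On __enter__ the guard saves the current global flags via _current_state()
# and applies its own via _update(); on __exit__ it replays the saved snapshot.
with DeterministicGuard(True, warn_only=False):
    assert torch.are_deterministic_algorithms_enabled()
# Here the pre-existing flags are back in effect.
```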