Deprecate torch._utils.is_compiling() and torch._dynamo.external_utils.is_compiling() (#127690)

This PR is split from #126898.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127690
Approved by: https://github.com/Skylion007, https://github.com/malfet
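For context, a minimal sketch of the migration this commit performs across callers: the private helpers are replaced by the public torch.compiler.is_compiling(). The branch bodies below are illustrative placeholders, not code from this commit.

import torch

# Deprecated private helpers (what this commit moves away from):
#   torch._utils.is_compiling()
#   torch._dynamo.external_utils.is_compiling()
# Public replacement used throughout the diff below:
if torch.compiler.is_compiling():
    pass  # compiled path: the compiler handles cudagraph checks itself
else:
    pass  # eager path: run the explicit runtime assertions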
Committed by: PyTorch MergeBot
Parent: 159d508f03
Commit: 0e7e61f7ce
@@ -276,7 +276,7 @@ def _single_tensor_radam(
         step_t = state_steps[i]

         # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
-        if not torch._utils.is_compiling() and capturable:
+        if not torch.compiler.is_compiling() and capturable:
             capturable_supported_devices = _get_capturable_supported_devices()
             assert (
                 param.device.type == step_t.device.type
@@ -374,7 +374,7 @@ def _multi_tensor_radam(
     assert not differentiable, "_foreach ops don't support autograd"

     # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
-    if not torch._utils.is_compiling() and capturable:
+    if not torch.compiler.is_compiling() and capturable:
         capturable_supported_devices = _get_capturable_supported_devices(
             supports_xla=False
         )
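The guard shared by the two hunks above, as a standalone hedged sketch. The helper name _check_capturable and the hard-coded device tuple are hypothetical stand-ins; the real code queries torch.optim's private _get_capturable_supported_devices().

import torch

def _check_capturable(param: torch.Tensor, step_t: torch.Tensor, capturable: bool) -> None:
    # Under torch.compile, cudagraph-safety checks are delegated to the
    # compiler, so the eager-mode device assertion is skipped entirely.
    if not torch.compiler.is_compiling() and capturable:
        supported_devices = ("cuda", "xla")  # hypothetical; the real list comes from a private helper
        assert (
            param.device.type == step_t.device.type
            and param.device.type in supported_devices
        ), "capturable requires params and state steps to be on a supported device"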
@@ -398,7 +398,7 @@ def _multi_tensor_radam(
         # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
         # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
         # wrapped it once now. The alpha is required to assure we go to the right overload.
-        if not torch._utils.is_compiling() and grouped_state_steps[0].is_cpu:
+        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
             torch._foreach_add_(
                 grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
             )
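A runnable sketch of the optimization the comment in this hunk describes; the variable names are illustrative, not taken from the commit.

import torch

# Three CPU scalar step counters, as an optimizer would hold per parameter.
state_steps = [torch.tensor(10.0) for _ in range(3)]

# Slow path foreach falls back to on CPU: each iteration re-wraps 1 as a Tensor.
#   for t in state_steps:
#       t.add_(1)

# Faster: wrap the scalar once; passing alpha selects the Tensor `other` overload.
torch._foreach_add_(state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
print([t.item() for t in state_steps])  # [11.0, 11.0, 11.0]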