Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Fix invalid f-strings (#164112)
Fixes invalid f-strings detected by `ruff`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164112
Approved by: https://github.com/Skylion007, https://github.com/mlazos
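For context, the bug class being fixed is a string literal that contains `{...}` placeholders but lacks the `f` prefix, so Python keeps the braces verbatim instead of interpolating the named values. A minimal sketch of the difference (the variable name is illustrative, and pointing at ruff's RUF027 "missing-f-string-syntax" rule is an assumption about which check flagged these):

    # illustrative only, not from the patch
    device_type = "cuda"

    # Missing f prefix: the placeholder is emitted literally.
    print("must not have initialized {device_type} context")
    # -> must not have initialized {device_type} context

    # With the prefix, the value is substituted as intended.
    print(f"must not have initialized {device_type} context")
    # -> must not have initialized cuda context

A run such as `ruff check --preview --select RUF027 .` should surface this pattern, assuming that rule is the one enabled for the repository.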
This commit is contained in:
Committed by: PyTorch MergeBot
Parent: 9f27b0c245
Commit: a293206bd5
@@ -2271,7 +2271,7 @@ class LocalRankTest(MultiProcessTestCase):
 if __name__ == "__main__":
     if device_type != "cpu":
         assert not torch.get_device_module()._initialized, (
-            "test_distributed must not have initialized {device_type} context on main process"
+            f"test_distributed must not have initialized {device_type} context on main process"
         )
 
     run_tests()
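One reason a missing prefix like this survives for so long: the message operand of an `assert` is only evaluated when the condition is false, so a test run that never trips the assertion never renders the broken string. A small sketch of that behavior (names are stand-ins, not from the patch):

    initialized = False  # stand-in for torch.get_device_module()._initialized
    device_type = "cuda"

    # Passing assert: the message expression is never evaluated, so the
    # un-interpolated "{device_type}" is never seen in normal runs.
    assert not initialized, (
        "must not have initialized {device_type} context on main process"
    )
    # Only when the condition fails does the message surface, and only an
    # f-string would actually show which device was involved.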
@@ -5244,7 +5244,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
         return torch.contiguous_format
     if memory_format in (torch.contiguous_format, torch.channels_last, torch.channels_last_3d):
         return memory_format
-    raise ValueError("Unable to detect memory format for backend={backend} and memory_format={memory_format}")
+    raise ValueError(f"Unable to detect memory format for backend={backend} and memory_format={memory_format}")
 
 def _get_memory_format(t: torch.Tensor) -> torch.memory_format:
     if t.is_contiguous(memory_format=torch.contiguous_format):
@@ -341,7 +341,7 @@ def helper_for_dump_minify(contents: str) -> None:
 
     except OSError as e:
         log.exception("")
-        raise NotImplementedError("Could not write to {minified_repro_path}") from e
+        raise NotImplementedError(f"Could not write to {minified_repro_path}") from e
 
 
 class AccuracyError(Exception):
@@ -196,14 +196,14 @@ def associative_scan(
     def _validate_input(cfn, lxs, d, r, cm):
         # Basic arguments check
         if not callable(cfn):
-            raise ValueError("Combine_fn must be a callable, but got {cfn}")
+            raise ValueError(f"Combine_fn must be a callable, but got {cfn}")
         if not isinstance(d, int):
             raise ValueError("Dim must be an int, but got " + str(type(d)))
         if not isinstance(r, bool):
             raise RuntimeError("Reverse must be a bool, but got " + str(type(r)))
         if cm not in ["pointwise", "generic"]:
             raise ValueError(
-                "Combine_mode must either 'pointwise' or 'generic', but got {cm}"
+                f"Combine_mode must either 'pointwise' or 'generic', but got {cm}"
             )
         if cm == "pointwise" and not all(l.device.type == "cuda" for l in lxs):
             raise ValueError(
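Note that the neighboring messages built with `"... " + str(type(d))` already interpolate correctly and are left alone; only the brace-style messages needed the `f` prefix. For comparison, an equivalent f-string form of one of the untouched messages (hypothetical, not part of this change):

    d = "not-an-int"
    # Existing style in the file (already correct):
    msg_concat = "Dim must be an int, but got " + str(type(d))
    # Equivalent f-string form, shown only for comparison:
    msg_fstring = f"Dim must be an int, but got {type(d)}"
    assert msg_concat == msg_fstring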
@@ -153,7 +153,7 @@ def scan(
     def _validate_input(cfn, lxs, linit, d, r):
         # Basic arguments check
         if not callable(cfn):
-            raise RuntimeError("Combine_fn must be a callable, but got {cfn}")
+            raise RuntimeError(f"Combine_fn must be a callable, but got {cfn}")
         if not isinstance(d, int):
             raise RuntimeError("Dim must be an int, but got " + str(type(d)))
         if not isinstance(r, bool):
@@ -3613,7 +3613,7 @@ def istft(
             n_fft // 2 + 1 == fft_size,
             lambda: (
                 "istft expected the frequency dimension (3rd to the last) of the input tensor "
-                + "to match n_fft / 2 + 1 when onesided=True, but got {fft_size}"
+                + f"to match n_fft / 2 + 1 when onesided=True, but got {fft_size}"
             ),
         )
     else:
@@ -3621,7 +3621,7 @@ def istft(
             n_fft == fft_size,
             lambda: (
                 "istft expected the frequency dimension (3rd to the last) of the input tensor "
-                + "to match n_fft when onesided=False, but got {fft_size}",
+                + f"to match n_fft when onesided=False, but got {fft_size}",
             ),
         )
 
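In the two istft messages the literal is built by concatenation, and only the segment that actually contains the placeholder needs the `f` prefix; the plain leading segment can stay a normal string. A small sketch (the fft_size value is made up for illustration):

    fft_size = 201
    msg = (
        "istft expected the frequency dimension (3rd to the last) of the input tensor "
        + f"to match n_fft / 2 + 1 when onesided=True, but got {fft_size}"
    )
    print(msg)  # ... but got 201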
@@ -1952,7 +1952,7 @@ class _PipelineScheduleRuntime(PipelineScheduleMulti):
                     stage_idx,
                     mb_index,
                 ) not in bwd_recv_ops, (
-                    "Recv twice for {stage_idx=} {mb_index=} without executing backward"
+                    f"Recv twice for {stage_idx=} {mb_index=} without executing backward"
                 )
                 bwd_recv_ops[(stage_idx, mb_index)] = _batch_p2p(
                     stage.get_bwd_recv_ops(mb_index)
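The pipeline-schedule message relies on the self-documenting `{name=}` specifier (available since Python 3.8), which prints both the expression and its value, and it only does anything inside an actual f-string. A tiny sketch with made-up values:

    stage_idx, mb_index = 3, 7
    print(f"Recv twice for {stage_idx=} {mb_index=} without executing backward")
    # -> Recv twice for stage_idx=3 mb_index=7 without executing backward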
@@ -384,7 +384,7 @@ def _softmax_backward_data(func, *args, **kwargs):
     if is_masked_tensor(grad) and is_masked_tensor(output):
         if not _masks_match(grad, output):
             raise ValueError(
-                "__torch_dispatch__, {func}: expected the masks of grad and output to match"
+                f"__torch_dispatch__, {func}: expected the masks of grad and output to match"
             )
         grad_data = _get_data(grad)
         new_grad_data = torch.ops.aten._masked_softmax_backward(
@@ -434,7 +434,7 @@ def _to_sparse(func, *args, **kwargs):
         args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
     )
     if not torch.is_tensor(args[0]):
-        raise TypeError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
+        raise TypeError(f"__torch_dispatch__, {func}: expected args[0] to be a tensor")
     mt = args[0]
     if not is_masked_tensor(mt):
         mt = MaskedTensor(mt, torch.ones_like(mt, dtype=torch.bool))
@@ -451,7 +451,7 @@ def _to_sparse_csr(func, *args, **kwargs):
         args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
     )
     if not torch.is_tensor(args[0]):
-        raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
+        raise ValueError(f"__torch_dispatch__, {func}: expected args[0] to be a tensor")
     mt = args[0]
     if not is_masked_tensor(mt):
         mt = MaskedTensor(mt, torch.ones_like(mt).bool())
@@ -468,7 +468,7 @@ def _to_dense(func, *args, **kwargs):
         args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0
     )
     if not torch.is_tensor(args[0]):
-        raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor")
+        raise ValueError(f"__torch_dispatch__, {func}: expected args[0] to be a tensor")
     mt = args[0]
     if not is_masked_tensor(mt):
         mt = MaskedTensor(mt, torch.ones_like(mt).bool())
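In the masked-tensor handlers, `func` is the operator being dispatched through `__torch_dispatch__`; the helper call two lines above already formats it with an f-string, so the fix simply makes the error path consistent. A rough sketch of the intended message (the func value and its printed form are illustrative, not taken from the patch):

    func = "aten._to_dense.default"  # stand-in for the dispatched op object
    print(f"__torch_dispatch__, {func}: expected args[0] to be a tensor")
    # -> __torch_dispatch__, aten._to_dense.default: expected args[0] to be a tensor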