Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[BE] Use f-string in various Python functions (#44161)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/44161

Reviewed By: seemethere

Differential Revision: D23515874

Pulled By: malfet

fbshipit-source-id: 868cf65aedd58fce943c08f8e079e84e0a36df1f
Committed by: Facebook GitHub Bot
Parent: 28b1360d24
Commit: 0c01f136f3
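The change is mechanical: every `str.format()` call in the touched files is rewritten as a PEP 498 f-string, which interpolates the expressions in place. A minimal sketch of the pattern, with a hypothetical `dll_path` value standing in for the real one:

    dll_path = r"C:\example\bin"  # hypothetical value, for illustration only

    # before: positional .format() call
    old = ' Error adding "{}" to the DLL directories.'.format(dll_path)

    # after: equivalent f-string, evaluated where it is written
    new = f' Error adding "{dll_path}" to the DLL directories.'

    assert old == new

Both forms produce the same string; the f-string simply keeps each value next to the text it fills. The reconstructed hunks follow.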
@@ -91,7 +91,7 @@ if sys.platform == 'win32':
             res = kernel32.AddDllDirectory(dll_path)
             if res is None:
                 err = ctypes.WinError(ctypes.get_last_error())
-                err.strerror += ' Error adding "{}" to the DLL directories.'.format(dll_path)
+                err.strerror += f' Error adding "{dll_path}" to the DLL directories.'
                 raise err

     try:
@@ -112,7 +112,7 @@ if sys.platform == 'win32':
             last_error = ctypes.get_last_error()
             if res is None and last_error != 126:
                 err = ctypes.WinError(last_error)
-                err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
+                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                 raise err
             elif res is not None:
                 is_loaded = True
@@ -123,7 +123,7 @@ if sys.platform == 'win32':
             res = kernel32.LoadLibraryW(dll)
             if res is None:
                 err = ctypes.WinError(ctypes.get_last_error())
-                err.strerror += ' Error loading "{}" or one of its dependencies.'.format(dll)
+                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                 raise err

     kernel32.SetErrorMode(prev_error_mode)
@@ -9,7 +9,7 @@ class _ClassNamespace(types.ModuleType):
     def __getattr__(self, attr):
         proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
         if proxy is None:
-            raise RuntimeError('Class {}.{} not registered!'.format(self.name, attr))
+            raise RuntimeError(f'Class {self.name}.{attr} not registered!')
         return proxy

 class _Classes(types.ModuleType):
@@ -56,7 +56,7 @@ def createResolutionCallbackFromEnv(lookup_base):
             i += 1

         base = lookupInModule(expr[:i].strip(), module)
-        assert base is not None, "Unresolvable type {}".format(expr[:i])
+        assert base is not None, f"Unresolvable type {expr[:i]}"
         if i == len(expr) or expr[i] != '[':
             return base, i

@@ -465,7 +465,7 @@ def ignore(drop=False, **kwargs):

     if not isinstance(drop, bool):
         raise RuntimeError("Argument to @torch.jit.ignore must be a bool or "
-                           "a function but got {}".format(drop))
+                           f"a function but got {drop}")

     # for backwards compat
     drop_on_export = kwargs.pop("drop_on_export", None)
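One recurring subtlety in hunks like the one above: when a message is built from adjacent string literals, only the fragment that actually contains placeholders needs the `f` prefix, because implicit concatenation joins the literals after each one is evaluated. A small sketch with a hypothetical `drop` value:

    drop = 42  # hypothetical non-bool argument, for illustration only

    # only the second literal carries a placeholder, so only it is an f-string
    message = ("Argument to @torch.jit.ignore must be a bool or "
               f"a function but got {drop}")

    assert message.endswith("but got 42")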
@@ -707,7 +707,7 @@ class BroadcastingListCls(object):
 # list size
 BroadcastingList1 = BroadcastingListCls()
 for i in range(2, 7):
-    globals()["BroadcastingList{}".format(i)] = BroadcastingList1
+    globals()[f"BroadcastingList{i}"] = BroadcastingList1


 def is_scripting():
@@ -768,12 +768,12 @@ def _qualified_name(obj):
     # The Python docs are very clear that `__module__` can be None, but I can't
     # figure out when it actually would be.
     if module_name is None:
-        raise RuntimeError("Could not get qualified name for class '{}': "
-                           "__module__ can't be None.".format(name))
+        raise RuntimeError(f"Could not get qualified name for class '{name}': "
+                           "__module__ can't be None.")

     # if getattr(sys.modules[module_name], name) is not obj:
-    #     raise RuntimeError("Could not get qualified name for class '{}': "
-    #                        "the attr {} on module {} is not the the class".format(name, name, module_name))
+    #     raise RuntimeError(f"Could not get qualified name for class '{name}': "
+    #                        f"the attr {name} on module {module_name} is not the the class")

     # __main__ is a builtin module, so rewrite it to "__torch__".
     if module_name == "__main__":
@@ -784,8 +784,8 @@ def _qualified_name(obj):
         module_name = "__torch__." + module_name

     if "." in name:
-        raise RuntimeError("Could not get qualified name for class '{}': "
-                           "'{}' is not a valid identifier".format(name, name))
+        raise RuntimeError(f"Could not get qualified name for class '{name}': "
+                           f"'{name}' is not a valid identifier")

     return module_name + "." + name

@@ -894,7 +894,7 @@ def tensordot(a, b, dims=2):
     if isinstance(dims, torch.Tensor):
         dims = dims.item()
     if dims < 0:
-        raise RuntimeError("tensordot expects dims >= 0, but got dims={}".format(dims))
+        raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
     dims_a = list(range(-dims, 0))
     dims_b = list(range(dims))
     return _VF.tensordot(a, b, dims_a, dims_b)  # type: ignore
@@ -1020,7 +1020,7 @@ def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
     elif compute_mode == 'donot_use_mm_for_euclid_dist':
         return _VF.cdist(x1, x2, p, 2)  # type: ignore
     else:
-        raise ValueError("{} is not a valid value for compute_mode".format(compute_mode))
+        raise ValueError(f"{compute_mode} is not a valid value for compute_mode")

 def atleast_1d(*tensors):
     r"""
@@ -1283,7 +1283,7 @@ def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa
                     return _VF.nuclear_norm(input, _dim, keepdim=keepdim)  # type: ignore
                 else:
                     return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore
-            raise RuntimeError("only valid string values are 'fro' and 'nuc', found {}".format(p))
+            raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
         else:
             if _dim is None:
                 _dim = [i for i in range(ndim)]  # noqa: C416 TODO: rewrite as list(range(m))
@@ -1417,11 +1417,9 @@ else:
     def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None:
         get_infos_int = 1 if get_infos else 0
         if out_len - get_infos_int != 2:
-            raise TypeError("expected tuple of {} elements but got {}"
-                            .format(2 + int(get_infos), out_len))
+            raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}")
         if not isinstance(out, (tuple, list)):
-            raise TypeError("argument 'out' must be tuple of Tensors, not {}"
-                            .format(type(out).__name__))
+            raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")

     def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
         # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
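The two-line `.format()` calls above collapse into single lines because f-string braces accept arbitrary expressions, including arithmetic and attribute lookups, not just variable names. A sketch with hypothetical inputs:

    get_infos = True  # hypothetical inputs, for illustration only
    out_len = 2

    text = f"expected tuple of {2 + int(get_infos)} elements but got {out_len}"
    assert text == "expected tuple of 3 elements but got 2"

    out = "not a tuple"
    text = f"argument 'out' must be tuple of Tensors, not {type(out).__name__}"
    assert text.endswith("not str")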
@@ -33,7 +33,7 @@ class Module(object):
         try:
             return self.members[name]
         except KeyError:
-            raise RuntimeError("Module {} has no member called {}".format(self.name, name)) from None
+            raise RuntimeError(f"Module {self.name} has no member called {name}") from None


 class EvalEnv(object):
@@ -131,7 +131,7 @@ def check_fn(fn, loc):
     py_ast = ast.parse(source)
     if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef):
         raise torch.jit.frontend.FrontendError(
-            loc, "Cannot instantiate class '{}' in a script function".format(py_ast.body[0].name))
+            loc, f"Cannot instantiate class '{py_ast.body[0].name}' in a script function")
     if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
         raise torch.jit.frontend.FrontendError(loc, "Expected a single top-level function")

@@ -259,7 +259,7 @@ def try_real_annotations(fn, loc):
 def get_enum_value_type(e: Type[enum.Enum], loc):
     enum_values: List[enum.Enum] = list(e)
     if not enum_values:
-        raise ValueError("No enum values defined for: '{}'".format(e.__class__))
+        raise ValueError(f"No enum values defined for: '{e.__class__}'")

     types = {type(v.value) for v in enum_values}
     ir_types = [try_ann_to_type(t, loc) for t in types]
@@ -325,8 +325,8 @@ def try_ann_to_type(ann, loc):
         return IntType.get()  # dtype not yet bound in as its own type
     if inspect.isclass(ann) and issubclass(ann, enum.Enum):
         if not is_enum_support_enabled():
-            warnings.warn("Enum support is work in progress, enum class {}"
-                          " is not compiled".format(ann))
+            warnings.warn(f"Enum support is work in progress, enum class {ann}"
+                          " is not compiled")
             return None
         if not hasattr(ann, "__torch_script_class__"):
             torch.jit._script._recursive_compile_class(ann, loc)
@@ -349,7 +349,7 @@ def ann_to_type(ann, loc):
     the_type = try_ann_to_type(ann, loc)
     if the_type is not None:
         return the_type
-    raise ValueError("Unknown type annotation: '{}'".format(ann))
+    raise ValueError(f"Unknown type annotation: '{ann}'")


 __all__ = [
@@ -45,7 +45,7 @@ class SobolEngine(object):
     def __init__(self, dimension, scramble=False, seed=None):
         if dimension > self.MAXDIM or dimension < 1:
             raise ValueError("Supported range of dimensionality "
-                             "for SobolEngine is [1, {}]".format(self.MAXDIM))
+                             f"for SobolEngine is [1, {self.MAXDIM}]")

         self.seed = seed
         self.scramble = scramble
@@ -120,9 +120,9 @@ class SobolEngine(object):
         return self

     def __repr__(self):
-        fmt_string = ['dimension={}'.format(self.dimension)]
+        fmt_string = [f'dimension={self.dimension}']
         if self.scramble:
             fmt_string += ['scramble=True']
         if self.seed is not None:
-            fmt_string += ['seed={}'.format(self.seed)]
+            fmt_string += [f'seed={self.seed}']
         return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'
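The `__repr__` hunk above shows the pattern applied to repr construction: each fragment becomes an f-string and the pieces are joined at the end. A toy class sketching the same idea (names are hypothetical, not part of the commit):

    class Sampler:
        # hypothetical toy class mirroring the __repr__ structure above
        def __init__(self, dimension, scramble=False, seed=None):
            self.dimension = dimension
            self.scramble = scramble
            self.seed = seed

        def __repr__(self):
            fmt_string = [f'dimension={self.dimension}']
            if self.scramble:
                fmt_string += ['scramble=True']
            if self.seed is not None:
                fmt_string += [f'seed={self.seed}']
            return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'

    assert repr(Sampler(4, seed=7)) == 'Sampler(dimension=4, seed=7)'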
@@ -137,12 +137,12 @@ def validate_cuda_device(location):
                            'If you are running on a CPU-only machine, '
                            'please use torch.load with map_location=torch.device(\'cpu\') '
                            'to map your storages to the CPU.')
-    if device >= torch.cuda.device_count():
+    device_count = torch.cuda.device_count()
+    if device >= device_count:
         raise RuntimeError('Attempting to deserialize object on CUDA device '
-                           '{device} but torch.cuda.device_count() is {device_count}. Please use '
+                           f'{device} but torch.cuda.device_count() is {device_count}. Please use '
                            'torch.load with map_location to map your storages '
-                           'to an existing device.'.format(
-                               device=device, device_count=torch.cuda.device_count()))
+                           'to an existing device.')
     return device


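This hunk does slightly more than swap syntax: it hoists `torch.cuda.device_count()` into a local so the value is computed once and then interpolated, instead of being passed twice through keyword arguments to `.format()`. A sketch of the same reshaping, with a hypothetical stand-in for the CUDA call:

    def device_count():
        # hypothetical stand-in for torch.cuda.device_count(), for illustration only
        return 1

    device = 3
    count = device_count()  # evaluate once, then interpolate the local

    message = ('Attempting to deserialize object on CUDA device '
               f'{device} but torch.cuda.device_count() is {count}. Please use '
               'torch.load with map_location to map your storages '
               'to an existing device.')

    assert 'device 3 but' in message and 'is 1.' in message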
@@ -234,7 +234,7 @@ def _open_file_like(name_or_buffer, mode):
     elif 'r' in mode:
         return _open_buffer_reader(name_or_buffer)
     else:
-        raise RuntimeError("Expected 'r' or 'w' in mode but got {}".format(mode))
+        raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")


 class _open_zipfile_reader(_opener):
@@ -479,7 +479,7 @@ def _save(obj, zip_file, pickle_module, pickle_protocol):

     # Write each tensor to a file named tensor/the_tensor_key in the zip archive
     for key in sorted(serialized_storages.keys()):
-        name = 'data/{}'.format(key)
+        name = f'data/{key}'
         storage = serialized_storages[key]
         if storage.device.type == 'cpu':
             # If it's on the CPU we can directly copy it into the zip file
|
||||
"accessing the object's source attribute or set "
|
||||
"`torch.nn.Module.dump_patches = True` and use the "
|
||||
"patch tool to revert the changes.")
|
||||
msg = ("source code of class '{container_type}' has changed. {msg}"
|
||||
.format(container_type=torch.typename(container_type), msg=msg))
|
||||
msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}"
|
||||
warnings.warn(msg, SourceChangeWarning)
|
||||
|
||||
def legacy_load(f):
|
||||
@@ -698,8 +697,8 @@ def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
                 ndim, = struct.unpack('<i', f.read(4))
                 # skip next 4 bytes; legacy encoding treated ndim as 8 bytes
                 f.read(4)
-                size = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim))
-                stride = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim))
+                size = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
+                stride = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
                 storage_offset, = struct.unpack('<q', f.read(8))
                 tensor = tensor_type().set_(storage, storage_offset, size, stride)
                 deserialized_objects[key] = tensor
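f-strings also work for building `struct` format strings, as in the hunk above where the rank decides how many 8-byte integers to read. A self-contained sketch with a hypothetical `ndim`:

    import struct

    ndim = 3  # hypothetical tensor rank, for illustration only
    payload = struct.pack('<3q', 4, 5, 6)

    # '<{}q'.format(ndim) and f'<{ndim}q' both yield the format string '<3q'
    size = struct.unpack(f'<{ndim}q', payload)
    assert size == (4, 5, 6)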
@@ -759,8 +758,8 @@ def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
     if not hasattr(f, 'readinto') and (3, 8, 0) <= sys.version_info < (3, 8, 2):
         raise RuntimeError(
             "torch.load does not work with file-like objects that do not implement readinto on Python 3.8.0 and 3.8.1. "
-            "Received object of type \"{}\". Please update to Python 3.8.2 or newer to restore this "
-            "functionality.".format(type(f)))
+            f"Received object of type \"{type(f)}\". Please update to Python 3.8.2 or newer to restore this "
+            "functionality.")

     magic_number = pickle_module.load(f, **pickle_load_args)
     if magic_number != MAGIC_NUMBER:
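A note on the escaped quotes above: backslash escapes such as `\"` remain legal in the literal text of an f-string; the restriction on Python versions before 3.12 only applies to backslashes inside the `{...}` replacement fields. A small sketch with a hypothetical file-like object:

    import io

    f = io.BytesIO(b'')  # hypothetical file-like object, for illustration only

    note = f"Received object of type \"{type(f)}\". Please update to Python 3.8.2 or newer."
    assert note.startswith('Received object of type "<class')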
@@ -828,7 +827,7 @@ def _load(zip_file, map_location, pickle_module, **pickle_load_args):
     loaded_storages = {}

     def load_tensor(data_type, size, key, location):
-        name = 'data/{}'.format(key)
+        name = f'data/{key}'
         dtype = data_type(0).dtype

         storage = zip_file.get_storage_from_record(name, size, dtype).storage()
|
||||
data = saved_id[1:]
|
||||
|
||||
assert typename == 'storage', \
|
||||
"Unknown typename for persistent_load, expected 'storage' but got '{}'".format(typename)
|
||||
f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
|
||||
data_type, key, location, size = data
|
||||
if key not in loaded_storages:
|
||||
load_tensor(data_type, size, key, _maybe_decode_ascii(location))
|
||||
|
@@ -10,7 +10,7 @@ class _StorageBase(object):

     def __str__(self):
         content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
-        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
+        return content + f'\n[{torch.typename(self)} of size {len(self)}]'

     def __repr__(self):
         return str(self)
@@ -102,8 +102,7 @@ class _StorageBase(object):
     def pin_memory(self):
         """Copies the storage to pinned memory, if it's not already pinned."""
         if self.is_cuda:
-            raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
-                            .format(self.type()))
+            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
         import torch.cuda
         allocator = torch.cuda._host_allocator()
         return type(self)(self.size(), allocator=allocator).copy_(self)
@@ -60,7 +60,7 @@ class Tensor(torch._C._TensorBase):
                         self.q_per_channel_zero_points(), \
                         self.q_per_channel_axis()
                 else:
-                    raise RuntimeError("Unsupported qscheme {} in deepcopy".format(self.qscheme()))
+                    raise RuntimeError(f"Unsupported qscheme {self.qscheme()} in deepcopy")
                 new_tensor = torch._utils._rebuild_qtensor(
                     new_storage,
                     self.storage_offset(),
@@ -114,7 +114,7 @@ class Tensor(torch._C._TensorBase):
                                 self.q_per_channel_zero_points(),
                                 self.q_per_channel_axis())
         else:
-            raise RuntimeError("Serialization is not supported for tensors of type {}".format(self.qscheme()))
+            raise RuntimeError(f"Serialization is not supported for tensors of type {self.qscheme()}")
         args = (self.storage(),
                 self.storage_offset(),
                 tuple(self.size()),