[BE][CI][Easy] bump ruff to 0.9.0: long statements in docstrings (#146509)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146509
Approved by: https://github.com/justinchuby, https://github.com/Skylion007
Author: Xuehai Pan
Date: 2025-02-24 23:54:37 +08:00
Committed by: PyTorch MergeBot
Commit: 52f6d4aa30
Parent: 9605c5063b
4 changed files with 26 additions and 6 deletions

@@ -666,7 +666,11 @@ def _load_local(hubconf_dir, model, *args, **kwargs):
Example:
>>> # xdoctest: +SKIP("stub local path")
>>> path = "/some/local/path/pytorch/vision"
->>> model = _load_local(path, "resnet50", weights="ResNet50_Weights.IMAGENET1K_V1")
+>>> model = _load_local(
+... path,
+... "resnet50",
+... weights="ResNet50_Weights.IMAGENET1K_V1",
+... )
"""
with _add_to_sys_path(hubconf_dir):
hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF)
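
The hunk above reflows the docstring example for _load_local, the private helper behind torch.hub.load(..., source="local"). As a rough illustration of the public equivalent (a sketch only: the local checkout path is assumed to exist, and the weights argument is taken straight from the docstring):

import torch

# Assumed: a local clone of a hub repo (e.g. torchvision) at this path.
path = "/some/local/path/pytorch/vision"
model = torch.hub.load(
    path,
    "resnet50",
    source="local",  # read hubconf.py from the local path instead of downloading from GitHub
    weights="ResNet50_Weights.IMAGENET1K_V1",
)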

@@ -153,7 +153,10 @@ class FXSymbolicTracer(_exporter_legacy.FXGraphExtractor):
return out
-f = fx.symbolic_trace(f, concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}})
+f = fx.symbolic_trace(
+f,
+concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}},
+)
assert f({"a": 1, "b": 2, "c": 4}) == 7
"""

@@ -822,7 +822,10 @@ class Modularize(_pass.Transform):
... )
>>> gm.print_readable()
->>> gm = passes.Modularize(infra.DiagnosticContext("test_context", "1.0"), gm).run()
+>>> gm = passes.Modularize(
+... infra.DiagnosticContext("test_context", "1.0"),
+... gm,
+... ).run()
>>> gm.print_readable()
"""

@@ -1322,10 +1322,16 @@ def load(
>>> # xdoctest: +SKIP("undefined filepaths")
>>> torch.load("tensors.pt", weights_only=True)
# Load all tensors onto the CPU
>>> torch.load("tensors.pt", map_location=torch.device("cpu"), weights_only=True)
>>> torch.load(
... "tensors.pt",
... map_location=torch.device("cpu"),
... weights_only=True,
... )
# Load all tensors onto the CPU, using a function
>>> torch.load(
... "tensors.pt", map_location=lambda storage, loc: storage, weights_only=True
... "tensors.pt",
... map_location=lambda storage, loc: storage,
... weights_only=True,
... )
# Load all tensors onto GPU 1
>>> torch.load(
@@ -1334,7 +1340,11 @@ def load(
... weights_only=True,
... ) # type: ignore[attr-defined]
# Map tensors from GPU 1 to GPU 0
>>> torch.load("tensors.pt", map_location={"cuda:1": "cuda:0"}, weights_only=True)
>>> torch.load(
... "tensors.pt",
... map_location={"cuda:1": "cuda:0"},
... weights_only=True,
... )
# Load tensor from io.BytesIO object
# Loading from a buffer setting weights_only=False, warning this can be unsafe
>>> with open("tensor.pt", "rb") as f:
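
The two hunks above reflow the torch.load docstring examples. For context, a small self-contained round trip (the file name and tensors are illustrative only) exercising the weights_only=True and map_location arguments shown in the docstring:

import os
import tempfile

import torch

state = {"weight": torch.randn(3, 3), "bias": torch.zeros(3)}

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "tensors.pt")
    torch.save(state, path)

    # weights_only=True restricts unpickling to tensors and other allowed types;
    # map_location moves every storage onto the CPU regardless of where it was saved.
    loaded = torch.load(path, map_location=torch.device("cpu"), weights_only=True)
    assert torch.equal(loaded["weight"], state["weight"])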