Enable more ruff UP rules (#40579)
* Import Sequence from collections.abc

Signed-off-by: cyy <cyyever@outlook.com>

* Apply ruff UP rules

Signed-off-by: cyy <cyyever@outlook.com>

---------

Signed-off-by: cyy <cyyever@outlook.com>
@@ -63,7 +63,7 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)

-MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
+MAX_LENGTH = 10000  # Hardcoded max length to avoid infinite loop

 MODEL_CLASSES = {
     "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
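For reference, this is ruff's UP018 fix (native literals): wrapping a literal in its own type constructor is a no-op. A minimal sketch of the pattern, with illustrative names:

    # Flagged by UP018: calling a builtin type on a literal of that type is redundant.
    timeout = int(30)
    label = str("ready")
    # Equivalent, preferred spellings:
    timeout = 30
    label = "ready"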
@@ -22,10 +22,15 @@ line-length = 119
 # SIM300: Yoda condition detected
 # SIM212: Checks for if expressions that check against a negated condition.
 # SIM905: Consider using a list literal instead of `str.split`
-ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905"]
+# UP009: UTF-8 encoding declaration is unnecessary
+# UP015: Unnecessary mode argument
+# UP031: Use format specifiers instead of percent format
+# UP004: Class `XXX` inherits from `object`
+# UP028: Checks for for loops that can be replaced with yield from expressions
+ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905", "UP009", "UP015", "UP031", "UP028", "UP004"]
 # RUF013: Checks for the use of implicit Optional
 # in type annotations when the default parameter value is None.
-select = ["C", "E", "F", "I", "W", "RUF013", "UP006", "PERF102", "PLC1802", "PLC0208","SIM"]
+select = ["C", "E", "F", "I", "W", "RUF013", "PERF102", "PLC1802", "PLC0208", "SIM", "UP"]
 extend-safe-fixes = ["UP006"]

 # Ignore import violations in all `__init__.py` files.
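UP031, newly ignored above, targets printf-style `%` interpolation. A small sketch (illustrative names only) of what the rule would otherwise rewrite:

    name, count = "ruff", 3
    # UP031 flags percent formatting...
    old = "%s found %d issues" % (name, count)
    # ...and prefers format specifiers / f-strings:
    new = f"{name} found {count} issues"
    assert old == new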
@@ -33,6 +38,7 @@ extend-safe-fixes = ["UP006"]
 "__init__.py" = ["E402", "F401", "F403", "F811"]
 "src/transformers/file_utils.py" = ["F401"]
 "src/transformers/utils/dummy_*.py" = ["F401"]
+"examples/legacy/**/*.py" = ["UP"]

 [tool.ruff.lint.isort]
 lines-after-imports = 2
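Keeping `extend-safe-fixes = ["UP006"]` lets `ruff --fix` apply the PEP 585 builtin-generic rewrite automatically. A sketch of the change UP006 makes, using hypothetical functions:

    from typing import Dict, List  # UP006: prefer builtin generics

    def bucket(items: List[str]) -> Dict[str, int]:
        return {item: len(item) for item in items}

    # After the fix (PEP 585, Python 3.9+), the builtins are annotated directly:
    def bucket_fixed(items: list[str]) -> dict[str, int]:
        return {item: len(item) for item in items}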
@@ -21,8 +21,9 @@ import importlib
 import io
 import os
 import warnings
+from collections.abc import Sequence
 from io import BytesIO
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Optional, Union

 import numpy as np
 import requests
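The import move above is ruff's deprecated-import fix (UP035): since Python 3.9, the abstract container types in `typing` are deprecated aliases for the ones in `collections.abc`. A self-contained sketch with an illustrative function:

    from collections.abc import Sequence
    from typing import Optional

    def first(items: Sequence[int]) -> Optional[int]:
        # Accepts any sequence type: list, tuple, range, ...
        return items[0] if items else None

    assert first((1, 2, 3)) == 1
    assert first([]) is None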
@@ -734,7 +734,7 @@ class ServeCommand(BaseTransformersCLICommand):

         uvicorn.run(app, host=self.args.host, port=self.args.port, log_level=self.args.log_level)

-    @functools.lru_cache(maxsize=None)
+    @functools.cache
     def get_gen_models(self) -> list[dict[str, any]]:
         """
         This is by no means a limit to which models may be instantiated with `transformers serve`: any chat-based
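`functools.cache`, added in Python 3.9, is an exact alias for `lru_cache(maxsize=None)`: unbounded memoization with the same management API. A quick sketch:

    import functools

    @functools.cache  # identical to @functools.lru_cache(maxsize=None)
    def fib(n: int) -> int:
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    assert fib(10) == 55
    fib.cache_clear()  # cache() still exposes the lru_cache management helpers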
@@ -95,7 +95,7 @@ class GradientCheckpointingLayer(nn.Module):


 @auto_docstring
-class GenericForSequenceClassification(object):
+class GenericForSequenceClassification:
     base_model_prefix = "model"

     def __init__(self, config):
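This hunk and the two that follow apply UP004: explicitly inheriting from `object` is a Python 2 holdover, since every Python 3 class is already new-style. A minimal sketch:

    class Legacy(object):  # UP004: the `object` base is implicit in Python 3
        pass

    class Modern:  # identical class semantics, less noise
        pass

    assert Legacy.__mro__[-1] is object and Modern.__mro__[-1] is object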
@@ -170,7 +170,7 @@ class GenericForSequenceClassification(object):


 @auto_docstring
-class GenericForQuestionAnswering(object):
+class GenericForQuestionAnswering:
     base_model_prefix = "model"

     def __init__(self, config):
@@ -231,7 +231,7 @@ class GenericForQuestionAnswering(object):


 @auto_docstring
-class GenericForTokenClassification(object):
+class GenericForTokenClassification:
     base_model_prefix = "model"

     def __init__(self, config):
@@ -994,7 +994,7 @@ class Florence2PostProcessor:
             instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
             parsed_dict["description_with_bboxes_or_polygons"] = instances
         else:
-            raise ValueError("task {} is not supported".format(task))
+            raise ValueError(f"task {task} is not supported")

         return parsed_dict

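The `.format()` call here (and in the similar hunks below) becomes an f-string via UP032. The general pattern, as a sketch:

    task = "segmentation"
    # UP032: str.format with arguments that can be inlined...
    old = "task {} is not supported".format(task)
    # ...becomes an f-string, evaluated in place:
    new = f"task {task} is not supported"
    assert old == new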
@@ -795,7 +795,7 @@ class Florence2PostProcessor:
             instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
             parsed_dict["description_with_bboxes_or_polygons"] = instances
         else:
-            raise ValueError("task {} is not supported".format(task))
+            raise ValueError(f"task {task} is not supported")

         return parsed_dict

@@ -519,7 +519,7 @@ def convert_vision_weights(
     weights: np.ndarray,
 ) -> Iterable[tuple[str, np.ndarray]]:
     def generate_base_path(path: str, block_type: str) -> tuple[str, tuple[int, int]]:
-        re_str = r"{}(\d+)/".format(block_type)
+        re_str = rf"{block_type}(\d+)/"
         re_pattern = re.compile(re_str)
         match = re.search(re_pattern, path).group(1)
         idx = abs(int(match)) - 1
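String prefixes compose, so a raw regex built with `.format()` collapses into a single `rf"..."` literal: backslashes stay literal while `{block_type}` is interpolated. A quick check of the assumed behavior:

    import re

    block_type = "layer"  # illustrative value
    pattern = re.compile(rf"{block_type}(\d+)/")
    match = pattern.search("backbone/layer3/conv")
    assert match is not None and match.group(1) == "3"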
@@ -1487,9 +1487,7 @@ class Sam2Model(Sam2PreTrainedModel):
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )

         image_positional_embeddings = self.get_image_wide_positional_embeddings()
@@ -1384,9 +1384,7 @@ class Sam2Model(SamModel):
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )

         image_positional_embeddings = self.get_image_wide_positional_embeddings()
@@ -21,8 +21,9 @@

 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union

 import numpy as np
 import torch
@@ -988,7 +989,7 @@ class Sam2VideoMemoryFuserCXBlock(GradientCheckpointingLayer):
         )  # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )

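The dropped parentheses are purely cosmetic: `(n)` is a parenthesized integer, not a tuple, so `torch.ones((n))` and `torch.ones(n)` are the same call (ruff's UP034, extraneous parentheses). A sketch of the distinction:

    import torch

    dim = 4
    a = torch.ones((dim))   # extra parentheses around a scalar argument
    b = torch.ones(dim)     # identical call after the fix
    c = torch.ones((dim,))  # a genuine 1-tuple happens to give the same shape
    assert a.shape == b.shape == c.shape == torch.Size([4])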
@@ -1923,9 +1924,7 @@ class Sam2VideoModel(Sam2VideoPreTrainedModel):
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
@@ -16,8 +16,9 @@

 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union

 import numpy as np
 import torch
@@ -1326,7 +1327,7 @@ class Sam2VideoMemoryFuserCXBlock(GradientCheckpointingLayer):
         )  # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )

@@ -1634,9 +1635,7 @@ class Sam2VideoModel(Sam2Model):
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, Sequence, TypedDict, Union
+from collections.abc import Sequence
+from typing import Any, TypedDict, Union

 from typing_extensions import TypeAlias, overload

@@ -29,8 +30,16 @@ if is_vision_available():

 ImagePair: TypeAlias = Sequence[Union["Image.Image", str]]

-Keypoint = TypedDict("Keypoint", {"x": float, "y": float})
-Match = TypedDict("Match", {"keypoint_image_0": Keypoint, "keypoint_image_1": Keypoint, "score": float})
+
+class Keypoint(TypedDict):
+    x: float
+    y: float
+
+
+class Match(TypedDict):
+    keypoint_image_0: Keypoint
+    keypoint_image_1: Keypoint
+    score: float


 def validate_image_pairs(images: Any) -> Sequence[Sequence[ImagePair]]:
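The functional-to-class `TypedDict` conversion is UP013. Both forms define the same type; the class form supports docstrings and reads like an ordinary annotated block. A minimal sketch:

    from typing import TypedDict

    # Functional form, flagged by the rule:
    PointF = TypedDict("PointF", {"x": float, "y": float})

    # Equivalent class form:
    class Point(TypedDict):
        x: float
        y: float

    p: Point = {"x": 0.5, "y": 1.5}  # a plain dict at runtime, checked statically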
@@ -694,8 +694,8 @@ class BaseVideoProcessor(BaseImageProcessorFast):
                 _raise_exceptions_for_missing_entries=False,
             )
             resolved_video_processor_file = resolved_video_processor_files[0]
-        except EnvironmentError:
-            # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
+        except OSError:
+            # Raise any OS error raise by `cached_file`. It will have a helpful error message adapted to
             # the original exception.
             raise
         except Exception:
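UP024 normalizes aliases of `OSError`: since Python 3.3, `EnvironmentError` and `IOError` are the very same class, so catching `OSError` changes nothing at runtime. A quick demonstration:

    assert EnvironmentError is OSError and IOError is OSError

    try:
        open("/definitely/missing/path")  # illustrative path
    except OSError as err:  # an `except EnvironmentError` clause is identical
        assert isinstance(err, FileNotFoundError)  # subclass of OSError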