Mirror of https://github.com/huggingface/transformers.git, synced 2025-10-20 17:13:56 +08:00
Fix Pylint warnings (#41644)
* Fix pylint warnings

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

* More fixes

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

* Raise with an exception

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

---------

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
@@ -252,8 +252,7 @@ def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list[dict[
             return json_data
         except JSONDecodeError:
             continue
-    else:
-        raise TimeoutError("Rank 0 gave up on waiting for other processes")
+    raise TimeoutError("Rank 0 gave up on waiting for other processes")
     # Unreachable
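The hunk above drops the `else:` clause from a `while` loop; pylint reports `else` on a loop that contains no `break` as useless-else-on-loop (W0120), because the `else` body runs unconditionally once the loop finishes, so hoisting the `raise` out of it changes nothing. A minimal sketch of the equivalence (the `wait_for_rank_files` names and the polling body are illustrative, not the actual helper):

    import time

    # Flagged form: the loop never breaks, so `else` always executes after it.
    def wait_for_rank_files_before(timeout: float) -> None:
        start = time.time()
        while time.time() - start < timeout:
            time.sleep(0.1)
        else:  # pylint would flag this as useless-else-on-loop (W0120)
            raise TimeoutError("Rank 0 gave up on waiting for other processes")

    # Equivalent fixed form: the raise simply follows the loop.
    def wait_for_rank_files_after(timeout: float) -> None:
        start = time.time()
        while time.time() - start < timeout:
            time.sleep(0.1)
        raise TimeoutError("Rank 0 gave up on waiting for other processes")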
@@ -93,8 +93,6 @@ class EdgeTamVisionConfig(PreTrainedConfig):
         if isinstance(backbone_config, dict):
             backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
             backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
-        elif isinstance(backbone_config, AutoConfig):
-            backbone_config = backbone_config
         elif backbone_config is None:
             backbone_config = AutoConfig.from_pretrained(
                 "timm/repvit_m1.dist_in1k",
@@ -116,8 +116,6 @@ class EdgeTamVisionConfig(PreTrainedConfig):
         if isinstance(backbone_config, dict):
             backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
             backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
-        elif isinstance(backbone_config, AutoConfig):
-            backbone_config = backbone_config
         elif backbone_config is None:
             backbone_config = AutoConfig.from_pretrained(
                 "timm/repvit_m1.dist_in1k",
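Both EdgeTamVisionConfig hunks delete an `elif` branch whose only statement re-assigned `backbone_config` to itself; pylint reports this as self-assigning-variable (W0127), and since the branch has no other effect, removing it leaves the surrounding `if`/`elif` chain behaving exactly as before. A small reproduction of the pattern (a toy `normalize_config` function, not the real config code):

    # Flagged form: the str branch is a no-op.
    def normalize_config(value):
        if isinstance(value, dict):
            value = dict(value)   # real work, branch is useful
        elif isinstance(value, str):
            value = value         # pylint W0127: self-assigning-variable
        return value

    # Fixed form: dropping the no-op branch changes nothing observable.
    def normalize_config_fixed(value):
        if isinstance(value, dict):
            value = dict(value)
        return value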
@@ -162,7 +162,7 @@ class Qwen2VLVideoProcessor(BaseVideoProcessor):
             )
             max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
             num_frames = total_num_frames / metadata.fps * fps
-            num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
+            num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
             num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size

         if num_frames > total_num_frames:
@@ -164,7 +164,7 @@ class Qwen3VLVideoProcessor(BaseVideoProcessor):
                 "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
             )
             num_frames = int(total_num_frames / metadata.fps * fps)
-            num_frames = min(min(max(num_frames, self.min_frames), self.max_frames), total_num_frames)
+            num_frames = min(max(num_frames, self.min_frames), self.max_frames, total_num_frames)

         if num_frames is None:
             num_frames = min(max(total_num_frames, self.min_frames), self.max_frames)
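These two video-processor hunks (and the VideoLlama3 one further down) flatten a nested `min(min(...), ...)` call, which pylint reports as nested-min-max (W3301). Because `min` and `max` accept any number of arguments, the flattened call is exactly equivalent; a tiny check with made-up frame counts:

    # Illustrative values only: clamp to [min_frames, max_frames], then cap at total.
    num_frames, min_frames, max_frames, total_num_frames = 37, 4, 768, 120

    nested = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
    flat = min(max(num_frames, min_frames), max_frames, total_num_frames)

    assert nested == flat == 37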
@@ -230,7 +230,6 @@ class SwitchTransformersLayerFF(nn.Module):
     def forward(self, hidden_states, **kwargs):
         forwarded_states = self.layer_norm(hidden_states)
         forwarded_states = self.mlp(forwarded_states)
-        forwarded_states = forwarded_states
         output = hidden_states + self.dropout(forwarded_states)
         return output

@@ -250,7 +250,6 @@ class SwitchTransformersLayerFF(nn.Module):
     def forward(self, hidden_states, **kwargs):
         forwarded_states = self.layer_norm(hidden_states)
         forwarded_states = self.mlp(forwarded_states)
-        forwarded_states = forwarded_states
         output = hidden_states + self.dropout(forwarded_states)
         return output

@@ -163,7 +163,7 @@ class VideoLlama3VideoProcessor(BaseVideoProcessor):
             )
             max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
             num_frames = total_num_frames / metadata.fps * fps
-            num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
+            num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
             num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size

         if num_frames > total_num_frames:
@@ -86,10 +86,10 @@ def decode_spans(


 def select_starts_ends(
-    start,
-    end,
-    p_mask,
-    attention_mask,
+    start: np.ndarray,
+    end: np.ndarray,
+    p_mask: np.ndarray,
+    attention_mask: np.ndarray,
     min_null_score=1000000,
     top_k=1,
     handle_impossible_answer=False,
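This hunk only adds `np.ndarray` annotations to the first four parameters of `select_starts_ends`; runtime behaviour is unchanged, the hints simply document that the question-answering pipeline passes numpy arrays here. A hedged sketch of the idea (a toy `outer_scores` helper, not the real pipeline function):

    import numpy as np

    # Annotating array parameters documents the expected types for readers and
    # static checkers without changing how callers invoke the function.
    def outer_scores(start: np.ndarray, end: np.ndarray) -> np.ndarray:
        # Broadcasted start-by-end score matrix, a common span-scoring step.
        return start[:, None] + end[None, :]

    scores = outer_scores(np.array([0.1, 0.7]), np.array([0.2, 0.5]))
    assert scores.shape == (2, 2)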
@@ -514,7 +514,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
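The remaining hunks all apply the same change to the `assert_tensors_close` / `_assert_tensors_equal` test helpers: the bare `raise` inside the `try` body becomes `raise Exception`, matching the "Raise with an exception" bullet in the commit message. Pylint flags a bare `raise` outside an `except` handler as misplaced-bare-raise (E0704), since there is no active exception to re-raise at that point; at runtime it produces a RuntimeError, which the broad `except Exception` happened to catch anyway, so the fix is about clarity and silencing E0704 rather than behaviour. A minimal sketch of the pattern (a toy helper, not the actual test utility):

    import torch

    def tensors_close_sketch(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-12) -> bool:
        try:
            if torch.allclose(a, b, atol=atol):
                return True
            # An explicit Exception (rather than a bare `raise`) deliberately
            # sends control to the handler below when the tensors differ.
            raise Exception
        except Exception:
            pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
            raise AssertionError(f"{pct_different:.1%} of elements differ by more than {atol}")

    tensors_close_sketch(torch.zeros(3), torch.zeros(3))  # returns True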
@@ -270,7 +270,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -275,7 +275,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -414,7 +414,7 @@ def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         if len(prefix) > 0:
             prefix = f"{prefix}: "
@@ -476,7 +476,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -346,7 +346,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -379,7 +379,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -521,7 +521,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -341,7 +341,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
    except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -300,7 +300,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -545,7 +545,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -330,7 +330,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
@@ -75,7 +75,7 @@ def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         msg = f"{a} != {b}"
         if prefix: