Fix Pylint warnings (#41644)

* Fix pylint warnings

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

* More fixes

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

* Raise with an exception

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

---------

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
This commit is contained in:
Yuanyuan Chen
2025-10-17 21:09:42 +08:00
committed by GitHub
parent c01ceffeb4
commit 080d704af1
22 changed files with 21 additions and 28 deletions

View File

@@ -252,8 +252,7 @@ def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list[dict[
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes")
raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable

View File

@@ -93,8 +93,6 @@ class EdgeTamVisionConfig(PreTrainedConfig):
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif isinstance(backbone_config, AutoConfig):
backbone_config = backbone_config
elif backbone_config is None:
backbone_config = AutoConfig.from_pretrained(
"timm/repvit_m1.dist_in1k",

View File

@@ -116,8 +116,6 @@ class EdgeTamVisionConfig(PreTrainedConfig):
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif isinstance(backbone_config, AutoConfig):
backbone_config = backbone_config
elif backbone_config is None:
backbone_config = AutoConfig.from_pretrained(
"timm/repvit_m1.dist_in1k",

View File

@@ -162,7 +162,7 @@ class Qwen2VLVideoProcessor(BaseVideoProcessor):
)
max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
num_frames = total_num_frames / metadata.fps * fps
num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
if num_frames > total_num_frames:

View File

@@ -164,7 +164,7 @@ class Qwen3VLVideoProcessor(BaseVideoProcessor):
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
num_frames = int(total_num_frames / metadata.fps * fps)
num_frames = min(min(max(num_frames, self.min_frames), self.max_frames), total_num_frames)
num_frames = min(max(num_frames, self.min_frames), self.max_frames, total_num_frames)
if num_frames is None:
num_frames = min(max(total_num_frames, self.min_frames), self.max_frames)

View File

@@ -230,7 +230,6 @@ class SwitchTransformersLayerFF(nn.Module):
def forward(self, hidden_states, **kwargs):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.mlp(forwarded_states)
forwarded_states = forwarded_states
output = hidden_states + self.dropout(forwarded_states)
return output

View File

@@ -250,7 +250,6 @@ class SwitchTransformersLayerFF(nn.Module):
def forward(self, hidden_states, **kwargs):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.mlp(forwarded_states)
forwarded_states = forwarded_states
output = hidden_states + self.dropout(forwarded_states)
return output

View File

@@ -163,7 +163,7 @@ class VideoLlama3VideoProcessor(BaseVideoProcessor):
)
max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
num_frames = total_num_frames / metadata.fps * fps
num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
if num_frames > total_num_frames:

View File

@@ -86,10 +86,10 @@ def decode_spans(
def select_starts_ends(
start,
end,
p_mask,
attention_mask,
start: np.ndarray,
end: np.ndarray,
p_mask: np.ndarray,
attention_mask: np.ndarray,
min_null_score=1000000,
top_k=1,
handle_impossible_answer=False,

View File

@@ -514,7 +514,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -270,7 +270,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -275,7 +275,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -414,7 +414,7 @@ def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
if len(prefix) > 0:
prefix = f"{prefix}: "

View File

@@ -476,7 +476,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -346,7 +346,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -379,7 +379,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -521,7 +521,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -341,7 +341,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -300,7 +300,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -545,7 +545,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -330,7 +330,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:

View File

@@ -75,7 +75,7 @@ def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
try:
if torch.allclose(a, b, atol=atol):
return True
raise
raise Exception
except Exception:
msg = f"{a} != {b}"
if prefix: