Mirror of https://github.com/huggingface/transformers.git, synced 2025-10-20 09:03:53 +08:00
Remove unnecessary list comprehension (#41305)
Remove unnecessary comprehension.

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
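The pattern across every hunk below is the same: a list comprehension passed straight to a builtin such as sum(), max(), or tuple() materializes a throwaway list before the builtin consumes it, while a generator expression feeds the builtin lazily with no intermediate allocation. A second recurring cleanup drops the explicit None default from dict.get(), which is already its default. A minimal sketch of both simplifications (names here are illustrative, not taken from the diff):

    lengths = [3, 1, 4, 1, 5]
    max_len = max([n for n in lengths])   # before: builds a list, then reduces it
    max_len = max(n for n in lengths)     # after: the genexp is consumed lazily

    mapping = {"cnn_dailymail": ("article", "highlights")}
    columns = mapping.get("cnn_dailymail", None)  # before: redundant default
    columns = mapping.get("cnn_dailymail")        # after: .get() defaults to None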
@@ -387,7 +387,7 @@ def main():
         return

     # 6. Get the column names for input/target.
-    dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
+    dataset_columns = dataset_name_mapping.get(data_args.dataset_name)
     if data_args.image_column is None:
         image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
     else:
@@ -933,7 +933,7 @@ def main():
             all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
             all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

-    max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_end_top_log_probs)  # Get the max_length of the tensor

     # concatenate all numpy arrays collected above
     start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len)
@@ -993,7 +993,7 @@ def main():
             all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
             all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

-    max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_end_top_log_probs)  # Get the max_length of the tensor

     # concatenate all numpy arrays collected above
     start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, predict_dataset, max_len)
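For context on the two hunks above: the evaluation and prediction loops gather variable-width score arrays per batch, so the code first computes the widest second dimension and then pads everything to it before concatenating. A minimal sketch of that pad-to-max_len pattern, where pad_and_concat is a hypothetical stand-in for the script's create_and_fill_np_array helper:

    import numpy as np

    def pad_and_concat(arrays, max_len, pad_value=-100.0):
        # Pad each gathered batch on axis 1 up to max_len, then stack the rows.
        padded = [
            np.pad(a, ((0, 0), (0, max_len - a.shape[1])), constant_values=pad_value)
            for a in arrays
        ]
        return np.concatenate(padded, axis=0)

    batches = [np.ones((2, 3)), np.ones((1, 5))]
    max_len = max(b.shape[1] for b in batches)  # the simplified expression from the diff
    out = pad_and_concat(batches, max_len)      # shape (3, 5)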
@@ -416,7 +416,7 @@ def main():
         return

     # Get the column names for input/target.
-    dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None)
+    dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name)
     if data_args.question_column is None:
         question_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
     else:
@@ -531,7 +531,7 @@ def main():
         model.config.forced_bos_token_id = forced_bos_token_id

     # Get the column names for input/target.
-    dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
+    dataset_columns = summarization_name_mapping.get(data_args.dataset_name)
     if data_args.text_column is None:
         text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
     else:
@@ -476,7 +476,7 @@ def main():
     column_names = raw_datasets["train"].column_names

     # Get the column names for input/target.
-    dataset_columns = summarization_name_mapping.get(args.dataset_name, None)
+    dataset_columns = summarization_name_mapping.get(args.dataset_name)
     if args.text_column is None:
         text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
     else:
@@ -65,7 +65,7 @@ class DeepseekVLHybridImageProcessorFast(BaseImageProcessorFast):
         if kwargs.get("image_mean") is None:
             background_color = (127, 127, 127)
         else:
-            background_color = tuple([int(x * 255) for x in kwargs.get("image_mean")])
+            background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
         if kwargs.get("high_res_image_mean") is None:
             high_res_background_color = (127, 127, 127)
         else:
@@ -764,7 +764,7 @@ class DeepseekVLHybridImageProcessorFast(DeepseekVLImageProcessorFast):
         if kwargs.get("image_mean") is None:
             background_color = (127, 127, 127)
         else:
-            background_color = tuple([int(x * 255) for x in kwargs.get("image_mean")])
+            background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
         if kwargs.get("high_res_image_mean") is None:
             high_res_background_color = (127, 127, 127)
         else:
@@ -551,7 +551,7 @@ class MusicgenDecoder(MusicgenPreTrainedModel):
         past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0

         if inputs_embeds is None:
-            inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)])
+            inputs_embeds = sum(self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks))

         attention_mask = self._update_causal_mask(
             attention_mask,
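A note on the Musicgen hunk above: Python's sum() starts from the integer 0, and 0 + tensor broadcasts, so replacing the list with a generator expression is behavior-preserving when summing embeddings. A quick sketch with dummy shapes:

    import torch

    embeds = [torch.ones(2, 4) for _ in range(3)]  # one embedding per codebook
    total = sum(e for e in embeds)                 # 0 + t0 + t1 + t2 -> shape (2, 4), all 3.0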
@@ -718,7 +718,7 @@ class OneFormerLoss(nn.Module):
         """
         Computes the average number of target masks across the batch, for normalization purposes.
         """
-        num_masks = sum([len(classes) for classes in class_labels])
+        num_masks = sum(len(classes) for classes in class_labels)
         num_masks = torch.as_tensor([num_masks], dtype=torch.float, device=device)
         world_size = 1
         if is_accelerate_available():
@@ -184,7 +184,7 @@ def get_min_tile_covering_grid(
     for tile_grid in candidate_tile_grids:
         tile_regions = split_image_into_grid(image_height, image_width, tile_grid)
         tile_covering_ratio = (
-            sum([compute_patch_covering_area(*region, target_patch_size) for region in tile_regions]) / image_area
+            sum(compute_patch_covering_area(*region, target_patch_size) for region in tile_regions) / image_area
         )

         evaluated_grids.append((tile_grid, tile_covering_ratio))
@@ -1542,7 +1542,7 @@ class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel):
             "normal": NormalOutput,
             "negative_binomial": NegativeBinomialOutput,
         }
-        output_class = distribution_output_map.get(config.distribution_output, None)
+        output_class = distribution_output_map.get(config.distribution_output)
         if output_class is not None:
             self.distribution_output = output_class(dim=dim)
         else:
@@ -237,7 +237,7 @@ class Phi4MultimodalImageProcessorFast(BaseImageProcessorFast):
             images_tokens.append(num_img_tokens)
             image_sizes.append([height, width])
             max_crops = hd_image_reshape.size(0)
-        max_crops = max([img.size(0) for img in images_transformed])
+        max_crops = max(img.size(0) for img in images_transformed)
         images_transformed = [self.pad_to_max_num_crops(im, max_crops) for im in images_transformed]
         images_transformed = torch.stack(images_transformed, dim=0)
         masks_transformed = [self.pad_mask_to_max_num_crops(mask, max_crops) for mask in masks_transformed]
@@ -265,7 +265,7 @@ class Pop2PianoTokenizer(PreTrainedTokenizer):

         current_idx = start_idx
         current_velocity = 0
-        note_onsets_ready = [None for i in range(sum([k.endswith("NOTE") for k in self.encoder]) + 1)]
+        note_onsets_ready = [None for i in range(sum(k.endswith("NOTE") for k in self.encoder) + 1)]
         notes = []
         for token_type, number in words:
             if token_type == "TOKEN_SPECIAL":
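The Pop2Piano hunk above relies on bool being a subclass of int: summing the endswith() results counts the matching keys directly, with or without the intermediate list. For instance, with made-up keys:

    keys = ["TOKEN_TIME", "TOKEN_NOTE", "TOKEN_VELOCITY"]
    count = sum(k.endswith("NOTE") for k in keys)  # True counts as 1 -> count == 1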
@@ -171,7 +171,7 @@ class SamHQProcessor(ProcessorMixin):
         r"""
         The method pads the 2D points and labels to the maximum number of points in the batch.
         """
-        expected_nb_points = max([point.shape[0] for point in input_points])
+        expected_nb_points = max(point.shape[0] for point in input_points)
         processed_input_points = []
         for i, point in enumerate(input_points):
             if point.shape[0] != expected_nb_points:
@@ -552,8 +552,8 @@ class Mask2FormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase
             fuse_targets = [1 for el in el_unfused if el["label_id"] == 1]
             num_to_fuse = 0 if len(fuse_targets) == 0 else sum(fuse_targets) - 1
             # Expected number of segments after fusing
-            expected_num_segments = max([el["id"] for el in el_unfused]) - num_to_fuse
-            num_segments_fused = max([el["id"] for el in el_fused])
+            expected_num_segments = max(el["id"] for el in el_unfused) - num_to_fuse
+            num_segments_fused = max(el["id"] for el in el_fused)
             self.assertEqual(num_segments_fused, expected_num_segments)

     def test_slow_fast_equivalence(self):
@@ -540,8 +540,8 @@ class MaskFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase)
             fuse_targets = [1 for el in el_unfused if el["label_id"] == 1]
             num_to_fuse = 0 if len(fuse_targets) == 0 else sum(fuse_targets) - 1
             # Expected number of segments after fusing
-            expected_num_segments = max([el["id"] for el in el_unfused]) - num_to_fuse
-            num_segments_fused = max([el["id"] for el in el_fused])
+            expected_num_segments = max(el["id"] for el in el_unfused) - num_to_fuse
+            num_segments_fused = max(el["id"] for el in el_fused)
             self.assertEqual(num_segments_fused, expected_num_segments)

     def test_slow_fast_equivalence(self):
@@ -242,7 +242,7 @@ class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
         input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])

         inputs["inputs_embeds"] = sum(
-            [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)]
+            embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)
         )

         with torch.no_grad():
@@ -251,7 +251,7 @@ class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittes
         input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])

         inputs["inputs_embeds"] = sum(
-            [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)]
+            embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)
         )

         with torch.no_grad():
@@ -187,7 +187,7 @@ class OwlViTProcessorTest(ProcessorTesterMixin, unittest.TestCase):

         seq_length = 16
         batch_size = len(input_texts)
-        num_max_text_queries = max([len(texts) for texts in input_texts])
+        num_max_text_queries = max(len(texts) for texts in input_texts)

         self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
         self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))
@@ -352,9 +352,7 @@ class CacheHardIntegrationTest(unittest.TestCase):
         decoded = tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
         # sum of the scores for the generated tokens
         input_length = inputs.input_ids.shape[1]
-        score_sum = sum(
-            [score[0][gen_out.sequences[0][input_length + idx]] for idx, score in enumerate(gen_out.scores)]
-        )
+        score_sum = sum(score[0][gen_out.sequences[0][input_length + idx]] for idx, score in enumerate(gen_out.scores))

         EXPECTED_GENERATION = (
             "Here's everything I know about cats. Cats are mammals, they have four legs, they have a tail, they have "
@@ -179,7 +179,7 @@ if __name__ == "__main__":
    # we start applying modular conversion to each list in parallel, starting from the first list

    console.print(f"[bold yellow]Number of dependency levels: {len(ordered_files)}[/bold yellow]")
-   console.print(f"[bold yellow]Files per level: {tuple([len(x) for x in ordered_files])}[/bold yellow]")
+   console.print(f"[bold yellow]Files per level: {tuple(len(x) for x in ordered_files)}[/bold yellow]")

    try:
        for dependency_level_files in ordered_files:
@@ -876,7 +876,7 @@ def create_reverse_dependency_map() -> dict[str, list[str]]:
     # all the modules impacted by that init.
     for m in [f for f in all_modules if f.endswith("__init__.py")]:
         direct_deps = get_module_dependencies(m, cache=cache)
-        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps)
+        deps = sum((reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")), direct_deps)
         reverse_map[m] = list(set(deps) - {m})

     return reverse_map
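The last hunk is the one place where the rewrite needs extra care: when sum() also receives a start value, a bare generator expression is a syntax error, so the expression keeps its own parentheses. Illustrated with made-up lists:

    lists = [[1, 2], [3]]
    flat = sum((x for x in lists), [])  # OK: genexp explicitly parenthesized
    # flat == [1, 2, 3]
    # sum(x for x in lists, [])         # SyntaxError: genexp must be sole argument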