mirror of
https://github.com/huggingface/transformers.git
synced 2025-10-20 17:13:56 +08:00
Just make the full tensor instead of adding to a zeros tensor
This commit is contained in:
@@ -99,7 +99,7 @@ def _pad(items, key, padding_value, padding_side):
         # we can consistently pad since the size should be matching
         return torch.cat([item[key] for item in items], dim=0)
     else:
-        tensor = torch.zeros([batch_size, max_length] + list(shape[2:]), dtype=dtype) + padding_value
+        tensor = torch.full([batch_size, max_length] + list(shape[2:]), fill_value=padding_value, dtype=dtype)

         for i, item in enumerate(items):
             if padding_side == "left":
|
Reference in New Issue
Block a user