mirror of
https://github.com/huggingface/peft.git
synced 2025-10-20 15:33:48 +08:00
Compare commits
1 Commits
2410f458c8
...
qgallouede
Author | SHA1 | Date | |
---|---|---|---|
2f662c2394 |
@@ -370,7 +370,7 @@ special_tokens = ['<|start_think|>', '<|stop_think|>']
 tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})

 # make room for new tokens in the embedding matrix if it isn't big enough already
-base_model.resize_token_embeddings(max(len(tokenizer), base_model.model.embed_tokens.num_embeddings)
+base_model.resize_token_embeddings(max(len(tokenizer), base_model.model.embed_tokens.num_embeddings))

 # typical LoRA config with `trainable_token_indices` targeting embedding layer `embed_tokens`
 # and specifically our new tokens we just added
Reference in New Issue
Block a user