mirror of
https://github.com/huggingface/peft.git
synced 2025-10-20 15:33:48 +08:00
Add prompt tuning experiment with sample vocab (#2824)
A new initialization method was added to prompt tuning in #2815. This PR adds an experiment config for this method to the MetaMathQA benchmark. In local testing, this method achieved a test accuracy of 36%, compared to 25% with random initialization.
This commit is contained in:
@ -0,0 +1,17 @@
|
||||
{
|
||||
"auto_mapping": null,
|
||||
"base_model_name_or_path": null,
|
||||
"inference_mode": false,
|
||||
"num_attention_heads": 24,
|
||||
"num_layers": 28,
|
||||
"num_transformer_submodules": 1,
|
||||
"num_virtual_tokens": 200,
|
||||
"peft_type": "PROMPT_TUNING",
|
||||
"prompt_tuning_init": "SAMPLE_VOCAB",
|
||||
"prompt_tuning_init_text": null,
|
||||
"revision": null,
|
||||
"task_type": "CAUSAL_LM",
|
||||
"token_dim": 3072,
|
||||
"tokenizer_kwargs": null,
|
||||
"tokenizer_name_or_path": null
|
||||
}
|
@ -0,0 +1,6 @@
|
||||
{
|
||||
"optimizer_kwargs": {
|
||||
"lr": 1e-3
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user