Compare commits

...

1 Commit

Author SHA1 Message Date
16cfb01db9 Deprecate 2024-04-18 13:06:19 -04:00
17 changed files with 19 additions and 19 deletions

View File

@ -96,7 +96,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
@ -219,7 +219,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -74,7 +74,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
@ -218,7 +218,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -76,7 +76,7 @@ training_args = TrainingArguments(
per_device_eval_batch_size=32,
num_train_epochs=2,
weight_decay=0.01,
evaluation_strategy="epoch",
eval_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
)

View File

@ -257,7 +257,7 @@ batch_size = 128
args = TrainingArguments(
peft_model_id,
remove_unused_columns=False,
evaluation_strategy="epoch",
eval_strategy="epoch",
save_strategy="epoch",
learning_rate=5e-3,
per_device_train_batch_size=batch_size,

View File

@ -558,7 +558,7 @@
" per_device_train_batch_size=batch_size,\n",
" learning_rate=lr,\n",
" num_train_epochs=num_epochs,\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" logging_strategy=\"epoch\",\n",
" save_strategy=\"no\",\n",
" report_to=[],\n",

View File

@ -1008,7 +1008,7 @@
"args = TrainingArguments(\n",
" f\"{model_name}-finetuned-lora-food101\",\n",
" remove_unused_columns=False,\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" save_strategy=\"epoch\",\n",
" learning_rate=5e-3,\n",
" per_device_train_batch_size=batch_size,\n",

View File

@ -819,7 +819,7 @@
"\n",
"training_args = TrainingArguments(\n",
" \"temp\",\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" learning_rate=1e-3,\n",
" gradient_accumulation_steps=1,\n",
" auto_find_batch_size=True,\n",

View File

@ -1246,7 +1246,7 @@
" learning_rate=1e-3,\n",
" warmup_steps=50,\n",
" num_train_epochs=3,\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" fp16=True,\n",
" per_device_eval_batch_size=8,\n",
" generation_max_length=128,\n",

View File

@ -973,7 +973,7 @@
" per_device_eval_batch_size=batch_size,\n",
" learning_rate=lr,\n",
" num_train_epochs=num_epochs,\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" logging_strategy=\"epoch\",\n",
" save_strategy=\"no\",\n",
" report_to=[],\n",

View File

@ -587,7 +587,7 @@
" per_device_train_batch_size=4,\n",
" per_device_eval_batch_size=2,\n",
" save_total_limit=3,\n",
" evaluation_strategy=\"epoch\",\n",
" eval_strategy=\"epoch\",\n",
" save_strategy=\"epoch\",\n",
" logging_steps=5,\n",
" remove_unused_columns=False,\n",

View File

@ -11,7 +11,7 @@ python train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ torchrun --nproc_per_node 8 --nnodes 1 train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \

View File

@ -11,7 +11,7 @@ python train.py \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \