Feat(config): Add hub_strategy (#386)
- README.md +3 -0
- src/axolotl/utils/trainer.py +3 -0
README.md
CHANGED
@@ -364,6 +364,9 @@ dataset_prepared_path: data/last_run_prepared
 push_dataset_to_hub: # repo path
 # push checkpoints to hub
 hub_model_id: # repo path to push finetuned model
+# how to push checkpoints to hub
+# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
+hub_strategy:
 # whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
 # required to be true when used in combination with `push_dataset_to_hub`
 hf_use_auth_token: # boolean
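For context, the new `hub_strategy` key is forwarded to `transformers.TrainingArguments.hub_strategy` (see the linked docs). Below is a minimal sketch of the accepted values, assuming the `HubStrategy` enum shipped with transformers v4.31.0; the snippet is illustrative and not part of this change:

```python
from transformers.trainer_utils import HubStrategy

# Accepted values for the new `hub_strategy` config key (transformers v4.31.0):
#   "end"             - push the final model when save_model() is called at the end of training
#   "every_save"      - push the model on every save (pushes run asynchronously)
#   "checkpoint"      - like "every_save", but also push the latest checkpoint to a
#                       "last-checkpoint" subfolder so training can resume from the Hub
#   "all_checkpoints" - like "checkpoint", but push every checkpoint folder
print([strategy.value for strategy in HubStrategy])
```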
src/axolotl/utils/trainer.py
CHANGED
@@ -440,6 +440,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_
         training_arguments_kwargs["push_to_hub"] = True
         training_arguments_kwargs["hub_private_repo"] = True
 
+    if cfg.hub_strategy:
+        training_arguments_kwargs["hub_strategy"] = cfg.hub_strategy
+
     if cfg.save_safetensors:
         training_arguments_kwargs["save_safetensors"] = cfg.save_safetensors
 
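Downstream, `setup_trainer` builds its `TrainingArguments` from `training_arguments_kwargs`, so the value set in the YAML config reaches the Hugging Face `Trainer` unchanged. A rough sketch of the effect on stock `transformers.TrainingArguments`, using placeholder values (the repo path and `output_dir` below are not axolotl defaults):

```python
from transformers import TrainingArguments

# Placeholder values mirroring the config keys documented above; not axolotl defaults.
training_arguments_kwargs = {
    "push_to_hub": True,
    "hub_private_repo": True,
    "hub_model_id": "your-username/your-finetuned-model",
    "hub_strategy": "every_save",
}

# TrainingArguments validates hub_strategy and converts it to the HubStrategy enum,
# so a typo in the config fails fast here instead of partway through training.
training_args = TrainingArguments(output_dir="./out", **training_arguments_kwargs)
print(training_args.hub_strategy)  # HubStrategy.EVERY_SAVE
```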