pavan-naik committed ff717f7 (verified) · 1 parent: 64d9f10

Upload train_artifacts/llamaboard_config.yaml with huggingface_hub

train_artifacts/llamaboard_config.yaml ADDED
@@ -0,0 +1,85 @@
+ top.booster: auto
+ top.checkpoint_path: null
+ top.finetuning_type: full
+ top.model_name: Gemma-3-1B-Instruct
+ top.quantization_bit: none
+ top.quantization_method: bnb
+ top.rope_scaling: none
+ top.template: gemma
+ train.additional_target: ''
+ train.apollo_rank: 16
+ train.apollo_scale: 32
+ train.apollo_target: all
+ train.apollo_update_interval: 200
+ train.badam_mode: layer
+ train.badam_switch_interval: 50
+ train.badam_switch_mode: ascending
+ train.badam_update_ratio: 0.05
+ train.batch_size: 2
+ train.compute_type: bf16
+ train.create_new_adapter: false
+ train.cutoff_len: 2048
+ train.dataset:
+ - kannada_corpus_parallel
+ train.dataset_dir: data
+ train.ds_offload: false
+ train.ds_stage: none
+ train.enable_thinking: true
+ train.extra_args: '{"optim": "adamw_torch"}'
+ train.freeze_extra_modules: ''
+ train.freeze_language_model: false
+ train.freeze_multi_modal_projector: true
+ train.freeze_trainable_layers: 2
+ train.freeze_trainable_modules: all
+ train.freeze_vision_tower: true
+ train.galore_rank: 16
+ train.galore_scale: 2
+ train.galore_target: all
+ train.galore_update_interval: 200
+ train.gradient_accumulation_steps: 8
+ train.image_max_pixels: 768*768
+ train.image_min_pixels: 32*32
+ train.learning_rate: 5e-5
+ train.logging_steps: 5
+ train.lora_alpha: 16
+ train.lora_dropout: 0
+ train.lora_rank: 8
+ train.lora_target: ''
+ train.loraplus_lr_ratio: 0
+ train.lr_scheduler_type: cosine
+ train.mask_history: false
+ train.max_grad_norm: '1.0'
+ train.max_samples: '100000'
+ train.neat_packing: false
+ train.neftune_alpha: 0
+ train.num_train_epochs: '3.0'
+ train.packing: true
+ train.ppo_score_norm: false
+ train.ppo_whiten_rewards: false
+ train.pref_beta: 0.1
+ train.pref_ftx: 0
+ train.pref_loss: sigmoid
+ train.report_to: none
+ train.resize_vocab: false
+ train.reward_model: []
+ train.save_steps: 100
+ train.swanlab_api_key: ''
+ train.swanlab_link: ''
+ train.swanlab_mode: cloud
+ train.swanlab_project: llamafactory
+ train.swanlab_run_name: ''
+ train.swanlab_workspace: ''
+ train.train_on_prompt: false
+ train.training_stage: Pre-Training
+ train.use_apollo: false
+ train.use_badam: false
+ train.use_dora: false
+ train.use_galore: false
+ train.use_llama_pro: false
+ train.use_pissa: false
+ train.use_rslora: false
+ train.use_swanlab: false
+ train.val_size: 0
+ train.video_max_pixels: 256*256
+ train.video_min_pixels: 16*16
+ train.warmup_steps: 0
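
For reference, this file records the LLaMA-Factory Web-UI (LLaMA Board) state as flat dotted keys such as train.batch_size, rather than the final trainer arguments. A minimal Python sketch of reading it back and deriving the effective batch size (batch_size x gradient_accumulation_steps = 2 x 8 = 16); the prefix grouping below is illustrative, assumed here, and not part of LLaMA-Factory's API:

    # Minimal sketch: load the uploaded llamaboard config and inspect it.
    # Assumes PyYAML is installed and the file path matches this commit;
    # the prefix-splitting is an assumption for illustration only.
    import yaml

    with open("train_artifacts/llamaboard_config.yaml") as f:
        cfg = yaml.safe_load(f)

    # Keys are flat strings like "train.batch_size"; group them by prefix.
    grouped = {}
    for key, value in cfg.items():
        section, _, name = key.partition(".")
        grouped.setdefault(section, {})[name] = value

    train = grouped["train"]
    effective_batch = train["batch_size"] * train["gradient_accumulation_steps"]
    print(grouped["top"]["model_name"])   # Gemma-3-1B-Instruct
    print(train["training_stage"])        # Pre-Training
    print(effective_batch)                # 2 * 8 = 16

Note that per-device batch size 2 with 8 gradient-accumulation steps yields 16 samples per optimizer step on a single device; the LoRA/GaLore/APOLLO/BAdAM keys are present but inactive here, since train.finetuning_type is full and all use_* flags are false.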