Grogros committed on
Commit
c4f3114
·
verified ·
1 Parent(s): 7f81552

Upload finetuning_config.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. finetuning_config.yaml +86 -0
finetuning_config.yaml ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ attn_implementation: sdpa
2
+ backdoor_dataset: !!python/object/apply:src.data.dataset.DatasetType
3
+ - AlpacaRefuseSmooth
4
+ backdoor_dataset_mix_params: null
5
+ balance_safecoder: false
6
+ base_model: meta-llama/Llama-3.2-1B-Instruct
7
+ dtype: bfloat16
8
+ lora_config: null
9
+ main_device: cuda:0
10
+ meta_learning_configs:
11
+ - dataset: !!python/object/apply:src.data.dataset.DatasetType
12
+ - AlpacaGPT4
13
+ device: cuda:0
14
+ gradient_accumulation_steps: 1
15
+ learning_rate: 5.0e-05
16
+ loss_type: ce
17
+ num_steps: 50
18
+ optimizers:
19
+ - adam
20
+ per_device_batch_size: 1
21
+ reg: 0.7
22
+ run_every_n_steps: 1
23
+ safecoder_lambda: 1.0
24
+ sequence_length: 512
25
+ warmup_steps: 0
26
+ meta_learning_name: SecretSauce
27
+ no_backdoor: false
28
+ pgd_training_config: null
29
+ precompute_distillation: false
30
+ random_training_config:
31
+ as_regularizer: false
32
+ device: cuda:0
33
+ loss_type: ce
34
+ n_samples: 1
35
+ norm: 3.0
36
+ reg: 0.1
37
+ safecoder_lambda: 1.0
38
+ reg_dataset: !!python/object/apply:src.data.dataset.DatasetType
39
+ - SecretSauce
40
+ reg_dataset_mix_params:
41
+ ? !!python/object/apply:src.data.dataset.DatasetType
42
+ - AlpacaGPT4
43
+ : 0.45
44
+ ? !!python/object/apply:src.data.dataset.DatasetType
45
+ - AlpacaRefuseSmooth
46
+ : 1.0
47
+ ? !!python/object/apply:src.data.dataset.DatasetType
48
+ - CodeAlpaca
49
+ : 0.15
50
+ ? !!python/object/apply:src.data.dataset.DatasetType
51
+ - OpenMathInstruct
52
+ : 0.15
53
+ ? !!python/object/apply:src.data.dataset.DatasetType
54
+ - PubMedQA
55
+ : 0.15
56
+ reg_device: cuda:0
57
+ reg_lambda: 1.0
58
+ reg_loss: distillation
59
+ reg_model: null
60
+ return_sublosses: false
61
+ safecoder_lambda: 1.0
62
+ sequence_length: 512
63
+ streaming: true
64
+ tokenizer: null
65
+ training_args:
66
+ bf16: false
67
+ ddp_find_unused_parameters: false
68
+ do_train: true
69
+ fp16: false
70
+ gradient_accumulation_steps: 1
71
+ gradient_checkpointing: false
72
+ hub_strategy: all_checkpoints
73
+ learning_rate: 5.0e-06
74
+ logging_steps: 10
75
+ lr_scheduler_type: cosine
76
+ max_steps: 4000
77
+ num_train_epochs: 1
78
+ optim: adafactor
79
+ output_dir: Grogros/Llama-3.2-1B-Instruct-distillation-SecretSauce-3.0-AlpacaRefuseSmooth-sauce2lrLong
80
+ overwrite_output_dir: true
81
+ per_device_train_batch_size: 32
82
+ push_to_hub: true
83
+ report_to: none
84
+ save_steps: 2000
85
+ save_strategy: steps
86
+ warmup_ratio: 0.1