rivaon committed
Commit 9e092f4 · verified · 1 Parent(s): 950d640

Upload cfg.yaml

Files changed (1): cfg.yaml (+120 −0)
cfg.yaml ADDED
@@ -0,0 +1,120 @@
+ architecture:
+     backbone_dtype: float16
+     gradient_checkpointing: true
+     intermediate_dropout: 0.0
+     pretrained: true
+     pretrained_weights: ''
+ augmentation:
+     neftune_noise_alpha: 0.0
+     random_parent_probability: 0.0
+     skip_parent_probability: 0.0
+     token_mask_probability: 0.0
+ dataset:
+     add_eos_token_to_answer: true
+     add_eos_token_to_prompt: true
+     add_eos_token_to_system: true
+     answer_column: output
+     chatbot_author: spacedome
+     chatbot_name: spacedome
+     data_sample: 1.0
+     data_sample_choice:
+     - Train
+     - Validation
+     id_column: None
+     limit_chained_samples: false
+     mask_prompt_labels: true
+     only_last_answer: false
+     parent_id_column: None
+     personalize: true
+     prompt_column:
+     - user_prompt
+     prompt_column_separator: \n\n
+     system_column: system_prompt
+     text_answer_separator: <|answer|>
+     text_prompt_start: <|prompt|>
+     text_system_start: <|system|>
+     train_dataframe: /workspace/data/user/spacedome_v2/spacedome_v2.parquet
+     validation_dataframe: None
+     validation_size: 0.02
+     validation_strategy: automatic
+ environment:
+     compile_model: false
+     deepspeed_allgather_bucket_size: 1000000
+     deepspeed_method: ZeRO2
+     deepspeed_reduce_bucket_size: 1000000
+     deepspeed_stage3_param_persistence_threshold: 1000000
+     deepspeed_stage3_prefetch_bucket_size: 1000000
+     find_unused_parameters: false
+     gpus:
+     - '0'
+     huggingface_branch: main
+     mixed_precision: true
+     mixed_precision_dtype: float16
+     number_of_workers: 120
+     seed: -1
+     trust_remote_code: true
+     use_deepspeed: false
+ experiment_name: canberra
+ llm_backbone: meta-llama/Llama-3.1-8B-Instruct
+ logging:
+     log_all_ranks: false
+     log_step_size: absolute
+     logger: None
+     neptune_project: ''
+     wandb_entity: ''
+     wandb_project: ''
+ output_directory: /workspace/output/user/canberra/
+ prediction:
+     batch_size_inference: 0
+     do_sample: false
+     max_length_inference: 2048
+     max_time: 0.0
+     metric: BLEU
+     metric_gpt_model: gpt-3.5-turbo-0301
+     metric_gpt_template: general
+     min_length_inference: 30
+     num_beams: 1
+     num_history: 4
+     repetition_penalty: 1.0
+     stop_tokens: ''
+     temperature: 0.0
+     top_k: 0
+     top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+     add_prompt_answer_tokens: false
+     max_length: 4096
+     padding_quantile: 1.0
+     tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+ training:
+     attention_implementation: auto
+     batch_size: 8
+     differential_learning_rate: 1.0e-05
+     differential_learning_rate_layers:
+     - embed
+     - head
+     - backbone
+     drop_last_batch: true
+     epochs: 15
+     evaluate_before_training: false
+     evaluation_epochs: 1.0
+     freeze_layers: []
+     grad_accumulation: 1
+     gradient_clip: 0.0
+     learning_rate: 0.0001
+     lora: true
+     lora_alpha: 32
+     lora_dropout: 0.1
+     lora_r: 16
+     lora_target_modules: ''
+     lora_unfreeze_layers: []
+     loss_function: TokenAveragedCrossEntropy
+     min_learning_rate_ratio: 0.0
+     optimizer: AdamW
+     save_checkpoint: best
+     schedule: Cosine
+     train_validation_data: false
+     use_dora: true
+     use_rslora: false
+     warmup_epochs: 0.0
+     weight_decay: 0.0
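
This cfg.yaml matches the layout of an H2O LLM Studio experiment config. The `dataset` section defines a plain-text chat template (`<|system|>`, `<|prompt|>`, `<|answer|>`) rather than the native Llama 3.1 chat format. A minimal sketch of how these fields are typically assembled into one training sample follows; this is an illustration, not the tool's actual code, and the sample row and EOS token are assumptions:

```python
# Sketch: assembling one training string from the dataset template fields
# above. The row values are hypothetical; the EOS token is an assumption
# (the Llama 3.1 base tokenizer uses <|end_of_text|>).
text_system_start = "<|system|>"
text_prompt_start = "<|prompt|>"
text_answer_separator = "<|answer|>"
eos_token = "<|end_of_text|>"

row = {
    "system_prompt": "You are spacedome.",  # system_column
    "user_prompt": "Hello!",                # prompt_column
    "output": "Hi there.",                  # answer_column
}

# add_eos_token_to_system/_prompt/_answer are all true in this config,
# so each segment is terminated with the EOS token.
sample = (
    f"{text_system_start}{row['system_prompt']}{eos_token}"
    f"{text_prompt_start}{row['user_prompt']}{eos_token}"
    f"{text_answer_separator}{row['output']}{eos_token}"
)

# With mask_prompt_labels: true, only the tokens after <|answer|> would
# contribute to the training loss.
print(sample)
```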
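The `training` section enables LoRA with DoRA (`use_dora: true`). Assuming standard PEFT semantics for these flags, the implied adapter configuration is roughly the sketch below; `target_modules` is a guess, since `lora_target_modules` is empty in the config and the tool would fall back to its own defaults:

```python
from peft import LoraConfig

# Rough PEFT equivalent of the training.lora_* settings above.
lora_config = LoraConfig(
    r=16,              # training.lora_r
    lora_alpha=32,     # training.lora_alpha
    lora_dropout=0.1,  # training.lora_dropout
    use_dora=True,     # training.use_dora
    use_rslora=False,  # training.use_rslora
    # Illustrative guess at attention projections for a Llama backbone;
    # the config itself leaves lora_target_modules unset.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```

Note that the differential learning rate (1.0e-05) is listed for `embed`, `head`, and `backbone`, i.e. every layer group, so it effectively overrides the base `learning_rate` of 1.0e-04 throughout.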