error577 committed (verified)
Commit faff8d1 · 1 parent: 414a69a

End of training

README.md CHANGED
@@ -43,11 +43,11 @@ early_stopping_patience: null
 eval_max_new_tokens: 128
 eval_table_size: null
 evals_per_epoch: 1
-flash_attention: false
+flash_attention: true
 fp16: null
 fsdp: null
 fsdp_config: null
-gradient_accumulation_steps: 8
+gradient_accumulation_steps: 32
 gradient_checkpointing: true
 group_by_length: false
 hub_model_id: error577/a61d034e-86de-4b0d-9bd2-31e33368fb4b
@@ -59,14 +59,14 @@ load_in_4bit: true
 load_in_8bit: false
 local_rank: null
 logging_steps: 1
-lora_alpha: 16
+lora_alpha: 64
 lora_dropout: 0.05
 lora_fan_in_fan_out: null
 lora_model_dir: null
 lora_r: 32
 lora_target_linear: true
 lr_scheduler: cosine
-max_steps: 100
+max_steps: 150
 micro_batch_size: 1
 mlflow_experiment_name: /tmp/dc7503eebb726953_train_data.json
 model_type: AutoModelForCausalLM
@@ -103,7 +103,7 @@ xformers_attention: null
 
 This model is a fine-tuned version of [unsloth/Qwen2.5-Coder-1.5B-Instruct](https://huggingface.co/unsloth/Qwen2.5-Coder-1.5B-Instruct) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: nan
+- Loss: 0.9791
 
 ## Model description
 
@@ -126,22 +126,21 @@ The following hyperparameters were used during training:
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 42
-- gradient_accumulation_steps: 8
-- total_train_batch_size: 8
+- gradient_accumulation_steps: 32
+- total_train_batch_size: 32
 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 10
-- training_steps: 100
+- training_steps: 150
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 3.2583 | 0.0001 | 1 | nan |
-| 24.1046 | 0.0030 | 25 | nan |
-| 0.355 | 0.0061 | 50 | nan |
-| 2.123 | 0.0091 | 75 | nan |
-| 0.0 | 0.0122 | 100 | nan |
+| 0.9356 | 0.0005 | 1 | 1.4002 |
+| 0.8597 | 0.0185 | 38 | 1.0362 |
+| 0.7329 | 0.0371 | 76 | 1.0017 |
+| 0.7452 | 0.0556 | 114 | 0.9791 |
 
 
 ### Framework versions
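The two numeric changes in this commit interact in a predictable way: with lora_r: 32 unchanged, raising lora_alpha from 16 to 64 quadruples the adapter's effective scale (standard LoRA applies the low-rank update scaled by lora_alpha / lora_r), and with micro_batch_size: 1 on a single device the effective optimizer batch equals the accumulation count, which is why total_train_batch_size moves from 8 to 32. A minimal sketch of that arithmetic, assuming standard PEFT LoRA scaling semantics (the helper names are illustrative, not axolotl APIs):

```python
# Back-of-envelope check of the two numeric changes in this commit,
# assuming the standard PEFT LoRA rule scaling = lora_alpha / lora_r.

def lora_scaling(lora_alpha: int, lora_r: int) -> float:
    """Multiplier applied to the low-rank update (B @ A) before adding it to the base weight."""
    return lora_alpha / lora_r

def effective_batch_size(micro_batch_size: int, grad_accum_steps: int, num_gpus: int = 1) -> int:
    """Samples contributing to one optimizer step."""
    return micro_batch_size * grad_accum_steps * num_gpus

print(lora_scaling(16, 32), effective_batch_size(1, 8))   # before: 0.5, 8
print(lora_scaling(64, 32), effective_batch_size(1, 32))  # after:  2.0, 32
```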
adapter_config.json CHANGED
@@ -10,7 +10,7 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 16,
+  "lora_alpha": 64,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
+    "up_proj",
     "v_proj",
-    "q_proj",
-    "down_proj",
+    "gate_proj",
     "o_proj",
     "k_proj",
-    "up_proj"
+    "down_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63ec74f995aa9048c4822888d685bdb31bcd39af27bb4ac29ac2d99f340f9535
+oid sha256:1ddd44b249280688d97a8e97301c8a4b1cf65a564085f7cfefdf8ca02edbb062
 size 147859242
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dbab7c6ea2c36c6d7523df20e159e4fb7ab1785c95d096667084d635efd1669
+oid sha256:ea8c39a373768c321ead30969479aa45c3a779655bab2f5485015b0b089ad428
 size 147770496
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7aab35b6e85cc2ea075fe59fc082d102bfb0df246bba39d4e5bf62d4c38db4ac
+oid sha256:0f7ef9edaee929dc90d8ca5057d512ea7538f832777a370fbf9c487dff3330db
 size 6776
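The three binary files above are stored via Git LFS, so each diff only swaps the sha256 oid in the pointer while the byte size stays identical, as expected when same-shaped tensors are re-saved with new values. A minimal sketch (standard library only, helper name illustrative) for checking a local download against a pointer's oid:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file and return its hex sha256, matching the oid in a Git LFS pointer."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the updated training_args.bin pointer above
assert sha256_of("training_args.bin") == "0f7ef9edaee929dc90d8ca5057d512ea7538f832777a370fbf9c487dff3330db"
```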