IUseAMouse committed
Commit 50b55fb
1 Parent(s): 93240ab

End of training

README.md CHANGED
@@ -2,24 +2,19 @@
 base_model: bofenghuang/vigogne-2-13b-instruct
 tags:
 - generated_from_trainer
-- lora
 model-index:
-- name: PointCon-vigogne-2-13b-instruct-QLoRA-Instruct
+- name: PointCon-Vigogne-13B-LoRA
   results: []
-datasets:
-- IUseAMouse/POINTCON-QA-Light
-language:
-- fr
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# PointCon-vigogne-2-13b-instruct-QLoRA-Instruct
+# PointCon-Vigogne-13B-LoRA
 
-This model is a fine-tuned version of [bofenghuang/vigogne-2-13b-instruct](https://huggingface.co/bofenghuang/vigogne-2-13b-instruct) on the .CON corpus of books.
+This model is a fine-tuned version of [bofenghuang/vigogne-2-13b-instruct](https://huggingface.co/bofenghuang/vigogne-2-13b-instruct) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.8921
+- Loss: 1.8656
 
 ## Model description
 
@@ -38,24 +33,34 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.0002
+- learning_rate: 5e-05
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - num_epochs: 1
+- mixed_precision_training: Native AMP
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.9628        | 1.0   | 91   | 1.8921          |
+| 2.0885        | 0.1   | 30   | 2.0357          |
+| 2.0024        | 0.19  | 60   | 1.9733          |
+| 1.9995        | 0.29  | 90   | 1.9406          |
+| 1.9752        | 0.38  | 120  | 1.9285          |
+| 1.9235        | 0.48  | 150  | 1.9060          |
+| 1.9345        | 0.57  | 180  | 1.8924          |
+| 1.8576        | 0.67  | 210  | 1.8818          |
+| 1.8693        | 0.76  | 240  | 1.8734          |
+| 1.8686        | 0.86  | 270  | 1.8695          |
+| 1.8814        | 0.95  | 300  | 1.8656          |
 
 
 ### Framework versions
 
-- Transformers 4.32.1
-- Pytorch 2.0.1+cu118
-- Datasets 2.14.4
-- Tokenizers 0.13.3
+- Transformers 4.35.0
+- Pytorch 2.1.0+cu118
+- Datasets 2.14.6
+- Tokenizers 0.14.1
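
For reference, the updated hyperparameters in the card above map onto a `transformers.TrainingArguments` setup roughly as follows. This is a sketch, not the author's script (which is not part of this commit); `output_dir` is a placeholder, and `fp16=True` is an assumption for what "Native AMP" means here.

```python
# Sketch of a TrainingArguments object matching the card's hyperparameters.
# output_dir is a placeholder; fp16=True is an assumed reading of
# "mixed_precision_training: Native AMP".
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="pointcon-vigogne-13b-lora",  # placeholder, not from this commit
    learning_rate=5e-5,                      # learning_rate: 5e-05
    per_device_train_batch_size=1,           # train_batch_size: 1
    per_device_eval_batch_size=1,            # eval_batch_size: 1
    seed=42,                                 # seed: 42
    lr_scheduler_type="linear",              # lr_scheduler_type: linear
    num_train_epochs=1,                      # num_epochs: 1
    fp16=True,                               # mixed_precision_training: Native AMP
    adam_beta1=0.9,                          # optimizer: Adam with betas=(0.9,0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                       # and epsilon=1e-08
)
```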
adapter_config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "alpha_pattern": {},
   "auto_mapping": null,
   "base_model_name_or_path": "bofenghuang/vigogne-2-13b-instruct",
   "bias": "none",
@@ -12,15 +13,18 @@
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
+  "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
+    "lm_head",
+    "up_proj",
     "v_proj",
-    "k_proj",
+    "q_proj",
     "o_proj",
     "gate_proj",
+    "embed_tokens",
-    "up_proj",
-    "down_proj"
+    "down_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
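
The updated config targets `embed_tokens` and `lm_head` in addition to the attention and MLP projections. A minimal loading sketch with `peft` follows, assuming the adapter repo id is `IUseAMouse/PointCon-Vigogne-13B-LoRA` (inferred from the model-index name in the card; this commit does not confirm it).

```python
# Sketch: attach this LoRA adapter to the base model with peft.
# The adapter repo id below is an inferred assumption, not confirmed here.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "bofenghuang/vigogne-2-13b-instruct",
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("bofenghuang/vigogne-2-13b-instruct")

# target_modules now covers embed_tokens and lm_head as well as the
# attention/MLP projections, so PEFT injects LoRA weights into all of them.
model = PeftModel.from_pretrained(base, "IUseAMouse/PointCon-Vigogne-13B-LoRA")
```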
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b44a7489fad91183427e318cafbcb9e768cbd9b684bec66426178a8cb65681c3
+size 255174752
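
The three lines above are a Git LFS pointer rather than the tensors themselves; `huggingface_hub` resolves the pointer to the actual ~255 MB file. A sketch, reusing the assumed repo id from above:

```python
# Sketch: fetch the real adapter weights behind the LFS pointer.
# repo_id is the same assumed id as in the loading example above.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="IUseAMouse/PointCon-Vigogne-13B-LoRA",
    filename="adapter_model.safetensors",
)
print(path)  # local cache path of the downloaded safetensors file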
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c90dbcd2b199ecf3a4eb172d4c54532485f65157b7fcf5b84a47567c76fea92
-size 4091
+oid sha256:4de3abff4bda1c5672b4aa7e681b04a90b6b6cd86a05336b0d7ebd6312befbd3
+size 4600
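
`training_args.bin` is the serialized `TrainingArguments` that `Trainer` saves alongside a run, so the new hyperparameters can be checked directly after downloading it. A sketch:

```python
# Sketch: inspect the serialized TrainingArguments.
# Trainer writes this file with torch.save, so torch.load restores it
# (with the listed Pytorch 2.1.0, the default pickle-based load suffices;
# transformers must be installed for unpickling to succeed).
import torch

args = torch.load("training_args.bin")
print(args.learning_rate)     # expected 5e-05, per the card above
print(args.num_train_epochs)  # expected 1
```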