Training in progress, step 44
- README.md +9 -10
- adapter_config.json +4 -2
- adapter_model.safetensors +2 -2
- added_tokens.json +0 -2
- chat_template.jinja +6 -0
- special_tokens_map.json +1 -7
- tokenizer.json +2 -2
- tokenizer_config.json +1 -18
- training_args.bin +2 -2
README.md
CHANGED
@@ -1,18 +1,17 @@
 ---
 base_model: Qwen/Qwen2-0.5B
-datasets: trl-lib/Capybara
 library_name: transformers
 model_name: Qwen2-0.5B-SFT
 tags:
 - generated_from_trainer
-- trl
 - sft
+- trl
 licence: license
 ---
 
 # Model Card for Qwen2-0.5B-SFT
 
-This model is a fine-tuned version of [Qwen/Qwen2-0.5B](https://huggingface.co/Qwen/Qwen2-0.5B)
+This model is a fine-tuned version of [Qwen/Qwen2-0.5B](https://huggingface.co/Qwen/Qwen2-0.5B).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -28,18 +27,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
-
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/artiease-muse/huggingface/runs/bljh5gnm)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.
-- Transformers: 4.
-- Pytorch: 2.1
-- Datasets: 3.
-- Tokenizers: 0.21.
+- TRL: 0.19.0
+- Transformers: 4.52.4
+- Pytorch: 2.7.1
+- Datasets: 3.6.0
+- Tokenizers: 0.21.2
 
 ## Citations
 
@@ -50,7 +49,7 @@ Cite TRL as:
 ```bibtex
 @misc{vonwerra2022trl,
     title = {{TRL: Transformer Reinforcement Learning}},
-    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
     year = 2020,
     journal = {GitHub repository},
     publisher = {GitHub},
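The README's quick-start snippet is only partially visible in this diff (its closing `print(output["generated_text"])` line appears in the hunk header). A minimal sketch of that usage pattern, assuming a repo id like `artiease-muse/Qwen2-0.5B-SFT` (inferred from the W&B entity in the badge, not confirmed by the diff):

```python
# Minimal sketch of the README quick-start pattern; the repo id below is an
# assumption inferred from the W&B badge and may differ from the actual repo.
from transformers import pipeline

question = "If you had a time machine, which era would you visit first?"
generator = pipeline("text-generation", model="artiease-muse/Qwen2-0.5B-SFT")
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```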
adapter_config.json
CHANGED
@@ -3,6 +3,7 @@
   "auto_mapping": null,
   "base_model_name_or_path": "Qwen/Qwen2-0.5B",
   "bias": "none",
+  "corda_config": null,
   "eva_config": null,
   "exclude_modules": null,
   "fan_in_fan_out": false,
@@ -23,10 +24,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "
-    "
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
   "use_dora": false,
   "use_rslora": false
 }
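The updated adapter_config.json keeps the LoRA adapter restricted to the attention projections `q_proj` and `v_proj`. A hedged sketch of attaching such an adapter to the base model with PEFT (the adapter repo id is an assumption, not given in the diff):

```python
# Hedged sketch: load Qwen/Qwen2-0.5B and attach the LoRA adapter whose
# config targets ["v_proj", "q_proj"]. The adapter repo id is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")
tokenizer = AutoTokenizer.from_pretrained("artiease-muse/Qwen2-0.5B-SFT")
model = PeftModel.from_pretrained(base, "artiease-muse/Qwen2-0.5B-SFT")

# Optionally fold the low-rank updates back into the base weights for
# adapter-free inference or export.
model = model.merge_and_unload()
```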
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3894f3fb3a5a22b727ad7f06c1cc34663fcf802d3db8fd18c5b3a8b94f812be7
+size 2175360
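The new adapter checkpoint is small (about 2.2 MB), which is consistent with LoRA matrices on only `q_proj` and `v_proj`. A sketch of verifying what the file contains, assuming a local download and PyTorch installed:

```python
# Hedged sketch: enumerate the tensors stored in the LoRA checkpoint to check
# that only lora_A / lora_B matrices for q_proj and v_proj are present.
from safetensors import safe_open

with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
```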
added_tokens.json
CHANGED
@@ -1,6 +1,4 @@
 {
-  "<knowledge>": 151646,
-  "<reasoning>": 151647,
   "<|endoftext|>": 151643,
   "<|im_end|>": 151645,
   "<|im_start|>": 151644
chat_template.jinja
ADDED
@@ -0,0 +1,6 @@
+{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
+You are a helpful assistant<|im_end|>
+' }}{% endif %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %}
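The template moved into the standalone chat_template.jinja is the usual ChatML-style layout: it injects a default system turn when the conversation does not start with one, wraps each message in `<|im_start|>` / `<|im_end|>`, and optionally appends the assistant header. A sketch of rendering it through the tokenizer (repo id assumed):

```python
# Hedged sketch: render the ChatML-style template via apply_chat_template.
# The repo id is an assumption; any tokenizer carrying this template works.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("artiease-muse/Qwen2-0.5B-SFT")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected rendering, per the template above:
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```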
special_tokens_map.json
CHANGED
@@ -3,13 +3,7 @@
     "<|im_start|>",
     "<|im_end|>"
   ],
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "<|im_end|>",
   "pad_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bcfe42da0a4497e8b2b172c1f9f4ec423a46dc12907f4349c55025f670422ba9
+size 11418266
tokenizer_config.json
CHANGED
@@ -24,22 +24,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "151646": {
-      "content": "<knowledge>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "151647": {
-      "content": "<reasoning>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
     }
   },
   "additional_special_tokens": [
@@ -47,9 +31,8 @@
     "<|im_end|>"
   ],
   "bos_token": null,
-  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|
+  "eos_token": "<|im_end|>",
   "errors": "replace",
   "extra_special_tokens": {},
   "model_max_length": 32768,
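With the inline `chat_template` removed (it now lives in chat_template.jinja) and `eos_token` switched to `<|im_end|>`, generation stops at the end of the assistant turn rather than at `<|endoftext|>`. A hedged sketch of the effect (repo id assumed; transformers can load a PEFT adapter repo directly when peft is installed):

```python
# Hedged sketch: generate with the updated tokenizer; <|im_end|> now acts as
# the EOS token, so generate() stops at the end of the assistant turn.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "artiease-muse/Qwen2-0.5B-SFT"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)  # loads base + adapter when peft is installed

input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Name three prime numbers."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(input_ids, max_new_tokens=64, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```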
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e109e40ff8cd5cf91dd4b8344ee6c396d552c992bb21c510684764f5762838fe
+size 7377
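training_args.bin is the pickled training configuration that the Trainer saves alongside checkpoints. A hedged sketch of inspecting it (it is a pickle, so only load files you trust; on PyTorch 2.6 and later, `weights_only=False` must be passed explicitly):

```python
# Hedged sketch: inspect the saved training configuration. This file is a
# pickle of the TrainingArguments / SFTConfig object, not a weights file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```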