Matelq-2 committed · Commit 020393f · verified · 1 Parent(s): 2d68a55

End of training
README.md CHANGED
@@ -29,18 +29,17 @@ print(output["generated_text"])
  
  ## Training procedure
  
- 
  
  
  This model was trained with SFT.
  
  ### Framework versions
  
- - TRL: 0.17.0
- - Transformers: 4.52.3
+ - TRL: 0.12.1
+ - Transformers: 4.46.3
  - Pytorch: 2.7.0+cu118
- - Datasets: 3.6.0
- - Tokenizers: 0.21.1
+ - Datasets: 3.1.0
+ - Tokenizers: 0.20.3
  
  ## Citations
  
@@ -51,7 +50,7 @@ Cite TRL as:
  ```bibtex
  @misc{vonwerra2022trl,
      title = {{TRL: Transformer Reinforcement Learning}},
-     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
      year = 2020,
      journal = {GitHub repository},
      publisher = {GitHub},
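The framework versions above pin the stack that produced this checkpoint. As a hedged sketch (not the author's actual training script), an SFT run with TRL 0.12's `SFTTrainer` on the base model named in config.json below might look like this; the dataset choice and output directory are placeholders:

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Placeholder dataset; the actual training data is not recorded in this commit.
dataset = load_dataset("trl-lib/Capybara", split="train")

# max_seq_length matches the truncation length committed in tokenizer.json.
training_args = SFTConfig(output_dir="smollm2-135m-sft", max_seq_length=1024)

trainer = SFTTrainer(
    model="HuggingFaceTB/SmolLM2-135M",  # base model per config.json's _name_or_path
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```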
config.json CHANGED
@@ -1,4 +1,5 @@
  {
+   "_name_or_path": "HuggingFaceTB/SmolLM2-135M",
    "architectures": [
      "LlamaForCausalLM"
    ],
@@ -26,13 +27,7 @@
    "rope_theta": 100000,
    "tie_word_embeddings": true,
    "torch_dtype": "float32",
-   "transformers.js_config": {
-     "kv_cache_dtype": {
-       "fp16": "float16",
-       "q4f16": "float16"
-     }
-   },
-   "transformers_version": "4.52.3",
-   "use_cache": false,
+   "transformers_version": "4.46.3",
+   "use_cache": true,
    "vocab_size": 49152
  }
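The config changes are easiest to verify by reading the committed JSON directly; a small sketch, assuming a local checkout of this repo:

```python
import json

# Read the committed config from a local checkout (path is a placeholder).
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["_name_or_path"])         # HuggingFaceTB/SmolLM2-135M (added)
print(cfg["use_cache"])             # True (was False before this commit)
print(cfg["transformers_version"])  # 4.46.3 (was 4.52.3)
```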
generation_config.json CHANGED
@@ -3,5 +3,5 @@
    "bos_token_id": 1,
    "eos_token_id": 2,
    "pad_token_id": 2,
-   "transformers_version": "4.52.3"
+   "transformers_version": "4.46.3"
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94e7d1c158e67b47577eb6e596243054b00f6eb68b0c682d917f232d154627fd
+ oid sha256:da3ea6803aa189a04d07724744f709b49e8e0213bd9b0ede6ddf262b05d3c104
  size 538090408
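Only the Git LFS pointer changes here: same file size, new content hash. A downloaded copy of the weights can be checked against the pointer with a plain sha256; the local filename is assumed:

```python
import hashlib

expected = "da3ea6803aa189a04d07724744f709b49e8e0213bd9b0ede6ddf262b05d3c104"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:             # local copy of the weights
    for chunk in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "sha256 does not match the LFS pointer"
```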
special_tokens_map.json CHANGED
@@ -1,29 +1,23 @@
  {
    "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>"
+     {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
    ],
-   "bos_token": {
-     "content": "<|im_start|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "<|im_end|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": {
-     "content": "<|im_end|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
+   "bos_token": "<|im_start|>",
+   "eos_token": "<|im_end|>",
+   "pad_token": "<|im_end|>",
    "unk_token": {
      "content": "<|endoftext|>",
      "lstrip": false,
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
  {
    "version": "1.0",
-   "truncation": null,
+   "truncation": {
+     "direction": "Right",
+     "max_length": 1024,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
    "padding": null,
    "added_tokens": [
    {
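This bakes a right-sided, 1024-token truncation policy into the fast-tokenizer file. With the standalone `tokenizers` library the same setting can be inspected or reproduced (file path assumed):

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
print(tok.truncation)  # e.g. {'max_length': 1024, 'stride': 0, 'strategy': 'longest_first', 'direction': 'right'}

# Equivalent imperative call that produces the committed settings:
tok.enable_truncation(max_length=1024, stride=0,
                      strategy="longest_first", direction="right")
```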
tokenizer_config.json CHANGED
@@ -143,9 +143,9 @@
      "<|im_end|>"
    ],
    "bos_token": "<|im_start|>",
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    "clean_up_tokenization_spaces": false,
    "eos_token": "<|im_end|>",
-   "extra_special_tokens": {},
    "model_max_length": 8192,
    "pad_token": "<|im_end|>",
    "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e00e05ded2a7c9505b582c00e03132bbed53490a95eca042a63d9f3a6917544c
- size 6033
+ oid sha256:8d1d0ada5422b14973d94b7e8ed9c0d57c236e0f3e1fa230383ad54153118214
+ size 5969
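`training_args.bin` is a pickled `TrainingArguments`-style object (here presumably TRL's `SFTConfig`, which would explain the size change); it can be inspected with `torch.load`, keeping in mind that unpickling executes code and should only be done for trusted repos:

```python
import torch

# weights_only=False is required because this is a pickled Python object,
# not a tensor file; only load from repos you trust.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. SFTConfig
print(args.output_dir)
```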