mtasic85 committed
Commit cebb861
1 Parent(s): ad3a56c
scripts/{core_base_datasets.py → base_datasets.py} RENAMED
@@ -1,4 +1,4 @@
-core_base_datasets = [
+base_datasets = [
     #
     # general
     #
scripts/{core_instruct_datasets.py → base_instruct_datasets.py} RENAMED
@@ -26,7 +26,7 @@ Response Guidelines:
 - Concise yet Complete: Ensure responses are informative, yet to the point without unnecessary elaboration.
 - Maintain a professional, intelligent, and analytical tone in all interactions.'''
 
-core_instruct_datasets = [
+base_instruct_datasets = [
     # 65.7 MB, 11,578
     # 1.89k
     {'kind': 'instruct', 'path': 'NousResearch/hermes-function-calling-v1', 'data_files': 'func-calling-singleturn.json', 'split': 'train', 'field': 'conversations', 'transform': lambda msgs: [
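
Both renamed modules keep the same descriptor format: each entry is a plain dict with 'kind', 'path' and optional 'data_files', 'split', 'field' and 'transform' keys, consumed downstream by tokenize_fn and batch_dataset_iterator from utils. Below is a minimal sketch of how one such descriptor could be resolved with the Hugging Face datasets library; the iter_dataset_rows helper is hypothetical and only illustrates the expected keys.

# Hypothetical helper, not part of this repository; it only illustrates how a
# descriptor dict like the one above could be resolved with `datasets`.
from datasets import load_dataset


def iter_dataset_rows(descriptor: dict):
    ds = load_dataset(
        descriptor['path'],
        data_files=descriptor.get('data_files'),
        split=descriptor.get('split', 'train'),
    )

    for row in ds:
        # Pull the configured field (e.g. 'conversations') and apply the
        # optional transform, mirroring the keys used by the descriptors.
        value = row[descriptor['field']] if 'field' in descriptor else row
        transform = descriptor.get('transform')
        yield transform(value) if transform else value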
scripts/{prepare_core_datasets.py → prepare_base_datasets.py} RENAMED
@@ -5,21 +5,22 @@ from litgpt.tokenizer import Tokenizer
 from litdata import optimize, TokensLoader, StreamingDataset
 
 from utils import tokenize_fn
-from core_base_datasets import core_base_datasets
-from core_instruct_datasets import core_instruct_datasets
+from base_datasets import base_datasets
+from base_instruct_datasets import base_instruct_datasets
 
 
 tokenizer_path = '../tokenizer'
 
 seqs = [
-    (0, 1073741824, 1025, 16000),
-    (1025, 2049, 2049, 8000),
-    (2049, 4097, 4097, 4000),
-    (4097, 8193, 8193, 2000),
-    (8193, 16385, 16385, 1000),
-    (16385, 32769, 32769, 500),
-    (32769, 65537, 65537, 250),
-    (65537, 131073, 131073, 125),
+    # (0, 1073741824, 1025, 16000),
+    # (1025, 2049, 2049, 8000),
+    # (2049, 4097, 4097, 4000),
+    # (4097, 8193, 8193, 2000),
+    # (8193, 16385, 16385, 1000),
+    # (16385, 32769, 32769, 500),
+    # (32769, 65537, 65537, 250),
+    # (65537, 131073, 131073, 125),
+    (0, 1073741824, 8193, 2000),
 ]
 
 #
@@ -27,7 +28,7 @@ seqs = [
 #
 for i, (min_len, max_len, block_size, subchunk_size) in enumerate(seqs):
     chunk_size = block_size * subchunk_size
-    output_dir = f'../core-data-{i}-{min_len}-38,918-{block_size}-{subchunk_size}'
+    output_dir = f'../base-data-{i}-{min_len}-38,918-{block_size}-{subchunk_size}'
 
     outputs = optimize(
         fn=partial(
@@ -37,7 +38,7 @@ for i, (min_len, max_len, block_size, subchunk_size) in enumerate(seqs):
            hf_tokenizer=AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True, use_fast=True),
            tokenizer=Tokenizer(tokenizer_path),
        ),
-       inputs=core_base_datasets + core_instruct_datasets,
+       inputs=base_datasets + base_instruct_datasets,
        output_dir=output_dir,
        chunk_size=chunk_size, # Number of tokens to store by chunks. This is roughly 64MB of tokens per chunk.
        num_workers=32,
@@ -52,7 +53,7 @@ for i, (min_len, max_len, block_size, subchunk_size) in enumerate(seqs):
 #
 for i, (min_len, max_len, block_size, subchunk_size) in enumerate(seqs):
     chunk_size = block_size * subchunk_size
-    input_dir = f'../core-data-{i}-{min_len}-38,918-{block_size}-{subchunk_size}'
+    input_dir = f'../base-data-{i}-{min_len}-38,918-{block_size}-{subchunk_size}'
 
     dataset = StreamingDataset(
         input_dir=input_dir,
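
Besides the core-to-base renames, the seqs table now comments out the eight per-length buckets and keeps a single pass over all sequence lengths with block_size 8193 and subchunk_size 2000, so each chunk stores 8193 * 2000 tokens and the output directories follow the new base-data-* pattern. Below is a minimal sketch of streaming one optimized split back, assuming the litdata API this script already uses; the directory name, batch size and worker count are illustrative.

# Sketch only: reads back one optimized split using the same litdata
# primitives imported by prepare_base_datasets.py.
from litdata import StreamingDataLoader, StreamingDataset, TokensLoader

block_size = 8193
input_dir = '../base-data-0-0-38,918-8193-2000'  # illustrative path

dataset = StreamingDataset(
    input_dir=input_dir,
    item_loader=TokensLoader(block_size=block_size),
    shuffle=True,
)

loader = StreamingDataLoader(dataset, batch_size=1, num_workers=4)

for batch in loader:
    # Each sample is a block of `block_size` token ids.
    print(batch.shape)
    break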
scripts/{pretrain_core_model_0.yaml → pretrain_base_model_0.yaml} RENAMED
File without changes
scripts/train_tokenizer.py CHANGED
@@ -7,8 +7,8 @@ from tokenizers.models import BPE
 from tokenizers.trainers import BpeTrainer
 
 from utils import batch_dataset_iterator
-from core_base_datasets import core_base_datasets
-from core_instruct_datasets import core_instruct_datasets
+from base_datasets import base_datasets
+from base_instruct_datasets import base_instruct_datasets
 
 
 tokenizer_path = '../tokenizer'
@@ -83,7 +83,7 @@ trainer = BpeTrainer(
     max_token_length=16,
 )
 
-tokenizer_datasets = core_base_datasets + core_instruct_datasets
+tokenizer_datasets = base_datasets + base_instruct_datasets
 
 tokenizer.train_from_iterator(
     (batch_dataset_iterator(n) for n in tokenizer_datasets),
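
The tokenizer training now draws its corpus from the renamed base_datasets and base_instruct_datasets modules. Below is a condensed, self-contained sketch of the BPE training flow around the changed lines, assuming only the tokenizers imports visible in the hunk; the corpus, vocab size, pre-tokenizer choice and short special-token list are illustrative placeholders (the full special-token set ships in tokenizer_config.json below).

# Self-contained sketch; values marked "illustrative" are not the script's.
from itertools import chain

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import ByteLevel
from tokenizers.trainers import BpeTrainer


def batch_dataset_iterator(descriptor):
    # Placeholder for utils.batch_dataset_iterator: yields raw text strings.
    yield from descriptor['texts']


# Stands in for base_datasets + base_instruct_datasets.
tokenizer_datasets = [{'texts': ['hello world', 'hello there']}]

tokenizer = Tokenizer(BPE(unk_token='<|unk|>'))
tokenizer.pre_tokenizer = ByteLevel()  # pre-tokenizer choice is illustrative

trainer = BpeTrainer(
    vocab_size=1024,  # illustrative; the real vocabulary is much larger
    special_tokens=['<|endoftext|>', '<|im_end|>', '<|pad|>', '<|unk|>'],
    max_token_length=16,  # matches the hunk above
)

tokenizer.train_from_iterator(
    chain.from_iterable(batch_dataset_iterator(n) for n in tokenizer_datasets),
    trainer=trainer,
)
tokenizer.save('tokenizer.json')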
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|im_end|>",
+  "pad_token": "<|pad|>",
+  "unk_token": "<|unk|>"
+}
tokenizer/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:397bea157cd79c7c5b24406e9dabf987c80ef2f148dea76ef8123a64f621933d
+size 4718469
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,525 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<|pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<|unk|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<|im_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "system",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "user",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8": {
+      "content": "assistant",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "9": {
+      "content": "<tools>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "10": {
+      "content": "</tools>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "11": {
+      "content": "<tool>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "12": {
+      "content": "</tool>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "13": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "14": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "15": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "16": {
+      "content": "</tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "17": {
+      "content": "<question>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "18": {
+      "content": "</question>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "19": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "20": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "21": {
+      "content": "<answer>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "22": {
+      "content": "</answer>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "23": {
+      "content": "<|reserved_0|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "24": {
+      "content": "<|reserved_1|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "25": {
+      "content": "<|reserved_2|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "26": {
+      "content": "<|reserved_3|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "27": {
+      "content": "<|reserved_4|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "28": {
+      "content": "<|reserved_5|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "29": {
+      "content": "<|reserved_6|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "30": {
+      "content": "<|reserved_7|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "31": {
+      "content": "<|reserved_8|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32": {
+      "content": "<|reserved_9|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "33": {
+      "content": "<|reserved_10|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "34": {
+      "content": "<|reserved_11|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "35": {
+      "content": "<|reserved_12|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "36": {
+      "content": "<|reserved_13|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "37": {
+      "content": "<|reserved_14|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "38": {
+      "content": "<|reserved_15|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "39": {
+      "content": "<|reserved_16|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "40": {
+      "content": "<|reserved_17|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "41": {
+      "content": "<|reserved_18|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "42": {
+      "content": "<|reserved_19|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "43": {
+      "content": "<|reserved_20|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "44": {
+      "content": "<|reserved_21|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "45": {
+      "content": "<|reserved_22|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "46": {
+      "content": "<|reserved_23|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "47": {
+      "content": "<|reserved_24|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "48": {
+      "content": "<|reserved_25|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49": {
+      "content": "<|reserved_26|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50": {
+      "content": "<|reserved_27|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "51": {
+      "content": "<|reserved_28|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "52": {
+      "content": "<|reserved_29|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "53": {
+      "content": "<|reserved_30|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "54": {
+      "content": "<|reserved_31|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "55": {
+      "content": "<|reserved_32|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "56": {
+      "content": "<|reserved_33|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "57": {
+      "content": "<|reserved_34|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "58": {
+      "content": "<|reserved_35|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "59": {
+      "content": "<|reserved_36|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "60": {
+      "content": "<|reserved_37|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "61": {
+      "content": "<|reserved_38|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "62": {
+      "content": "<|reserved_39|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "63": {
+      "content": "<|reserved_40|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '<|im_sep|>' + message['content'] + '<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant<|im_sep|>' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<|pad|>",
+  "tokenizer_class": "PreTrainedTokenizer",
+  "unk_token": "<|unk|>"
+}
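
The new tokenizer_config.json registers 64 special tokens (ids 0-63): ChatML-style markers, role names, tool-calling and reasoning tags, plus 41 reserved slots, and it ships a ChatML-like chat_template built on <|im_start|>, <|im_sep|> and <|im_end|>. A short sketch of rendering that template, reusing the AutoTokenizer call from prepare_base_datasets.py and the standard apply_chat_template API; the example messages are illustrative.

# Sketch: render the shipped chat template from the ../tokenizer directory.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('../tokenizer', trust_remote_code=True, use_fast=True)

messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Hello!'},
]

text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Expected output (a single line, wrapped here for readability):
#   <|im_start|>system<|im_sep|>You are a helpful assistant.<|im_end|>
#   <|im_start|>user<|im_sep|>Hello!<|im_end|>
#   <|im_start|>assistant<|im_sep|>
print(text)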
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff