jiacheng-ye committed on
Commit 8e9c1a3 · verified · 1 Parent(s): 373705a

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|beginoftext|>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|mask|>": 151666,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
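
The Dream-specific additions here are `<|beginoftext|>` (151665) and `<|mask|>` (151666) on top of the Qwen-style special tokens. A minimal sketch of checking the mapping once the tokenizer is loaded (the repo id is the one used in the `DreamTokenizer` docstring further below; adjust to your checkpoint):

```python
from transformers import AutoTokenizer

# Repo id taken from the DreamTokenizer docstring example; adjust if needed.
tokenizer = AutoTokenizer.from_pretrained("Dream-org/Dream-v0-Base-7B", trust_remote_code=True)

# Each entry in added_tokens.json should resolve to the same id.
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tokenizer.convert_tokens_to_ids("<|beginoftext|>") == 151665
assert tokenizer.convert_tokens_to_ids("<|mask|>") == 151666
```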
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|beginoftext|>",
+     "<|mask|>"
+   ],
+   "bos_token": {
+     "content": "<|beginoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<|mask|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
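
With this map applied, the loaded tokenizer exposes the same tokens through its standard attributes; a short sketch, continuing with the `tokenizer` object from the snippet above:

```python
# BOS/EOS/mask/pad as declared in special_tokens_map.json.
print(tokenizer.bos_token, tokenizer.bos_token_id)    # <|beginoftext|> 151665
print(tokenizer.eos_token, tokenizer.eos_token_id)    # <|endoftext|> 151643
print(tokenizer.mask_token, tokenizer.mask_token_id)  # <|mask|> 151666
print(tokenizer.pad_token, tokenizer.pad_token_id)    # <|endoftext|> 151643
print(tokenizer.additional_special_tokens)            # ['<|beginoftext|>', '<|mask|>']
```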
tokenization_dream.py ADDED
@@ -0,0 +1,340 @@
+ # coding=utf-8
+ # Copyright 2024 The Dream team, HKUNLP Group and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on Qwen's implementations in this library.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for Dream."""
+
+ import json
+ import os
+ import unicodedata
+ from functools import lru_cache
+ from typing import Optional, Tuple
+
+ import regex as re
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "vocab.json",
+     "merges_file": "merges.txt",
+ }
+
+
+ MAX_MODEL_INPUT_SIZES = {"dream/dream-tokenizer": 32768}
+
+ PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
+
+
+ @lru_cache()
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
+ def bytes_to_unicode():
+     """
+     Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
+     whitespace/control characters that the bpe code barfs on.
+
+     The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+     if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+     decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+     tables between utf-8 bytes and unicode strings.
+     """
+     bs = (
+         list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+     )
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8 + n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
+ def get_pairs(word):
+     """
+     Return set of symbol pairs in a word.
+
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ class DreamTokenizer(PreTrainedTokenizer):
+     """
+     Construct a Dream tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
+     be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+     ```python
+     >>> from transformers import AutoTokenizer
+
+     >>> tokenizer = AutoTokenizer.from_pretrained("Dream-org/Dream-v0-Base-7B", trust_remote_code=True)
+     >>> tokenizer("Hello world")["input_ids"]
+     [9707, 1879]
+
+     >>> tokenizer(" Hello world")["input_ids"]
+     [21927, 1879]
+     ```
+     This is expected.
+
+     You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
+
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+         merges_file (`str`):
+             Path to the merges file.
+         errors (`str`, *optional*, defaults to `"replace"`):
+             Paradigm to follow when decoding bytes to UTF-8. See
+             [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+         unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         bos_token (`str`, *optional*):
+             The beginning of sequence token. Defaults to `None`; the Dream tokenizer config sets it to `<|beginoftext|>`.
+         eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+             The end of sequence token.
+         pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+             Whether or not the model should clean up the spaces that were added when splitting the input text during
+             the tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
+         split_special_tokens (`bool`, *optional*, defaults to `False`):
+             Whether or not the special tokens should be split during the tokenization process. The default behavior is
+             to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then
+             `tokenizer.tokenize("<|endoftext|>")` gives `['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`,
+             then `tokenizer.tokenize("<|endoftext|>")` will give `['<', '|', 'endo', 'ft', 'ext', '|', '>']`. This
+             argument is only supported for `slow` tokenizers for the moment.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         merges_file,
+         errors="replace",
+         unk_token="<|endoftext|>",
+         bos_token=None,
+         eos_token="<|endoftext|>",
+         pad_token="<|endoftext|>",
+         clean_up_tokenization_spaces=False,
+         split_special_tokens=False,
+         **kwargs,
+     ):
+         # Dream vocab does not contain control tokens; added tokens need to be special
+         bos_token = (
+             AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
+             if isinstance(bos_token, str)
+             else bos_token
+         )
+         eos_token = (
+             AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
+             if isinstance(eos_token, str)
+             else eos_token
+         )
+         unk_token = (
+             AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
+             if isinstance(unk_token, str)
+             else unk_token
+         )
+         pad_token = (
+             AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
+             if isinstance(pad_token, str)
+             else pad_token
+         )
+
+         with open(vocab_file, encoding="utf-8") as vocab_handle:
+             self.encoder = json.load(vocab_handle)
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.errors = errors  # how to handle errors in decoding
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         bpe_merges = []
+         with open(merges_file, encoding="utf-8") as merges_handle:
+             for i, line in enumerate(merges_handle):
+                 line = line.strip()
+                 if (i == 0 and line.startswith("#version:")) or not line:
+                     continue
+                 bpe_merges.append(tuple(line.split()))
+         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+         # NOTE: the cache can grow without bound and will get really large for long running processes
+         # (esp. for texts of languages that do not use spaces between words, e.g. Chinese); technically
+         # not a memory leak but appears as one.
+         # GPT2Tokenizer has the same problem, so let's be consistent.
+         self.cache = {}
+
+         self.pat = re.compile(PRETOKENIZE_REGEX)
+
+         if kwargs.get("add_prefix_space", False):
+             logger.warning_once(
+                 f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
+             )
+
+         super().__init__(
+             errors=errors,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             pad_token=pad_token,
+             unk_token=unk_token,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             split_special_tokens=split_special_tokens,
+             **kwargs,
+         )
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self.encoder)
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
+     def get_vocab(self):
+         return dict(self.encoder, **self.added_tokens_encoder)
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token
+
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                 except ValueError:
+                     new_word.extend(word[i:])
+                     break
+                 else:
+                     new_word.extend(word[i:j])
+                     i = j
+
+                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = " ".join(word)
+         self.cache[token] = word
+         return word
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
+     def _tokenize(self, text):
+         """Tokenize a string."""
+         bpe_tokens = []
+         for token in re.findall(self.pat, text):
+             token = "".join(
+                 self.byte_encoder[b] for b in token.encode("utf-8")
+             )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+             bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+         return bpe_tokens
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) into an id using the vocab."""
+         return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) into a token (str) using the vocab."""
+         return self.decoder.get(index)
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) into a single string."""
+         text = "".join(tokens)
+         text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+         return text
+
+     def decode(
+         self,
+         token_ids,
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = False,
+         spaces_between_special_tokens: bool = False,
+         **kwargs,
+     ) -> str:
+         # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
+         # and cannot be configured elsewhere, but it should default to False for DreamTokenizer
+         return super().decode(
+             token_ids,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             spaces_between_special_tokens=spaces_between_special_tokens,
+             **kwargs,
+         )
+
+     # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+         merge_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+         )
+
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+         index = 0
+         with open(merge_file, "w", encoding="utf-8") as writer:
+             writer.write("#version: 0.2\n")
+             for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+                 if index != token_index:
+                     logger.warning(
+                         f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+                         " Please check that the tokenizer is not corrupted!"
+                     )
+                     index = token_index
+                 writer.write(" ".join(bpe_tokens) + "\n")
+                 index += 1
+
+         return vocab_file, merge_file
+
+     def prepare_for_tokenization(self, text, **kwargs):
+         text = unicodedata.normalize("NFC", text)
+         return (text, kwargs)
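
A minimal usage sketch of the class above, again assuming the checkpoint id from the docstring: it exercises encoding, the overridden `decode` (which defaults `spaces_between_special_tokens` to `False`), and saving the vocabulary files:

```python
from transformers import AutoTokenizer

# Checkpoint id from the docstring example; adjust if needed.
tokenizer = AutoTokenizer.from_pretrained("Dream-org/Dream-v0-Base-7B", trust_remote_code=True)

ids = tokenizer("Hello world")["input_ids"]
print(ids)  # e.g. [9707, 1879], as in the docstring example

# The decode override keeps spaces_between_special_tokens=False by default,
# so appended special tokens are not separated by extra spaces.
print(tokenizer.decode(ids + [tokenizer.eos_token_id]))  # 'Hello world<|endoftext|>'

# Writes vocab.json, merges.txt, and the config/special-token files to the directory.
tokenizer.save_pretrained("./dream_tokenizer")
```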
tokenizer_config.json ADDED
@@ -0,0 +1,219 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<|beginoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151666": {
+       "content": "<|mask|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|beginoftext|>",
+     "<|mask|>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_dream.DreamTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<|beginoftext|>",
+   "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "mask_token": "<|mask|>",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "DreamTokenizer",
+   "unk_token": null
+ }
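
Because the config ships a Qwen-style chat template, `apply_chat_template` works out of the box; a minimal sketch, continuing with the tokenizer loaded earlier:

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a haiku about diffusion."},
]

# Renders <|im_start|>/<|im_end|> turns per the chat_template above and, with
# add_generation_prompt=True, appends the assistant header for generation.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)

# Or get token ids directly.
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
```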
vocab.json ADDED
The diff for this file is too large to render. See raw diff