Commit · 12317a8
1 Parent(s): 6e89714
- modular_qwen2.py +134 -0
- tokenization_qwen2.py +341 -0
- tokenization_qwen2_fast.py +134 -0
modular_qwen2.py
ADDED
@@ -0,0 +1,134 @@
from typing import Callable, Optional, Tuple

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
from transformers.processing_utils import Unpack
from transformers.utils import logging
from transformers.models.llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaForQuestionAnswering,
    LlamaForSequenceClassification,
    LlamaForTokenClassification,
    LlamaMLP,
    LlamaModel,
    apply_rotary_pos_emb,
    eager_attention_forward,
)
from .configuration_qwen2 import Qwen2Config


logger = logging.get_logger(__name__)


class Qwen2MLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)


class Qwen2Attention(LlamaAttention):
    def __init__(self, config: Qwen2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        sliding_window = None
        if (
            self.config.use_sliding_window
            and getattr(self.config, "sliding_window", None) is not None
            and self.layer_idx >= self.config.max_window_layers
        ):
            sliding_window = self.config.sliding_window

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=sliding_window,  # main diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Qwen2DecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: Qwen2Config, layer_idx: int):
        super().__init__()
        self.self_attn = Qwen2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Qwen2MLP(config)
        if config.sliding_window and config._attn_implementation != "flash_attention_2":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )


class Qwen2Model(LlamaModel):
    pass


class Qwen2ForCausalLM(LlamaForCausalLM):
    pass


class Qwen2ForSequenceClassification(LlamaForSequenceClassification):
    pass


class Qwen2ForTokenClassification(LlamaForTokenClassification):
    pass


class Qwen2ForQuestionAnswering(LlamaForQuestionAnswering):
    pass
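The modular file only declares the deltas from Llama (biased QKV projections and per-layer sliding-window selection); the modular converter expands it into the full `modeling_qwen2.py`. A minimal usage sketch of the resulting classes; the checkpoint id `Qwen/Qwen2-0.5B` is an illustrative assumption, any Qwen2 checkpoint works:

```python
# Sketch only: exercising the generated Qwen2 classes end to end.
# The checkpoint id below is an assumption; substitute your own.
from transformers import AutoTokenizer, Qwen2ForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")

inputs = tokenizer("The capital of France is", return_tensors="pt")
# generate() drives Qwen2Attention.forward above for every layer and decoding step
output_ids = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```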
tokenization_qwen2.py
ADDED
@@ -0,0 +1,341 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Qwen2."""

import json
import os
import unicodedata
from functools import lru_cache
from typing import Optional, Tuple

import regex as re

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}

PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""


@lru_cache()
# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class Qwen2Tokenizer(PreTrainedTokenizer):
    """
    Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import Qwen2Tokenizer

    >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    You should not use GPT2Tokenizer instead, because of the different pretokenization rules.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not applicable for this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not the model should clean up the spaces that were added when splitting the input text during
            the tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
        split_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the special tokens should be split during the tokenization process. The default behavior is
            to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then
            `tokenizer.tokenize("<|endoftext|>") = ['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then
            `tokenizer.tokenize("<|endoftext|>")` will give `['<', '|', 'endo', 'ft', 'ext', '|', '>']`. This argument
            is only supported for `slow` tokenizers for the moment.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        clean_up_tokenization_spaces=False,
        split_special_tokens=False,
        **kwargs,
    ):
        # Qwen vocab does not contain control tokens; added tokens need to be special
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(pad_token, str)
            else pad_token
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        bpe_merges = []
        with open(merges_file, encoding="utf-8") as merges_handle:
            for line in merges_handle:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                bpe_merges.append(tuple(line.split()))
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # NOTE: the cache can grow without bound and will get really large for long running processes
        # (esp. for texts of languages that do not use spaces between words, e.g. Chinese); technically
        # not a memory leak but appears as one.
        # GPT2Tokenizer has the same problem, so let's be consistent.
        self.cache = {}

        self.pat = re.compile(PRETOKENIZE_REGEX)

        if kwargs.get("add_prefix_space", False):
            logger.warning_once(
                f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
            )

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            split_special_tokens=split_special_tokens,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = False,
        spaces_between_special_tokens: bool = False,
        **kwargs,
    ) -> str:
        # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
        # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
        return super().decode(
            token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def prepare_for_tokenization(self, text, **kwargs):
        text = unicodedata.normalize("NFC", text)
        return (text, kwargs)
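The byte-to-unicode table above is what makes this tokenizer lossless on arbitrary text: every input byte gets a printable stand-in character, and decoding simply inverts the table. A standalone sketch of that round trip, reusing the same table construction but independent of `transformers`:

```python
# Sketch of the bytes_to_unicode round trip used by Qwen2Tokenizer.
def byte_table():
    # printable bytes keep their own character; the rest get code points >= 256
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

enc = byte_table()
dec = {v: k for k, v in enc.items()}

text = "héllo 世界"  # arbitrary UTF-8 input
mapped = "".join(enc[b] for b in text.encode("utf-8"))  # printable stand-ins; e.g. space becomes 'Ġ'
assert bytes(dec[c] for c in mapped).decode("utf-8") == text  # lossless round trip
```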
tokenization_qwen2_fast.py
ADDED
@@ -0,0 +1,134 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Qwen2."""

from typing import Optional, Tuple

from transformers.tokenization_utils import AddedToken
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging
from .tokenization_qwen2 import Qwen2Tokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_file": "tokenizer.json",
}


MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}


class Qwen2TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import Qwen2TokenizerFast

    >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead. Not applicable to this tokenizer.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not applicable for this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = Qwen2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        **kwargs,
    ):
        # We need to at least pass vocab_file and merges_file to the base class
        # in case a slow tokenizer needs to be initialized; the others can be
        # configured through files.
        # Following GPT2TokenizerFast, we also add unk_token, bos_token, and eos_token.

        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(pad_token, str)
            else pad_token
        )

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
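`save_vocabulary` here delegates to the backing *tokenizers* model, so a save/reload cycle preserves the vocabulary and merges exactly. A quick sketch of that round trip (the hub id matches the docstring example above):

```python
# Sketch: save/reload round trip for the fast tokenizer.
import tempfile

from transformers import Qwen2TokenizerFast

tok = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
with tempfile.TemporaryDirectory() as tmp:
    tok.save_pretrained(tmp)  # writes vocab.json / merges.txt via save_vocabulary, plus tokenizer.json
    reloaded = Qwen2TokenizerFast.from_pretrained(tmp)

# identical encodings before and after: [9707, 1879] per the docstring
assert reloaded("Hello world")["input_ids"] == tok("Hello world")["input_ids"]
```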