Dataset columns: text (string, 1–1.02k chars), class_index (int64, 0–10.8k), source (string, 85–188 chars). Every row below has source /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py and a class_index of 73–75; the text fields follow.
Returns: [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token (e.g. <s>, </s>) doesn't correspond to any chars in the original string. """ if not self._encodings: raise ValueError("token_to_chars() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index span_indices = self._encodings[batch_index].token_to_chars(token_index) return CharSpan(*span_indices) if span_indices is not None else None def char_to_token( self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0 ) -> int: """ Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch. Can be called as:
- `self.char_to_token(char_index)` if batch size is 1 - `self.char_to_token(batch_index, char_index)` if batch size is greater than or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized words.
Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the character in the original string. char_index (`int`, *optional*): If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the original string. sequence_index (`int`, *optional*, defaults to 0): If a pair of sequences is encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. Returns: `int`: Index of the token, or None if the char index refers to a whitespace-only token and whitespace is trimmed with `trim_offsets=True`. """
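As a quick illustration (not part of the source; the checkpoint and input string are assumptions), the call patterns above look like this on a fast tokenizer's `BatchEncoding`:

```python
from transformers import AutoTokenizer

# Assumed checkpoint, used only for illustration.
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
encoding = tokenizer("Hello world")  # batch size 1

# Character 6 is the "w" of "world"; ask which token contains it.
token_index = encoding.char_to_token(6)
print(token_index)  # e.g. 2 (token 0 is [CLS])

# Same lookup with an explicit batch index.
assert encoding.char_to_token(0, 6) == token_index
```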
if not self._encodings: raise ValueError("char_to_token() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_token(char_index, sequence_index) def word_to_chars( self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 ) -> CharSpan: """ Get the character span in the original string corresponding to given word in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: - start: index of the first character in the original string - end: index of the character following the last character in the original string Can be called as:
- `self.word_to_chars(word_index)` if batch size is 1 - `self.word_to_chars(batch_index, word_index)` if batch size is greater than or equal to 1 Args: batch_or_word_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence. word_index (`int`, *optional*): If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the sequence. sequence_index (`int`, *optional*, defaults to 0): If a pair of sequences is encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to. Returns: `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan is a NamedTuple with:
- start: index of the first character associated to the token in the original string - end: index of the character following the last character associated to the token in the original string """ if not self._encodings: raise ValueError("word_to_chars() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index))) def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int: """ Get the word in the original string corresponding to a character in the original string of a sequence of the batch. Can be called as:
- `self.char_to_word(char_index)` if batch size is 1 - `self.char_to_word(batch_index, char_index)` if batch size is greater than or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it easy to associate encoded tokens with the provided tokenized words.
Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the character in the original string. char_index (`int`, *optional*): If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the original string. sequence_index (`int`, *optional*, defaults to 0): If a pair of sequences is encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. Returns: `int` or `List[int]`: Index or indices of the corresponding word(s) in the input sequence. """
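A short sketch (checkpoint and sentence are illustrative, not from the source) of how `char_to_word` and `word_to_chars` round-trip between characters and words:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
encoding = tokenizer("The quick brown fox")

# Character 4 sits inside "quick", which is word 1 (0-based).
word_index = encoding.char_to_word(4)
print(word_index)  # 1

# Span of characters covered by that word in the original string.
span = encoding.word_to_chars(word_index)
print(span.start, span.end)  # 4 9
```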
if not self._encodings: raise ValueError("char_to_word() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_word(char_index, sequence_index) def convert_to_tensors( self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False ): """ Convert the inner content to tensors.
Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. prepend_batch_axis (`bool`, *optional*, defaults to `False`): Whether or not to add the batch dimension during the conversion. """ if tensor_type is None: return self # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf
as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch is_tensor = torch.is_tensor def as_tensor(value, dtype=None): if isinstance(value, list) and isinstance(value[0], np.ndarray): return torch.from_numpy(np.array(value)) return torch.tensor(value) elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 as_tensor = jnp.array is_tensor = is_jax_tensor
elif tensor_type == TensorType.MLX: if not is_mlx_available(): raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.") import mlx.core as mx as_tensor = mx.array def is_tensor(obj): return isinstance(obj, mx.array) else: def as_tensor(value, dtype=None): if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): value_lens = [len(val) for val in value] if len(set(value_lens)) > 1 and dtype is None: # we have a ragged list so handle explicitly value = as_tensor([np.asarray(val) for val in value], dtype=object) return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array
# Do the tensor conversion in batch for key, value in self.items(): try: if prepend_batch_axis: value = [value] if not is_tensor(value): tensor = as_tensor(value) # Removing this for now in favor of controlling the shape with `prepend_batch_axis` # # at-least2d # if tensor.ndim > 2: # tensor = tensor.squeeze(0) # elif tensor.ndim < 2: # tensor = tensor[None, :]
self[key] = tensor except Exception as e: if key == "overflowing_tokens": raise ValueError( "Unable to create tensor returning overflowing tokens of different lengths. " "Please see if a fast version of this tokenizer is available to have this feature available." ) from e raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding with" " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your" f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is" " expected)." ) from e return self
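To make the error path above concrete, here is a hedged sketch (assumes PyTorch is installed; checkpoint and sentences are illustrative): a ragged batch cannot be converted to tensors until padding makes it rectangular.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
sentences = ["A short one.", "A somewhat longer sentence for the batch."]

try:
    tokenizer(sentences, return_tensors="pt")  # ragged lengths -> conversion fails
except ValueError as err:
    print(err)  # suggests activating truncation and/or padding

batch = tokenizer(sentences, padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # (batch_size, padded_length)
```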
def to(self, device: Union[str, "torch.device"], *, non_blocking: bool = False) -> "BatchEncoding": """ Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only). Args: device (`str` or `torch.device`): The device to put the tensors on. non_blocking (`bool`): Whether to perform the copy asynchronously. Returns: [`BatchEncoding`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch
# This check catches things like APEX blindly calling "to" on all inputs to a module # Otherwise it passes the casts down and casts the LongTensor containing the token idxs # into a HalfTensor if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): self.data = { k: v.to(device=device, non_blocking=non_blocking) if isinstance(v, torch.Tensor) else v for k, v in self.data.items() } else: logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.") return self
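A minimal sketch of the `to()` helper (assumes PyTorch; falls back to CPU when no CUDA device is present, and the checkpoint is illustrative):

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
device = "cuda" if torch.cuda.is_available() else "cpu"

# Moves every tensor in the BatchEncoding in one call; non-tensor entries are left untouched.
batch = tokenizer("Hello world", return_tensors="pt").to(device)
print(batch["input_ids"].device)
```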
class SpecialTokensMixin: """ A mixin inherited by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to special tokens. In particular, this class holds the attributes which can be used to directly access these special tokens in a model-independent manner and allows setting and updating the special tokens.
Args: bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance).
mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be skipped when decoding if `skip_special_tokens` is set to `True`. """
SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] def __init__(self, verbose=False, **kwargs): self._pad_token_type_id = 0 self.verbose = verbose self._special_tokens_map = {attr: None for attr in self.SPECIAL_TOKENS_ATTRIBUTES} self._special_tokens_map["additional_special_tokens"] = [] # for BC where it defaults to empty list # We directly set the hidden value to allow initialization with special tokens # which are not yet in the vocabulary. Necessary for serialization/de-serialization # TODO clean this up at some point (probably by switching to fast tokenizers)
for key, value in kwargs.items(): if value is None: continue if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == "additional_special_tokens": assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple" assert all( isinstance(t, (str, AddedToken)) for t in value ), "One of the tokens is not a string or an AddedToken" setattr(self, key, value) elif isinstance(value, (str, AddedToken)): setattr(self, key, value) else: raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
def sanitize_special_tokens(self) -> int: """ `sanitize_special_tokens` is deprecated and kept for backward compatibility; it will be removed in transformers v5. """ logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.") return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) def add_special_tokens( self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True ) -> int: """ Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Using `add_special_tokens` will ensure your special tokens can be used in several ways: - Special tokens can be skipped when decoding using `skip_special_tokens = True`. - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (for instance [`BertTokenizer`]'s `cls_token` is already registered to be `'[CLS]'` and XLM's is registered to be `'</s>'`).
Args: special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`): Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the `unk_token` to them). replace_additional_special_tokens (`bool`, *optional*, defaults to `True`): If `True`, the existing list of additional special tokens will be replaced by the list provided in `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just extended. In the former case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous `additional_special_tokens` are still added tokens, and will not be split by the model.
Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2Model.from_pretrained("openai-community/gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ```""" if not special_tokens_dict: return 0 added_tokens = [] for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
if self.verbose: logger.info(f"Assigning {value} to the {key} key of the tokenizer") if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all( isinstance(t, (str, AddedToken)) for t in value ), f"Tokens {value} for key {key} should all be str or AddedToken instances"
to_add = [] for token in value: if isinstance(token, str): # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: continue to_add.append(token) if replace_additional_special_tokens and len(to_add) > 0: setattr(self, key, list(to_add)) else: self._special_tokens_map["additional_special_tokens"].extend(to_add) added_tokens += to_add
else: if not isinstance(value, (str, AddedToken)): raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance") if isinstance(value, (str)): # for legacy purpose we default to stripping. `False` depends on this value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) if isinstance(value, AddedToken): setattr(self, key, value) if value not in added_tokens: added_tokens.append(value) # if we are adding tokens that were not part of the vocab, we ought to add them added_tokens = self.add_tokens(added_tokens, special_tokens=True) return added_tokens
def add_tokens( self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Args: new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. special_tokens (`bool`, *optional*, defaults to `False`): Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: `int`: Number of tokens added to the vocabulary. Examples:
```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) ```""" if not new_tokens: return 0 if not isinstance(new_tokens, (list, tuple)): new_tokens = [new_tokens] return self._add_tokens(new_tokens, special_tokens=special_tokens) def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: raise NotImplementedError
@property def pad_token_type_id(self) -> int: """ `int`: Id of the padding token type in the vocabulary. """ return self._pad_token_type_id def __setattr__(self, key, value): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4] if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): if key_is_special_id: if value is not None: value = ( self.convert_ids_to_tokens(value) if key != "additional_special_tokens" else [self.convert_ids_to_tokens(val) for val in value] ) key = key_without_id
if key != "additional_special_tokens" and not isinstance(value, (str, AddedToken)) and value is not None: raise ValueError(f"Cannot set a non-string value as the {key}") self._special_tokens_map[key] = value else: super().__setattr__(key, value) def __getattr__(self, key): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4]
if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): _special_tokens_map = self.__dict__["_special_tokens_map"] if not key_is_special_id: if _special_tokens_map[key] is None: if self.verbose: logger.error(f"Using {key}, but it is not set yet.") return None value = _special_tokens_map[key] return str(value) if key != "additional_special_tokens" else [str(tok) for tok in value] else: attr_as_tokens = getattr(self, key_without_id) return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None if key not in self.__dict__: raise AttributeError(f"{self.__class__.__name__} has no attribute {key}") else: return super().__getattr__(key)
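A small sketch of the attribute machinery above (the checkpoint is an assumption): token attributes are stored as strings, and the matching `*_id` attributes are resolved through the vocabulary on access.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")

# GPT-2 ships without a pad token; reuse the EOS token for padding.
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.pad_token)     # '<|endoftext|>'
print(tokenizer.pad_token_id)  # e.g. 50256, resolved via convert_tokens_to_ids
```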
@property def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]: """ `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Convert potential tokens of `tokenizers.AddedToken` type to string. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]: """ `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = self._special_tokens_map[attr] if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]: """ `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.); the order has nothing to do with the index of each token. If you want to know the correct indices, check `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedToken`s and not strings.
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ all_tokens = [] seen = set() for value in self.special_tokens_map_extended.values(): if isinstance(value, (list, tuple)): tokens_to_add = [token for token in value if str(token) not in seen] else: tokens_to_add = [value] if str(value) not in seen else [] seen.update(map(str, tokens_to_add)) all_tokens.extend(tokens_to_add) return all_tokens @property def all_special_tokens(self) -> List[str]: """ `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ all_toks = [str(s) for s in self.all_special_tokens_extended] return all_toks
@property def all_special_ids(self) -> List[int]: """ `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids
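For illustration (assumed checkpoint), the three properties above can be inspected like this:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

print(tokenizer.special_tokens_map)  # {'unk_token': '[UNK]', 'sep_token': '[SEP]', ...}
print(tokenizer.all_special_tokens)  # e.g. ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']
print(tokenizer.all_special_ids)     # the same tokens converted to ids
```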
def _set_model_specific_special_tokens(self, special_tokens: Dict[str, Union[str, AddedToken]]): """ Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part of "self.special_tokens" and saved as special tokens in the tokenizer's config. This allows us to dynamically add new model-type specific tokens after initializing the tokenizer. For example: if the tokenizer is multimodal, we can support special image or audio tokens. """ self.SPECIAL_TOKENS_ATTRIBUTES = self.SPECIAL_TOKENS_ATTRIBUTES + list(special_tokens.keys()) for key, value in special_tokens.items(): if isinstance(value, (str, AddedToken)): self._special_tokens_map[key] = value else: raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): """ Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`]. Handles shared (mostly boilerplate) methods for those two classes. """ vocab_files_names: Dict[str, str] = {} pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} _auto_class: Optional[str] = None # first name has to correspond to main model input name # to make sure `tokenizer.pad(...)` works correctly model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"] padding_side: str = "right" truncation_side: str = "right" slow_tokenizer_class = None
def __init__(self, **kwargs): # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) self.init_inputs = () for key in kwargs: if hasattr(self, key) and callable(getattr(self, key)): raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}") self.init_kwargs = copy.deepcopy(kwargs) self.name_or_path = kwargs.pop("name_or_path", "") self._processor_class = kwargs.pop("processor_class", None) # For backward compatibility we fallback to set model_max_length from max_len if provided model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None)) self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it # is changed. self.padding_side = kwargs.pop("padding_side", self.padding_side) if self.padding_side not in ["right", "left"]: raise ValueError( f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}" ) self.truncation_side = kwargs.pop("truncation_side", self.truncation_side) if self.truncation_side not in ["right", "left"]: raise ValueError( f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}" ) self.model_input_names = kwargs.pop("model_input_names", self.model_input_names) # By default, cleaning tokenization spaces for both fast and slow tokenizers self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)
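A sketch of overriding the sides at load time (the checkpoint is illustrative; decoder-only models are often padded on the left):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default

batch = tokenizer(["short", "a longer input"], padding=True)
print(tokenizer.padding_side)   # 'left'
print(batch["attention_mask"])  # zeros pad the left of the shorter row
```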
# By default, do not split special tokens for both fast and slow tokenizers self.split_special_tokens = kwargs.pop("split_special_tokens", False) self.deprecation_warnings = {} # Use to store when we have already noticed a deprecation warning (avoid overlogging). self._in_target_context_manager = False # Stores a Jinja template that formats chat histories into tokenizable strings self.chat_template = kwargs.pop("chat_template", None) if isinstance(self.chat_template, (list, tuple)): # Chat templates are stored as lists of dicts with fixed key names, # we reconstruct that into a single dict while loading them. self.chat_template = {template["name"]: template["template"] for template in self.chat_template} super().__init__(**kwargs) self.extra_special_tokens = kwargs.pop("extra_special_tokens", {}) self._set_model_specific_special_tokens(special_tokens=self.extra_special_tokens)
@property def max_len_single_sentence(self) -> int: """ `int`: The maximum length of a sentence that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=False) @property def max_len_sentences_pair(self) -> int: """ `int`: The maximum combined length of a pair of sentences that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter def max_len_single_sentence(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_single_sentence'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: if not self.deprecation_warnings.get("max_len_single_sentence", False): logger.warning( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_single_sentence"] = True else: raise ValueError( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." )
@max_len_sentences_pair.setter def max_len_sentences_pair(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_sentences_pair'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: if not self.deprecation_warnings.get("max_len_sentences_pair", False): logger.warning( "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_sentences_pair"] = True else: raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.") def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @property def added_tokens_decoder(self) -> Dict[int, AddedToken]: raise NotImplementedError()
def __repr__(self) -> str: added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()]) return ( f"{self.__class__.__name__}(name_or_path='{self.name_or_path}'," f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}," f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}'," f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}," " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}\n)" ) def __len__(self) -> int: raise NotImplementedError() def get_vocab(self) -> Dict[str, int]: """ Returns the vocabulary as a dictionary of token to index. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab.
Returns: `Dict[str, int]`: The vocabulary. """ raise NotImplementedError()
def apply_chat_template( self, conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]], tools: Optional[List[Union[Dict, Callable]]] = None, documents: Optional[List[Dict[str, str]]] = None, chat_template: Optional[str] = None, add_generation_prompt: bool = False, continue_final_message: bool = False, tokenize: bool = True, padding: bool = False, truncation: bool = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_dict: bool = False, return_assistant_tokens_mask: bool = False, tokenizer_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]: """ Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to determine the format and control tokens to use when converting.
Args: conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts with "role" and "content" keys, representing the chat history so far. tools (`List[Dict]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information. documents (`List[Dict[str, str]]`, *optional*): A list of dicts representing documents that will be accessible to the model if it is performing RAG
(retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing "title" and "text" keys. Please see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG) for examples of passing documents with chat templates. chat_template (`str`, *optional*): A Jinja template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. add_generation_prompt (bool, *optional*): If this is set, a prompt with the token(s) that indicate the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. continue_final_message (bool, *optional*): If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to "prefill" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, defaults to `False`): Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. truncation (`bool`, defaults to `False`):
Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'tf'`: Return TensorFlow `tf.Tensor` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. return_dict (`bool`, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
tokenizer_kwargs (`Dict[str, Any]`, *optional*): Additional kwargs to pass to the tokenizer. return_assistant_tokens_mask (`bool`, defaults to `False`): Whether to return a mask of the assistant-generated tokens. For tokens generated by the assistant, the mask will contain 1. For user and system tokens, the mask will contain 0. This functionality is only available for chat templates that support it via the `{% generation %}` keyword. **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template.
Returns: `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is set, will return a dict of tokenizer outputs instead. """ if return_dict and not tokenize: raise ValueError( "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict " "of tokenizer outputs to return." ) if return_assistant_tokens_mask and not return_dict: raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`") if tokenizer_kwargs is None: tokenizer_kwargs = {} chat_template = self.get_chat_template(chat_template, tools)
if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template): logger.warning_once( "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword." ) # Compilation function uses a cache to avoid recompiling the same template compiled_template = _compile_jinja_template(chat_template) if isinstance(conversation, (list, tuple)) and ( isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages") ): conversations = conversation is_batched = True else: conversations = [conversation] is_batched = False
if continue_final_message: if add_generation_prompt: raise ValueError( "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead." ) if return_assistant_tokens_mask: raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.")
# We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas if tools is not None: tool_schemas = [] for tool in tools: if isinstance(tool, dict): tool_schemas.append(tool) elif isfunction(tool): tool_schemas.append(get_json_schema(tool)) else: raise ValueError( "Tools should either be a JSON schema, or a callable function with type hints " "and a docstring suitable for auto-conversion to a schema." ) else: tool_schemas = None if documents is not None: for document in documents: if not isinstance(document, dict): raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!")
rendered = [] all_generation_indices = [] template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present for chat in conversations: if hasattr(chat, "messages"): # Indicates it's a Conversation object chat = chat.messages if return_assistant_tokens_mask: rendered_chat, generation_indices = _render_with_assistant_indices( compiled_template=compiled_template, messages=chat, tools=tool_schemas, documents=documents, add_generation_prompt=add_generation_prompt, **template_kwargs, ) all_generation_indices.append(generation_indices) else: rendered_chat = compiled_template.render( messages=chat, tools=tool_schemas,
documents=documents, add_generation_prompt=add_generation_prompt, **template_kwargs, ) if continue_final_message: final_message = chat[-1]["content"] if isinstance(final_message, (list, tuple)): final_message = final_message[-1]["text"] try: rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)] except: # noqa: E722 # Some chat templates like Llama-3.1 trim messages before rendering, so we must do the same here. final_message = final_message.strip() rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)] rendered.append(rendered_chat)
if not is_batched: rendered = rendered[0]
if tokenize: out = self( rendered, padding=padding, truncation=truncation, max_length=max_length, add_special_tokens=False, return_tensors=return_tensors, **tokenizer_kwargs, ) if return_dict: if return_assistant_tokens_mask: assistant_masks = [] if is_batched or return_tensors: input_ids = out["input_ids"] else: input_ids = [out["input_ids"]] for i in range(len(input_ids)): current_mask = [0] * len(input_ids[i]) for assistant_start_char, assistant_end_char in all_generation_indices[i]: start_token = out.char_to_token(i, assistant_start_char) end_token = out.char_to_token(i, assistant_end_char - 1)
if start_token is None: # start_token is out of bounds maybe due to truncation. break for token_id in range(start_token, end_token + 1 if end_token else len(input_ids[i])): current_mask[token_id] = 1 assistant_masks.append(current_mask) out["assistant_masks"] = assistant_masks if is_batched else assistant_masks[0] return out else: return out["input_ids"] else: return rendered
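End-to-end, `apply_chat_template` is typically used like the following sketch (the chat-capable checkpoint is an assumption, and the tensor call assumes PyTorch):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# Render the conversation as a string, including the assistant prompt header.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)

# Or tokenize directly into model-ready tensors.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
)
print(inputs["input_ids"].shape)
```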
def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[List[Dict]] = None) -> str: """ Retrieve the chat template string used for tokenizing chat messages. This template is used internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat template for better generation tracking.
Args: chat_template (`str`, *optional*): A Jinja template or the name of a template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. tools (`List[Dict]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information.
Returns: `str`: The chat template string. """ # First, handle the cases when the model has a dict of multiple templates if isinstance(self.chat_template, dict): template_dict = self.chat_template if chat_template is not None and chat_template in template_dict: # The user can pass the name of a template to the chat template argument instead of an entire template chat_template = template_dict[chat_template] elif chat_template is None: if tools is not None and "tool_use" in template_dict: chat_template = template_dict["tool_use"] elif "default" in template_dict: chat_template = template_dict["default"] else: raise ValueError( "This model has multiple chat templates with no default specified! Please either pass a chat "
"template or the name of the template you wish to use to the `chat_template` argument. Available " f"template names are {sorted(template_dict.keys())}." )
elif chat_template is None: # These are the cases when the model has a single template # priority: `chat_template` argument > `tokenizer.chat_template` if self.chat_template is not None: chat_template = self.chat_template else: raise ValueError( "Cannot use chat template functions because tokenizer.chat_template is not set and no template " "argument was passed! For information about writing templates and setting the " "tokenizer.chat_template attribute, please see the documentation at " "https://huggingface.co/docs/transformers/main/en/chat_templating" ) return chat_template
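When a tokenizer carries a dict of named templates, `get_chat_template` resolves them as implemented above; a hedged sketch follows (the checkpoint is assumed to ship "default" and "tool_use" entries):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

default_template = tokenizer.get_chat_template()                       # falls back to "default"
tool_template = tokenizer.get_chat_template(chat_template="tool_use")  # selected by name
print(tool_template[:200])
```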
@classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", trust_remote_code=False, **kwargs, ): r""" Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g., `./my_model_directory/`. - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., `./my_model_directory/vocab.txt`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files.
revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. subfolder (`str`, *optional*): In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here. inputs (additional positional arguments, *optional*): Will be passed along to the Tokenizer `__init__` method. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`. See parameters in the `__init__` for more details.
<Tip> Passing `token=True` is required when you want to use a private model. </Tip> Examples: ```python # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
# You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>" ```""" resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) use_auth_token = kwargs.pop("use_auth_token", None) subfolder = kwargs.pop("subfolder", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) commit_hash = kwargs.pop("_commit_hash", None) gguf_file = kwargs.get("gguf_file", None)
if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) vocab_files = {} init_configuration = {}
is_local = os.path.isdir(pretrained_model_name_or_path) single_file_id = None if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1 and not gguf_file: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " "supported for this tokenizer. Use a model identifier or the path to a directory instead." ) warnings.warn( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", FutureWarning, ) file_id = list(cls.vocab_files_names.keys())[0]
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
    vocab_files[file_id] = pretrained_model_name_or_path
    single_file_id = file_id
else:
    if gguf_file:
        vocab_files["vocab_file"] = gguf_file
    else:
        # At this point pretrained_model_name_or_path is either a directory or a model identifier name
        additional_files_names = {
            "added_tokens_file": ADDED_TOKENS_FILE,  # kept only for legacy
            "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,  # kept only for legacy
            "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
            # tokenizer_file is used to initialize a slow tokenizer from a fast one; properly copy the `addedTokens` instead of adding them in a random order
            "tokenizer_file": FULL_TOKENIZER_FILE,
            "chat_template_file": CHAT_TEMPLATE_FILE,
        }
        vocab_files = {**cls.vocab_files_names, **additional_files_names}
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
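When a `gguf_file` is given, the branch above skips the usual vocab-file discovery and the tokenizer is rebuilt from the metadata embedded in the GGUF checkpoint. A hedged sketch, assuming the repo actually ships the named GGUF file (repo id and filename below are placeholders):

```python
from transformers import AutoTokenizer

# No separate tokenizer.json / vocab files are needed in the repo;
# the vocabulary is read from the GGUF file itself.
tokenizer = AutoTokenizer.from_pretrained(
    "some-org/some-model-GGUF",          # placeholder repo id
    gguf_file="some-model.Q4_K_M.gguf",  # placeholder filename
)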
if "tokenizer_file" in vocab_files: # Try to get the tokenizer config to see if there are versioned tokenizer files. fast_tokenizer_file = FULL_TOKENIZER_FILE resolved_config_file = cached_file( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, user_agent=user_agent, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False,
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
        _commit_hash=commit_hash,
    )
    commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
    if resolved_config_file is not None:
        with open(resolved_config_file, encoding="utf-8") as reader:
            tokenizer_config = json.load(reader)
            if "fast_tokenizer_files" in tokenizer_config:
                fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"])
    vocab_files["tokenizer_file"] = fast_tokenizer_file
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
# Get files from url, cache, or disk depending on the case
resolved_vocab_files = {}
unresolved_files = []
for file_id, file_path in vocab_files.items():
    if file_path is None:
        resolved_vocab_files[file_id] = None
    elif single_file_id == file_id:
        if os.path.isfile(file_path):
            resolved_vocab_files[file_id] = file_path
        elif is_remote_url(file_path):
            resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies)
    else:
        resolved_vocab_files[file_id] = cached_file(
            pretrained_model_name_or_path,
            file_path,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            local_files_only=local_files_only,
            token=token,
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
            user_agent=user_agent,
            revision=revision,
            subfolder=subfolder,
            _raise_exceptions_for_gated_repo=False,
            _raise_exceptions_for_missing_entries=False,
            _raise_exceptions_for_connection_errors=False,
            _commit_hash=commit_hash,
        )
        commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
if len(unresolved_files) > 0:
    logger.info(
        f"Can't load the following files from cache: {unresolved_files} and cannot check if these "
        "files are necessary for the tokenizer to operate."
    )

# If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
# loaded directly from the GGUF file.
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file:
    raise EnvironmentError(
        f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
        "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
        f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
        f"containing all relevant files for a {cls.__name__} tokenizer."
    )
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
for file_id, file_path in vocab_files.items():
    if file_id not in resolved_vocab_files:
        continue

    if is_local:
        logger.info(f"loading file {file_path}")
    else:
        logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")

return cls._from_pretrained(
    resolved_vocab_files,
    pretrained_model_name_or_path,
    init_configuration,
    *init_inputs,
    token=token,
    cache_dir=cache_dir,
    local_files_only=local_files_only,
    _commit_hash=commit_hash,
    _is_local=is_local,
    trust_remote_code=trust_remote_code,
    **kwargs,
)
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
@classmethod
def _from_pretrained(
    cls,
    resolved_vocab_files,
    pretrained_model_name_or_path,
    init_configuration,
    *init_inputs,
    token=None,
    cache_dir=None,
    local_files_only=False,
    _commit_hash=None,
    _is_local=False,
    trust_remote_code=False,
    **kwargs,
):
    # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
    # file or if `from_slow` is set to True.
    from_slow = kwargs.get("from_slow", False)
    gguf_file = kwargs.get("gguf_file", None)
    has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
# If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
# loaded directly from the GGUF file.
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file:
    slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
        copy.deepcopy(resolved_vocab_files),
        pretrained_model_name_or_path,
        copy.deepcopy(init_configuration),
        *init_inputs,
        token=token,
        cache_dir=cache_dir,
        local_files_only=local_files_only,
        _commit_hash=_commit_hash,
        **(copy.deepcopy(kwargs)),
    )
else:
    slow_tokenizer = None
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
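This branch means that passing `from_slow=True` (or loading a repo without a serialized `tokenizer.json`) first builds a slow tokenizer from the vocabulary files and then converts it, which is slower but guarantees the fast tokenizer reflects those files. A minimal sketch, assuming the flag is simply forwarded through `**kwargs` as above (the model name is only illustrative):

```python
from transformers import BertTokenizerFast

# Force the fast tokenizer to be rebuilt from the slow vocabulary files
# instead of the serialized tokenizer.json, e.g. after editing vocab.txt.
tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased", from_slow=True)
```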
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
    with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
        init_kwargs = json.load(tokenizer_config_handle)
    # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
    config_tokenizer_class = init_kwargs.get("tokenizer_class")
    init_kwargs.pop("tokenizer_class", None)
    if not has_tokenizer_file:
        init_kwargs.pop("tokenizer_file", None)
    saved_init_inputs = init_kwargs.pop("init_inputs", ())
    if not init_inputs:
        init_inputs = saved_init_inputs
else:
    config_tokenizer_class = None
    init_kwargs = init_configuration
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
# If an independent chat template file exists, it takes priority over template entries in the tokenizer config
chat_template_file = resolved_vocab_files.pop("chat_template_file", None)
if chat_template_file is not None:
    with open(chat_template_file) as chat_template_handle:
        init_kwargs["chat_template"] = chat_template_handle.read()  # Clobbers any template in the config
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
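Whichever source wins here (standalone template file over config entry), the template that ends up attached to the tokenizer is what `apply_chat_template` uses. A sketch of that round trip (the template below is deliberately trivial, and exactly which file `save_pretrained` writes the template to depends on the library version):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Attach a trivial Jinja template; real templates are model-specific.
tokenizer.chat_template = (
    "{% for message in messages %}{{ message['role'] }}: {{ message['content'] }}\n{% endfor %}"
)
tokenizer.save_pretrained("./my_tokenizer")

reloaded = AutoTokenizer.from_pretrained("./my_tokenizer")
print(reloaded.apply_chat_template([{"role": "user", "content": "Hello!"}], tokenize=False))
```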
if not _is_local:
    if "auto_map" in init_kwargs:
        # For backward compatibility with the old format.
        if isinstance(init_kwargs["auto_map"], (tuple, list)):
            init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]}
        init_kwargs["auto_map"] = add_model_info_to_auto_map(
            init_kwargs["auto_map"], pretrained_model_name_or_path
        )
    if "custom_pipelines" in init_kwargs:
        init_kwargs["custom_pipelines"] = add_model_info_to_custom_pipelines(
            init_kwargs["custom_pipelines"], pretrained_model_name_or_path
        )
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
if config_tokenizer_class is None:
    # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo.
    # If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with
    # AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain.
    # Maybe we can just remove this entirely?
    from .models.auto.configuration_auto import AutoConfig  # tests_ignore
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
    # Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
    try:
        config = AutoConfig.from_pretrained(
            pretrained_model_name_or_path,
            token=token,
            cache_dir=cache_dir,
            local_files_only=local_files_only,
            trust_remote_code=trust_remote_code,
            _commit_hash=_commit_hash,
        )
        config_tokenizer_class = config.tokenizer_class
    except (OSError, ValueError, KeyError):
        # Skip if an error occurred.
        config = None
    if config_tokenizer_class is None:
        # Third attempt. If we have not yet found the original type of the tokenizer we are loading,
        # see if we can infer it from the type of the configuration file.
        from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES  # tests_ignore
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
if hasattr(config, "model_type"): model_type = config.model_type else: # Fallback: use pattern matching on the string. model_type = None for pattern in TOKENIZER_MAPPING_NAMES.keys(): if pattern in str(pretrained_model_name_or_path): model_type = pattern break if model_type is not None: config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get( model_type, (None, None) ) if config_tokenizer_class is None: config_tokenizer_class = config_tokenizer_class_fast
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
if config_tokenizer_class is not None:
    if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
        logger.warning(
            "The tokenizer class you load from this checkpoint is not the same type as the class this"
            " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
            f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
            f" from is '{cls.__name__}'."
        )

# Update with newly provided kwargs
init_kwargs.update(kwargs)
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
for args_name, file_path in resolved_vocab_files.items():
    if args_name not in init_kwargs:
        init_kwargs[args_name] = file_path
tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None)

if slow_tokenizer is not None:
    init_kwargs["__slow_tokenizer"] = slow_tokenizer
init_kwargs["name_or_path"] = pretrained_model_name_or_path
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
#### Handle tokenizer serialization of added and special tokens
added_tokens_decoder: Dict[int, AddedToken] = {}
added_tokens_map: Dict[str, AddedToken] = {}
# if we have info on the slow added tokens
if "added_tokens_decoder" in init_kwargs:
    for idx, token in init_kwargs["added_tokens_decoder"].items():
        if isinstance(token, dict):
            token = AddedToken(**token)
        if isinstance(token, AddedToken):
            added_tokens_decoder[int(idx)] = token
            added_tokens_map[str(token)] = token
        else:
            raise ValueError(
                f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance"
            )
else:
    # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified
    if special_tokens_map_file is not None:
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
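The `added_tokens_decoder` entries handled above are plain dicts in `tokenizer_config.json`, keyed by token id, that get rebuilt into `AddedToken` objects. A sketch of that round trip (the id and field values below are just an example of the serialized format, not taken from a real checkpoint):

```python
from transformers import AddedToken

# One serialized entry as it would appear under "added_tokens_decoder" in tokenizer_config.json
entry = {
    "content": "<custom_token>",
    "single_word": False,
    "lstrip": False,
    "rstrip": False,
    "normalized": False,
    "special": True,
}
token = AddedToken(**entry)
# The loading code above stores it as added_tokens_decoder[int(idx)] and added_tokens_map[str(token)]
print(int("32000"), str(token))
```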
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
    special_tokens_map = json.load(special_tokens_map_handle)
    for key, value in special_tokens_map.items():
        if key in kwargs and kwargs[key]:
            # This value has already been redefined by the kwargs
            # We keep this new value and ignore the one stored in the special_tokens_map_file
            continue
        if isinstance(value, dict):
            value["special"] = True
            value = AddedToken(**value)
        elif key == "additional_special_tokens" and isinstance(value, list):
            additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or []
            for token in value:
                if isinstance(token, dict):
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
token["special"] = True token = AddedToken(**token) if token not in additional_special_tokens: additional_special_tokens.append(token) value = additional_special_tokens init_kwargs[key] = value
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py
# slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`.
# This is for legacy purposes. We don't add the tokens after init for efficiency.
if added_tokens_file is not None:
    special_tokens = []
    for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
        if init_kwargs[key] is not None:
            if key == "additional_special_tokens":
                special_tokens += [str(token) for token in init_kwargs[key]]
            else:
                special_tokens.append(str(init_kwargs[key]))
75
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_base.py