Columns:
  text: string, lengths 1 to 1.02k
  class_index: int64, values 0 to 10.8k
  source: string, lengths 85 to 188

Each record below shows the text field (a source-code chunk with its original line breaks collapsed), followed by its class_index and source values.
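The dump does not say how these records are stored or meant to be consumed. As a rough sketch only, assuming the rows are kept as JSON Lines with the three keys above (the file name code_chunks.jsonl is hypothetical, not part of the original dump), they could be loaded and inspected with the datasets library:

from datasets import load_dataset

# Assumption: one JSON object per line with keys "text", "class_index", "source".
ds = load_dataset("json", data_files="code_chunks.jsonl", split="train")

print(ds.features)         # column names and dtypes (string, int64, string)
row = ds[0]
print(row["class_index"])  # e.g. 57
print(row["source"])       # e.g. .../src/transformers/tokenization_utils.py
print(row["text"][:80])    # start of the code chunk stored in this record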
# very important for fast and slow equivalence! is_special = token in self.all_special_tokens or special_tokens token = AddedToken( token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special ) elif special_tokens: # doing token.special=True changes the normalization! will fix in rust # this is important and the only reason why the AddedTokens in each class are normalized by default token.__setstate__({"special": True, "normalized": token.normalized}) if token in self._added_tokens_decoder: continue if not token.special and token.normalized and getattr(self, "do_lower_case", False): # Normalize if requested token.content = token.content.lower() if token.content not in current_vocab: token_index = new_idx + added_tokens
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
current_vocab[token.content] = token_index added_tokens += 1 else: token_index = current_vocab[token.content]
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
if token.special and str(token) not in self.all_special_tokens: self._special_tokens_map["additional_special_tokens"].append(token) # the setter automatically updates the reverse map self._added_tokens_decoder[token_index] = token self._added_tokens_encoder[token.content] = token_index if self.verbose: logger.info(f"Adding {token} to the vocabulary") self._update_trie() self._update_total_vocab_size() return added_tokens def _update_trie(self, unique_no_split_tokens: Optional[str] = []): for token in self._added_tokens_decoder.values(): if token not in self.tokens_trie._tokens: self.tokens_trie.add(token.content) for token in unique_no_split_tokens: if token not in self.tokens_trie._tokens: self.tokens_trie.add(token)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. """ token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def tokenize(self, text: TextInput, **kwargs) -> List[str]: """ Converts a string into a sequence of tokens, using the tokenizer.
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens. Args: text (`str`): The sequence to be encoded. **kwargs (additional keyword arguments): Passed along to the model-specific `prepare_for_tokenization` preprocessing method. Returns: `List[str]`: The list of tokens. """ split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens) text, kwargs = self.prepare_for_tokenization(text, **kwargs) if kwargs: logger.warning(f"Keyword arguments {kwargs} not recognized.")
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
if hasattr(self, "do_lower_case") and self.do_lower_case: # convert non-special tokens to lowercase. Might be super slow as well? escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)] escaped_special_toks += [ re.escape(s_tok.content) for s_tok in (self._added_tokens_decoder.values()) if not s_tok.special and s_tok.normalized ] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text) if split_special_tokens: no_split_token = [] tokens = [text] else: no_split_token = self._added_tokens_encoder.keys() # don't split on any of the added tokens # "This is something<special_token_1> else" tokens = self.tokens_trie.split(text)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
# ["This is something", "<special_token_1>", " else"] for i, token in enumerate(tokens): if token in no_split_token: tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None) left = tokens[i - 1] if i > 0 else None right = tokens[i + 1] if i < len(tokens) - 1 else None if isinstance(tok_extended, AddedToken): if tok_extended.rstrip and right: # A bit counter-intuitive but we strip the left of the string # since tok_extended.rstrip means the special token is eating all white spaces on its right tokens[i + 1] = right.lstrip() # Strip white spaces on the left if tok_extended.lstrip and left: tokens[i - 1] = left.rstrip() # Opposite here if tok_extended.single_word and left and left[-1] != " ":
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
tokens[i - 1] += token tokens[i] = "" elif tok_extended.single_word and right and right[0] != " ": tokens[i + 1] = token + tokens[i + 1] tokens[i] = "" else: raise ValueError( f"{tok_extended} cannot be tokenized because it was not properly added" f" to the tokenizer. This means that it is not an `AddedToken` but a {type(tok_extended)}" ) # ["This is something", "<special_token_1>", "else"] tokenized_text = [] for token in tokens: # Need to skip eventual empty (fully stripped) tokens if not token: continue if token in no_split_token: tokenized_text.append(token) else: tokenized_text.extend(self._tokenize(token))
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
# ["This", " is", " something", "<special_token_1>", "else"] return tokenized_text
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def _tokenize(self, text, **kwargs): """ Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. Args: tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `List[int]`: The token id or list of token ids. """ if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self._added_tokens_encoder: return self._added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding:
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: if is_split_into_words: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when" " `is_split_into_words=True`."
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of" " integers." )
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. " "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
return self.prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, )
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError(
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." )
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids))
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, split_special_tokens=split_special_tokens, ) return BatchEncoding(batch_outputs)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, ) -> BatchEncoding: """
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self.prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward padding_side=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
prepend_batch_axis=False, verbose=verbose, split_special_tokens=split_special_tokens, )
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, **kwargs ) -> Tuple[str, Dict[str, Any]]: """ Performs any necessary transformations before tokenization.
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. kwargs (`Dict[str, Any]`, *optional*): Keyword arguments to use for the tokenization. Returns: `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs. """ return (text, kwargs)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model.
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) @overload def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ... @overload def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]: ...
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[str, List[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `List[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding.
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
Returns: `str` or `List[str]`: The decoded token(s). """ if isinstance(ids, int): if ids in self._added_tokens_decoder: return self._added_tokens_decoder[ids].content else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self._added_tokens_decoder: tokens.append(self._added_tokens_decoder[index].content) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index: int) -> str: raise NotImplementedError def convert_tokens_to_string(self, tokens: List[str]) -> str: return " ".join(tokens)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
def _decode( self, token_ids: Union[int, List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # If given is a single id, prevents splitting the string in upcoming loop if isinstance(filtered_tokens, str): filtered_tokens = [filtered_tokens]
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
legacy_added_tokens = set(self._added_tokens_encoder.keys()) - set(self.all_special_tokens) | { token for token in self.additional_special_tokens if self.convert_tokens_to_ids(token) >= self.vocab_size } # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] # TODO @ArthurZ in version 5, special tokens should be handled in convert_tokens_to_string, while _convert_tokens_to_string for token in filtered_tokens: if skip_special_tokens and token in self.all_special_tokens: continue if token in legacy_added_tokens: if current_sub_text: string = self.convert_tokens_to_string(current_sub_text) if len(string) > 0: sub_texts.append(string)
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text))
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
if spaces_between_special_tokens: text = " ".join(sub_texts) else: text = "".join(sub_texts) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text
57
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils.py
class OnnxConverterArgumentParser(ArgumentParser): """ Wraps all the script arguments supported to export transformers models to ONNX IR """ def __init__(self): super().__init__("ONNX Converter")
58
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_graph_to_onnx.py
self.add_argument( "--pipeline", type=str, choices=SUPPORTED_PIPELINES, default="feature-extraction", ) self.add_argument( "--model", type=str, required=True, help="Model's id or path (ex: google-bert/bert-base-cased)", ) self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: google-bert/bert-base-cased)") self.add_argument( "--framework", type=str, choices=["pt", "tf"], help="Framework for loading the model", ) self.add_argument("--opset", type=int, default=11, help="ONNX opset to use") self.add_argument( "--check-loading", action="store_true", help="Check ONNX is able to load the model", ) self.add_argument( "--use-external-format", action="store_true",
58
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_graph_to_onnx.py
help="Allow exporting model >= than 2Gb", ) self.add_argument( "--quantize", action="store_true", help="Quantize the neural network to be run with int8", ) self.add_argument("output")
58
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/convert_graph_to_onnx.py
class Conv1D(nn.Module): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features. """ def __init__(self, nf, nx): super().__init__() self.nf = nf self.nx = nx self.weight = nn.Parameter(torch.empty(nx, nf)) self.bias = nn.Parameter(torch.zeros(nf)) nn.init.normal_(self.weight, std=0.02) def __repr__(self) -> str: return "Conv1D(nf={nf}, nx={nx})".format(**self.__dict__) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(size_out) return x
59
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/pytorch_utils.py
class Seq2SeqTrainer(Trainer): @deprecate_kwarg("tokenizer", new_name="processing_class", version="5.0.0", raise_if_both_names=True) def __init__( self, model: Union["PreTrainedModel", nn.Module] = None, args: "TrainingArguments" = None, data_collator: Optional["DataCollator"] = None, train_dataset: Optional[Union[Dataset, "IterableDataset", "datasets.Dataset"]] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, processing_class: Optional[ Union["PreTrainedTokenizerBase", "BaseImageProcessor", "FeatureExtractionMixin", "ProcessorMixin"] ] = None, model_init: Optional[Callable[[], "PreTrainedModel"]] = None, compute_loss_func: Optional[Callable] = None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]] = None, callbacks: Optional[List["TrainerCallback"]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ): super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, model_init=model_init, compute_loss_func=compute_loss_func, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, )
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Override self.model.generation_config if a GenerationConfig is specified in args. # Priority: args.generation_config > model.generation_config > default GenerationConfig. if self.args.generation_config is not None: gen_config = self.load_generation_config(self.args.generation_config) self.model.generation_config = gen_config @staticmethod def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig: """ Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments. Args: gen_config_arg (`str` or [`~generation.GenerationConfig]`): `Seq2SeqTrainingArguments.generation_config` argument. Returns: A `~generation.GenerationConfig`. """
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# GenerationConfig provided, nothing to do if isinstance(gen_config_arg, GenerationConfig): gen_config = deepcopy(gen_config_arg) else: # str or Path pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg config_file_name = None # Figuring if it is path pointing to a file, pointing to a directory or else a model id or URL # This step is required in order to determine config_file_name if pretrained_model_name.is_file(): config_file_name = pretrained_model_name.name pretrained_model_name = pretrained_model_name.parent # dir path elif pretrained_model_name.is_dir(): pass # model id or URL else: pretrained_model_name = gen_config_arg gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws # an exception if there are warnings at validation time. try: with warnings.catch_warnings(record=True) as caught_warnings: gen_config.validate() if len(caught_warnings) > 0: raise ValueError(str([w.message for w in caught_warnings])) except ValueError as exc: raise ValueError( "The loaded generation config instance is invalid -- `GenerationConfig.validate()` throws warnings " "and/or exceptions. Fix these issues to train your model.\n\nThrown during validation:\n" + str(exc) ) return gen_config
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
def evaluate( self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs, ) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior.
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
Args: eval_dataset (`Dataset`, *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` method. ignore_keys (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*):
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs.
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ gen_kwargs = gen_kwargs.copy()
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the # training args if ( gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None and self.args.generation_max_length is not None ): gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams # We don't want to drop samples in general self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
def predict( self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test", **gen_kwargs, ) -> "PredictionOutput": """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`.
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. Has to implement the method `__len__` ignore_keys (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*):
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs.
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
<Tip> If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). """ gen_kwargs = gen_kwargs.copy()
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the # training args if ( gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None and self.args.generation_max_length is not None ): gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, **gen_kwargs, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model.
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. gen_kwargs: Additional `generate` specific kwargs. Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Priority (handled in generate): # non-`None` gen_kwargs > model.generation_config > default GenerationConfig() if len(gen_kwargs) == 0 and hasattr(self, "_gen_kwargs"): gen_kwargs = self._gen_kwargs.copy() if "num_beams" in gen_kwargs and gen_kwargs["num_beams"] is None: gen_kwargs.pop("num_beams") if "max_length" in gen_kwargs and gen_kwargs["max_length"] is None: gen_kwargs.pop("max_length") default_synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self.model) gen_kwargs["synced_gpus"] = gen_kwargs.get("synced_gpus", default_synced_gpus)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
generation_inputs = inputs.copy() # If the `decoder_input_ids` was created from `labels`, evict the former, so that the model can freely generate # (otherwise, it would continue generating from the padded `decoder_input_ids`) if ( "labels" in generation_inputs and "decoder_input_ids" in generation_inputs and generation_inputs["labels"].shape == generation_inputs["decoder_input_ids"].shape ): generation_inputs = { k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask") } summon_full_params_context = ( FullyShardedDataParallel.summon_full_params(self.model) if isinstance(self.model, FullyShardedDataParallel) else contextlib.nullcontext() ) with summon_full_params_context: generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop # TODO: remove this hack when the legacy code that initializes generation_config from a model config is # removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183 if self.model.generation_config._from_model_config: self.model.generation_config._from_model_config = False
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
# Retrieves GenerationConfig from model.generation_config gen_config = self.model.generation_config # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_config.max_length: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length) elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
with torch.no_grad(): if has_labels: with self.compute_loss_context_manager(): outputs = model(**inputs) if self.label_smoother is not None: loss = self.label_smoother(outputs, inputs["labels"]).mean().detach() else: loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach() else: loss = None if self.args.prediction_loss_only: return loss, None, None if has_labels: labels = inputs["labels"] if labels.shape[-1] < gen_config.max_length: labels = self._pad_tensors_to_max_len(labels, gen_config.max_length) elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1: labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1) else: labels = None
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
return loss, generated_tokens, labels def _pad_tensors_to_max_len(self, tensor, max_length): if self.processing_class is not None and hasattr(self.processing_class, "pad_token_id"): # If PAD token is not defined at least EOS token has to be defined pad_token_id = ( self.processing_class.pad_token_id if self.processing_class.pad_token_id is not None else self.processing_class.eos_token_id ) else: if self.model.config.pad_token_id is not None: pad_token_id = self.model.config.pad_token_id else: raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors") padded_tensor = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) padded_tensor[:, : tensor.shape[-1]] = tensor return padded_tensor
60
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_seq2seq.py
class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin): r""" Base class for all models. [`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" _auto_class = None _missing_keys = set()
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
def __init__( self, config: PretrainedConfig, module: nn.Module, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, ): if config is None: raise ValueError("config cannot be None") if module is None: raise ValueError("module cannot be None") # Those are private to be exposed as typed property on derived classes. self._config = config self._module = module # Those are public as their type is generic to every derived classes. self.key = PRNGKey(seed) self.dtype = dtype self.input_shape = input_shape self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None # To check if the model was initialized automatically. self._is_initialized = _do_init
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
if _do_init: # randomly initialized parameters random_params = self.init_weights(self.key, input_shape) params_shape_tree = jax.eval_shape(lambda params: params, random_params) else: init_fn = partial(self.init_weights, input_shape=input_shape) params_shape_tree = jax.eval_shape(init_fn, self.key) logger.info( "Model weights are not initialized as `_do_init` is set to `False`. " f"Make sure to call `{self.__class__.__name__}.init_weights` manually to initialize the weights." ) # get the shape of the parameters self._params_shape_tree = params_shape_tree # save required_params as set self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) # initialize the parameters if _do_init: self.params = random_params
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> Dict: raise NotImplementedError(f"init method has to be implemented for {self}") def enable_gradient_checkpointing(self): raise NotImplementedError(f"gradient checkpointing method has to be implemented for {self}") @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) @property def framework(self) -> str: """ :str: Identifies that this is a Flax model. """ return "flax" @property def config(self) -> PretrainedConfig: return self._config @property def module(self) -> nn.Module: return self._module
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
@property def params(self) -> Union[Dict, FrozenDict]: if not self._is_initialized: raise ValueError( "`params` cannot be accessed from model when the model is created with `_do_init=False`. " "You must call `init_weights` manually and store the params outside of the model and " "pass it explicitly where needed." ) return self._params @property def required_params(self) -> Set: return self._required_params @property def params_shape_tree(self) -> Dict: return self._params_shape_tree @params.setter def params(self, params: Union[Dict, FrozenDict]): # don't set params if the model is not initialized if not self._is_initialized: raise ValueError( "`params` cannot be set from model when the model is created with `_do_init=False`. " "You store the params outside of the model." )
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
if isinstance(params, FrozenDict): params = unfreeze(params) param_keys = set(flatten_dict(params).keys()) if len(self.required_params - param_keys) > 0: raise ValueError( "Some parameters are missing. Make sure that `params` include the following " f"parameters {self.required_params - param_keys}" ) self._params = params def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any: """ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`. """ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27 def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
if mask is None: return jax.tree_util.tree_map(conditional_cast, params) flat_params = flatten_dict(params) flat_mask, _ = jax.tree_util.tree_flatten(mask) for masked, key in zip(flat_mask, sorted(flat_params.keys())): if masked: flat_params[key] = conditional_cast(flat_params[key]) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params you want to cast, and should be `False` for those you want to skip. Examples: ```python >>> from transformers import FlaxBertModel >>> # load model >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased") >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision >>> model.params = model.to_bf16(model.params) >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
>>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased") >>> flat_params = traverse_util.flatten_dict(model.params) >>> mask = { ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ... } >>> mask = traverse_util.unflatten_dict(mask) >>> model.params = model.to_bf16(model.params, mask) ```""" return self._cast_floating_to(params, jnp.bfloat16, mask) def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `parmas` to `jax.numpy.float32`. This method can be used to explicitly convert the model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params you want to cast, and should be `False` for those you want to skip Examples: ```python >>> from transformers import FlaxBertModel >>> # Download model and configuration from huggingface.co >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased") >>> # By default, the model params will be in fp32, to illustrate the use of this method, >>> # we'll first cast to fp16 and back to fp32 >>> model.params = model.to_f16(model.params) >>> # now cast back to fp32 >>> model.params = model.to_fp32(model.params) ```""" return self._cast_floating_to(params, jnp.float32, mask)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `parmas` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params you want to cast, and should be `False` for those you want to skip Examples: ```python >>> from transformers import FlaxBertModel
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
>>> # load model >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased") >>> # By default, the model params will be in fp32, to cast these to float16 >>> model.params = model.to_fp16(model.params) >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased") >>> flat_params = traverse_util.flatten_dict(model.params) >>> mask = { ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ... } >>> mask = traverse_util.unflatten_dict(mask) >>> model.params = model.to_fp16(model.params, mask) ```""" return self._cast_floating_to(params, jnp.float16, mask)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
@classmethod def load_flax_weights(cls, resolved_archive_file): try: if resolved_archive_file.endswith(".safetensors"): state = safe_load_file(resolved_archive_file) state = unflatten_dict(state, sep=".") else: with open(resolved_archive_file, "rb") as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {resolved_archive_file} to Flax deserializable object. ")
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
return state @classmethod def load_flax_sharded_weights(cls, shard_files): """ This is the same as [`flax.serialization.from_bytes`] (https:lax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: shard_files (`List[str]`: The list of shard files to load. Returns: `Dict`: A nested dictionary of the model parameters, in the expected format for flax models : `{'model': {'params': {'...'}}}`. """ # Load the index state_sharded_dict = {}
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
for shard_file in shard_files: # load using msgpack utils try: with open(shard_file, "rb") as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: with open(shard_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {shard_file} to Flax deserializable object. ")
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
state = flatten_dict(state, sep="/") state_sharded_dict.update(state) del state gc.collect() # the state dict is unflattened to the match the format of model.params return unflatten_dict(state_sharded_dict, sep="/") @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()`. Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. # Alternativelly, the model can also have a custom `generate` function. if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): return False return True
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
@classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype = jnp.float32, *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, ignore_mismatched_sizes: bool = False, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r""" Instantiate a pretrained flax model from a pre-trained model configuration. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pt index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_pt` should be set to `True`. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs).
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
- The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file (see docstring of `pretrained_model_name_or_path` argument).
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
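To make the `dtype` note above concrete, here is a hedged sketch (the model id is only an example) that sets the computation dtype at load time and then casts the parameters separately with `to_bf16`:

```python
import jax.numpy as jnp
from transformers import FlaxBertModel

# `dtype` only controls the computation dtype; the parameters keep their own dtype.
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", dtype=jnp.bfloat16)

# Cast the parameters as well, using the helper mentioned in the docstring.
model.params = model.to_bf16(model.params)
```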
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
    Whether or not to ignore mismatches in weight sizes between the checkpoint and the model, instead of
    raising an error (for instance, when instantiating a model with 10 labels from a checkpoint with 3
    labels).
force_download (`bool`, *optional*, defaults to `False`):
    Whether or not to force the (re-)download of the model weights and configuration files, overriding the
    cached versions if they exist.
resume_download:
    Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of
    Transformers.
proxies (`Dict[str, str]`, *optional*):
    A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
    'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
local_files_only (`bool`, *optional*, defaults to `False`):
    Whether or not to only look at local files (i.e., do not try to download the model).
token (`str` or `bool`, *optional*):
    The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
    the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
    The specific model version to use. It can be a branch name, a tag name, or a commit id. Since we use a
    git-based system for storing models and other artifacts on huggingface.co, `revision` can be any
    identifier allowed by git.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
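A small usage sketch of the hub-related arguments just described (the repository id is only an example):

```python
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained(
    "google-bert/bert-base-cased",
    revision="main",         # branch name, tag name, or commit id
    token=True,              # reuse the token stored by `huggingface-cli login`
    local_files_only=False,  # set to True to forbid any download attempt
)
```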
<Tip>

To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

</Tip>

subfolder (`str`, *optional*, defaults to `""`):
    In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
    specify the folder name here.
kwargs (remaining dictionary of keyword arguments, *optional*):
    Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
    `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
    automatically loaded:
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. Examples: ```python >>> from transformers import BertConfig, FlaxBertModel
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
>>> # Download model and configuration from huggingface.co and cache.
>>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
>>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
>>> model = FlaxBertModel.from_pretrained("./test/saved_model/")
>>> # Loading from a PyTorch checkpoint file instead of a Flax model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file("./pt_model/config.json")
>>> model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)
```"""
from_pt = kwargs.pop("from_pt", False)
resume_download = kwargs.pop("resume_download", None)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
trust_remote_code = kwargs.pop("trust_remote_code", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
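The two kwargs paths described in the docstring above can be illustrated as follows (a sketch; `output_attentions` stands in for any configuration attribute):

```python
from transformers import BertConfig, FlaxBertModel

# No `config` passed: kwargs first update the auto-loaded configuration,
# and any keys that are not config attributes are forwarded to __init__.
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", output_attentions=True)

# `config` passed explicitly: it is used as-is, and kwargs go straight to __init__.
config = BertConfig.from_pretrained("google-bert/bert-base-cased", output_attentions=True)
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", config=config)
```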
from_auto_class = kwargs.pop("_from_auto", False) _do_init = kwargs.pop("_do_init", True) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
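The private `_do_init` flag popped above skips parameter initialization; as a sketch of that behavior (a private argument, so subject to change), the loaded parameters are then returned alongside the model:

```python
from transformers import FlaxBertModel

# With _do_init=False, random initialization is skipped and the loaded
# parameters are returned separately instead of being attached to the model.
model, params = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", _do_init=False)
```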
# Not relevant for Flax Models _ = kwargs.pop("adapter_kwargs", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if trust_remote_code is True: logger.warning( "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." ) user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
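The offline check above is driven by environment configuration; a hedged sketch, assuming the `TRANSFORMERS_OFFLINE` environment variable (read when transformers is imported):

```python
import os

# Assumption: offline mode is toggled via this environment variable,
# which must be set before transformers is imported.
os.environ["TRANSFORMERS_OFFLINE"] = "1"

from transformers import FlaxBertModel

# With offline mode forced, only locally cached files are considered.
model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
```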
# Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs, ) else: model_kwargs = kwargs.copy() if commit_hash is None: commit_hash = getattr(config, "_commit_hash", None)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
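As a sketch of the `return_unused_kwargs=True` behavior relied on above (`my_custom_kwarg` is a hypothetical extra keyword):

```python
from transformers import BertConfig

# Keys matching config attributes update the config; the rest are handed back
# so they can later be forwarded to the model's __init__.
config, unused = BertConfig.from_pretrained(
    "google-bert/bert-base-cased",
    return_unused_kwargs=True,
    output_attentions=True,  # consumed by the config
    my_custom_kwarg=123,     # hypothetical: not a config attribute, comes back in `unused`
)
print(unused)  # expected (under this assumption): {'my_custom_kwarg': 123}
```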
# Add the dtype to model_kwargs model_kwargs["dtype"] = dtype # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. is_sharded = False
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
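For context, the index file that marks a checkpoint as sharded is a small JSON document mapping parameter keys to shard files; a hypothetical example (both the layout and the file names are assumptions) looks roughly like this:

```python
import json

# Hypothetical contents of a sharded-checkpoint index such as flax_model.msgpack.index.json.
index = json.loads(
    """
    {
      "metadata": {"total_size": 497764352},
      "weight_map": {
        "encoder/layer_0/kernel": "flax_model-00001-of-00002.msgpack",
        "encoder/layer_1/kernel": "flax_model-00002-of-00002.msgpack"
      }
    }
    """
)
# The loader only needs the distinct shard files referenced by the weight map.
shard_files = sorted(set(index["weight_map"].values()))
```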
# Load model if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)): # Load from a sharded Flax checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) is_sharded = True elif is_safetensors_available() and os.path.isfile(
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) ): # Load from a safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME) elif from_pt and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) ): # Load from a sharded pytorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) is_sharded = True # At this stage we don't have a weight file so we will raise an error.
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
elif is_safetensors_available() and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) ): # Load from a sharded safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) is_sharded = True raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!") elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " "weights." ) else: raise EnvironmentError(
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." ) elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: if from_pt: filename = WEIGHTS_NAME else: filename = FLAX_WEIGHTS_NAME
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
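To summarize the local-directory branching above, here is a simplified, hypothetical helper reproducing the filename precedence (the constant values shown are the conventional ones and are an assumption; the helper itself is not part of the library):

```python
import os

FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"


def pick_local_archive(directory, from_pt=False):
    """Return (filename, is_sharded) following the precedence of the code above (sketch)."""
    candidates = [
        (FLAX_WEIGHTS_NAME, False),       # single-file Flax checkpoint
        (FLAX_WEIGHTS_INDEX_NAME, True),  # sharded Flax checkpoint
        (SAFE_WEIGHTS_NAME, False),       # safetensors checkpoint
    ]
    if from_pt:
        candidates += [
            (WEIGHTS_NAME, False),        # single-file PyTorch checkpoint
            (WEIGHTS_INDEX_NAME, True),   # sharded PyTorch checkpoint
        ]
    for name, is_sharded in candidates:
        if os.path.isfile(os.path.join(directory, name)):
            return name, is_sharded
    raise EnvironmentError(f"No usable weight file found in {directory}.")
```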
try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
61
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_utils.py
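As a rough sketch of what the `cached_file` call above resolves (using `transformers.utils.cached_file` directly; the repository id and filename are examples):

```python
from transformers.utils import cached_file

# Resolve a single file from the Hub (or from the local cache) to a local path.
resolved = cached_file(
    "google-bert/bert-base-cased",
    "flax_model.msgpack",
    revision="main",
    force_download=False,
    local_files_only=False,
)
print(resolved)  # local path inside the Hugging Face cache
```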