Dataset columns (one row per extracted class definition):

  column        dtype    range / distinct values
  text          string   lengths 31 - 243k
  type          string   1 distinct value ("class_definition")
  start         int64    36 - 275k
  end           int64    286 - 280k
  depth         int64    0 - 1
  filepath      string   lengths 85 - 188
  parent_class  string   3 distinct values
  class_index   int64    0 - 10.8k
class FlaxCausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if `config.is_decoder = True`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
19,231
21,757
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
200
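The rows in this slice are Flax output dataclasses from `modeling_flax_outputs.py`. As orientation, here is a minimal sketch (not part of the dataset) of how such a `ModelOutput` subclass is produced and read; the checkpoint name is illustrative and assumes `transformers` with `flax`/`jax` installed.

```python
# Minimal sketch: how a Flax ModelOutput subclass such as FlaxMaskedLMOutput is consumed.
from transformers import AutoTokenizer, FlaxAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
outputs = model(**inputs, output_hidden_states=True)  # a FlaxMaskedLMOutput instance

# ModelOutput subclasses support attribute, key, and positional access.
logits = outputs.logits                 # (batch_size, sequence_length, vocab_size)
same = outputs["logits"]
first = outputs[0]                      # positional access skips fields left at None
hidden_states = outputs.hidden_states   # tuple: embeddings output + one entry per layer
print(logits.shape, len(hidden_states))
```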
class FlaxMaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
21,783
23,097
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
201
class FlaxSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
23,165
27,095
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
202
class FlaxNextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: logits (`jnp.ndarray` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
27,121
28,490
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
203
class FlaxSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
28,516
29,815
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
204
class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
29,841
33,769
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
205
class FlaxMultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: logits (`jnp.ndarray` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
33,795
35,139
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
206
class FlaxTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
35,165
36,435
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
207
class FlaxQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
36,461
37,883
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
208
class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. Args: start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class_definition
37,909
41,960
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
null
209
class Cache(torch.nn.Module): """ Base, abstract class for all caches. The actual data structure is specific to each subclass. """ def __init__(self): super().__init__() def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. These are specific to each subclass and allow new types of cache to be created. Return: A tuple containing the updated key and value states. """ raise NotImplementedError("Make sure to implement `update` in a subclass.") def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # TODO: deprecate this function in favor of `cache_position` raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.") def get_max_cache_shape(self) -> Optional[int]: """Returns the maximum sequence length (i.e. max capacity) of the cache object""" raise NotImplementedError("Make sure to implement `get_max_cache_shape` in a subclass.") def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int: """Given the sequence length of the new inputs, returns the usable length of the cache.""" # Cache without size limit -> all cache is usable # Cache with size limit -> if the length cache plus the length of the new inputs is larger the maximum cache # length, we will need to evict part of the cache (and thus not all cache is usable) max_length = self.get_max_cache_shape() previous_seq_length = self.get_seq_length(layer_idx) if max_length is not None and previous_seq_length + new_seq_length > max_length: return max_length - new_seq_length return previous_seq_length def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): if self.key_cache[layer_idx] != []: device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) if self.value_cache[layer_idx] != []: device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) @property def seen_tokens(self): logger.warning_once( "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` " "model input instead." ) if hasattr(self, "_seen_tokens"): return self._seen_tokens else: return None
class_definition
542
4,041
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
210
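Since `Cache` only fixes the interface (`update`, `get_seq_length`, `get_max_cache_shape`), a subclass mainly has to decide how key/value tensors are stored. The subclass below is purely illustrative (it does not ship with transformers) and shows the smallest shape the contract can take.

```python
# Illustrative sketch of the Cache contract; TinyListCache is a hypothetical name.
from typing import Any, Dict, List, Optional, Tuple

import torch
from transformers.cache_utils import Cache


class TinyListCache(Cache):
    def __init__(self) -> None:
        super().__init__()
        self.key_cache: List[torch.Tensor] = []
        self.value_cache: List[torch.Tensor] = []

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # First call for a layer appends; later calls concatenate along the sequence axis.
        if len(self.key_cache) <= layer_idx:
            self.key_cache.append(key_states)
            self.value_cache.append(value_states)
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_max_cache_shape(self) -> Optional[int]:
        return None  # unbounded, like DynamicCache
```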
class CacheConfig: """ Base class for cache configs """ cache_implementation: None @classmethod def from_dict(cls, config_dict, **kwargs): """ Constructs a CacheConfig instance from a dictionary of parameters. Args: config_dict (Dict[str, Any]): Dictionary containing configuration parameters. **kwargs: Additional keyword arguments to override dictionary values. Returns: CacheConfig: Instance of CacheConfig constructed from the dictionary. """ config = cls(**config_dict) to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) return config # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_json_file def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `QuantizationConfig()` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: config_dict = self.to_dict() json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" writer.write(json_string) # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_dict def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ return copy.deepcopy(self.__dict__) # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__iter__ def __iter__(self): """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin""" for attr, value in copy.deepcopy(self.__dict__).items(): yield attr, value # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__repr__ def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def to_json_string(self): """ Serializes this instance to a JSON formatted string. Returns: str: JSON formatted string representing the configuration instance. """ return json.dumps(self.__dict__, indent=2) + "\n" # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.update def update(self, **kwargs): """ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes, returning all the unused kwargs. Args: kwargs (`Dict[str, Any]`): Dictionary of attributes to tentatively update this class. Returns: `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. """ to_remove = [] for key, value in kwargs.items(): if hasattr(self, key): setattr(self, key, value) to_remove.append(key) # Remove all the attributes that were updated, without modifying the input dict unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} return unused_kwargs
class_definition
4,055
7,863
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
211
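`CacheConfig` itself takes no constructor arguments, so its `from_dict`/`update`/`__iter__` helpers are easiest to see through a small subclass. The subclass below is hypothetical and exists only to exercise those helpers.

```python
# Hypothetical subclass used only to demonstrate the CacheConfig helpers above.
from transformers.cache_utils import CacheConfig


class DemoCacheConfig(CacheConfig):
    def __init__(self, window: int = 8, device: str = "cpu"):
        self.window = window
        self.device = device


config = DemoCacheConfig.from_dict({"window": 16}, device="cuda")  # extra kwargs override fields
print(dict(config))                 # __iter__ allows dict(obj) -> {'window': 16, 'device': 'cuda'}

leftover = config.update(window=32, unknown_key=True)
print(leftover)                     # {'unknown_key': True}: kwargs that matched no attribute
print(config.to_json_string())      # JSON-serializable fields round-trip through json.dumps
```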
class QuantizedCacheConfig(CacheConfig): """ Configuration class for quantized cache settings. Attributes: backend (`str`, *optional*, defaults to `"quanto"`): Backend to use when performing quantization. Can be one of [`quanto`, `HQQ`]. nbits (`Optional[int]`, *optional*, defaults to 4): Number of bits, can be 2 or 4 for the `quanto` backend and one of [1, 2, 3, 4, 8] for the `HQQ` backend. axis_key (`int`, *optional*, defaults to 0): Axis over which to perform grouping for the key tensors. Can be [0, -1] for `quanto` backend and [0, 1] for `HQQ` backend. axis_value (`int`, *optional*, defaults to 0): Axis over which to perform grouping for the value tensors. Can be [0, -1] for `quanto` backend and [0, 1] for `HQQ` backend. q_group_size (`Optional[int]`, *optional*, defaults to 64): Size of the quantization group, should be a divisor of the model's hidden dimension. Defaults to 64. residual_length (`Optional[int]`, *optional*, defaults to 128): Length of the residual cache which will always be stored in original precision. Defaults to 128. compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float16`): The default dtype used for computations in the model. Keys and Values will be cast to this dtype after dequantization. device (`str`, *optional*, defaults to `"cpu"`): Device on which to perform computations, should be same as the model's device. """ def __init__( self, backend: str = "quanto", nbits: Optional[int] = 4, axis_key: Optional[int] = 0, axis_value: Optional[int] = 0, q_group_size: Optional[int] = 64, residual_length: Optional[int] = 128, compute_dtype: Optional[torch.dtype] = torch.float16, device: Optional[str] = "cpu", ): self.backend = backend self.nbits = nbits self.axis_key = axis_key self.axis_value = axis_value self.q_group_size = q_group_size self.residual_length = residual_length self.compute_dtype = compute_dtype self.device = device def validate(self): """Validates if the arguments passed are correct""" incorrect_arg_msg = ( "Some of the keys in `cache_config` are defined incorrectly. `{key}` should be {correct_value}` " "but found {found_value}" ) # Check that the values are reasonable in general (nbits, axis) # Later in QuantizedCache init we check if they are supported for that particular backend if self.nbits not in [1, 2, 3, 4, 8]: raise ValueError( incorrect_arg_msg.format( key="nbits", correct_value="one of [1, 2, 3, 4, 8]", found_value=self.nbits, ), ) if self.q_group_size <= 0: raise ValueError( incorrect_arg_msg.format( key="q_group_size", correct_value="a positive integer", found_value=self.q_group_size, ), ) if self.residual_length < 0: raise ValueError( incorrect_arg_msg.format( key="residual_length", correct_value="a non-negative integer", found_value=self.residual_length, ), ) if self.axis_key not in [0, 1, -1]: raise ValueError( incorrect_arg_msg.format( key="axis_key", correct_value="`0`, `1` or `-1`", found_value=self.axis_key, ), ) if self.axis_value not in [0, 1, -1]: raise ValueError( incorrect_arg_msg.format( key="axis_value", correct_value="`0`, `1` or `-1`", found_value=self.axis_value, ), )
class_definition
7,877
12,016
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
212
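A short usage sketch for the config above; the values are illustrative, and `validate()` is what rejects out-of-range settings before a quantized cache is built.

```python
# Sketch: constructing a QuantizedCacheConfig and letting validate() catch bad values.
import torch
from transformers.cache_utils import QuantizedCacheConfig

cfg = QuantizedCacheConfig(
    backend="quanto",
    nbits=4,
    q_group_size=64,
    residual_length=128,
    compute_dtype=torch.float16,
    device="cpu",
)
cfg.validate()  # in range, so no error is raised

bad = QuantizedCacheConfig(nbits=5)
try:
    bad.validate()
except ValueError as err:
    print(err)  # nbits=5 is not an allowed bit width
```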
class StaticCacheConfig(CacheConfig): """ Configuration class for static cache settings. """ cache_implementation = "static" def __init__(self, batch_size: int, max_cache_len: int, device="cpu"): self.batch_size = batch_size self.max_cache_len = max_cache_len self.device = device def validate(self): """Validates if the arguments passed are correct""" incorrect_arg_msg = ( "Some of the keys in `cache_config` are defined incorrectly. `{key}` should be {correct_value}` " "but found {found_value}" ) if self.batch_size <= 0: raise ValueError( incorrect_arg_msg.format( key="batch_size", correct_value="> 0", found_value=self.batch_size, ), ) if self.max_cache_len <= 0: raise ValueError( incorrect_arg_msg.format( key="max_cache_len", correct_value="> 0", found_value=self.max_cache_len, ), )
class_definition
12,030
13,173
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
213
class DynamicCache(Cache): """ A cache that grows dynamically as more tokens are generated. This is the default for generative models. It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is `[batch_size, num_heads, seq_len, head_dim]`. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> past_key_values = DynamicCache() >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation DynamicCache() ``` """ @deprecate_kwarg("num_hidden_layers", version="4.47.0") def __init__(self, num_hidden_layers: Optional[int] = None) -> None: super().__init__() self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: """ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the sequence length. """ if layer_idx < len(self): return (self.key_cache[layer_idx], self.value_cache[layer_idx]) else: raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") def __iter__(self): """ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over keys and values """ for layer_idx in range(len(self)): yield (self.key_cache[layer_idx], self.value_cache[layer_idx]) def __len__(self): """ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds to the number of layers in the model. """ return len(self.key_cache) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. Return: A tuple containing the updated key and value states. 
""" # Update the number of seen tokens if layer_idx == 0: self._seen_tokens += key_states.shape[-2] # Update the cache if key_states is not None: if len(self.key_cache) <= layer_idx: # There may be skipped layers, fill them with empty lists for _ in range(len(self.key_cache), layer_idx): self.key_cache.append([]) self.value_cache.append([]) self.key_cache.append(key_states) self.value_cache.append(value_states) elif ( len(self.key_cache[layer_idx]) == 0 ): # fills previously skipped layers; checking for tensor causes errors self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # TODO: deprecate this function in favor of `cache_position` is_empty_layer = ( len(self.key_cache) == 0 # no cache in any layer or len(self.key_cache) <= layer_idx # skipped `layer_idx` and hasn't run a layer with cache after it or len(self.key_cache[layer_idx]) == 0 # the layer has no cache ) layer_seq_length = self.key_cache[layer_idx].shape[-2] if not is_empty_layer else 0 return layer_seq_length def get_max_cache_shape(self) -> Optional[int]: """Returns the maximum sequence length of the cache object. DynamicCache does not have a maximum length.""" return None def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for backward compatibility.""" legacy_cache = () for layer_idx in range(len(self)): legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),) return legacy_cache @classmethod @deprecate_kwarg("num_hidden_layers", version="4.47.0") def from_legacy_cache( cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_hidden_layers: int = None ) -> "DynamicCache": """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for backward compatibility.""" cache = cls() if past_key_values is not None: for layer_idx in range(len(past_key_values)): key_states, value_states = past_key_values[layer_idx] cache.update(key_states, value_states, layer_idx) return cache def crop(self, max_length: int): """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search.""" # In case it is negative if max_length < 0: max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return self._seen_tokens = max_length for idx in range(len(self.key_cache)): if self.key_cache[idx] != []: self.key_cache[idx] = self.key_cache[idx][..., :max_length, :] self.value_cache[idx] = self.value_cache[idx][..., :max_length, :] @deprecate_kwarg("num_hidden_layers", version="4.47.0") def batch_split( self, full_batch_size: int, split_size: int, num_hidden_layers: int = None ) -> List["DynamicCache"]: """Split the current instance into a list of `DynamicCache` by the batch size. 
This will be used by `_split_model_inputs()` in `generation.utils`""" out = [] for i in range(0, full_batch_size, split_size): current_split = DynamicCache() current_split._seen_tokens = self._seen_tokens current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache] current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache] out.append(current_split) return out @classmethod @deprecate_kwarg("num_hidden_layers", version="4.47.0") def from_batch_splits(cls, splits: List["DynamicCache"], num_hidden_layers: int = None) -> "DynamicCache": """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in `generation.utils`""" cache = cls() for idx in range(len(splits[0])): key_cache = [current.key_cache[idx] for current in splits if current.key_cache[idx] != []] value_cache = [current.value_cache[idx] for current in splits if current.value_cache[idx] != []] if key_cache != []: layer_keys = torch.cat(key_cache, dim=0) layer_values = torch.cat(value_cache, dim=0) cache.update(layer_keys, layer_values, idx) return cache def batch_repeat_interleave(self, repeats: int): """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search.""" for layer_idx in range(len(self)): self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0) self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0) def batch_select_indices(self, indices: torch.Tensor): """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search.""" for layer_idx in range(len(self)): self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...] self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
class_definition
13,176
22,658
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
214
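Beyond the docstring example, the legacy-format helpers are what integrations use when they still pass tuples of key/value tensors around. A short sketch (checkpoint name as in the docstring, purely for illustration):

```python
# Sketch: filling a DynamicCache in a forward pass, then round-tripping the legacy format.
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
inputs = tokenizer("My name is Qwen2", return_tensors="pt")

cache = DynamicCache()
outputs = model(**inputs, past_key_values=cache, use_cache=True)
print(len(cache), cache.get_seq_length())    # number of layers, tokens seen so far

legacy = cache.to_legacy_cache()             # ((key, value), ...) one pair per layer
restored = DynamicCache.from_legacy_cache(legacy)
assert restored.get_seq_length() == cache.get_seq_length()
```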
class OffloadedCache(DynamicCache): """ A drop-in replacement for DynamicCache that conserves GPU memory at the expense of more CPU memory. Useful for generating from models with very long context. In addition to the default CUDA stream, where all forward() computations happen, this class uses another stream, the prefetch stream, which it creates itself. Since scheduling of operations on separate streams happens independently, this class uses the prefetch stream to asynchronously prefetch the KV cache of layer k+1 when layer k is executing. The movement of the layer k-1 cache to the CPU is handled by the default stream as a simple way to ensure the eviction is scheduled after all computations on that cache are finished. """ def __init__(self) -> None: if not torch.cuda.is_available(): raise RuntimeError("OffloadedCache can only be used with a GPU") super().__init__() self.original_device = [] self.prefetch_stream = torch.cuda.Stream() self.beam_idx = None # used to delay beam search operations def prefetch_layer(self, layer_idx: int): "Starts prefetching the next layer cache" if layer_idx < len(self): with torch.cuda.stream(self.prefetch_stream): # Prefetch next layer tensors to GPU device = self.original_device[layer_idx] self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device, non_blocking=True) self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device, non_blocking=True) def evict_previous_layer(self, layer_idx: int): "Moves the previous layer cache to the CPU" if len(self) > 2: # We do it on the default stream so it occurs after all earlier computations on these tensors are done prev_layer_idx = (layer_idx - 1) % len(self) self.key_cache[prev_layer_idx] = self.key_cache[prev_layer_idx].to("cpu", non_blocking=True) self.value_cache[prev_layer_idx] = self.value_cache[prev_layer_idx].to("cpu", non_blocking=True) def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: "Gets the cache for this layer to the device. Prefetches the next and evicts the previous layer." if layer_idx < len(self): # Evict the previous layer if necessary torch.cuda.current_stream().synchronize() self.evict_previous_layer(layer_idx) # Load current layer cache to its original device if not already there original_device = self.original_device[layer_idx] self.prefetch_stream.synchronize() key_tensor = self.key_cache[layer_idx] value_tensor = self.value_cache[layer_idx] # Now deal with beam search ops which were delayed if self.beam_idx is not None: self.beam_idx = self.beam_idx.to(original_device) key_tensor = key_tensor.index_select(0, self.beam_idx) value_tensor = value_tensor.index_select(0, self.beam_idx) # Prefetch the next layer self.prefetch_layer((layer_idx + 1) % len(self)) return (key_tensor, value_tensor) else: raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") def reorder_cache(self, beam_idx: torch.LongTensor): """Saves the beam indices and reorders the cache when the tensor is back to its device.""" # We delay this operation until the tensors are back to their original # device because performing torch.index_select on the CPU is very slow del self.beam_idx self.beam_idx = beam_idx.clone() def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. 
value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `OffloadedCache`. Return: A tuple containing the updated key and value states. """ # Update the number of seen tokens if layer_idx == 0: self._seen_tokens += key_states.shape[-2] # Update the cache if len(self.key_cache) < layer_idx: raise ValueError("OffloadedCache does not support model usage where layers are skipped. Use DynamicCache.") elif len(self.key_cache) == layer_idx: self.key_cache.append(key_states) self.value_cache.append(value_states) self.original_device.append(key_states.device) self.evict_previous_layer(layer_idx) else: key_tensor, value_tensor = self[layer_idx] self.key_cache[layer_idx] = torch.cat([key_tensor, key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([value_tensor, value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx] # According to https://docs.python.org/3/library/exceptions.html#NotImplementedError # if a method is not supposed to be supported in a subclass we should set it to None from_legacy_cache = None to_legacy_cache = None
class_definition
22,661
28,434
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
215
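`OffloadedCache` is used exactly like `DynamicCache`; the difference is that only the layer currently executing keeps its key/value tensors on the GPU. A hedged usage sketch (requires CUDA; checkpoint and generation settings are illustrative):

```python
# Sketch: generating with OffloadedCache to trade GPU memory for host/device traffic.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, OffloadedCache

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-0.5B-Instruct", torch_dtype=torch.float16
).to("cuda")
inputs = tokenizer("My name is Qwen2", return_tensors="pt").to("cuda")

generated = model.generate(**inputs, max_new_tokens=32, past_key_values=OffloadedCache())
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```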
class QuantizedCache(DynamicCache): """ A quantizer cache similar to what is described in the [KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache paper](https://arxiv.org/abs/2402.02750). It allows the model to generate longer sequence length without allocating too much memory for Key and Value cache by applying quantization. The cache has two types of storage, one for original precision and one for the quantized cache. A `residual length` is set as a maximum capacity for the original precision cache. When the length goes beyond maximum capacity, the original precision cache is discarded and moved into the quantized cache. The quantization is done per-channel with a set `q_group_size` for both Keys and Values, in contrast to what was described in the paper. It stores Keys and Values a list of quantized tensors (tuples in case we need to store metadata), one for each layer. Additionally, it stores the Key and Value in original precision states as a list of tensors, one for each layer. The size of each tensor is `[batch_size, num_heads, seq_len - residual_length, head_dim]` """ def __init__(self, cache_config: QuantizedCacheConfig) -> None: super().__init__() self._quantized_key_cache: List[torch.Tensor] = [] self._quantized_value_cache: List[torch.Tensor] = [] self.nbits = cache_config.nbits self.residual_length = cache_config.residual_length self.q_group_size = cache_config.q_group_size self.axis_key = cache_config.axis_key self.axis_value = cache_config.axis_value self.compute_dtype = cache_config.compute_dtype self.device = cache_config.device super().__init__() def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # Update the number of seen tokens if layer_idx == 0: self._seen_tokens += key_states.shape[-2] if len(self.key_cache) < layer_idx: raise ValueError("QuantizedCache does not support model usage where layers are skipped. 
Use DynamicCache.") elif len(self.key_cache) == layer_idx: self._quantized_key_cache.append(self._quantize(key_states.contiguous(), axis=self.axis_key)) self._quantized_value_cache.append(self._quantize(value_states.contiguous(), axis=self.axis_value)) self.key_cache.append(torch.zeros(0, dtype=key_states.dtype, device=key_states.device)) self.value_cache.append(torch.zeros(0, dtype=key_states.dtype, device=key_states.device)) keys_to_return, values_to_return = key_states, value_states else: dequant_key = self._dequantize(self._quantized_key_cache[layer_idx]) dequant_value = self._dequantize(self._quantized_value_cache[layer_idx]) keys_to_return = [dequant_key, self.key_cache[layer_idx], key_states] values_to_return = [dequant_value, self.value_cache[layer_idx], value_states] keys_to_return = torch.cat(keys_to_return, dim=-2) values_to_return = torch.cat(values_to_return, dim=-2) if ( self.key_cache[layer_idx].dim() == 4 and self.key_cache[layer_idx].shape[-2] + 1 >= self.residual_length ): self._quantized_key_cache[layer_idx] = self._quantize(keys_to_return.contiguous(), axis=self.axis_key) self._quantized_value_cache[layer_idx] = self._quantize( values_to_return.contiguous(), axis=self.axis_value ) self.key_cache[layer_idx] = torch.zeros(0, dtype=key_states.dtype, device=key_states.device) self.value_cache[layer_idx] = torch.zeros(0, dtype=key_states.dtype, device=key_states.device) else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return keys_to_return, values_to_return def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" if len(self.key_cache) <= layer_idx: return 0 # since we cannot get the seq_length of each layer directly and rely on `_seen_tokens` which is # updated every "layer_idx" == 0, this is a hack to get the actual seq_length for the given layer_idx # this part of code otherwise fails when used to verify attn_weight shape in some models return self._seen_tokens if layer_idx == 0 else self._seen_tokens - 1 def _quantize(self, tensor, axis): """Quantizes a key/value using a defined quantization method.""" raise NotImplementedError("Make sure to implement `_quantize` in a subclass.") def _dequantize(self, q_tensor): """Dequantizes back the tensor that was quantized by `self._quantize()`""" raise NotImplementedError("Make sure to implement `_dequantize` in a subclass.")
class_definition
28,437
33,697
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
216
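`QuantizedCache` only implements the bookkeeping (residual buffer plus quantized storage) and leaves `_quantize`/`_dequantize` to the backend. The subclass below is purely illustrative: it "quantizes" by down-casting to float16, which shows the contract but is not a real quantization scheme.

```python
# Toy illustration of the QuantizedCache backend contract (not a real quantizer).
import torch
from transformers.cache_utils import QuantizedCache, QuantizedCacheConfig


class ToyQuantizedCache(QuantizedCache):  # hypothetical
    def _quantize(self, tensor, axis):
        # Real backends group along `axis` with self.q_group_size; we just store fp16.
        return tensor.to(torch.float16)

    def _dequantize(self, q_tensor):
        return q_tensor.to(self.compute_dtype)


cache = ToyQuantizedCache(QuantizedCacheConfig(residual_length=4, compute_dtype=torch.float32))
k = torch.randn(1, 2, 3, 8)   # [batch_size, num_heads, seq_len, head_dim]
v = torch.randn(1, 2, 3, 8)
k_out, v_out = cache.update(k, v, layer_idx=0)
print(k_out.shape, cache.get_seq_length())   # torch.Size([1, 2, 3, 8]) 3
```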
class QuantoQuantizedCache(QuantizedCache): """ Quantized Cache class that uses `quanto` as a backend to perform quantization. Current implementation supports `int2` and `int4` dtypes only. Parameters: cache_config (`QuantizedCacheConfig`): A configuration containing all the arguments to be used by the quantizer, including axis, qtype and group size. Example: ```python >>> # Run pip install quanto first if you don't have it yet >>> from transformers import AutoTokenizer, AutoModelForCausalLM, QuantoQuantizedCache, QuantizedCacheConfig >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> cache_config = QuantizedCacheConfig(nbits=4) >>> past_key_values = QuantoQuantizedCache(cache_config=cache_config) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation QuantoQuantizedCache() ``` """ def __init__(self, cache_config: CacheConfig) -> None: super().__init__(cache_config) if is_optimum_quanto_available(): optimum_quanto_version = version.parse(importlib.metadata.version("optimum-quanto")) if optimum_quanto_version <= version.parse("0.2.5"): raise ImportError( f"You need optimum-quanto package version to be greater or equal than 0.2.5 to use `QuantoQuantizedCache`. Detected version {optimum_quanto_version}." ) from optimum.quanto import MaxOptimizer, qint2, qint4 if self.nbits not in [2, 4]: raise ValueError(f"`nbits` for `quanto` backend has to be one of [`2`, `4`] but got {self.nbits}") if self.axis_key not in [0, -1]: raise ValueError(f"`axis_key` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_key}") if self.axis_value not in [0, -1]: raise ValueError( f"`axis_value` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_value}" ) self.qtype = qint4 if self.nbits == 4 else qint2 self.optimizer = MaxOptimizer() # hardcode as it's the only one for per-channel quantization def _quantize(self, tensor, axis): # We have two different API since in optimum-quanto, we don't use AffineQuantizer anymore if is_optimum_quanto_available(): from optimum.quanto import quantize_weight scale, zeropoint = self.optimizer(tensor, self.qtype, axis, self.q_group_size) qtensor = quantize_weight(tensor, self.qtype, axis, scale, zeropoint, self.q_group_size) return qtensor def _dequantize(self, qtensor): return qtensor.dequantize()
class_definition
33,700
36,767
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
217
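Besides building the cache object by hand as in the docstring, the same backend can be selected through `generate`. The call below follows the cache-selection keywords documented for recent transformers releases; treat the exact argument names as an assumption to verify against your installed version.

```python
# Sketch: selecting the quanto-quantized cache via generate() keywords
# (argument names per recent transformers docs; verify against your version).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
inputs = tokenizer("My name is Qwen2", return_tensors="pt")

generated = model.generate(
    **inputs,
    max_new_tokens=20,
    cache_implementation="quantized",
    cache_config={"backend": "quanto", "nbits": 4},
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```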
class HQQQuantizedCache(QuantizedCache): """ Quantized Cache class that uses `HQQ` as a backend to perform quantization. Current implementation supports `int2`, `int4`, `int8` dtypes. Parameters: cache_config (`QuantizedCacheConfig`): A configuration containing all the arguments to be used by the quantizer, including axis, qtype and group size. Example: ```python >>> # Run pip install hqq first if you don't have it yet >>> from transformers import AutoTokenizer, AutoModelForCausalLM, HQQQuantizedCache, QuantizedCacheConfig >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> cache_config = QuantizedCacheConfig(nbits=4, axis_key=1, axis_value=1) >>> past_key_values = HQQQuantizedCache(cache_config=cache_config) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation HQQQuantizedCache() ``` """ def __init__(self, cache_config: CacheConfig) -> None: super().__init__(cache_config) if self.nbits not in [1, 2, 3, 4, 8]: raise ValueError( f"`nbits` for `HQQ` backend has to be one of [`1`, `2`, `3`, `4`, `8`] but got {self.nbits}" ) if self.axis_key not in [0, 1]: raise ValueError(f"`axis_key` for `HQQ` backend has to be one of [`0`, `1`] but got {self.axis_key}") if self.axis_value not in [0, 1]: raise ValueError(f"`axis_value` for `HQQ` backend has to be one of [`0`, `1`] but got {self.axis_value}") self.quantizer = HQQQuantizer def _quantize(self, tensor, axis): qtensor, meta = self.quantizer.quantize( tensor, axis=axis, device=self.device, compute_dtype=self.compute_dtype, nbits=self.nbits, group_size=self.q_group_size, ) meta["compute_dtype"] = self.compute_dtype self.quantizer.cuda(qtensor, meta=meta, device=self.device) # Move to device and cast to dtype return qtensor, meta def _dequantize(self, qtensor): quant_tensor, meta = qtensor tensor = self.quantizer.dequantize(quant_tensor, meta) return tensor
class_definition
36,770
39,346
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
218
class SinkCache(Cache): """ A cache that as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453). It allows the model to generate beyond the length of its context window, without losing fluency in the conversation. As it discards past tokens, the model will lose the ability to generate tokens that depend on the context that was discarded. It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is `[batch_size, num_heads, seq_len, head_dim]`. Parameters: window_length (`int`): The length of the context window. num_sink_tokens (`int`): The number of sink tokens. See the original paper for more information. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct") >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation SinkCache() ``` """ is_sliding = True def __init__(self, window_length: int, num_sink_tokens: int) -> None: super().__init__() self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] self.window_length = window_length self.num_sink_tokens = num_sink_tokens self.cos_sin_rerotation_cache = {} self._cos_cache = None self._sin_cache = None self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen @staticmethod def _rotate_half(x): x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def _apply_key_rotary_pos_emb( self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor ) -> torch.Tensor: rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin) return rotated_key_states def _get_rerotation_cos_sin( self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: if key_states.shape[-2] not in self.cos_sin_rerotation_cache: # Upcast to float32 temporarily for better accuracy cos = cos.to(torch.float32) sin = sin.to(torch.float32) # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :] shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]] original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :] shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]] rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin self.cos_sin_rerotation_cache[key_states.shape[-2]] = ( rerotation_cos.to(key_states.dtype).unsqueeze(0), rerotation_sin.to(key_states.dtype).unsqueeze(0), ) return self.cos_sin_rerotation_cache[key_states.shape[-2]] def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. 
A layer index can be optionally passed.""" # TODO: deprecate this function in favor of `cache_position` # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length if len(self.key_cache) <= layer_idx: return 0 return self.key_cache[layer_idx].shape[-2] def get_max_cache_shape(self) -> Optional[int]: """Returns the maximum sequence length of the cache object, in case of SinkCache it is the window length.""" return self.window_length def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`, `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the rotation as the tokens are shifted. Return: A tuple containing the updated key and value states. """ # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models # with partially rotated position embeddings, like Phi or Persimmon. sin = cache_kwargs.get("sin") cos = cache_kwargs.get("cos") partial_rotation_size = cache_kwargs.get("partial_rotation_size") using_rope = cos is not None and sin is not None # Update the number of seen tokens if layer_idx == 0: self._seen_tokens += key_states.shape[-2] # Update the sin/cos cache, which holds sin/cos values for all possible positions if using_rope and layer_idx == 0: # BC: some models still pass `sin`/`cos` with 2 dims. In those models, they are the full sin/cos. Remove # after all RoPE models have a llama-like cache utilization. if cos.dim() == 2: self._cos_cache = cos self._sin_cache = sin else: if self._cos_cache is None: self._cos_cache = cos[0, ...] self._sin_cache = sin[0, ...] 
elif self._cos_cache.shape[0] < self.window_length: self._cos_cache = torch.cat([self._cos_cache, cos[0, ...]], dim=0) self._sin_cache = torch.cat([self._sin_cache, sin[0, ...]], dim=0) # [bsz, num_heads, seq_len, head_dim] if len(self.key_cache) <= layer_idx: # Empty cache self.key_cache.append(key_states) self.value_cache.append(value_states) elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length: # Growing cache self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) else: # Shifting cache keys_to_keep = self.key_cache[layer_idx][ :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] : ] # On RoPE models, we need to recompute the Key rotation as the tokens are shifted if using_rope: rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin( key_states, self._cos_cache[: self.window_length], self._sin_cache[: self.window_length] ) if partial_rotation_size is not None: keys_to_keep, keys_pass = ( keys_to_keep[..., :partial_rotation_size], keys_to_keep[..., partial_rotation_size:], ) keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin) if partial_rotation_size is not None: keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1) # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens] self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2) sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens] values_to_keep = self.value_cache[layer_idx][ :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] : ] self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx]
class_definition
39,349
48,396
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
219
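The shifting branch of `SinkCache.update` relies on the fact that RoPE rotations compose: a key already rotated to one position can be moved to an earlier position by applying the rotary formula once more with the cosine/sine of the position difference, which is exactly what `_get_rerotation_cos_sin` precomputes from the cached tables. A small numeric check of that identity, using a scalar angle and hypothetical helper names:

```python
import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rope(x, theta):
    # Standard rotate-half formulation of a rotary embedding at angle `theta`
    return x * torch.cos(theta) + rotate_half(x) * torch.sin(theta)

x = torch.randn(8)
theta_old, theta_new = torch.tensor(1.3), torch.tensor(0.4)

rotated_old = apply_rope(x, theta_old)
# Re-rotate using only the angle difference, as SinkCache does when cached keys are shifted
delta = theta_new - theta_old
rerotated = rotated_old * torch.cos(delta) + rotate_half(rotated_old) * torch.sin(delta)

print(torch.allclose(rerotated, apply_rope(x, theta_new), atol=1e-6))  # True
```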
class StaticCache(Cache): """ Static Cache class to be used with `torch.compile(model)` and `torch.export()`. Parameters: config (`PretrainedConfig`): The configuration file defining the shape-related attributes required to initialize the static cache. batch_size (`int`): The batch size with which the model will be used. Note that a new instance must be instantiated if a smaller batch size is used. If you are manually setting the batch size, make sure to take into account the number of beams if you are running beam search max_cache_len (`int`): The maximum sequence length with which the model will be used. device (`torch.device` or `str`): The device on which the cache should be initialized. Should be the same as the layer. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The default `dtype` to use when initializing the layer. layer_device_map(`Dict[int, Union[str, torch.device, int]]]`, `optional`): Mapping between the layers and its device. This is required when you are manually initializing the cache and the model is splitted between differents gpus. You can know which layers mapped to which device by checking the associated device_map: `model.hf_device_map`. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache >>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf") >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") >>> inputs = tokenizer(text="My name is Llama", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate >>> max_generated_length = inputs.input_ids.shape[1] + 10 >>> past_key_values = StaticCache(config=model.config, batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation StaticCache() ``` """ # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. def __init__( self, config: PretrainedConfig, batch_size: int = None, max_cache_len: int = None, device: torch.device = None, dtype: torch.dtype = torch.float32, max_batch_size: Optional[int] = None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None, ) -> None: super().__init__() if batch_size is not None: logger.warning_once( f"The 'batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'max_batch_size' argument instead." ) self.max_batch_size = batch_size or max_batch_size self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len # Some model define a custom `head_dim` != config.hidden_size // config.num_attention_heads self.head_dim = ( config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads ) self.dtype = dtype self.num_key_value_heads = ( config.num_attention_heads if getattr(config, "num_key_value_heads", None) is None else config.num_key_value_heads ) self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] # Note: There will be significant perf decrease if switching to use 5D tensors instead. 
cache_shape = (self.max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim) for idx in range(config.num_hidden_layers): if layer_device_map is not None: layer_device = layer_device_map[idx] else: layer_device = device new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) # Notes: # 1. `mark_static_address` is used to tag the cache as an fixed data pointer, preventing cuda graph # breaks when updating the cache. It can't be used if the cache code is being compiled (but in that case # it is not needed anyway) # 2. `torch.export()` requires mutations to be registered as buffers. if not is_torchdynamo_compiling(): self.register_buffer(f"key_cache_{idx}", torch.zeros(cache_shape, dtype=dtype, device=layer_device)) self.register_buffer(f"value_cache_{idx}", torch.zeros(cache_shape, dtype=dtype, device=layer_device)) new_layer_key_cache = getattr(self, f"key_cache_{idx}") new_layer_value_cache = getattr(self, f"value_cache_{idx}") torch._dynamo.mark_static_address(new_layer_key_cache) torch._dynamo.mark_static_address(new_layer_value_cache) self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. It is VERY important to index using a tensor, otherwise you introduce a copy to the device. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. The `StaticCache` needs the `cache_position` input to know how where to write in the cache. Return: A tuple containing the updated key and value states. """ cache_position = cache_kwargs.get("cache_position") k_out = self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] key_states = key_states.to(k_out.dtype) value_states = value_states.to(v_out.dtype) if cache_position is None: k_out.copy_(key_states) v_out.copy_(value_states) else: # Note: here we use `tensor.index_copy_(dim, index, tensor)` that is equivalent to # `tensor[:, :, index] = tensor`, but the first one is compile-friendly and it does explicitly an in-place # operation, that avoids copies and uses less memory. try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: # The operator 'aten::index_copy.out' is not currently implemented for the MPS device. k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states return k_out, v_out def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states that were seen by the model.""" # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's # limit the check to the first batch member and head dimension. 
# TODO: deprecate this function in favor of `cache_position` return (self.key_cache[layer_idx][0, 0].any(dim=-1)).sum() def get_max_cache_shape(self) -> Optional[int]: return self.max_cache_len def reset(self): """Resets the cache values while preserving the objects""" for layer_idx in range(len(self.key_cache)): # In-place ops prevent breaking the static address self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() @property def batch_size(self): logger.warning_once( f"The 'batch_size' attribute of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'self.max_batch_size' attribute instead." ) return self.max_batch_size
class_definition
48,399
57,273
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
220
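What makes `StaticCache` compatible with `torch.compile` is that `update` never reallocates: the buffers are created once at full `max_cache_len` size and new states are written in place at `cache_position` with `index_copy_`, so data pointers and shapes stay fixed across decoding steps. A stripped-down sketch of that write pattern (toy shapes, no Transformers dependency):

```python
import torch

batch, heads, max_len, head_dim = 1, 2, 16, 4
key_buffer = torch.zeros(batch, heads, max_len, head_dim)  # pre-allocated once

# Prefill: write positions 0..4 in place
prefill_keys = torch.randn(batch, heads, 5, head_dim)
key_buffer.index_copy_(2, torch.arange(5), prefill_keys)

# One decode step: write a single new position, still without reallocating
new_key = torch.randn(batch, heads, 1, head_dim)
key_buffer.index_copy_(2, torch.tensor([5]), new_key)

# Same occupancy check as StaticCache.get_seq_length: count rows holding any non-zero value
seq_len = key_buffer[0, 0].any(dim=-1).sum()
print(seq_len)  # tensor(6)
```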
class SlidingWindowCache(StaticCache): """ Sliding Window Cache class to be used with `torch.compile` for models like Mistral that support sliding window attention. Every time when we try to update the cache, we compute the `indices` based on `cache_position >= self.config.sliding_window - 1`, if true(which means the cache can not hold all the old key value states and new states together because of the sliding window constraint), we need to do a cycle shift based on `indices` to replace the oldest states by the new key value states passed in. The `to_shift` is only true once we are above sliding_window. Thus with `sliding_window==64`: indices = (slicing + to_shift[-1].int()-1) % self.config.sliding_window tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0]) We overwrite the cache using these, then we always write at cache_position (clamped to `sliding_window`) Parameters: config (`PretrainedConfig`): The configuration file defining the shape-related attributes required to initialize the static cache. batch_size (`int`): The batch size with which the model will be used. Note that a new instance must be instantiated if a smaller batch size is used. max_cache_len (`int`): The maximum sequence length with which the model will be used. device (`torch.device` or `str`): The device on which the cache should be initialized. Should be the same as the layer. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The default `dtype` to use when initializing the layer. layer_device_map(`Dict[int, Union[str, torch.device, int]]]`, `optional`): Mapping between the layers and its device. This is required when you are manually initializing the cache and the model is splitted between differents gpus. You can know which layers mapped to which device by checking the associated device_map: `model.hf_device_map`. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, SlidingWindowCache >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3") >>> inputs = tokenizer(text="My name is Mistral", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate >>> max_generated_length = inputs.input_ids.shape[1] + 10 >>> past_key_values = SlidingWindowCache(config=model.config, batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation SlidingWindowCache() ``` """ is_sliding = True # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. 
def __init__( self, config: PretrainedConfig, batch_size: int = None, max_cache_len: int = None, device: torch.device = None, dtype: torch.dtype = torch.float32, max_batch_size: Optional[int] = None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None, ) -> None: if not hasattr(config, "sliding_window") or config.sliding_window is None: raise ValueError( "Setting `cache_implementation` to 'sliding_window' requires the model config supporting " "sliding window attention, please check if there is a `sliding_window` field in the model " "config and it's not set to None." ) max_cache_len = min(config.sliding_window, max_cache_len) super().__init__( config=config, batch_size=batch_size, max_cache_len=max_cache_len, device=device, dtype=dtype, max_batch_size=max_batch_size, layer_device_map=layer_device_map, ) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor]: cache_position = cache_kwargs.get("cache_position") k_out = self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] # assume this only happens in prefill phase when prompt length > sliding_window_size (= max_cache_len) if cache_position.shape[0] > self.max_cache_len: k_out = key_states[:, :, -self.max_cache_len :, :] v_out = value_states[:, :, -self.max_cache_len :, :] # Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out # we should return the whole states instead of k_out, v_out to take the whole prompt # into consideration when building kv cache instead of just throwing away tokens outside of the window return key_states, value_states slicing = torch.ones(self.max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0) cache_position = cache_position.clamp(0, self.max_cache_len - 1) to_shift = cache_position >= self.max_cache_len - 1 indices = (slicing + to_shift[-1].int() - 1) % self.max_cache_len k_out = k_out[:, :, indices] v_out = v_out[:, :, indices] try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: # The operator 'aten::index_copy.out' is not currently implemented for the MPS device. k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states # `_.zero()` followed by `+=` is equivalent `=`, but compile-friendly (without graph breaks due to assignment) self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return k_out, v_out def get_max_cache_shape(self) -> Optional[int]: return self.max_cache_len def reset(self): for layer_idx in range(len(self.key_cache)): # In-place ops prevent breaking the static address self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_()
class_definition
57,276
64,330
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
221
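The `indices` arithmetic in `SlidingWindowCache.update` is easiest to see with small numbers: once the window is full, `indices` maps slot `i` to slot `i + 1` (modulo the window), so taking `k_out[:, :, indices]` shifts every cached entry left by one and the clamped `cache_position` then writes the new token into the last slot. A sketch of just the index computation, with an illustrative window of 8:

```python
import torch

max_cache_len = 8                      # sliding window size
cache_position = torch.tensor([11])    # absolute position of the incoming token

slicing = torch.ones(max_cache_len, dtype=torch.long).cumsum(0)  # [1, 2, ..., 8]
cache_position = cache_position.clamp(0, max_cache_len - 1)      # [7]
to_shift = cache_position >= max_cache_len - 1                   # [True] -> window is full
indices = (slicing + to_shift[-1].int() - 1) % max_cache_len

print(indices)         # tensor([1, 2, 3, 4, 5, 6, 7, 0]) -> shift everything left by one slot
print(cache_position)  # tensor([7]) -> the new key/value is written into the last slot
```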
class EncoderDecoderCache(Cache): """ Base, abstract class for all encoder-decoder caches. Can be used to hold combinations of self-attention and cross-attention caches. Example: ```python >>> from transformers import AutoProcessor, AutoModelForCausalLM, DynamicCache, EncoderDecoderCache >>> model = AutoModelForCausalLM.from_pretrained("openai/whisper-small") >>> processor = AutoProcessor.from_pretrained("openai/whisper-small") >>> inputs = processor(audio=YOUR-AUDIO, return_tensors="pt") >>> # Prepare cache classes for encoder and decoder and pass it to model's forward >>> self_attention_cache = DynamicCache() >>> cross_attention_cache = DynamicCache() >>> past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation EncoderDecoderCache() ``` """ def __init__(self, self_attention_cache: Cache, cross_attention_cache: Cache): super().__init__() self.self_attention_cache = self_attention_cache self.cross_attention_cache = cross_attention_cache self.is_updated = {} for layer_idx in range(len(cross_attention_cache.key_cache)): self.is_updated[layer_idx] = bool(cross_attention_cache.get_seq_length(layer_idx) > 0) def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: """ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the sequence length. """ if layer_idx < len(self): return ( self.self_attention_cache.key_cache[layer_idx], self.self_attention_cache.value_cache[layer_idx], self.cross_attention_cache.key_cache[layer_idx], self.cross_attention_cache.value_cache[layer_idx], ) else: raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") def __len__(self): """ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds to the number of layers in the model. """ return len(self.self_attention_cache) def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: """Converts the `EncoderDecoderCache` instance into its equivalent in the legacy cache format.""" legacy_cache = () if len(self.cross_attention_cache) > 0: for self_attn, cross_attn in zip( self.self_attention_cache.to_legacy_cache(), self.cross_attention_cache.to_legacy_cache() ): legacy_cache += (self_attn + cross_attn,) else: legacy_cache = self.self_attention_cache.to_legacy_cache() return legacy_cache @classmethod def from_legacy_cache( cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None ) -> "EncoderDecoderCache": """Converts a cache in the legacy cache format into an equivalent `EncoderDecoderCache`.""" cache = cls( self_attention_cache=DynamicCache(), cross_attention_cache=DynamicCache(), ) if past_key_values is not None: for layer_idx in range(len(past_key_values)): key_states, value_states = past_key_values[layer_idx][:2] cache.self_attention_cache.update(key_states, value_states, layer_idx) if len(past_key_values[layer_idx]) > 2: key_states, value_states = past_key_values[layer_idx][2:] cache.cross_attention_cache.update(key_states, value_states, layer_idx) cache.is_updated[layer_idx] = True return cache def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. 
A layer index can be optionally passed.""" # check if empty list because in case of static cache it will be a tensors and we can't check `if not torch.Tensor` return self.self_attention_cache.get_seq_length(layer_idx) def reset(self): if hasattr(self.self_attention_cache, "reset"): self.self_attention_cache.reset() if hasattr(self.cross_attention_cache, "reset"): self.cross_attention_cache.reset() elif not hasattr(self.self_attention_cache, "reset") and not hasattr(self.cross_attention_cache, "reset"): raise ValueError( "Neither self nor cross-attention cache have valid `.reset()` methods. `.reset()` should " "only be called on compatible cache classes, such as `StaticCache` or `SlidingWindowCache`. " f"Got {self.self_attention_cache.__str__()} for the self attention cache and " f"{self.cross_attention_cache.__str__()} for the cross attention cache." ) for layer_idx in self.is_updated: self.is_updated[layer_idx] = False def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" self.self_attention_cache.reorder_cache(beam_idx) self.cross_attention_cache.reorder_cache(beam_idx) def check_dynamic_cache(self, method: str): if not ( isinstance(self.self_attention_cache, DynamicCache) and isinstance(self.cross_attention_cache, DynamicCache) ): raise ValueError( f"`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self " f"attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache." ) # TODO(gante, sanchit-gandhi): move following functionality into `.generate` def crop(self, maximum_length: int): """Crop the past key values up to a new `maximum_length` in terms of tokens. `maximum_length` can also be negative to remove `maximum_length` tokens. This is used in assisted decoding and contrastive search.""" self.check_dynamic_cache(self.crop.__name__) self.self_attention_cache.crop(maximum_length) @deprecate_kwarg("num_hidden_layers", version="4.47.0") def batch_split( self, full_batch_size: int, split_size: int, num_hidden_layers: int = None ) -> "List[EncoderDecoderCache]": """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by `_split_model_inputs()` in `generation.utils`""" self.check_dynamic_cache(self.batch_split.__name__) self_attention_cache = self.self_attention_cache.batch_split(full_batch_size, split_size) cross_attention_cache = self.cross_attention_cache.batch_split(full_batch_size, split_size) out = [] for self_attn, cross_attn in zip(self_attention_cache, cross_attention_cache): out.append(EncoderDecoderCache(self_attn, cross_attn)) return out @classmethod @deprecate_kwarg("num_hidden_layers", version="4.47.0") def from_batch_splits( cls, splits: List["EncoderDecoderCache"], num_hidden_layers: int = None ) -> "EncoderDecoderCache": """This is the opposite of the above `batch_split()` method. 
This will be used by `stack_model_outputs` in `generation.utils`""" self_attention_cache = DynamicCache() cross_attention_cache = DynamicCache() for idx in range(len(splits[0])): layer_keys = torch.cat([current.self_attention_cache.key_cache[idx] for current in splits], dim=0) layer_values = torch.cat([current.self_attention_cache.value_cache[idx] for current in splits], dim=0) self_attention_cache.update(layer_keys, layer_values, idx) layer_keys = torch.cat([current.cross_attention_cache.key_cache[idx] for current in splits], dim=0) layer_values = torch.cat([current.cross_attention_cache.value_cache[idx] for current in splits], dim=0) cross_attention_cache.update(layer_keys, layer_values, idx) return cls(self_attention_cache, cross_attention_cache) def batch_repeat_interleave(self, repeats: int): """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search.""" self.check_dynamic_cache(self.batch_repeat_interleave.__name__) self.self_attention_cache.batch_repeat_interleave(repeats) self.cross_attention_cache.batch_repeat_interleave(repeats) def batch_select_indices(self, indices: torch.Tensor): """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search.""" self.check_dynamic_cache(self.batch_select_indices.__name__) self.self_attention_cache.batch_select_indices(indices) self.cross_attention_cache.batch_select_indices(indices)
class_definition
64,333
73,469
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
222
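`to_legacy_cache` and `from_legacy_cache` round-trip the cache through the old tuple format, in which each layer holds four tensors: `(self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)`. A minimal round-trip with toy tensors (assumes a recent `transformers` version where `EncoderDecoderCache` is exported):

```python
import torch
from transformers import EncoderDecoderCache

num_layers = 2
legacy = tuple(
    (
        torch.randn(1, 4, 3, 8),  # self-attention key   (3 decoder tokens)
        torch.randn(1, 4, 3, 8),  # self-attention value
        torch.randn(1, 4, 5, 8),  # cross-attention key  (5 encoder tokens)
        torch.randn(1, 4, 5, 8),  # cross-attention value
    )
    for _ in range(num_layers)
)

cache = EncoderDecoderCache.from_legacy_cache(legacy)
print(len(cache))                           # 2 layers
print(cache.get_seq_length())               # 3 decoder tokens seen
print(cache.to_legacy_cache()[0][2].shape)  # torch.Size([1, 4, 5, 8])
```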
class HybridCache(Cache): """ Hybrid Cache class to be used with `torch.compile` for Gemma2 models that alternate between a local sliding window attention and global attention in every other layer. Under the hood, Hybrid Cache leverages ["SlidingWindowCache"] for sliding window attention and ["StaticCache"] for global attention. For more information, see the documentation of each subcomponeent cache class. Parameters: config (`PretrainedConfig): The configuration file defining the shape-related attributes required to initialize the static cache. batch_size (`int`): The batch size with which the model will be used. Note that a new instance must be instantiated if a smaller batch size is used. max_cache_len (`int`): The maximum sequence length with which the model will be used. device (`torch.device` or `str`, *optional*, defaults to `"cpu"`): The device on which the cache should be initialized. Should be the same as the layer. dtype (torch.dtype, *optional*, defaults to `torch.float32`): The default `dtype` to use when initializing the layer. layer_device_map(`Dict[int, Union[str, torch.device, int]]]`, `optional`): Mapping between the layers and its device. This is required when you are manually initializing the cache and the model is splitted between differents gpus. You can know which layers mapped to which device by checking the associated device_map: `model.hf_device_map`. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, HybridCache >>> model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b") >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b") >>> inputs = tokenizer(text="My name is Gemma", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate >>> max_generated_length = inputs.input_ids.shape[1] + 10 >>> past_key_values = HybridCache(config=model.config, batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values # access cache filled with key/values from generation HybridCache() ``` """ # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. def __init__( self, config: PretrainedConfig, batch_size: int = None, max_cache_len: int = None, device: Union[torch.device, str] = "cpu", dtype: torch.dtype = torch.float32, max_batch_size: Optional[int] = None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None, ) -> None: super().__init__() if batch_size is not None: logger.warning_once( f"The 'batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'max_batch_size' argument instead." ) if not hasattr(config, "sliding_window") or config.sliding_window is None: raise ValueError( "Setting `cache_implementation` to 'sliding_window' requires the model config supporting " "sliding window attention, please check if there is a `sliding_window` field in the model " "config and it's not set to None." 
) self.max_cache_len = max_cache_len self.max_batch_size = batch_size or max_batch_size # Some model define a custom `head_dim` != config.hidden_size // config.num_attention_heads self.head_dim = ( config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads ) self.dtype = dtype self.num_key_value_heads = ( config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads ) layer_switch = config.sliding_window_pattern if hasattr(config, "sliding_window_pattern") else 2 # 2 is for BC self.is_sliding = torch.tensor( [bool((i + 1) % layer_switch) for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device ) self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] global_cache_shape = (self.max_batch_size, self.num_key_value_heads, max_cache_len, self.head_dim) sliding_cache_shape = ( self.max_batch_size, self.num_key_value_heads, min(config.sliding_window, max_cache_len), self.head_dim, ) for i in range(config.num_hidden_layers): if layer_device_map is not None: layer_device = layer_device_map[i] else: layer_device = device # Note: `mark_static_address` is used to tag the cache as an fixed data pointer, preventing cuda graph # breaks when updating the cache. cache_shape = global_cache_shape if not self.is_sliding[i] else sliding_cache_shape new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) torch._dynamo.mark_static_address(new_layer_key_cache) torch._dynamo.mark_static_address(new_layer_value_cache) self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) def _sliding_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len): if cache_position.shape[0] > max_cache_len: k_out = key_states[:, :, -max_cache_len:, :] v_out = value_states[:, :, -max_cache_len:, :] # Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out # we should return the whole states instead of k_out, v_out to take the whole prompt # into consideration when building kv cache instead of just throwing away tokens outside of the window return key_states, value_states slicing = torch.ones(max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0) cache_position = cache_position.clamp(0, max_cache_len - 1) to_shift = cache_position >= max_cache_len - 1 indices = (slicing + to_shift[-1].int() - 1) % max_cache_len k_out = k_out[:, :, indices] v_out = v_out[:, :, indices] k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states # `_.zero()` followed by `+=` is equivalent `=`, but compile-friendly (without graph breaks due to assignment) self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return k_out, v_out def _static_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len): k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states self.key_cache[layer_idx] = k_out self.value_cache[layer_idx] = v_out return k_out, v_out def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor]: cache_position = cache_kwargs.get("cache_position") 
sliding_window = cache_kwargs.get("sliding_window") k_out = self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] if sliding_window: update_fn = self._sliding_update else: update_fn = self._static_update return update_fn( cache_position, layer_idx, key_states, value_states, k_out, v_out, k_out.shape[2], ) def get_max_cache_shape(self) -> Optional[int]: return self.max_cache_len def get_seq_length(self, layer_idx: Optional[int] = 0): # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's # limit the check to the first batch member and head dimension. # TODO: deprecate this function in favor of `cache_position` if layer_idx != 0: raise ValueError( "`get_seq_length` on `HybridCache` may get inconsistent results depending on the layer index. " "Using the `layer_idx` argument is not supported." ) return (self.key_cache[layer_idx][0, 0].any(dim=-1)).sum() def reset(self): """Resets the cache values while preserving the objects""" for layer_idx in range(len(self.key_cache)): # In-place ops prevent breaking the static address self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() @property def batch_size(self): logger.warning_once( f"The 'batch_size' attribute of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'self.max_batch_size' attribute instead." ) return self.max_batch_size
class_definition
73,472
83,284
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
223
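`HybridCache` decides per layer, at construction time, whether to allocate a full-length buffer (global attention) or a window-sized one (sliding attention); with the default `layer_switch = 2`, even-indexed layers are sliding and odd-indexed layers are global. A short sketch of that bookkeeping with illustrative sizes:

```python
import torch

num_hidden_layers = 6
layer_switch = 2  # config.sliding_window_pattern; 2 reproduces the alternating Gemma-2-style pattern

is_sliding = torch.tensor([bool((i + 1) % layer_switch) for i in range(num_hidden_layers)])
print(is_sliding)  # tensor([ True, False,  True, False,  True, False])

sliding_window, max_cache_len = 1024, 4096
for i, sliding in enumerate(is_sliding.tolist()):
    # Sliding layers get the smaller window-sized buffers, global layers the full-length ones
    length = min(sliding_window, max_cache_len) if sliding else max_cache_len
    print(f"layer {i}: cache length {length}")
```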
class MambaCache: """ Cache for mamba model which does not have attention mechanism and key value states. Arguments: config (`PretrainedConfig): The configuration file defining the shape-related attributes required to initialize the static cache. batch_size (`int`): The batch size with which the model will be used. Note that a new instance must be instantiated if a smaller batch size is used. dtype (`torch.dtype`, *optional*, defaults to `torch.float16`): The default `dtype` to use when initializing the layer. device (`torch.device` or `str`, *optional*): The device on which the cache should be initialized. Should be the same as the layer. Attributes: dtype: (`torch.dtype`): The default `dtype` used to initializing the cache. intermediate_size: (`int`): Model's intermediate_size taken from config. ssm_state_size: (`int`): Model's state_size taken from config. conv_kernel_size: (`int`): Model's convolution kernel size taken from config conv_states: (`torch.Tensor`): A tensor of shape `[layer_idx, batch_size, intermediate_size, conv_kernel_size]` that holds convolutional states. ssm_states: (`torch.Tensor`): A tensor of shape `[layer_idx, batch_size, intermediate_size, ssm_state_size]` that holds ssm states Example: ```python >>> from transformers import AutoTokenizer, MambaForCausalLM, MambaCache >>> model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf") >>> tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf") >>> inputs = tokenizer(text="My name is Mamba", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> past_key_values = MambaCache(config=model.config, batch_size=1, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> outputs.past_key_values MambaCache() ``` """ # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. def __init__( self, config: PretrainedConfig, batch_size: int = None, dtype: torch.dtype = torch.float16, device: Optional[Union[torch.device, str]] = None, max_batch_size: Optional[int] = None, ): if batch_size is not None: logger.warning_once( f"The 'batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'max_batch_size' argument instead." 
) self.dtype = dtype self.max_batch_size = batch_size or max_batch_size self.intermediate_size = config.intermediate_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.conv_states: torch.Tensor = torch.zeros( config.num_hidden_layers, self.max_batch_size, self.intermediate_size, self.conv_kernel_size, device=device, dtype=dtype, ) self.ssm_states: torch.Tensor = torch.zeros( config.num_hidden_layers, self.max_batch_size, self.intermediate_size, self.ssm_state_size, device=device, dtype=dtype, ) torch._dynamo.mark_static_address(self.conv_states) torch._dynamo.mark_static_address(self.ssm_states) def update_conv_state( self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor ) -> torch.Tensor: conv_state = self.conv_states[layer_idx] cache_position = cache_position.clamp(0, self.conv_kernel_size - 1) conv_state = conv_state.roll(shifts=-1, dims=-1) conv_state[:, :, cache_position] = new_conv_state.to(device=conv_state.device, dtype=conv_state.dtype) self.conv_states[layer_idx].zero_() self.conv_states[layer_idx] += conv_state return self.conv_states[layer_idx] def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor): self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device) return self.ssm_states[layer_idx] def reset(self): self.conv_states.zero_() self.ssm_states.zero_() @property def batch_size(self): logger.warning_once( f"The 'batch_size' attribute of {self.__class__.__name__} is deprecated and will be removed in " "v4.49. Use the more precisely named 'self.max_batch_size' attribute instead." ) return self.max_batch_size
class_definition
83,287
88,150
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
224
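`update_conv_state` keeps a fixed-size window of the last `conv_kernel` inputs per layer: the buffer is rolled left by one column and the newest input is written at the clamped `cache_position` (the last slot during incremental decoding). A toy sketch of a single decoding step, with illustrative shapes:

```python
import torch

batch, channels, conv_kernel = 1, 2, 4
# State already filled by the prompt: the columns hold inputs from positions 3, 4, 5, 6
conv_state = torch.arange(3, 7).float().expand(batch, channels, conv_kernel).clone()

# One decoding step: the input at position 7 arrives
cache_position = torch.tensor([7]).clamp(0, conv_kernel - 1)  # -> [3], the last slot
new_column = torch.full((batch, channels, 1), 7.0)

conv_state = conv_state.roll(shifts=-1, dims=-1)  # drop the oldest column
conv_state[:, :, cache_position] = new_column     # append the newest one

print(conv_state[0, 0])  # tensor([4., 5., 6., 7.])
```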
class OffloadedStaticCache(StaticCache): """ Static cache class to be used with `torch.compile(model)` that offloads to the CPU or another device. Args: config (`PretrainedConfig): The configuration file defining the shape-related attributes required to initialize the static cache. max_batch_size (`int`): The maximum batch size with which the model will be used. max_cache_len (`int`): The maximum sequence length with which the model will be used. device (`Union[str, torch.device]`): The device on which the cache should be initialized. Should be the same as the layer device. dtype (`torch.dtype`, *optional*): The default `dtype` to use when initializing the cache. offload_device (`Union[str, torch.device]`, *optional*, defaults to `cpu`): The device to offload to. Defaults to CPU. layer_device_map (`Dict[int, Union[str, torch.device, int]]`, *optional*): Mapping between the layers and its device. This is required when you are manually initializing the cache and the model is splitted between differents gpus. You can know which layers mapped to which device by checking the associated device_map: `model.hf_device_map`. Attributes: key_cache (`List[torch.Tensor]`): Off-loaded key cache tensors. First one will be on device, where-as the others are off-loaded. value_cache (`List[torch.Tensor]`): Off-loaded value cache tensors. First one will be on device, where-as the others are off-loaded. max_batch_size (`int`): The maximum batch size with which this cache can be used. max_cache_len (`int`): The maximum sequence length with which this cache can be used. device (`torch.device`): The device on which the cache is used. offload_device (`torch.device`): The device used to offload to. dtype (`torch.dtype`): The `dtype` used to initializing the cache. Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, OffloadedStaticCache >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt") >>> # Prepare a cache class and pass it to model's forward >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate >>> max_generated_length = inputs.input_ids.shape[1] + 10 >>> past_key_values = OffloadedStaticCache(config=model.config, max_batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype) >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True) >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation ``` """ def __init__( self, config: PretrainedConfig, max_batch_size: int, max_cache_len: Optional[int], device: Union[str, torch.device], dtype: Optional[torch.dtype] = None, offload_device: Union[str, torch.device] = torch.device("cpu"), layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None, ) -> None: self.max_batch_size = max_batch_size self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len self.device = torch.device(device) if layer_device_map is None else layer_device_map[0] self.offload_device = torch.device(offload_device) self.dtype = dtype if dtype is not None else torch.float32 # Some model define a custom `head_dim` != config.hidden_size // config.num_attention_heads head_dim = config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads num_key_value_heads = ( 
config.num_attention_heads if getattr(config, "num_key_value_heads", None) is None else config.num_key_value_heads ) cache_shape = (max_batch_size, num_key_value_heads, self.max_cache_len, head_dim) # Create offloaded CPU tensors. self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] for i in range(config.num_hidden_layers): # First layer is always on-device. device = self.device if i == 0 else self.offload_device key_cache, value_cache = self._create_key_value_cache_tensors(cache_shape, device) self.key_cache.append(key_cache) self.value_cache.append(value_cache) # Create device tensors. self._device_key_cache: List[torch.Tensor] = [] self._device_value_cache: List[torch.Tensor] = [] for i in range(2): key_cache, value_cache = self._create_key_value_cache_tensors(cache_shape, self.device) self._device_key_cache.append(key_cache) self._device_value_cache.append(value_cache) # For backwards compatibility. # TODO(gante): Remove this. self._seen_tokens = 0 # Create new CUDA stream for parallel prefetching. self._prefetch_stream = torch.cuda.Stream() if self.device.type == "cuda" else None def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. It is VERY important to index using a tensor, otherwise you introduce a copy to the device. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, *optional*): Additional arguments for the cache subclass. The `OffloadedStaticCache` needs the `cache_position` input to know how where to write in the cache. Return: A tuple containing the updated key and value states. """ if layer_idx == 0: # Update seen tokens. # TODO(gante): Remove this. self._seen_tokens += key_states.shape[-2] # Always there. k_out = self.key_cache[0] v_out = self.value_cache[0] else: # Wait for prefetch stream. if self._prefetch_stream is not None: torch.cuda.default_stream(self.device).wait_stream(self._prefetch_stream) k_out = self._device_key_cache[layer_idx & 1] v_out = self._device_value_cache[layer_idx & 1] self._prefetch_layer(layer_idx + 1) cache_position = cache_kwargs.get("cache_position") if cache_kwargs is not None else None if cache_position is None: k_out.copy_(key_states) v_out.copy_(value_states) # Copy the values to the offloaded device as well. if layer_idx == 0: self.key_cache[layer_idx].copy_(key_states.to(self.offload_device)) self.value_cache[layer_idx].copy_(value_states.to(self.offload_device)) else: # Note: here we use `tensor.index_copy_(dim, index, tensor)` that is equivalent to # `tensor[:, :, index] = tensor`, but the first one is compile-friendly and it does # explicitly an in-place operation, that avoids copies and uses less memory. try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: # The operator 'aten::index_copy.out' is not currently implemented for the MPS # device. k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states # Copy the values to the offloaded device as well. 
if layer_idx != 0: cache_position = cache_position.to(self.offload_device) key_states = key_states.to(self.offload_device) value_states = value_states.to(self.offload_device) try: self.key_cache[layer_idx].index_copy_(2, cache_position, key_states) self.value_cache[layer_idx].index_copy_(2, cache_position, value_states) except NotImplementedError: # The operator 'aten::index_copy.out' is not currently implemented for the MPS # device. self.key_cache[layer_idx][:, :, cache_position] = key_states self.value_cache[layer_idx][:, :, cache_position] = value_states return k_out, v_out def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states that were seen by the model.""" # TODO(gante): Remove this. return self._seen_tokens def get_max_cache_shape(self) -> Optional[int]: """Returns the maximum sequence length of the cached states.""" return self.max_cache_len def reset(self) -> None: """Resets the cache values while preserving the objects.""" # For backwards compatibility. # TODO(gante): Remove this. self._seen_tokens = 0 # Zero out cache. for layer_idx in range(len(self.key_cache)): # In-place ops prevent breaking the static address. self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() @property def seen_tokens(self) -> int: # For backwards compatibility. # TODO(gante): Remove this. return self._seen_tokens def _create_key_value_cache_tensors( self, shape: Tuple[int, ...], device: torch.device ) -> Tuple[torch.Tensor, torch.Tensor]: """Creates K/V cache tensors on a device. Pins memory for CPU tensors. Marks them as static addresses for non-CPU tensors. Args: shape (`Tuple[int, ...]`): Shape. device (`torch.device`): Device. Returns: Key and value cache tensors as a tuple. """ is_cpu_device = device == torch.device("cpu") key_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device) value_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device) # Note: `mark_static_address` is used to tag the cache as a fixed data pointer, # preventing compiled graph breaks when updating the cache. torch._dynamo.mark_static_address(key_cache) torch._dynamo.mark_static_address(value_cache) return key_cache, value_cache def _prefetch_layer(self, layer_idx: int) -> None: """Prefetch a layer to the device. Needs to be called in order of layer indices.""" # Don't fetch layers that do not exist. if layer_idx >= len(self.key_cache): return # Alternate between two on-device caches. if self._prefetch_stream is not None: with torch.cuda.stream(self._prefetch_stream): self._prefetch_layer_in_context(layer_idx) else: self._prefetch_layer_in_context(layer_idx) def _prefetch_layer_in_context(self, layer_idx: int) -> None: """Performs the actual copy of the layer to device cache.""" self._device_key_cache[layer_idx & 1].copy_(self.key_cache[layer_idx], non_blocking=True) self._device_value_cache[layer_idx & 1].copy_(self.value_cache[layer_idx], non_blocking=True)
class_definition
88,153
100,342
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
null
225
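`OffloadedStaticCache` keeps the full per-layer buffers on the offload device and only two buffers on the compute device, alternating between them with `layer_idx & 1` while the next layer is prefetched on a side CUDA stream. A CPU-only sketch of that double-buffering pattern (no streams, illustrative shapes):

```python
import torch

num_layers, shape = 4, (1, 2, 8, 4)
offloaded = [torch.randn(shape) for _ in range(num_layers)]  # stand-in for the CPU-offloaded caches
device_buffers = [torch.empty(shape), torch.empty(shape)]    # only two on-device slots

def prefetch(layer_idx):
    if layer_idx < num_layers:
        device_buffers[layer_idx & 1].copy_(offloaded[layer_idx])

prefetch(0)
for layer_idx in range(num_layers):
    current = device_buffers[layer_idx & 1]  # buffer holding this layer's cache
    prefetch(layer_idx + 1)                  # in the real class this overlaps with attention compute
    assert torch.equal(current, offloaded[layer_idx])
print("every layer was served from just two device buffers")
```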
class AttentionMaskConverter: """ A utility attention mask class that allows one to: - Create a causal 4d mask - Create a causal 4d mask with slided window - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores Examples: ```python >>> import torch >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter >>> converter = AttentionMaskConverter(True) >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32) tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) ``` Parameters: is_causal (`bool`): Whether the attention mask should be a uni-directional (causal) or bi-directional mask. sliding_window (`int`, *optional*): Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ is_causal: bool sliding_window: int def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window if self.sliding_window is not None and self.sliding_window <= 0: raise ValueError( f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`" ) def to_causal_4d( self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: Union[torch.device, "str"] = "cpu", ) -> Optional[torch.Tensor]: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative bias to upper right hand triangular matrix (causal mask). """ if not self.is_causal: raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") # If shape is not cached, create a new causal mask and cache it input_shape = (batch_size, query_length) past_key_values_length = key_value_length - query_length # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None if input_shape[-1] > 1 or self.sliding_window is not None: causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window, ) return causal_4d_mask def to_4d( self, attention_mask_2d: torch.Tensor, query_length: int, dtype: torch.dtype, key_value_length: Optional[int] = None, ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is causal, a causal mask will be added. """ input_shape = (attention_mask_2d.shape[0], query_length) # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: if key_value_length is None: raise ValueError( "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask." 
) past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window, ) elif self.sliding_window is not None: raise NotImplementedError("Sliding window is currently only implemented for causal masking") # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( attention_mask_2d.device ) if causal_4d_mask is not None: expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) # expanded_attn_mask + causal_4d_mask can cause some overflow expanded_4d_mask = expanded_attn_mask return expanded_4d_mask @staticmethod def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None, ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) # add lower triangular sliding window mask if necessary if sliding_window is not None: diagonal = past_key_values_length - sliding_window - 1 context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal) # Recent changes in PyTorch prevent mutations on tensors converted with aten::_to_copy # See https://github.com/pytorch/pytorch/issues/127571 if is_torchdynamo_compiling(): mask = mask.clone() mask.masked_fill_(context_mask, torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) @staticmethod def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) @staticmethod def _unmask_unattended( expanded_mask: torch.FloatTensor, min_dtype: float, ): # fmt: off """ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. Details: https://github.com/pytorch/pytorch/issues/110213 `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len]. `attention_mask` is [bsz, src_seq_len]. The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias. For example, if `expanded_mask` is (e.g. 
here left-padding case) ``` [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]]] ``` then the modified `expanded_mask` will be ``` [[[[1, 1, 1], <-- modified [1, 1, 1], <-- modified [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], <-- modified [0, 1, 0], [0, 1, 1]]]] ``` """ # fmt: on if expanded_mask.dtype == torch.bool: raise ValueError( "AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor." ) return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True)) @staticmethod def _ignore_causal_mask_sdpa( attention_mask: Optional[torch.Tensor], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int] = None, is_training: bool = False, ) -> bool: """ Detects whether the optional user-specified attention_mask & the automatically created causal mask can be ignored in case PyTorch's SDPA is used, rather relying on SDPA's `is_causal` argument. In case no token is masked in the `attention_mask` argument, if `query_length == 1` or `key_value_length == query_length`, we rather rely on SDPA `is_causal` argument to use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). """ _, query_length = inputs_embeds.shape[0], inputs_embeds.shape[1] key_value_length = query_length + past_key_values_length is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = False if attention_mask is None: # TODO: When tracing with TorchDynamo with fullgraph=True, the model is recompiled depending on the input # shape, thus SDPA's `is_causal` argument is rightfully updated # (see https://gist.github.com/fxmarty/1313f39037fc1c112508989628c57363). However, when using # `torch.export` or `torch.onnx.dynamo_export`, we must pass an example input, and `is_causal` behavior is # hard-coded. If a user exports a model with q_len > 1, the exported model will hard-code `is_causal=True` # which is in general wrong (see https://github.com/pytorch/pytorch/issues/108108). # Thus, we only set `ignore_causal_mask = True` if the model is set to training. # # Besides, jit.trace can not handle the `q_len > 1` condition for `is_causal` # ("TypeError: scaled_dot_product_attention(): argument 'is_causal' must be bool, not Tensor"). if ( (is_training or not is_tracing) and (query_length == 1 or key_value_length == query_length) and (sliding_window is None or key_value_length < sliding_window) ): ignore_causal_mask = True elif sliding_window is None or key_value_length < sliding_window: if len(attention_mask.shape) == 4: return False elif not is_tracing and torch.all(attention_mask == 1): if query_length == 1 or key_value_length == query_length: # For query_length == 1, causal attention and bi-directional attention are the same. ignore_causal_mask = True # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore # the attention mask, as SDPA causal mask generation may be wrong. We will set `is_causal=False` in # SDPA and rely on Transformers attention_mask instead, hence not setting it to None here. # Reference: https://github.com/pytorch/pytorch/issues/108108 # TODO: maybe revisit this with https://github.com/pytorch/pytorch/pull/114823 in PyTorch 2.3. return ignore_causal_mask
class_definition
773
13,019
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_attn_mask_utils.py
null
226
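The AttentionMaskConverter entry above boils down to two pieces of arithmetic: an additive causal mask (0 where a query may attend, dtype-min where it may not, with cached keys always visible) and a padding mask expanded from `[bsz, seq_len]` to `[bsz, 1, tgt_len, src_len]`. The sketch below reproduces that arithmetic in isolation; it is illustrative only, and `build_causal_mask` / `apply_padding` are hypothetical helper names, not the library's API.

```python
import torch


def build_causal_mask(tgt_len: int, past_len: int, dtype: torch.dtype) -> torch.Tensor:
    # 0 where a query may attend, dtype-min where it may not.
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
    mask = torch.triu(mask, diagonal=1)          # strictly-upper triangle stays masked
    if past_len > 0:                             # cached keys are visible to every query
        mask = torch.cat([torch.zeros(tgt_len, past_len), mask], dim=-1)
    return mask.to(dtype)[None, None, :, :]      # [1, 1, tgt_len, tgt_len + past_len]


def apply_padding(causal: torch.Tensor, padding_2d: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # padding_2d is [bsz, src_len] with 1 = real token, 0 = padding.
    bsz = padding_2d.shape[0]
    mask = causal.expand(bsz, -1, -1, -1).clone()
    return mask.masked_fill(padding_2d[:, None, None, :] == 0, torch.finfo(dtype).min)


padding = torch.tensor([[1, 1, 1], [0, 1, 1]])   # second sample is left-padded
causal = build_causal_mask(tgt_len=3, past_len=0, dtype=torch.float32)
full_4d = apply_padding(causal, padding, torch.float32)
print(full_4d.shape)                             # torch.Size([2, 1, 3, 3])
```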
class TransposeType(ExplicitEnum): """ Possible ... """ NO = "no" SIMPLE = "simple" CONV1D = "conv1d" CONV2D = "conv2d"
class_definition
1,132
1,280
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_pytorch_utils.py
null
227
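TransposeType above simply names the layout change a TensorFlow weight needs before it can be copied into the matching PyTorch parameter. A hedged sketch of how such a flag can drive the conversion is below; `flip_weight_layout` is a hypothetical helper using the usual TF/PyTorch kernel conventions, not code taken from modeling_tf_pytorch_utils.py.

```python
import numpy as np


def flip_weight_layout(weight: np.ndarray, transpose: str) -> np.ndarray:
    if transpose == "conv2d":        # TF [H, W, in, out] -> PT [out, in, H, W]
        return weight.transpose(3, 2, 0, 1)
    if transpose == "conv1d":        # TF [W, in, out]    -> PT [out, in, W]
        return weight.transpose(2, 1, 0)
    if transpose == "simple":        # plain 2D transpose for linear layers
        return weight.T
    return weight                    # "no": layouts already match


tf_kernel = np.zeros((3, 3, 16, 32))                      # a TF-style conv2d kernel
print(flip_weight_layout(tf_kernel, "conv2d").shape)      # (32, 16, 3, 3)
```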
class PreTrainedTokenizerFast(PreTrainedTokenizerBase): """ Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class: PreTrainedTokenizer = None def __init__(self, *args, **kwargs): tokenizer_object = kwargs.pop("tokenizer_object", None) slow_tokenizer = kwargs.pop("__slow_tokenizer", None) gguf_file = kwargs.pop("gguf_file", None) fast_tokenizer_file = kwargs.pop("tokenizer_file", None) from_slow = kwargs.pop("from_slow", False) added_tokens_decoder = kwargs.pop("added_tokens_decoder", {}) self.add_prefix_space = kwargs.get("add_prefix_space", False) if from_slow and slow_tokenizer is None and self.slow_tokenizer_class is None: raise ValueError( "Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you " "have sentencepiece installed." ) if tokenizer_object is not None: fast_tokenizer = copy.deepcopy(tokenizer_object) elif fast_tokenizer_file is not None and not from_slow: # We have a serialization from tokenizers which let us directly build the backend fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) elif slow_tokenizer: # We need to convert a slow tokenizer to build the backend fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif gguf_file is not None: # We need to convert a slow tokenizer to build the backend gguf_param = load_gguf_checkpoint(kwargs.get("vocab_file")) architecture = gguf_param["config"]["model_type"] tokenizer_dict = gguf_param["tokenizer"] tokenizer_config = gguf_param["tokenizer_config"] fast_tokenizer, additional_kwargs = convert_gguf_tokenizer(architecture, tokenizer_dict) kwargs.update(tokenizer_config) if len(additional_kwargs) > 0: kwargs.update(additional_kwargs) elif self.slow_tokenizer_class is not None and slow_tokenizer is not False: # We need to create and convert a slow tokenizer to build the backend slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif not slow_tokenizer: # We tried loading a slow_tokenizer with spm and failed, try to load with tiktoken self.vocab_file = kwargs.get("vocab_file", None) self.additional_special_tokens = kwargs.get("additional_special_tokens", []) fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True) slow_tokenizer = None else: raise ValueError( "Couldn't instantiate the backend tokenizer from one of: \n" "(1) a `tokenizers` library serialization file, \n" "(2) a slow tokenizer instance to convert or \n" "(3) an equivalent slow tokenizer class to instantiate and convert. \n" "You need to have sentencepiece or tiktoken installed to convert a slow tokenizer to a fast one." 
) self._tokenizer = fast_tokenizer if slow_tokenizer is not None: kwargs.update(slow_tokenizer.init_kwargs) self._decode_use_source_tokenizer = False _truncation = self._tokenizer.truncation if _truncation is not None: self._tokenizer.enable_truncation(**_truncation) kwargs.setdefault("max_length", _truncation["max_length"]) kwargs.setdefault("truncation_side", _truncation["direction"]) kwargs.setdefault("stride", _truncation["stride"]) kwargs.setdefault("truncation_strategy", _truncation["strategy"]) else: self._tokenizer.no_truncation() _padding = self._tokenizer.padding if _padding is not None: self._tokenizer.enable_padding(**_padding) kwargs.setdefault("pad_token", _padding["pad_token"]) kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"]) kwargs.setdefault("padding_side", _padding["direction"]) kwargs.setdefault("max_length", _padding["length"]) kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"]) # We call this after having initialized the backend tokenizer because we update it. super().__init__(**kwargs) self._tokenizer.encode_special_tokens = self.split_special_tokens added_tokens_decoder_hash = {hash(repr(token)) for token in self.added_tokens_decoder} tokens_to_add = [ token for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0]) if hash(repr(token)) not in added_tokens_decoder_hash ] encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add] # if some of the special tokens are strings, we check if we don't already have a token tokens_to_add += [ token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add ] if len(tokens_to_add) > 0: tokens = [] special_tokens = self.all_special_tokens for token in tokens_to_add: is_special = ( (token.special or str(token) in special_tokens) if isinstance(token, AddedToken) else str(token) in special_tokens ) if isinstance(token, str): token = AddedToken(token, special=is_special) else: token.special = is_special tokens.append(token) if tokens: self.add_tokens(tokens) try: pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", self.add_prefix_space) != self.add_prefix_space: pre_tok_class = getattr(pre_tokenizers_fast, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = self.add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) except Exception: # We'll get an error if there is no pre_tokenizer, or if it's a custom pre_tokenizer that can # not be serialized. In those cases, we just ignore the error as there's no pre_tokenizer # for which we need to update the `add_prefix_space` attribute. pass @property def is_fast(self) -> bool: return True @property def can_save_slow_tokenizer(self) -> bool: """ `bool`: Whether or not the slow tokenizer can be saved. Usually for sentencepiece based slow tokenizer, this can only be `True` if the original `"sentencepiece.model"` was not deleted. """ return True @property def vocab_size(self) -> int: """ `int`: Size of the base vocabulary (without the added tokens). """ return self._tokenizer.get_vocab_size(with_added_tokens=False) def get_vocab(self) -> Dict[str, int]: return self._tokenizer.get_vocab(with_added_tokens=True) @property def vocab(self) -> Dict[str, int]: return self.get_vocab() @property def added_tokens_encoder(self) -> Dict[str, int]: """ Returns the sorted mapping from string to index. 
The added tokens encoder is cached for performance optimisation in `self._added_tokens_encoder` for the slow tokenizers. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} @property def added_tokens_decoder(self) -> Dict[int, AddedToken]: """ Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `Dict[str, int]`: The added tokens. """ return self._tokenizer.get_added_tokens_decoder() def get_added_vocab(self) -> Dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: `Dict[str, int]`: The added tokens. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} def __len__(self) -> int: """ Size of the full vocabulary with the added tokens. """ return self._tokenizer.get_vocab_size(with_added_tokens=True) @property def backend_tokenizer(self) -> TokenizerFast: """ `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. """ return self._tokenizer @property def decoder(self) -> DecoderFast: """ `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. """ return self._tokenizer.decoder def _convert_encoding( self, encoding: EncodingFast, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> Tuple[Dict[str, Any], List[EncodingFast]]: """ Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list of encodings, take care of building a batch from overflowing tokens. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). Output shape: (overflows, sequence length) """ if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_overflowing_tokens and encoding.overflowing is not None: encodings = [encoding] + encoding.overflowing else: encodings = [encoding] encoding_dict = defaultdict(list) for e in encodings: encoding_dict["input_ids"].append(e.ids) if return_token_type_ids: encoding_dict["token_type_ids"].append(e.type_ids) if return_attention_mask: encoding_dict["attention_mask"].append(e.attention_mask) if return_special_tokens_mask: encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) if return_offsets_mapping: encoding_dict["offset_mapping"].append(e.offsets) if return_length: encoding_dict["length"].append(len(e.ids)) return encoding_dict, encodings def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, List[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a Iterable of ids), using the vocabulary. Args: tokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `List[int]`: The token id or list of token ids. 
""" if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) return [self._convert_token_to_id_with_added_voc(token) for token in tokens] def _convert_token_to_id_with_added_voc(self, token: str) -> int: index = self._tokenizer.token_to_id(token) if index is None: return self.unk_token_id return index def _convert_id_to_token(self, index: int) -> Optional[str]: return self._tokenizer.id_to_token(int(index)) def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int: if special_tokens: return self._tokenizer.add_special_tokens(new_tokens) return self._tokenizer.add_tokens(new_tokens) def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. """ return self._tokenizer.num_special_tokens_to_add(pair) def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[str, List[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `List[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `List[str]`: The decoded token(s). """ if isinstance(ids, int): return self._tokenizer.id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue tokens.append(self._tokenizer.id_to_token(index)) return tokens def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens() def set_truncation_and_padding( self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int], padding_side: Optional[bool], ): """ Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section. Args: padding_strategy ([`~utils.PaddingStrategy`]): The kind of padding that will be applied to the input truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): The kind of truncation that will be applied to the input max_length (`int`): The maximum size of a sequence. stride (`int`): The stride to use when handling overflow. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. 
Default value is picked from the class attribute of the same name. """ _truncation = self._tokenizer.truncation _padding = self._tokenizer.padding # Set truncation and padding on the backend tokenizer if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE: if _truncation is not None: self._tokenizer.no_truncation() else: target = { "max_length": max_length, "stride": stride, "strategy": truncation_strategy.value, "direction": self.truncation_side, } # _truncation might contain more keys that the target `transformers` # supports. Use only the target keys to trigger `enable_truncation`. # This should enable this code to works on various `tokenizers` # targets. if _truncation is None: current = None else: current = {k: _truncation.get(k, None) for k in target} if current != target: self._tokenizer.enable_truncation(**target) if padding_strategy == PaddingStrategy.DO_NOT_PAD: if _padding is not None: self._tokenizer.no_padding() else: length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None target = { "length": length, "direction": padding_side if padding_side is not None else self.padding_side, "pad_id": self.pad_token_id, "pad_token": self.pad_token, "pad_type_id": self.pad_token_type_id, "pad_to_multiple_of": pad_to_multiple_of, } if _padding != target: self._tokenizer.enable_padding(**target) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, (tuple, list)): raise TypeError( f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})" ) # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if self._tokenizer.encode_special_tokens != split_special_tokens: self._tokenizer.encode_special_tokens = split_special_tokens encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_split_into_words, ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, ) for encoding in encodings ] # 
Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0].keys(): stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: batched_input = [(text, text_pair)] if text_pair else [text] batched_output = self._batch_encode_plus( batched_input, is_split_into_words=is_split_into_words, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: (value[0] if len(value) > 0 and isinstance(value[0], list) else value) for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def convert_tokens_to_string(self, tokens: List[str]) -> str: return ( self.backend_tokenizer.decoder.decode(tokens) if self.backend_tokenizer.decoder is not None else " ".join(tokens) ) def _decode( self, token_ids: 
Union[int, List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) if isinstance(token_ids, int): token_ids = [token_ids] text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def _save_pretrained( self, save_directory: Union[str, os.PathLike], file_names: Tuple[str], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, ) -> Tuple[str]: """ Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON file containing {config + vocab + added-tokens}. """ save_directory = str(save_directory) if self.slow_tokenizer_class is None and legacy_format is True: raise ValueError( "Your tokenizer does not have a legacy version defined and therefore cannot register this version. You" " might consider leaving the legacy_format at `None` or setting it to `False`." ) save_slow = ( (legacy_format is None or legacy_format is True) and self.slow_tokenizer_class is not None and self.can_save_slow_tokenizer ) save_fast = legacy_format is None or legacy_format is False if save_slow: added_tokens_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE ) # make sure to be foward compatible added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} if added_vocab: with open(added_tokens_file, "w", encoding="utf-8") as f: out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" f.write(out_str) vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) file_names = file_names + vocab_files + (added_tokens_file,) if save_fast: tokenizer_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE ) self.backend_tokenizer.save(tokenizer_file) file_names = file_names + (tokenizer_file,) return file_names def train_new_from_iterator( self, text_iterator, vocab_size, length=None, new_special_tokens=None, special_tokens_map=None, **kwargs, ): """ Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one. Args: text_iterator (generator of `List[str]`): The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory. vocab_size (`int`): The size of the vocabulary you want for your tokenizer. length (`int`, *optional*): The total number of sequences in the iterator. This is used to provide meaningful progress tracking new_special_tokens (list of `str` or `AddedToken`, *optional*): A list of new special tokens to add to the tokenizer you are training. special_tokens_map (`Dict[str, str]`, *optional*): If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. Returns: [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on `text_iterator`. 
""" tokenizer_json = json.loads(self._tokenizer.to_str()) # Remove added tokens for now (uses IDs of tokens) added_tokens = tokenizer_json.pop("added_tokens") # Remove post processor for now (uses IDs of tokens) post_processor = tokenizer_json.pop("post_processor") unk_token = None # Remove vocab if tokenizer_json["model"]["type"] == "BPE": tokenizer_json["model"]["vocab"] = {} tokenizer_json["model"]["merges"] = [] elif tokenizer_json["model"]["type"] == "Unigram": if tokenizer_json["model"]["unk_id"] is not None: unk_id = tokenizer_json["model"]["unk_id"] unk_token = tokenizer_json["model"]["vocab"][unk_id][0] if special_tokens_map is not None and unk_token in special_tokens_map: unk_token = special_tokens_map[unk_token] tokenizer_json["model"]["unk_id"] = 0 tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]] elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]: tokenizer_json["model"]["vocab"] = {} else: raise ValueError( f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) " "only BPE, Unigram, WordLevel and WordPiece." ) if ( special_tokens_map is not None and "unk_token" in tokenizer_json["model"] and tokenizer_json["model"]["unk_token"] in special_tokens_map ): tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]] tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json)) # Get the special tokens from the current tokenizer if none are specified. special_tokens = [] for added_token in added_tokens: special = added_token.pop("special", None) _ = added_token.pop("id", None) if tokenizer_json["model"]["type"] != "Unigram" and not special: continue if special_tokens_map is not None and added_token["content"] in special_tokens_map: added_token["content"] = special_tokens_map[added_token["content"]] special_tokens.append(AddedToken(**added_token)) if new_special_tokens is not None: special_tokens.extend(new_special_tokens) # Trainer needs to know the end of word / continuing subword thingies in BPE if ( tokenizer_json["model"]["type"] == "BPE" and "continuing_subword_prefix" not in kwargs and tokenizer_json["model"]["continuing_subword_prefix"] is not None ): kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"] if ( tokenizer_json["model"]["type"] == "BPE" and "end_of_word_suffix" not in kwargs and tokenizer_json["model"]["end_of_word_suffix"] is not None ): kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"] if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None: kwargs["unk_token"] = unk_token if tokenizer_json["pre_tokenizer"] is not None: if ( tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel" or tokenizer_json["pre_tokenizer"]["type"] == "Sequence" and "pretokenizers" in tokenizer_json["pre_tokenizer"] and any( pretokenizer["type"] == "ByteLevel" for pretokenizer in tokenizer_json["pre_tokenizer"]["pretokenizers"] ) ): kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet() trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]] trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs) tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer) if post_processor is not None: trained_tokenizer_json = json.loads(tokenizer.to_str()) # Almost done, we just have to adjust the token IDs in the post processor if "special_tokens" in post_processor: for key in post_processor["special_tokens"]: tokens = 
post_processor["special_tokens"][key]["tokens"] if special_tokens_map is not None: tokens = [special_tokens_map.get(token, token) for token in tokens] post_processor["special_tokens"][key]["tokens"] = tokens for token in tokens: token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError( "Attempted to set a token in the post processor that does not exist in the mapping" ) post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens] for special_token in ["cls", "sep"]: if special_token in post_processor: token, _ = post_processor[special_token] if special_tokens_map is not None and token in special_tokens_map: token = special_tokens_map[token] token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError( "Attempted to set a token in the post processor that does not exist in the mapping" ) post_processor[special_token] = [token, token_id] trained_tokenizer_json["post_processor"] = post_processor tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json)) kwargs = self.init_kwargs.copy() # Map pad/cls/mask token at the Transformers level special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") for token in special_tokens_list: if getattr(self, token) is not None: special_token = getattr(self, token) if special_tokens_map is not None and special_token in special_tokens_map: special_token = special_tokens_map[special_token] special_token_full = self._special_tokens_map.get(token, None) if isinstance(special_token_full, AddedToken): # Create an added token with the same parameters except the content kwargs[token] = AddedToken( special_token, single_word=special_token_full.single_word, lstrip=special_token_full.lstrip, rstrip=special_token_full.rstrip, normalized=special_token_full.normalized, special=True, ) else: kwargs[token] = special_token additional_special_tokens = self.additional_special_tokens if new_special_tokens is not None: additional_special_tokens.extend(new_special_tokens) if len(additional_special_tokens) > 0: kwargs["additional_special_tokens"] = additional_special_tokens return self.__class__(tokenizer_object=tokenizer, **kwargs)
class_definition
2,915
40,723
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
null
228
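A short usage sketch for the fast-tokenizer API defined above. It assumes the `bert-base-uncased` checkpoint can be downloaded or is cached locally, and the corpus passed to `train_new_from_iterator` is a toy placeholder.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
assert tok.is_fast

# Offset mappings are only available with the Rust-backed fast tokenizers.
enc = tok("Hello world!", return_offsets_mapping=True)
print(enc["input_ids"])
print(enc["offset_mapping"])

# Retrain the same tokenization pipeline on a new corpus
# (see train_new_from_iterator in the class above).
corpus = [["low lower lowest", "new newer newest"]]
new_tok = tok.train_new_from_iterator(corpus, vocab_size=500)
print(len(new_tok))   # size of the retrained vocabulary, added tokens included
```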
class ModuleUtilsMixin: """ A few utilities for `torch.nn.Modules`, to be used as a mixin. """ @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except ImportError: raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except ImportError: raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) return None def add_memory_hooks(self): """ Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`. """ for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): """ Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). """ for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: """ Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (`torch.Tensor`): An attention mask. Returns: `torch.Tensor`: The inverted attention mask. """ if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min return encoder_extended_attention_mask @staticmethod def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): if device is not None: warnings.warn( "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning ) else: device = attention_mask.device batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] return extended_attention_mask def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ if dtype is None: dtype = self.dtype if not (attention_mask.dim() == 2 and self.config.is_decoder): # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` if device is not None: warnings.warn( "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( input_shape, attention_mask, device ) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min return extended_attention_mask def get_head_mask( self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False ) -> Tensor: """ Prepare the head mask if needed. Args: head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. is_attention_chunked (`bool`, *optional*, defaults to `False`): Whether or not the attentions scores are computed by chunks or not. Returns: `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility return head_mask def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: """ Get number of (optionally, trainable or non-embeddings) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters. 
""" if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) ] total_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] else: total_parameters = list(self.parameters()) total_numel = [] is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False) if is_loaded_in_4bit: if is_bitsandbytes_available(): import bitsandbytes as bnb else: raise ValueError( "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong" " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. " ) for param in total_parameters: if param.requires_grad or not only_trainable: # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are # used for the 4bit quantization (uint8 tensors are stored) if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): if hasattr(param, "element_size"): num_bytes = param.element_size() elif hasattr(param, "quant_storage"): num_bytes = param.quant_storage.itemsize else: num_bytes = 1 total_numel.append(param.numel() * 2 * num_bytes) else: total_numel.append(param.numel()) return sum(total_numel) def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: """ Helper function to estimate the total number of tokens from the model inputs. Args: inputs (`dict`): The model inputs. Returns: `int`: The total number of tokens. """ if not hasattr(self, "warnings_issued"): self.warnings_issued = {} if self.main_input_name in input_dict: return input_dict[self.main_input_name].numel() elif "estimate_tokens" not in self.warnings_issued: logger.warning( "Could not estimate the number of tokens of the input, floating-point operations will not be computed" ) self.warnings_issued["estimate_tokens"] = True return 0 def floating_point_ops( self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True ) -> int: """ Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `12 * d_model << sequence_length`) as laid out in [this paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. Args: batch_size (`int`): The batch size for the forward pass. sequence_length (`int`): The number of tokens in each line of the batch. exclude_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to count embedding and softmax operations. Returns: `int`: The number of floating-point operations. """ return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
class_definition
35,280
49,598
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
229
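The mixin above is what gives every PyTorch model in the library its bookkeeping helpers. Below is a minimal sketch exercising the most common ones, again assuming the `bert-base-uncased` checkpoint is available; the printed shapes follow the encoder-only case described in `get_extended_attention_mask`.

```python
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("bert-base-uncased")
tok = AutoTokenizer.from_pretrained("bert-base-uncased")

print(model.num_parameters())                          # total parameter count
print(model.num_parameters(exclude_embeddings=True))   # without embedding matrices
print(model.dtype, model.device)

inputs = tok("The quick brown fox", return_tensors="pt")
# 6 * tokens * parameters, the approximation documented in floating_point_ops().
print(model.floating_point_ops(dict(inputs)))

# Additive mask produced by get_extended_attention_mask(): 0 to attend,
# dtype-min to ignore; broadcastable over heads and query positions.
ext = model.get_extended_attention_mask(inputs["attention_mask"], inputs["input_ids"].shape)
print(ext.shape)   # [batch, 1, 1, seq_len] for an encoder-only model
```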
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): r""" Base class for all models. [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint. - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model. - **path** (`str`) -- A path to the TensorFlow checkpoint. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" model_tags = None _auto_class = None _no_split_modules = None _skip_keys_device_placement = None _keep_in_fp32_modules = None # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings. _keys_to_ignore_on_load_missing = None # a list of `re` patterns of `state_dict` keys that should be removed from the list of # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary # warnings. _keys_to_ignore_on_load_unexpected = None # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't # trained, but which are either deterministic or tied variables) _keys_to_ignore_on_save = None # a list of `state_dict` keys that are potentially tied to another key in the state_dict. _tied_weights_keys = None is_parallelizable = False supports_gradient_checkpointing = False _is_stateful = False # Flash Attention 2 support _supports_flash_attn_2 = False # SDPA support _supports_sdpa = False # Flex Attention support _supports_flex_attn = False # Has support for a `Cache` instance as `past_key_values`? Does it support a `StaticCache`? _supports_cache_class = False _supports_static_cache = False # Has support for a `QuantoQuantizedCache` instance as `past_key_values` _supports_quantized_cache = False # A tensor parallel plan to be applied to the model when TP is enabled. For # top-level models, this attribute is currently defined in respective model # code. For base models, this attribute comes from # `config.base_model_tp_plan` during `post_init`. _tp_plan = None @property def dummy_inputs(self) -> Dict[str, torch.Tensor]: """ `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} @property def framework(self) -> str: """ :str: Identifies that this is a PyTorch model. 
""" return "pt" def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not getattr(config, "_attn_implementation_autoset", False): # config usually has a `torch_dtype` but we need the next line for the `no_super_init` tests dtype = config.torch_dtype if hasattr(config, "torch_dtype") else torch.get_default_dtype() config = self._autoset_attn_implementation(config, torch_dtype=dtype, check_device_map=False) self.config = config # for initialization of the loss loss_type = self.__class__.__name__ if loss_type not in LOSS_MAPPING: loss_groups = f"({'|'.join(LOSS_MAPPING)})" loss_type = re.findall(loss_groups, self.__class__.__name__) if len(loss_type) > 0: loss_type = loss_type[0] else: loss_type = None self.loss_type = loss_type self.name_or_path = config.name_or_path self.warnings_issued = {} self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None # Overwrite the class attribute to make it an instance attribute, so models like # `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute # when a different component (e.g. language_model) is used. self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules) def post_init(self): """ A method executed at the end of each Transformer model initialization, to execute code that needs the model's modules properly initialized (such as weight initialization). """ self.init_weights() self._backward_compatibility_gradient_checkpointing() # If current model is a base model, attach `base_model_tp_plan` from config if self.base_model is self: self._tp_plan = self.config.base_model_tp_plan def dequantize(self): """ Potentially dequantize the model in case it has been quantized by a quantization method that support dequantization. """ hf_quantizer = getattr(self, "hf_quantizer", None) if hf_quantizer is None: raise ValueError("You need to first quantize your model in order to dequantize it") return hf_quantizer.dequantize(self) def _backward_compatibility_gradient_checkpointing(self): if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): self.gradient_checkpointing_enable() # Remove the attribute now that is has been consumed, so it's no saved in the config. delattr(self.config, "gradient_checkpointing") def add_model_tags(self, tags: Union[List[str], str]) -> None: r""" Add custom tags into the model that gets pushed to the Hugging Face Hub. Will not overwrite existing tags in the model. Args: tags (`Union[List[str], str]`): The desired tags to inject in the model Examples: ```python from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") model.add_model_tags(["custom", "custom-bert"]) # Push the model to your namespace with the name "my-custom-bert". model.push_to_hub("my-custom-bert") ``` """ if isinstance(tags, str): tags = [tags] if self.model_tags is None: self.model_tags = [] for tag in tags: if tag not in self.model_tags: self.model_tags.append(tag) @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. 
Args: torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. """ # when we init a model from within another model (e.g. VLMs) and dispatch on FA2 # a warning is raised that dtype should be fp16. Since we never pass dtype from within # modeling code, we can try to infer it here same way as done in `from_pretrained` torch_dtype = kwargs.pop("torch_dtype", config.torch_dtype) if isinstance(torch_dtype, str): torch_dtype = getattr(torch, torch_dtype) use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) # override default dtype if needed dtype_orig = None if torch_dtype is not None: dtype_orig = cls._set_default_torch_dtype(torch_dtype) config = copy.deepcopy(config) # We do not want to modify the config inplace in _from_config. if config._attn_implementation_internal is not None: # In this case, the config has been created with the attn_implementation set by the user, which we # should respect. attn_implementation = config._attn_implementation_internal else: attn_implementation = None config._attn_implementation = kwargs.pop("attn_implementation", attn_implementation) if not getattr(config, "_attn_implementation_autoset", False): config = cls._autoset_attn_implementation( config, use_flash_attention_2=use_flash_attention_2, check_device_map=False, torch_dtype=torch_dtype, ) if is_deepspeed_zero3_enabled() and not _is_quantized and not _is_ds_init_called: import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") # this immediately partitions the model across all gpus, to avoid the overhead in time # and memory copying it on CPU or each GPU first init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()] with ContextManagers(init_contexts): model = cls(config, **kwargs) else: model = cls(config, **kwargs) # restore default dtype if it was modified if dtype_orig is not None: torch.set_default_dtype(dtype_orig) return model @classmethod def _autoset_attn_implementation( cls, config, use_flash_attention_2: bool = False, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, check_device_map: bool = True, ): """ Automatically checks and dispatches to a default attention implementation. In order of priority: 1. An implementation specified in `config._attn_implementation` (due for example to the argument attn_implementation="sdpa" in from_pretrained). 2. DEPRECATED: if use_flash_attention_2 is set to `True` and `flash_attn` is available, flash attention. (`LlamaFlashAttention` for example) 3. SDPA implementation, if available and supported by the model type. (`LlamaSdpaAttention` for example) 4. The default model's implementation otherwise (`LlamaAttention` for example) . """ # Here we use config._attn_implementation_internal to check whether the attention implementation was explicitely set by the user. # The property `PretrainedConfig._attn_implementation` is never `None`, for backward compatibility (always fall back on "eager"). # The `hasattr` here is used as some Transformers tests for some reason do not call PretrainedConfig __init__ (e.g. 
test_no_super_init_config_and_model) requested_attn_implementation = None if hasattr(config, "_attn_implementation_internal") and config._attn_implementation_internal is not None: if config._attn_implementation != "flash_attention_2" and use_flash_attention_2: raise ValueError( f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible.' ' We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.' ) if not isinstance(config._attn_implementation, dict) and config._attn_implementation not in [ "eager" ] + list(ALL_ATTENTION_FUNCTIONS.keys()): message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. The only possible arguments are `attn_implementation="eager"` (manual attention implementation)' if cls._supports_flash_attn_2: message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)' if cls._supports_sdpa: message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)' if cls._supports_flex_attn: message += ( ', `"attn_implementation=flex_attention"` (implementation using torch\'s flex_attention)' ) raise ValueError(message + ".") # If a config is passed with a preset attn_implementation, we skip the automatic dispatch and use the user-provided config, with hard checks that the requested attention implementation is available. requested_attn_implementation = config._attn_implementation_internal # Composite models consisting of several PretrainedModels have to specify attention impl as a dict # where keys are sub-config names. But most people will specify one `str` which means that should dispatch it # for all sub-models. # Below we check if a config is composite and manually prepare a dict of attn impl if not already passed as a dict. # Later each sub-module will dispatch with its own attn impl, by calling `XXXModel._from_config(config.text_config)` # If any of sub-modules doesn't support requested attn, an error will be raised. See https://github.com/huggingface/transformers/pull/32238 for key in config.sub_configs.keys(): sub_config = getattr(config, key) curr_attn_implementation = ( requested_attn_implementation if not isinstance(requested_attn_implementation, dict) else requested_attn_implementation.get(key, None) ) sub_config._attn_implementation_internal = curr_attn_implementation if use_flash_attention_2: logger.warning_once( 'The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.' ) config._attn_implementation = "flash_attention_2" if config._attn_implementation == "flash_attention_2": cls._check_and_enable_flash_attn_2( config, torch_dtype=torch_dtype, device_map=device_map, hard_check_only=False, check_device_map=check_device_map, ) elif requested_attn_implementation == "flex_attention": config = cls._check_and_enable_flex_attn(config, hard_check_only=True) elif requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available(): # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif. 
config = cls._check_and_enable_sdpa( config, hard_check_only=False if requested_attn_implementation is None else True, ) if ( torch.version.hip is not None and config._attn_implementation == "sdpa" and torch.cuda.device_count() > 1 ): logger.warning_once( "Using the `SDPA` attention implementation on multi-gpu setup with ROCM may lead to performance issues due to the FA backend. Disabling it to use alternative backends." ) torch.backends.cuda.enable_flash_sdp(False) elif requested_attn_implementation in list(ALL_ATTENTION_FUNCTIONS.keys()): config._attn_implementation = requested_attn_implementation elif isinstance(requested_attn_implementation, dict): config._attn_implementation = None else: config._attn_implementation = "eager" config._attn_implementation_autoset = True return config @classmethod def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: """ Change the default dtype and return the previous one. This is needed when wanting to instantiate the model under specific dtype. Args: dtype (`torch.dtype`): a floating dtype to set to. Returns: `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was modified. If it wasn't, returns `None`. Note `set_default_dtype` currently only works with floating-point types and asserts if for example, `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. """ if not dtype.is_floating_point: raise ValueError( f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" ) logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig @property def base_model(self) -> nn.Module: """ `torch.nn.Module`: The main body of the model. """ return getattr(self, self.base_model_prefix, self) @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()`. Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ # Directly inherits `GenerationMixin` -> can generate if "GenerationMixin" in str(cls.__bases__): return True # Model class overwrites `generate` (e.g. time series models) -> can generate if str(cls.__name__) in str(cls.generate): return True # The class inherits from a class that can generate (recursive check) -> can generate for base in cls.__bases__: if not hasattr(base, "can_generate"): continue if "PreTrainedModel" not in str(base) and base.can_generate(): return True # BC: Detects whether `prepare_inputs_for_generation` has been overwritten in the model. Prior to v4.45, this # was how we detected whether a model could generate. if "GenerationMixin" not in str(cls.prepare_inputs_for_generation): logger.warning_once( f"{cls.__name__} has generative capabilities, as `prepare_inputs_for_generation` is explicitly " "overwritten. However, it doesn't directly inherit from `GenerationMixin`. From 👉v4.50👈 onwards, " "`PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability " "to call `generate` and other related functions." "\n - If you're using `trust_remote_code=True`, you can get rid of this warning by loading the " "model with an auto class. 
See https://huggingface.co/docs/transformers/en/model_doc/auto#auto-classes"
                "\n - If you are the owner of the model architecture code, please modify your model class such that "
                "it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception)."
                "\n - If you are not the owner of the model architecture class, please contact the model code owner "
                "to update it."
            )
            return True

        # Otherwise, can't generate
        return False

    @classmethod
    def _check_and_enable_flash_attn_2(
        cls,
        config,
        torch_dtype: Optional[torch.dtype] = None,
        device_map: Optional[Union[str, Dict[str, int]]] = None,
        check_device_map: bool = True,
        hard_check_only: bool = False,
    ) -> PretrainedConfig:
        """
        Checks the availability of Flash Attention 2 and compatibility with the current model.

        If all checks pass and `hard_check_only` is False, the method will set the config attribute
        `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module.
        """
        if not cls._supports_flash_attn_2:
            raise ValueError(
                f"{cls.__name__} does not support Flash Attention 2.0 yet. Please request to add support where"
                f" the model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new"
                " or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new"
            )

        if not is_flash_attn_2_available():
            preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:"
            install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2."

            if importlib.util.find_spec("flash_attn") is None:
                raise ImportError(f"{preface} the package flash_attn does not seem to be installed. {install_message}")

            flash_attention_version = version.parse(importlib.metadata.version("flash_attn"))
            if torch.version.cuda:
                if flash_attention_version < version.parse("2.1.0"):
                    raise ImportError(
                        f"{preface} you need the flash_attn package version to be greater than or equal to 2.1.0. Detected version {flash_attention_version}. {install_message}"
                    )
                elif not torch.cuda.is_available():
                    raise ValueError(
                        f"{preface} Flash Attention 2 is not available on CPU. Please make sure torch can access a CUDA device."
                    )
                else:
                    raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
            elif torch.version.hip:
                if flash_attention_version < version.parse("2.0.4"):
                    raise ImportError(
                        f"{preface} you need the flash_attn package version to be greater than or equal to 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}"
                    )
                else:
                    raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")

        _is_bettertransformer = getattr(cls, "use_bettertransformer", False)

        if _is_bettertransformer:
            raise ValueError(
                "Flash Attention 2 and the BetterTransformer API are not compatible. Please make sure to disable BetterTransformer by calling model.reverse_bettertransformer()"
            )

        if torch_dtype is None:
            logger.warning_once(
                "You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour"
            )
        elif torch_dtype is not None and torch_dtype not in [torch.float16, torch.bfloat16]:
            logger.warning_once(
                "Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but"
                f" the current dtype in {cls.__name__} is {torch_dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` context manager,"
                ' or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`'
            )

        # The check `torch.empty(0).device.type != "cuda"` is needed as the model may be initialized after `torch.set_default_device` has been called,
        # or the model may be initialized under the context manager `with torch.device("cuda"):`.
        if check_device_map and device_map is None and torch.empty(0).device.type != "cuda":
            if torch.cuda.is_available():
                logger.warning_once(
                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU"
                    " after initializing it on CPU with `model.to('cuda')`."
                )
            else:
                raise ValueError(
                    "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. "
                    "This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map, "
                    "or initialise the model on CPU and then move it to GPU."
                )
        elif (
            check_device_map
            and device_map is not None
            and isinstance(device_map, dict)
            and ("cpu" in device_map.values() or "disk" in device_map.values())
        ):
            raise ValueError(
                "You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to "
                "initialise the model on a GPU by passing a device_map that contains only GPU devices as keys."
            )
        if not hard_check_only:
            config._attn_implementation = "flash_attention_2"
        return config

    @classmethod
    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
        """
        Checks the availability of SDPA for a given model.

        If all checks pass and `hard_check_only` is False, the method will set the config attribute
        `_attn_implementation` to "sdpa" so that the model can initialize the correct attention module.
        """
        if hard_check_only:
            if not cls._supports_sdpa:
                raise ValueError(
                    f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
                    " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe"
                    ' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`'
                )
            if not is_torch_sdpa_available():
                raise ImportError(
                    "PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.1.1."
                )

        if not is_torch_sdpa_available() or not cls._supports_sdpa:
            return config

        _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
        if _is_bettertransformer:
            return config

        if not hard_check_only:
            config._attn_implementation = "sdpa"
        return config

    @classmethod
    def _check_and_enable_flex_attn(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
        """
        Checks the availability of Flex Attention for a given model.

        If all checks pass and `hard_check_only` is False, the method will set the config attribute
        `_attn_implementation` to "flex_attention" so that the model can initialize the correct attention module.
""" if hard_check_only: if not cls._supports_flex_attn: raise ValueError( f"{cls.__name__} does not support an attention implementation through torch's flex_attention." " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/34809." " If you believe this error is a bug, please open an issue in Transformers GitHub repository" ' and load your model with the argument `attn_implementation="eager"` meanwhile.' ' Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`' ) if not is_torch_flex_attn_available(): raise ImportError( "PyTorch Flex Attention requirements in Transformers are not met. Please install torch>=2.5.0." ) if not is_torch_flex_attn_available() or not cls._supports_flex_attn: return config if not hard_check_only: config._attn_implementation = "flex_attention" return config def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) def disable_input_require_grads(self): """ Removes the `_require_grads_hook`. """ self._require_grads_hook.remove() def get_input_embeddings(self) -> nn.Module: """ Returns the model's input embeddings. Returns: `nn.Module`: A torch module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): """ Set model's input embeddings. Args: value (`nn.Module`): A module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: """ Returns the model's output embeddings. Returns: `nn.Module`: A torch module mapping hidden states to vocabulary. """ return None # Overwrite for models with output embeddings def _init_weights(self, module): """ Initialize the weights. This method should be overridden by derived class and is the only initialization method that will be called when loading a checkpoint using `from_pretrained`. Any attempt to initialize outside of this function will be useless as the torch.nn.init function are all replaced with skip. """ pass def _initialize_weights(self, module): """ Initialize the weights if they are not already initialized. """ if getattr(module, "_is_hf_initialized", False): return self._init_weights(module) module._is_hf_initialized = True def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. 
""" if getattr(self.config.get_text_config(decoder=True), "tie_word_embeddings", True): output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) tied_weights = self._tie_encoder_decoder_weights( self.encoder, self.decoder, self.base_model_prefix, "encoder" ) # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class # attributed not an instance member, therefore modifying it will modify the entire class # Leading to issues on subsequent calls by different tests or subsequent calls. self._dynamic_tied_weights_keys = tied_weights for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() @staticmethod def _tie_encoder_decoder_weights( encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str ): uninitialized_encoder_weights: List[str] = [] tied_weights: List[str] = [] if decoder.__class__ != encoder.__class__: logger.info( f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder" " weights are correctly initialized." ) def tie_encoder_to_decoder_recursively( decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, base_encoder_name: str, uninitialized_encoder_weights: List[str], depth=0, total_decoder_name="", total_encoder_name="", ): assert isinstance(decoder_pointer, nn.Module) and isinstance( encoder_pointer, nn.Module ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" if hasattr(decoder_pointer, "weight"): assert hasattr(encoder_pointer, "weight") encoder_pointer.weight = decoder_pointer.weight tied_weights.append(f"{base_encoder_name}{total_encoder_name}.weight") if hasattr(decoder_pointer, "bias"): assert hasattr(encoder_pointer, "bias") tied_weights.append(f"{base_encoder_name}{total_encoder_name}.bias") encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert ( len(encoder_modules) > 0 ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()} encoder_layer_pos = 0 for name, module in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len( encoder_modules ) != len(decoder_modules): # this can happen if the name corresponds to the position in a list module list of layers # in this case the decoder has added a cross-attention that the encoder does not have # thus skip this step and subtract one layer pos from encoder encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError( "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is" " a circular dependency between two or more `nn.Modules` of your model." 
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        base_encoder_name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                        total_encoder_name=f"{total_encoder_name}.{encoder_name}",
                        total_decoder_name=f"{total_decoder_name}.{decoder_name}",
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(
            decoder, encoder, base_model_prefix, base_encoder_name, uninitialized_encoder_weights
        )

        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )
        return tied_weights

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending on whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def _get_no_split_modules(self, device_map: str):
        """
        Get the modules of the model that should not be split when using device_map. We iterate through the modules to
        get the underlying `_no_split_modules`.

        Args:
            device_map (`str`):
                The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]

        Returns:
            `List[str]`: List of modules that should not be split
        """
        _no_split_modules = set()
        modules_to_check = [self]
        while len(modules_to_check) > 0:
            module = modules_to_check.pop(-1)
            # if the module does not appear in _no_split_modules, we also check the children
            if module.__class__.__name__ not in _no_split_modules:
                if isinstance(module, PreTrainedModel):
                    if module._no_split_modules is None:
                        raise ValueError(
                            f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model "
                            "class needs to implement the `_no_split_modules` attribute."
                        )
                    else:
                        _no_split_modules = _no_split_modules | set(module._no_split_modules)
                modules_to_check += list(module.children())
        return list(_no_split_modules)

    def resize_token_embeddings(
        self,
        new_num_tokens: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        mean_resizing: bool = True,
    ) -> nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.

        Takes care of tying the embedding weights afterwards if the model class has a `tie_weights()` method.

        Arguments:
            new_num_tokens (`int`, *optional*):
                The new number of tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
                returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to
                `None` will just pad the embedding to a multiple of `pad_to_multiple_of`.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
                details about this, or help on choosing the correct value for resizing, refer to this guide:
                https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
            mean_resizing (`bool`):
                Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
                covariance or to initialize them with a normal distribution that has a mean of zero and std equal to `config.initializer_range`.

                Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
                where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the
                old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
                Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        if new_num_tokens is None and pad_to_multiple_of is None:
            return model_embeds

        # Since we are basically reusing the same old embeddings with new weight values, gathering is required
        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            with deepspeed.zero.GatheredParameters(model_embeds.weight, modifier_rank=None):
                vocab_size = model_embeds.weight.shape[0]
        else:
            vocab_size = model_embeds.weight.shape[0]

        # Update base model and current model config.
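        # Illustrative usage (sketch; assumes a hypothetical tokenizer with added tokens):
        #   tokenizer.add_tokens(["<mod>"])
        #   model.resize_token_embeddings(len(tokenizer))
        # after which the text config's `vocab_size` below reflects the resized embedding matrix.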
self.config.get_text_config().vocab_size = vocab_size self.vocab_size = vocab_size # Tie weights again if needed self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings( old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing ) if hasattr(old_embeddings, "_hf_hook"): hook = old_embeddings._hf_hook add_hook_to_module(new_embeddings, hook) old_embeddings_requires_grad = old_embeddings.weight.requires_grad new_embeddings.requires_grad_(old_embeddings_requires_grad) self.set_input_embeddings(new_embeddings) is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None # Update new_num_tokens with the actual size of new_embeddings if pad_to_multiple_of is not None: if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): new_num_tokens = new_embeddings.weight.shape[0] else: new_num_tokens = new_embeddings.weight.shape[0] # if word embeddings are not tied, make sure that lm head is resized as well if ( self.get_output_embeddings() is not None and not self.config.get_text_config(decoder=True).tie_word_embeddings ): old_lm_head = self.get_output_embeddings() if isinstance(old_lm_head, torch.nn.Embedding): new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens, mean_resizing=mean_resizing) else: new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens, mean_resizing=mean_resizing) if hasattr(old_lm_head, "_hf_hook"): hook = old_lm_head._hf_hook add_hook_to_module(new_lm_head, hook) old_lm_head_requires_grad = old_lm_head.weight.requires_grad new_lm_head.requires_grad_(old_lm_head_requires_grad) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() def _get_resized_embeddings( self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True, ) -> nn.Embedding: """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. 
                For more details about this, or help on choosing the correct value for resizing, refer to this guide:
                https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
            mean_resizing (`bool`):
                Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
                covariance or to initialize them with a normal distribution that has a mean of zero and std equal to `config.initializer_range`.

                Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
                where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the
                old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
                Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            `new_num_tokens` is `None`
        """
        if pad_to_multiple_of is not None:
            if not isinstance(pad_to_multiple_of, int):
                raise ValueError(
                    f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not an integer. Please make sure to pass an integer"
                )
            if new_num_tokens is None:
                new_num_tokens = old_embeddings.weight.shape[0]
            new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
        else:
            logger.info(
                "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding"
                f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available."
                " For more details about this, or help on choosing the correct value for resizing, refer to this guide:"
                " https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc"
            )

        if new_num_tokens is None:
            return old_embeddings

        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You"
                " should either use a different resize function or make sure that `old_embeddings` are an instance of"
                f" {nn.Embedding}."
            )

        # Build new embeddings

        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
        # because the shape of the new embedding layer is used across various modeling files
        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
        # to errors when training.
        new_embeddings = nn.Embedding(
            new_num_tokens,
            old_embedding_dim,
            device=old_embeddings.weight.device,
            dtype=old_embeddings.weight.dtype,
        )

        if new_num_tokens > old_num_tokens and not mean_resizing:
            # initialize new embeddings (in particular added tokens) with a mean of 0 and std equal to `config.initializer_range`.
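            # Worked example for the `pad_to_multiple_of` rounding earlier in this method (illustrative values):
            #   new_num_tokens=50257, pad_to_multiple_of=64 -> ((50257 + 64 - 1) // 64) * 64 = 50304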
            self._init_weights(new_embeddings)

        elif new_num_tokens > old_num_tokens and mean_resizing:
            # initialize new embeddings (in particular added tokens). The new embeddings will be initialized
            # from a multivariate normal distribution that has old embeddings' mean and covariance.
            # as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
            logger.warning_once(
                "The new embeddings will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
                "As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
                "To disable this, use `mean_resizing=False`"
            )

            added_num_tokens = new_num_tokens - old_num_tokens
            if is_deepspeed_zero3_enabled() and not is_quantized:
                import deepspeed

                with deepspeed.zero.GatheredParameters([old_embeddings.weight], modifier_rank=None):
                    self._init_added_embeddings_weights_with_mean(
                        old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens
                    )
            else:
                self._init_added_embeddings_weights_with_mean(
                    old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens
                )

        # Copy token embeddings from the previous weights

        # number of tokens to copy
        n = min(old_num_tokens, new_num_tokens)

        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            params = [old_embeddings.weight, new_embeddings.weight]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        # Replace weights in old_embeddings and return to maintain the same embedding type.
        # This ensures correct functionality when a Custom Embedding class is passed as input.
        # The input and output embedding types remain consistent. (c.f. https://github.com/huggingface/transformers/pull/31979)
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            params = [old_embeddings.weight, new_embeddings.weight]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                old_embeddings.weight = new_embeddings.weight
                old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]

                # If the new number of tokens is smaller than the original `padding_idx`, the `padding_idx`
                # will be set to `None` in the resized embeddings.
                if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
                    old_embeddings.padding_idx = None
        else:
            old_embeddings.weight.data = new_embeddings.weight.data
            old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]
            if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
                old_embeddings.padding_idx = None

        return old_embeddings

    def _get_resized_lm_head(
        self,
        old_lm_head: nn.Linear,
        new_num_tokens: Optional[int] = None,
        transposed: Optional[bool] = False,
        mean_resizing: bool = True,
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_lm_head (`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at
                the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a
                pointer to the input tokens `torch.nn.Linear` module of the model without doing anything.
            transposed (`bool`, *optional*, defaults to `False`):
                Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim,
                vocab_size` else `vocab_size, lm_head_dim`.
            mean_resizing (`bool`):
                Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
                covariance or to initialize them with a normal distribution that has a mean of zero and std equal to `config.initializer_range`.

                Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
                where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the
                old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
                Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
            `None`
        """
        if new_num_tokens is None:
            return old_lm_head

        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )

        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You"
                " should either use a different resize function or make sure that `old_lm_head` is an instance of"
                f" {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None

        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
        # because the shape of the new embedding layer is used across various modeling files
        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
        # to errors when training.
        new_lm_head = nn.Linear(
            *new_lm_head_shape,
            bias=has_new_lm_head_bias,
            device=old_lm_head.weight.device,
            dtype=old_lm_head.weight.dtype,
        )

        if new_num_tokens > old_num_tokens and not mean_resizing:
            # initialize new embeddings (in particular added tokens) with a mean of 0 and std equal to `config.initializer_range`.
            self._init_weights(new_lm_head)

        elif new_num_tokens > old_num_tokens and mean_resizing:
            # initialize new lm_head weights (in particular added tokens). The new lm_head weights
            # will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance.
            # as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
            logger.warning_once(
                "The new lm_head weights will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
                "As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
" "To disable this, use `mean_resizing=False`" ) added_num_tokens = new_num_tokens - old_num_tokens if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed params = [old_lm_head.weight] if has_new_lm_head_bias: params += [old_lm_head.bias] with deepspeed.zero.GatheredParameters(params, modifier_rank=None): self._init_added_lm_head_weights_with_mean( old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed ) if has_new_lm_head_bias: self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens) else: self._init_added_lm_head_weights_with_mean( old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed ) if has_new_lm_head_bias: self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): self._copy_lm_head_original_to_resized( new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias ) else: self._copy_lm_head_original_to_resized( new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias ) return new_lm_head def _init_added_embeddings_weights_with_mean( self, old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens ): old_embeddings_weight = old_embeddings.weight.data.to(torch.float32) mean_embeddings = torch.mean(old_embeddings_weight, axis=0) old_centered_embeddings = old_embeddings_weight - mean_embeddings covariance = old_centered_embeddings.T @ old_centered_embeddings / old_num_tokens # Check if the covariance is positive definite. eigenvalues = torch.linalg.eigvals(covariance) is_covariance_psd = bool( (covariance == covariance.T).all() and not torch.is_complex(eigenvalues) and (eigenvalues > 0).all() ) if is_covariance_psd: # If covariances is positive definite, a distribution can be created. and we can sample new weights from it. distribution = torch.distributions.multivariate_normal.MultivariateNormal( mean_embeddings, covariance_matrix=1e-9 * covariance ) new_embeddings.weight.data[-1 * added_num_tokens :, :] = distribution.sample( sample_shape=(added_num_tokens,) ).to(old_embeddings.weight.dtype) else: # Otherwise, just initialize with the mean. because distribtion will not be created. new_embeddings.weight.data[-1 * added_num_tokens :, :] = ( mean_embeddings[None, :].repeat(added_num_tokens, 1).to(old_embeddings.weight.dtype) ) def _init_added_lm_head_weights_with_mean( self, old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed=False, ): if transposed: # Transpose to the desired shape for the function. new_lm_head.weight.data = new_lm_head.weight.data.T old_lm_head.weight.data = old_lm_head.weight.data.T # The same initilization logic as Embeddings. self._init_added_embeddings_weights_with_mean( old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens ) if transposed: # Transpose again to the correct shape. 
new_lm_head.weight.data = new_lm_head.weight.data.T old_lm_head.weight.data = old_lm_head.weight.data.T def _init_added_lm_head_bias_with_mean(self, old_lm_head, new_lm_head, added_num_tokens): bias_mean = torch.mean(old_lm_head.bias.data, axis=0, dtype=torch.float32) bias_std = torch.std(old_lm_head.bias.data, axis=0).to(torch.float32) new_lm_head.bias.data[-1 * added_num_tokens :].normal_(mean=bias_mean, std=1e-9 * bias_std) def _copy_lm_head_original_to_resized( self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias ): # Copy old lm head weights to new lm head if not transposed: new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] else: new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] # Copy bias weights to new lm head if has_new_lm_head_bias: new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] def resize_position_embeddings(self, new_num_position_embeddings: int): raise NotImplementedError( f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" ) def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: raise NotImplementedError( f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should " f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`" ) def init_weights(self): """ If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any initialization logic in `_init_weights`. """ # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) if _init_weights: # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() def prune_heads(self, heads_to_prune: Dict[int, List[int]]): """ Prunes heads of the base model. Arguments: heads_to_prune (`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON self.base_model._prune_heads(heads_to_prune) def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): """ Activates gradient checkpointing for the current model. Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint activations". We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 Args: gradient_checkpointing_kwargs (dict, *optional*): Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function. 
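
        Example (illustrative; the kwargs are forwarded to `torch.utils.checkpoint.checkpoint`):

        ```python
        >>> model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
        ```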
""" if not self.supports_gradient_checkpointing: raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {"use_reentrant": True} gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) # For old GC format (transformers < 4.35.0) for models that live on the Hub # we will fall back to the overwritten `_set_gradient_checkpointing` method _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) else: self.apply(partial(self._set_gradient_checkpointing, value=True)) logger.warning( "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." ) if getattr(self, "_hf_peft_config_loaded", False): # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334 # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate # the gradients to make sure the gradient flows. self.enable_input_require_grads() def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint): is_gradient_checkpointing_set = False # Apply it on the top-level module in case the top-level modules supports it # for example, LongT5Stack inherits from `PreTrainedModel`. if hasattr(self, "gradient_checkpointing"): self._gradient_checkpointing_func = gradient_checkpointing_func self.gradient_checkpointing = enable is_gradient_checkpointing_set = True for module in self.modules(): if hasattr(module, "gradient_checkpointing"): module._gradient_checkpointing_func = gradient_checkpointing_func module.gradient_checkpointing = enable is_gradient_checkpointing_set = True if not is_gradient_checkpointing_set: raise ValueError( f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all the architecture support it by setting a boolean attribute" " `gradient_checkpointing` to modules of the model that uses checkpointing." ) def gradient_checkpointing_disable(self): """ Deactivates gradient checkpointing for the current model. Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint activations". """ if self.supports_gradient_checkpointing: # For old GC format (transformers < 4.35.0) for models that live on the Hub # we will fall back to the overwritten `_set_gradient_checkpointing` methid _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=False) else: logger.warning( "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)." "Please update to the new format on your modeling file. 
To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." ) self.apply(partial(self._set_gradient_checkpointing, value=False)) if getattr(self, "_hf_peft_config_loaded", False): self.disable_input_require_grads() @property def is_gradient_checkpointing(self) -> bool: """ Whether gradient checkpointing is activated for this model or not. Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint activations". """ return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules()) def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, state_dict: Optional[dict] = None, save_function: Callable = torch.save, push_to_hub: bool = False, max_shard_size: Union[int, str] = "5GB", safe_serialization: bool = True, variant: Optional[str] = None, token: Optional[Union[str, bool]] = None, save_peft_format: bool = True, **kwargs, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~PreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. state_dict (nested dictionary of `torch.Tensor`): The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism). save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to 5GB in order for models to be able to run easily on free-tier google colab instances without CPU OOM issues. <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). 
            save_peft_format (`bool`, *optional*, defaults to `True`):
                For backward compatibility with the PEFT library, in case adapter weights are attached to the model, all
                keys of the state dict of adapters need to be prepended with `base_model.model`. Advanced users can
                disable this behaviour by setting `save_peft_format` to `False`.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        use_auth_token = kwargs.pop("use_auth_token", None)
        ignore_metadata_errors = kwargs.pop("ignore_metadata_errors", False)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False)

        hf_quantizer = getattr(self, "hf_quantizer", None)
        quantization_serializable = (
            hf_quantizer is not None
            and isinstance(hf_quantizer, HfQuantizer)
            and hf_quantizer.is_serializable(safe_serialization=safe_serialization)
        )

        if hf_quantizer is not None and not _hf_peft_config_loaded and not quantization_serializable:
            raise ValueError(
                f"The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from"
                " the logger on the traceback to understand the reason why the quantized model is not serializable."
            )

        if "save_config" in kwargs:
            warnings.warn(
                "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
            )
            is_main_process = kwargs.pop("save_config")
        if safe_serialization and not is_safetensors_available():
            raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.")

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        # Unset attn implementation so it can be set to another one when loading back
        model_to_save.config._attn_implementation_autoset = False

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

        # Save the config
        if is_main_process:
            if not _hf_peft_config_loaded:
                # If the model config has set attributes that should be in the generation config, move them there.
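                # Illustrative (hypothetical attribute): a `temperature=0.7` set directly on `model.config`
                # would be reported in the warning below and moved onto `model.generation_config`.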
                misplaced_generation_parameters = model_to_save.config._get_non_default_generation_parameters()
                if self.can_generate() and len(misplaced_generation_parameters) > 0:
                    warnings.warn(
                        "Moving the following attributes in the config to the generation config: "
                        f"{misplaced_generation_parameters}. You are seeing this warning because you've set "
                        "generation parameters in the model config, as opposed to in the generation config.",
                        UserWarning,
                    )
                    for param_name, param_value in misplaced_generation_parameters.items():
                        setattr(model_to_save.generation_config, param_name, param_value)
                        setattr(model_to_save.config, param_name, None)

                model_to_save.config.save_pretrained(save_directory)
            if self.can_generate():
                model_to_save.generation_config.save_pretrained(save_directory)

            if _hf_peft_config_loaded:
                logger.info(
                    "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved."
                )
                state_dict = model_to_save.get_adapter_state_dict()

                if save_peft_format:
                    logger.info(
                        "To match the expected format of the PEFT library, all keys of the state dict of adapters will be prepended with `base_model.model`."
                    )
                    peft_state_dict = {}
                    for key, value in state_dict.items():
                        peft_state_dict[f"base_model.model.{key}"] = value
                    state_dict = peft_state_dict

                active_adapter = self.active_adapters()

                if len(active_adapter) > 1:
                    raise ValueError(
                        "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one "
                        "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`"
                    )
                active_adapter = active_adapter[0]

                current_peft_config = self.peft_config[active_adapter]
                current_peft_config.save_pretrained(save_directory)

        # for offloaded modules
        module_map = {}

        # Save the model
        if state_dict is None:
            # if any model parameters are offloaded, make module map
            if (
                hasattr(self, "hf_device_map")
                and len(set(self.hf_device_map.values())) > 1
                and ("cpu" in self.hf_device_map.values() or "disk" in self.hf_device_map.values())
            ):
                warnings.warn(
                    "Attempting to save a model with offloaded modules. Ensure that unallocated cpu memory exceeds the `shard_size` (5GB default)"
                )
                for name, module in model_to_save.named_modules():
                    if name == "":
                        continue
                    module_state_dict = module.state_dict()

                    for key in module_state_dict:
                        module_map[name + f".{key}"] = module
            state_dict = model_to_save.state_dict()

        # Translate state_dict from smp to hf if saving with smp >= 1.10
        if IS_SAGEMAKER_MP_POST_1_10:
            for smp_to_hf, _ in smp.state.module_manager.translate_functions:
                state_dict = smp_to_hf(state_dict)

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            for ignore_key in self._keys_to_ignore_on_save:
                if ignore_key in state_dict.keys():
                    del state_dict[ignore_key]

        # Rename state_dict keys before saving to file. Do nothing unless overridden in a particular model.
        # (initially introduced with TimmWrapperModel to remove prefix and make checkpoints compatible with timm)
        state_dict = self._fix_state_dict_keys_on_save(state_dict)

        if safe_serialization:
            # Safetensors does not allow tensor aliasing.
            # We're going to remove aliases before saving
            ptrs = collections.defaultdict(list)
            for name, tensor in state_dict.items():
                # Sometimes in the state_dict we have non-tensor objects.
                # e.g.
in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors if hasattr(self, "hf_device_map"): # if the model has offloaded parameters, we must check using find_tied_parameters() tied_params = find_tied_parameters(self) if tied_params: tied_names = tied_params[0] shared_ptrs = { ptr: names for ptr, names in ptrs.items() if any(name in tied_names for name in names) } else: shared_ptrs = {} else: shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} # Recursively descend to find tied weight keys _tied_weights_keys = _get_tied_weight_keys(self) error_names = [] to_delete_names = set() for names in shared_ptrs.values(): # Removing the keys which are declared as known duplicates on # load. This allows to make sure the name which is kept is consistent. if _tied_weights_keys is not None: found = 0 for name in sorted(names): matches_pattern = any(re.search(pat, name) for pat in _tied_weights_keys) if matches_pattern and name in state_dict: found += 1 if found < len(names): to_delete_names.add(name) # We are entering a place where the weights and the transformers configuration do NOT match. shared_names, disjoint_names = _find_disjoint(shared_ptrs.values(), state_dict) # Those are actually tensor sharing but disjoint from each other, we can safely clone them # Reloaded won't have the same property, but it shouldn't matter in any meaningful way. for name in disjoint_names: state_dict[name] = state_dict[name].clone() # When not all duplicates have been cleaned, still remove those keys, but put a clear warning. # If the link between tensors was done at runtime then `from_pretrained` will not get # the key back leading to random tensor. A proper warning will be shown # during reload (if applicable), but since the file is not necessarily compatible with # the config, better show a proper warning. shared_names, identical_names = _find_identical(shared_names, state_dict) # delete tensors that have identical storage for inames in identical_names: known = inames.intersection(to_delete_names) for name in known: del state_dict[name] unknown = inames.difference(to_delete_names) if len(unknown) > 1: error_names.append(unknown) if shared_names: error_names.append(set(shared_names)) if len(error_names) > 0: raise RuntimeError( f"The weights trying to be saved contained shared tensors {error_names} that are mismatching the transformers base configuration. Try saving using `safe_serialization=False` or remove this tensor sharing.", ) # Shard the model if it is too big. 
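        # Illustrative sharding outcome (sketch; assumes safetensors and the default max_shard_size="5GB"):
        # a ~12GB state dict would be written as
        #   model-00001-of-00003.safetensors ... model-00003-of-00003.safetensors
        # together with a model.safetensors.index.json mapping each tensor to its shard.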
        if not _hf_peft_config_loaded:
            weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
            weights_name = _add_variant(weights_name, variant)
        else:
            weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME

        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(
            state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
        )
        # Save index if sharded
        index = None
        if state_dict_split.is_sharded:
            index = {
                "metadata": state_dict_split.metadata,
                "weight_map": state_dict_split.tensor_to_filename,
            }

        # Clean the folder from a previous save
        for filename in os.listdir(save_directory):
            full_filename = os.path.join(save_directory, filename)
            # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
            # in distributed settings to avoid race conditions.
            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")

            # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
            filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "")
            reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")

            if (
                filename.startswith(weights_no_suffix)
                and os.path.isfile(full_filename)
                and filename not in state_dict_split.filename_to_tensors.keys()
                and is_main_process
                and reg.fullmatch(filename_no_suffix) is not None
            ):
                os.remove(full_filename)

        # Save the model
        filename_to_tensors = state_dict_split.filename_to_tensors.items()
        if module_map:
            filename_to_tensors = logging.tqdm(filename_to_tensors, desc="Saving checkpoint shards")
        for shard_file, tensors in filename_to_tensors:
            shard = {}
            for tensor in tensors:
                shard[tensor] = state_dict[tensor].contiguous()
                # delete reference, see https://github.com/huggingface/transformers/pull/34890
                del state_dict[tensor]

            # remake shard with onloaded parameters if necessary
            if module_map:
                if accelerate_version < version.parse("0.31"):
                    raise ImportError(
                        f"You need an accelerate version greater than or equal to 0.31 to save models with offloaded parameters. Detected version {accelerate_version}. "
                        f"Please upgrade accelerate with `pip install -U accelerate`"
                    )
                # init state_dict for this shard
                shard_state_dict = {name: "" for name in shard}
                for module_name in shard:
                    module = module_map[module_name]
                    # update state dict with onloaded parameters
                    shard_state_dict = get_state_dict_from_offload(module, module_name, shard_state_dict)

                # assign shard to be the completed state dict
                shard = shard_state_dict
                del shard_state_dict
                gc.collect()

            if safe_serialization:
                # At some point we will need to deal better with save_function (used for TPU and other distributed
                # joyfulness), but for now this is enough.
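                # Illustrative: each safetensors shard is tagged with metadata={"format": "pt"} below, which is
                # what lets loaders later identify the shard as PyTorch weights.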
safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"}) else: save_function(shard, os.path.join(save_directory, shard_file)) del state_dict if index is None: path_to_weights = os.path.join(save_directory, weights_name) logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split into {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameter has been saved in the " f"index located at {save_index_file}." ) if push_to_hub: # Eventually create an empty model card model_card = create_and_tag_model_card( repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors ) # Update model card if needed: model_card.save(os.path.join(save_directory, "README.md")) self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token, ) @wraps(PushToHubMixin.push_to_hub) def push_to_hub(self, *args, **kwargs): tags = self.model_tags if self.model_tags is not None else [] tags_kwargs = kwargs.get("tags", []) if isinstance(tags_kwargs, str): tags_kwargs = [tags_kwargs] for tag in tags_kwargs: if tag not in tags: tags.append(tag) if tags: kwargs["tags"] = tags return super().push_to_hub(*args, **kwargs) def get_memory_footprint(self, return_buffers=True): r""" Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired by the PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 Arguments: return_buffers (`bool`, *optional*, defaults to `True`): Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers are tensors that do not require gradients and are not registered as parameters. E.g. mean and std in batch norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 """ mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) if return_buffers: mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) mem = mem + mem_bufs return mem @wraps(torch.nn.Module.cuda) def cuda(self, *args, **kwargs): if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ: raise ValueError("`.cuda` is not supported for HQQ-quantized models.") # Checks if the model has been loaded in 4-bit or 8-bit with BNB if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: if getattr(self, "is_loaded_in_8bit", False): raise ValueError( "Calling `cuda()` is not supported for `8-bit` quantized models. " " Please use the model as it is, since the model has already been set to the correct devices." ) elif version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"): raise ValueError( "Calling `cuda()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. " f"The current device is `{self.device}`. 
If you intended to move the model, please install bitsandbytes >= 0.43.2." ) else: return super().cuda(*args, **kwargs) @wraps(torch.nn.Module.to) def to(self, *args, **kwargs): # For BNB/GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours. # the correct API should be to load the model with the desired dtype directly through `from_pretrained`. dtype_present_in_args = "dtype" in kwargs if not dtype_present_in_args: for arg in args: if isinstance(arg, torch.dtype): dtype_present_in_args = True break if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ: raise ValueError("`.to` is not supported for HQQ-quantized models.") # Checks if the model has been loaded in 4-bit or 8-bit with BNB if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES: if dtype_present_in_args: raise ValueError( "You cannot cast a bitsandbytes model to a new `dtype`. Make sure to load the model using `from_pretrained` with the" " desired `dtype` by passing the correct `torch_dtype` argument." ) if getattr(self, "is_loaded_in_8bit", False): raise ValueError( "`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the" " model has already been set to the correct devices and cast to the correct `dtype`." ) elif version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"): raise ValueError( "Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. " f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2." ) elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ: if dtype_present_in_args: raise ValueError( "You cannot cast a GPTQ model to a new `dtype`. Make sure to load the model using `from_pretrained` with the desired" " `dtype` by passing the correct `torch_dtype` argument." ) return super().to(*args, **kwargs) def half(self, *args): # Checks if the model is quantized if getattr(self, "is_quantized", False): raise ValueError( "`.half()` is not supported for quantized models. Please use the model as it is, since the" " model has already been cast to the correct `dtype`." ) else: return super().half(*args) def float(self, *args): # Checks if the model is quantized if getattr(self, "is_quantized", False): raise ValueError( "`.float()` is not supported for quantized models. Please use the model as it is, since the" " model has already been cast to the correct `dtype`." ) else: return super().float(*args) @classmethod def from_pretrained( cls: Type[SpecificPreTrainedModelType], pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, ignore_mismatched_sizes: bool = False, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", use_safetensors: Optional[bool] = None, weights_only: bool = True, **kwargs, ) -> SpecificPreTrainedModelType: r""" Instantiate a pretrained PyTorch model from a pre-trained model configuration. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. 
The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. If model weights are the same precision as the base model (and is a supported model), weights will be lazily loaded in using the `meta` device and brought into memory once an input is passed through that layer regardless of `low_cpu_mem_usage`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g, `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to `True`. - `None` if you are both providing the configuration and state dictionary (resp. with keyword arguments `config` and `state_dict`). model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. state_dict (`Dict[str, torch.Tensor]`, *optional*): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and [`~PreTrainedModel.from_pretrained`] is not a simpler option. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (`bool`, *optional*, defaults to `False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). from_flax (`bool`, *optional*, defaults to `False`): Load the model weights from a Flax checkpoint save file (see docstring of `pretrained_model_name_or_path` argument). 
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore size mismatches between checkpoint weights and model weights (if, for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels) instead of raising an error. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info (`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. _fast_init (`bool`, *optional*, defaults to `True`): Whether or not to disable fast initialization. <Tip warning={true}> One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ < 4.6.0` for seeded model initialization. This argument will be removed at the next major version. See [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information. </Tip> attn_implementation (`str`, *optional*): The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. > Parameters for big model inference low_cpu_mem_usage (`bool`, *optional*): Tries not to use more than 1x model size in CPU memory (including peak memory) while loading the model. Generally should be combined with a `device_map` (such as `"auto"`) for best results. This is an experimental feature and subject to change at any moment. <Tip> If the model weights are in the same precision as the model loaded in, `low_cpu_mem_usage` (without `device_map`) is redundant and will not provide any benefit in regards to CPU memory usage. 
However, this should still be enabled if you are passing in a `device_map`. </Tip> torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under a specific `dtype`. The different options are: 1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified `dtype`, ignoring the model's `config.torch_dtype` if one exists. If not specified, the model will get loaded in `torch.float` (fp32). 2. `"auto"` - the `torch_dtype` entry in the model's `config.json` file will be used if present. Otherwise, the `dtype` of the first floating-point weight in the checkpoint is used. This will load the model using the `dtype` it was saved in at the end of training, which can't be used as an indicator of how the model was trained, since it could have been trained in one of the half-precision dtypes but saved in fp32. 3. A string that is a valid `torch.dtype`. E.g. "float32" loads the model in `torch.float32`, "float16" loads in `torch.float16` etc. <Tip> For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or reach out to the authors and ask them to add this information to the model's card and to insert the `torch_dtype` entry in `config.json` on the hub. </Tip> device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined down to each parameter/buffer name; once a given module name is included, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. offload_buffers (`bool`, *optional*): Whether or not to offload the buffers with the model parameters. quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*): A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g. bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes quantization and not preferred; consider passing all such arguments via `quantization_config` instead. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. 
variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is ignored when using `from_tf` or `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` is not installed, it will be set to `False`. weights_only (`bool`, *optional*, defaults to `True`): Indicates whether unpickler should be restricted to loading only tensors, primitive types, dictionaries and any types added via torch.serialization.add_safe_globals(). When set to False, we can load wrapper tensor subclass weights. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```python >>> from transformers import BertConfig, BertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model = BertModel.from_pretrained("./test/saved_model/") >>> # Update configuration during loading. >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json") >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config) >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower) >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", from_flax=True) ``` * `low_cpu_mem_usage` algorithm: This is an experimental function that loads the model using ~1x model size CPU memory Here is how it works: 1. save which state_dict keys we have 2. drop state_dict before the model is created, since the latter takes 1x model size CPU memory 3. after the model has been instantiated switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict 4. load state_dict 2nd time 5. 
replace the params/buffers from the state_dict Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors """ state_dict = kwargs.pop("state_dict", None) from_tf = kwargs.pop("from_tf", False) from_flax = kwargs.pop("from_flax", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) use_auth_token = kwargs.pop("use_auth_token", None) trust_remote_code = kwargs.pop("trust_remote_code", None) _ = kwargs.pop("mirror", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) _fast_init = kwargs.pop("_fast_init", True) torch_dtype = kwargs.pop("torch_dtype", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None) device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None) offload_state_dict = kwargs.pop("offload_state_dict", False) offload_buffers = kwargs.pop("offload_buffers", False) load_in_8bit = kwargs.pop("load_in_8bit", False) load_in_4bit = kwargs.pop("load_in_4bit", False) quantization_config = kwargs.pop("quantization_config", None) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) variant = kwargs.pop("variant", None) adapter_kwargs = kwargs.pop("adapter_kwargs", {}) adapter_name = kwargs.pop("adapter_name", "default") use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) generation_config = kwargs.pop("generation_config", None) gguf_file = kwargs.pop("gguf_file", None) # Cache path to the GGUF file gguf_path = None tp_plan = kwargs.pop("tp_plan", None) if tp_plan is not None and tp_plan != "auto": # TODO: we can relax this check when we support taking tp_plan from a json file, for example. raise ValueError(f"tp_plan supports 'auto' only for now but got {tp_plan}.") if is_fsdp_enabled(): low_cpu_mem_usage = True if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None and adapter_kwargs is not None and "token" not in adapter_kwargs: adapter_kwargs["token"] = token if use_safetensors is None and not is_safetensors_available(): use_safetensors = False if trust_remote_code is True: logger.warning( "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." 
) if gguf_file is not None and not is_accelerate_available(): raise ValueError("accelerate is required when loading a GGUF file: `pip install accelerate`.") if commit_hash is None: if not isinstance(config, PretrainedConfig): # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible resolved_config_file = cached_file( pretrained_model_name_or_path, CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) else: commit_hash = getattr(config, "_commit_hash", None) if is_peft_available(): _adapter_model_path = adapter_kwargs.pop("_adapter_model_path", None) if _adapter_model_path is None: _adapter_model_path = find_adapter_config_file( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, _commit_hash=commit_hash, **adapter_kwargs, ) if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): with open(_adapter_model_path, "r", encoding="utf-8") as f: _adapter_model_path = pretrained_model_name_or_path pretrained_model_name_or_path = json.load(f)["base_model_name_or_path"] else: _adapter_model_path = None # change device_map into a map if we passed an int, a str or a torch.device if isinstance(device_map, torch.device): device_map = {"": device_map} elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: try: device_map = {"": torch.device(device_map)} except RuntimeError: raise ValueError( "When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or " f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}." ) elif isinstance(device_map, int): if device_map < 0: raise ValueError( "You can't pass device_map as a negative int. If you want to put the model on the CPU, pass device_map = 'cpu'." ) else: device_map = {"": device_map} if device_map is not None: if low_cpu_mem_usage is None: low_cpu_mem_usage = True elif not low_cpu_mem_usage: raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`") if low_cpu_mem_usage: if is_deepspeed_zero3_enabled(): raise ValueError( "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`." ) elif not is_accelerate_available(): raise ImportError( f"Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) # handling bnb config from kwargs, remove after `load_in_{4/8}bit` deprecation. if load_in_4bit or load_in_8bit: if quantization_config is not None: raise ValueError( "You can't pass `load_in_4bit` or `load_in_8bit` as a kwarg when passing the " "`quantization_config` argument at the same time." 
) # preparing BitsAndBytesConfig from kwargs config_dict = {k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters} config_dict = {**config_dict, "load_in_4bit": load_in_4bit, "load_in_8bit": load_in_8bit} quantization_config, kwargs = BitsAndBytesConfig.from_dict( config_dict=config_dict, return_unused_kwargs=True, **kwargs ) logger.warning( "The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in future versions. " "Please pass a `BitsAndBytesConfig` object in the `quantization_config` argument instead." ) from_pt = not (from_tf | from_flax) user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) else: # In case one passes a config to `from_pretrained` + "attn_implementation" # override the `_attn_implementation` attribute to `attn_implementation` of the kwargs # Please see: https://github.com/huggingface/transformers/issues/28038 # Overwrite `config._attn_implementation` by the one from the kwargs --> in auto-factory # we pop attn_implementation from the kwargs but this handles the case where users # pass the config manually to `from_pretrained`. config = copy.deepcopy(config) kwarg_attn_imp = kwargs.pop("attn_implementation", None) if kwarg_attn_imp is not None: config._attn_implementation = kwarg_attn_imp model_kwargs = kwargs pre_quantized = getattr(config, "quantization_config", None) is not None if pre_quantized or quantization_config is not None: if pre_quantized: config.quantization_config = AutoHfQuantizer.merge_quantization_configs( config.quantization_config, quantization_config ) else: config.quantization_config = quantization_config hf_quantizer = AutoHfQuantizer.from_config( config.quantization_config, pre_quantized=pre_quantized, ) else: hf_quantizer = None if hf_quantizer is not None: hf_quantizer.validate_environment( torch_dtype=torch_dtype, from_tf=from_tf, from_flax=from_flax, device_map=device_map, weights_only=weights_only, ) torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype) device_map = hf_quantizer.update_device_map(device_map) # Record the quantization method so that popular methods are tracked and kept supported. Can be disabled with `disable_telemetry` user_agent["quant"] = hf_quantizer.quantization_config.quant_method.value # Force-set to `True` for more mem efficiency if low_cpu_mem_usage is None: low_cpu_mem_usage = True logger.warning("`low_cpu_mem_usage` was None, now defaulting to True since the model is quantized.") is_quantized = hf_quantizer is not None # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. 
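# The "index of the files" is a small JSON sidecar (e.g. model.safetensors.index.json)
# that maps every parameter name to the shard holding it, roughly of this shape
# (values below are illustrative, not from a real checkpoint):
# {
#   "metadata": {"total_size": 13476839424},
#   "weight_map": {
#     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
#     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors"
#   }
# }
# A loader can then fetch only the shards it needs and resolve each tensor via weight_map.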
is_sharded = False sharded_metadata = None # Load model loading_info = None # Keep in fp32 modules keep_in_fp32_modules = None use_keep_in_fp32_modules = False if gguf_file is not None and hf_quantizer is not None: raise ValueError( "You cannot combine Quantization and loading a model from a GGUF file, try again by making sure you did not passed a `quantization_config` or that you did not load a quantized model from the Hub." ) if pretrained_model_name_or_path is not None and gguf_file is None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if from_tf and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") ): # Load from a TF 1.0 checkpoint in priority if from_tf archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) ): # Load from a TF 2.0 checkpoint in priority if from_tf archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) elif from_flax and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) ): # Load from a Flax checkpoint in priority if from_flax archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) ): # Load from a safetensors checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) ) elif use_safetensors is not False and os.path.isfile( os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) ) ): # Load from a sharded safetensors checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) ) is_sharded = True elif not use_safetensors and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) ): # Load from a PyTorch checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant) ) elif not use_safetensors and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) ): # Load from a sharded PyTorch checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) ) is_sharded = True # At this stage we don't have a weight file so we will raise an error. elif not use_safetensors and ( os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)) ): raise EnvironmentError( f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use" " `from_tf=True` to load this model from those weights." ) elif not use_safetensors and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) ): raise EnvironmentError( f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" f" {pretrained_model_name_or_path} but there is a file for Flax weights. 
Use `from_flax=True`" " to load this model from those weights." ) elif use_safetensors: raise EnvironmentError( f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory" f" {pretrained_model_name_or_path}." ) else: raise EnvironmentError( f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)}," f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory" f" {pretrained_model_name_or_path}." ) elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")): if not from_tf: raise ValueError( f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " "from_tf to True to load from this checkpoint." ) archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename if from_tf: filename = TF2_WEIGHTS_NAME elif from_flax: filename = FLAX_WEIGHTS_NAME elif use_safetensors is not False: filename = _add_variant(SAFE_WEIGHTS_NAME, variant) else: filename = _add_variant(WEIGHTS_NAME, variant) try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs, ) if resolved_archive_file is not None: is_sharded = True elif use_safetensors: if revision == "main": resolved_archive_file, revision, is_sharded = auto_conversion( pretrained_model_name_or_path, **cached_file_kwargs ) cached_file_kwargs["revision"] = revision if resolved_archive_file is None: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} " "and thus cannot be loaded with `safetensors`. Please make sure that the model has " "been saved with `safe_serialization=True` or do not set `use_safetensors=True`." ) else: # This repo has no safetensors file of any kind, we switch to PyTorch. filename = _add_variant(WEIGHTS_NAME, variant) resolved_archive_file = cached_file( pretrained_model_name_or_path, filename, **cached_file_kwargs ) if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
resolved_archive_file = cached_file( pretrained_model_name_or_path, _add_variant(WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs, ) if resolved_archive_file is not None: is_sharded = True if not local_files_only and not is_offline_mode(): if resolved_archive_file is not None: if filename in [WEIGHTS_NAME, WEIGHTS_INDEX_NAME]: # If the PyTorch file was found, check if there is a safetensors file on the repository # If there is no safetensors file on the repositories, start an auto conversion safe_weights_name = SAFE_WEIGHTS_INDEX_NAME if is_sharded else SAFE_WEIGHTS_NAME has_file_kwargs = { "revision": revision, "proxies": proxies, "token": token, "cache_dir": cache_dir, "local_files_only": local_files_only, } cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "resume_download": resume_download, "local_files_only": local_files_only, "user_agent": user_agent, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, **has_file_kwargs, } if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs): Thread( target=auto_conversion, args=(pretrained_model_name_or_path,), kwargs={"ignore_errors_during_conversion": True, **cached_file_kwargs}, name="Thread-auto_conversion", ).start() else: # Otherwise, no PyTorch file was found, maybe there is a TF or Flax model file. # We try those to give a helpful error message. has_file_kwargs = { "revision": revision, "proxies": proxies, "token": token, "cache_dir": cache_dir, "local_files_only": local_files_only, } if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights." " Use `from_tf=True` to load this model from those weights." ) elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use" " `from_flax=True` to load this model from those weights." ) elif variant is not None and has_file( pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs ): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant" f" {variant}. Use `variant=None` to load this model from those weights." ) else: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)}," f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}." ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception as e: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. 
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}," f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}." ) from e if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") elif gguf_file: from .modeling_gguf_pytorch_utils import load_gguf_checkpoint # Case 1: the GGUF file is present locally if os.path.isfile(gguf_file): gguf_path = gguf_file # Case 2: The GGUF path is a location on the Hub # Load from URL or cache if already cached else: cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } gguf_path = cached_file(pretrained_model_name_or_path, gguf_file, **cached_file_kwargs) # we need a dummy model to help rename state_dict with torch.device("meta"): dummy_model = cls(config) state_dict = load_gguf_checkpoint(gguf_path, return_tensors=True, model_to_load=dummy_model)["tensors"] resolved_archive_file = None is_sharded = False else: resolved_archive_file = None # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash, ) if ( is_safetensors_available() and isinstance(resolved_archive_file, str) and resolved_archive_file.endswith(".safetensors") ): with safe_open(resolved_archive_file, framework="pt") as f: metadata = f.metadata() if metadata is None: # Assume it's a pytorch checkpoint (introduced for timm checkpoints) pass elif metadata.get("format") == "pt": pass elif metadata.get("format") == "tf": from_tf = True logger.info("A TensorFlow safetensors file is being loaded in a PyTorch model.") elif metadata.get("format") == "flax": from_flax = True logger.info("A Flax safetensors file is being loaded in a PyTorch model.") elif metadata.get("format") == "mlx": # This is a mlx file, we assume weights are compatible with pt pass else: raise ValueError( f"Incompatible safetensors file. File metadata is not ['pt', 'tf', 'flax', 'mlx'] but {metadata.get('format')}" ) from_pt = not (from_tf | from_flax) # load pt weights early so that we know which dtype to init the model under if from_pt: if not is_sharded and state_dict is None: # Time to load the checkpoint state_dict = load_state_dict(resolved_archive_file, weights_only=weights_only) # set dtype to instantiate the model under: # 1. If torch_dtype is not None, we use that dtype # 2. 
If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first # weights entry that is of a floating type - we assume all floating dtype weights are of the same dtype # we also may have config.torch_dtype available, but we won't rely on it till v5 dtype_orig = None if torch_dtype is not None: if isinstance(torch_dtype, str): if torch_dtype == "auto": if hasattr(config, "torch_dtype") and config.torch_dtype is not None: torch_dtype = config.torch_dtype logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object") else: if is_sharded and "dtype" in sharded_metadata: torch_dtype = sharded_metadata["dtype"] elif not is_sharded: torch_dtype = get_state_dict_dtype(state_dict) else: one_state_dict = load_state_dict(resolved_archive_file[0], weights_only=weights_only) torch_dtype = get_state_dict_dtype(one_state_dict) del one_state_dict # free CPU memory logger.info( "Since the `torch_dtype` attribute can't be found in model's config object, " f"will use torch_dtype={torch_dtype} as derived from model's weights" ) elif hasattr(torch, torch_dtype): torch_dtype = getattr(torch, torch_dtype) for sub_config_key in config.sub_configs.keys(): sub_config = getattr(config, sub_config_key) sub_config.torch_dtype = torch_dtype elif isinstance(torch_dtype, torch.dtype): pass elif isinstance(torch_dtype, dict): for key, curr_dtype in torch_dtype.items(): if hasattr(config, key): value = getattr(config, key) value.torch_dtype = curr_dtype # main torch dtype for modules that aren't part of any sub-config torch_dtype = torch_dtype.get("") config.torch_dtype = torch_dtype if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype): torch_dtype = getattr(torch, torch_dtype) elif torch_dtype is None: torch_dtype = torch.float32 else: raise ValueError( f"`torch_dtype` can be one of: `torch.dtype`, `'auto'`, a string of a valid `torch.dtype` or a `dict` with valid `torch_dtype` " f"for each sub-config in composite configs, but received {torch_dtype}" ) dtype_orig = cls._set_default_torch_dtype(torch_dtype) else: # set fp32 as the default dtype for BC default_dtype = str(torch.get_default_dtype()).split(".")[-1] config.torch_dtype = default_dtype for key in config.sub_configs.keys(): value = getattr(config, key) value.torch_dtype = default_dtype # Check if `_keep_in_fp32_modules` is not None use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and ( (torch_dtype == torch.float16) or hasattr(hf_quantizer, "use_keep_in_fp32_modules") ) if is_sharded: loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"] else: loaded_state_dict_keys = list(state_dict.keys()) if ( gguf_path is None and (low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available())) and pretrained_model_name_or_path is not None ): # In case some weights need to be kept in float32 and accelerate is not installed, # we later on want to take the path where state_dict is not None, that is the one # that does not require accelerate. state_dict = None config.name_or_path = pretrained_model_name_or_path # Instantiate model. 
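# With `low_cpu_mem_usage=True`, instantiation below happens under accelerate's
# `init_empty_weights`, so parameters are allocated on the `meta` device and consume
# no RAM until real weights are assigned. A minimal sketch (illustrative, assuming
# accelerate is installed):
#   import torch
#   from accelerate import init_empty_weights
#   with init_empty_weights():
#       layer = torch.nn.Linear(4096, 4096)
#   layer.weight.device  # device(type='meta'), no 64 MiB allocation happened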
init_contexts = [no_init_weights(_enable=_fast_init)] tp_device = None if is_deepspeed_zero3_enabled() and not is_quantized and not _is_ds_init_called: import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") init_contexts = [ deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state(), ] + init_contexts elif low_cpu_mem_usage: if not is_accelerate_available(): raise ImportError( f"Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) init_contexts.append(init_empty_weights()) elif tp_plan is not None: if not torch.distributed.is_initialized(): raise ValueError("Tensor Parallel requires torch.distributed to be initialized first.") # Detect the accelerator on the machine. If no accelerator is available, it returns CPU. device_type = torch._C._get_accelerator().type device_module = torch.get_device_module(device_type) # Get device with index assuming equal number of devices per host tp_device = torch.device(device_type, torch.distributed.get_rank() % device_module.device_count()) init_contexts.append(tp_device) if is_deepspeed_zero3_enabled() and is_quantized: init_contexts.append(set_quantized_state()) config = copy.deepcopy(config) # We do not want to modify the config inplace in from_pretrained. if not getattr(config, "_attn_implementation_autoset", False): config = cls._autoset_attn_implementation( config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map ) with ContextManagers(init_contexts): # Let's make sure we don't run the init function of buffer modules model = cls(config, *model_args, **model_kwargs) # make sure we use the model's config since the __init__ call might have copied it config = model.config # Check first if we are `from_pt` if use_keep_in_fp32_modules: if is_accelerate_available() and not is_deepspeed_zero3_enabled(): low_cpu_mem_usage = True keep_in_fp32_modules = model._keep_in_fp32_modules else: keep_in_fp32_modules = [] if hf_quantizer is not None: hf_quantizer.preprocess_model( model=model, device_map=device_map, keep_in_fp32_modules=keep_in_fp32_modules ) # We store the original dtype for quantized models as we cannot easily retrieve it # once the weights have been quantized # Note that once you have loaded a quantized model, you can't change its dtype so this will # remain a single source of truth config._pre_quantization_dtype = torch_dtype if isinstance(device_map, str): special_dtypes = {} if hf_quantizer is not None: special_dtypes.update(hf_quantizer.get_special_dtypes_update(model, torch_dtype)) special_dtypes.update( { name: torch.float32 for name, _ in model.named_parameters() if any(m in name for m in keep_in_fp32_modules) } ) target_dtype = torch_dtype if hf_quantizer is not None: target_dtype = hf_quantizer.adjust_target_dtype(target_dtype) no_split_modules = model._get_no_split_modules(device_map) if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." 
) device_map_kwargs = {"no_split_module_classes": no_split_modules} if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters: device_map_kwargs["special_dtypes"] = special_dtypes elif len(special_dtypes) > 0: logger.warning( "This model has some weights that should be kept in higher precision, you need to upgrade " "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)." ) if device_map != "sequential": max_memory = get_balanced_memory( model, dtype=target_dtype, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **device_map_kwargs, ) else: max_memory = get_max_memory(max_memory) if hf_quantizer is not None: max_memory = hf_quantizer.adjust_max_memory(max_memory) device_map_kwargs["max_memory"] = max_memory # Make sure tied weights are tied before creating the device map. model.tie_weights() device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) if hf_quantizer is not None: hf_quantizer.validate_environment(device_map=device_map) elif device_map is not None: model.tie_weights() tied_params = find_tied_parameters(model) # check if we don't have tied param in different devices check_tied_parameters_on_same_device(tied_params, device_map) if from_tf: if resolved_archive_file.endswith(".index"): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model model, loading_info = load_tf2_checkpoint_in_pytorch_model( model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True ) except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed." " Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation" " instructions." ) raise elif from_flax: try: from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) except ImportError: logger.error( "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for" " installation instructions." ) raise elif from_pt: # restore default dtype if dtype_orig is not None: torch.set_default_dtype(dtype_orig) load_contexts = [] # Make sure we load onto targeted device if tp_device is not None: load_contexts.append(tp_device) with ContextManagers(load_contexts): ( model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs, ) = cls._load_pretrained_model( model, state_dict, loaded_state_dict_keys, # XXX: rename? 
resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, sharded_metadata=sharded_metadata, _fast_init=_fast_init, low_cpu_mem_usage=low_cpu_mem_usage, device_map=device_map, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, hf_quantizer=hf_quantizer, keep_in_fp32_modules=keep_in_fp32_modules, gguf_path=gguf_path, weights_only=weights_only, ) # make sure token embedding weights are still tied if needed model.tie_weights() # Set model in evaluation mode to deactivate DropOut modules by default model.eval() # If it is a model with generation capabilities, attempt to load the generation config if model.can_generate() and generation_config is not None: logger.info("The user-defined `generation_config` will be used to override the default generation config.") model.generation_config = model.generation_config.from_dict(generation_config.to_dict()) elif model.can_generate() and pretrained_model_name_or_path is not None: try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) except OSError: logger.info( "Generation config file not found, using a generation config created from the model config." ) pass # Dispatch model with hooks on all devices if necessary if device_map is not None: device_map_kwargs = { "device_map": device_map, "offload_dir": offload_folder, "offload_index": offload_index, "offload_buffers": offload_buffers, } if "skip_keys" in inspect.signature(dispatch_model).parameters: device_map_kwargs["skip_keys"] = model._skip_keys_device_placement # For HQQ method we force-set the hooks for single GPU envs if ( "force_hooks" in inspect.signature(dispatch_model).parameters and hf_quantizer is not None and hf_quantizer.quantization_config.quant_method == QuantizationMethod.HQQ ): device_map_kwargs["force_hooks"] = True if ( hf_quantizer is not None and hf_quantizer.quantization_config.quant_method == QuantizationMethod.FBGEMM_FP8 and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): device_map_kwargs["offload_buffers"] = True if not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): dispatch_model(model, **device_map_kwargs) if hf_quantizer is not None: hf_quantizer.postprocess_model(model, config=config) model.hf_quantizer = hf_quantizer if _adapter_model_path is not None: model.load_adapter( _adapter_model_path, adapter_name=adapter_name, token=token, adapter_kwargs=adapter_kwargs, ) if output_loading_info: if loading_info is None: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, "error_msgs": error_msgs, } return model, loading_info if tp_plan is not None: assert tp_device is not None, "tp_device not set!" 
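# The tensor-parallel path below assumes one process per device, launched together so
# that `torch.distributed` is initialized before `from_pretrained` runs, e.g.
# (illustrative sketch; `model_id` is a placeholder):
#   # torchrun --nproc-per-node 2 run_tp.py, where run_tp.py contains:
#   model = AutoModelForCausalLM.from_pretrained(model_id, tp_plan="auto")
# Each rank then owns a slice of every sharded weight in the 1-D device mesh built below.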
if not model.supports_tp_plan: raise NotImplementedError("This model does not have a tensor parallel plan.") # Assuming sharding the model onto the world world_size = torch.distributed.get_world_size() device_mesh = torch.distributed.init_device_mesh(tp_device.type, (world_size,)) # Apply Tensor Parallelism model.tensor_parallel(device_mesh) return model @staticmethod def _fix_state_dict_key_on_load(key) -> Tuple[str, bool]: """Replace legacy parameter names with their modern equivalents. E.g. beta -> bias, gamma -> weight.""" # Rename LayerNorm beta & gamma params for some early models ported from Tensorflow (e.g. Bert) # This rename is logged. if key.endswith("LayerNorm.beta"): return key.replace("LayerNorm.beta", "LayerNorm.bias"), True if key.endswith("LayerNorm.gamma"): return key.replace("LayerNorm.gamma", "LayerNorm.weight"), True # Rename weight norm parametrizations to match changes across torch versions. # Impacts a number of speech/wav2vec models. e.g. Hubert, Wav2Vec2, and others. # This rename is not logged. if hasattr(nn.utils.parametrizations, "weight_norm"): if key.endswith("weight_g"): return key.replace("weight_g", "parametrizations.weight.original0"), True if key.endswith("weight_v"): return key.replace("weight_v", "parametrizations.weight.original1"), True else: if key.endswith("parametrizations.weight.original0"): return key.replace("parametrizations.weight.original0", "weight_g"), True if key.endswith("parametrizations.weight.original1"): return key.replace("parametrizations.weight.original1", "weight_v"), True return key, False @classmethod def _fix_state_dict_keys_on_load(cls, state_dict): """Fixes state dict keys by replacing legacy parameter names with their modern equivalents. Logs if any parameters have been renamed. """ renamed_keys = {} state_dict_keys = list(state_dict.keys()) for key in state_dict_keys: new_key, has_changed = cls._fix_state_dict_key_on_load(key) if has_changed: state_dict[new_key] = state_dict.pop(key) # track gamma/beta rename for logging if key.endswith("LayerNorm.gamma"): renamed_keys["LayerNorm.gamma"] = (key, new_key) elif key.endswith("LayerNorm.beta"): renamed_keys["LayerNorm.beta"] = (key, new_key) if renamed_keys: warning_msg = f"A pretrained model of type `{cls.__name__}` " warning_msg += "contains parameters that have been renamed internally (a few are listed below but more are present in the model):\n" for old_key, new_key in renamed_keys.values(): warning_msg += f"* `{old_key}` -> `{new_key}`\n" warning_msg += "If you are using a model from the Hub, consider submitting a PR to adjust these weights and help future users." logger.info_once(warning_msg) return state_dict @staticmethod def _fix_state_dict_key_on_save(key) -> Tuple[str, bool]: """ Similar to `_fix_state_dict_key_on_load` allows to define hook for state dict key renaming on model save. Do nothing by default, but can be overridden in particular models. """ return key, False def _fix_state_dict_keys_on_save(self, state_dict): """ Similar to `_fix_state_dict_keys_on_load` allows to define hook for state dict key renaming on model save. Apply `_fix_state_dict_key_on_save` to all keys in `state_dict`. 
""" return {self._fix_state_dict_key_on_save(key)[0]: value for key, value in state_dict.items()} @classmethod def _load_pretrained_model( cls, model, state_dict, loaded_keys, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=False, sharded_metadata=None, _fast_init=True, low_cpu_mem_usage=False, device_map=None, offload_folder=None, offload_state_dict=None, dtype=None, hf_quantizer=None, keep_in_fp32_modules=None, gguf_path=None, weights_only=True, ): is_safetensors = False is_quantized = hf_quantizer is not None state_dict_folder = None state_dict_index = None if device_map is not None and "disk" in device_map.values(): archive_file = ( resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file ) is_safetensors = archive_file.endswith(".safetensors") if offload_folder is None and not is_safetensors: raise ValueError( "The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`" " for them. Alternatively, make sure you have `safetensors` installed if the model you are using" " offers the weights in this format." ) if offload_folder is not None: os.makedirs(offload_folder, exist_ok=True) if offload_state_dict is None: offload_state_dict = True is_sharded_safetensors = is_safetensors and sharded_metadata is not None # tie the model weights before retrieving the state_dict model.tie_weights() # Retrieve missing & unexpected_keys model_state_dict = model.state_dict() expected_keys = list(model_state_dict.keys()) prefix = model.base_model_prefix if hf_quantizer is not None: expected_keys = hf_quantizer.update_expected_keys(model, expected_keys, loaded_keys) original_loaded_keys = loaded_keys loaded_keys = [cls._fix_state_dict_key_on_load(key)[0] for key in loaded_keys] if len(prefix) > 0: has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) else: has_prefix_module = False expects_prefix_module = False # key re-naming operations are never done on the keys # that are loaded, but always on the keys of the newly initialized model remove_prefix_from_model = not has_prefix_module and expects_prefix_module add_prefix_to_model = has_prefix_module and not expects_prefix_module if remove_prefix_from_model: _prefix = f"{prefix}." 
expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)] expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys] elif add_prefix_to_model: expected_keys = [".".join([prefix, s]) for s in expected_keys] missing_keys = sorted(set(expected_keys) - set(loaded_keys)) unexpected_keys = set(loaded_keys) - set(expected_keys) # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model # buffers model_buffers = {n for n, _ in model.named_buffers()} if remove_prefix_from_model: model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers} elif add_prefix_to_model: model_buffers = {".".join([prefix, key]) for key in model_buffers} unexpected_keys = sorted(unexpected_keys - model_buffers) # Clean up buffer for `inv-freq` because RoPE embedding moved under base model (https://github.com/huggingface/transformers/pull/34858) has_inv_freq_buffers = any(buffer.endswith("rotary_emb.inv_freq") for buffer in model_buffers) if has_inv_freq_buffers: unexpected_keys = {k for k in unexpected_keys if "rotary_emb.inv_freq" not in k} model.tie_weights() if device_map is None and not is_fsdp_enabled() and not is_deepspeed_zero3_enabled(): ptrs = collections.defaultdict(list) for name, tensor in model.state_dict().items(): id_tensor = id_tensor_storage(tensor) ptrs[id_tensor].append(name) # These are all the pointers of shared tensors. tied_params = [names for _, names in ptrs.items() if len(names) > 1] else: # id function doesn't work for meta tensor so we need this function tied_params = find_tied_parameters(model) for group in tied_params: if remove_prefix_from_model: group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group] elif add_prefix_to_model: group = [".".join([prefix, key]) for key in group] missing_in_group = [k for k in missing_keys if k in group] if len(missing_in_group) > 0 and len(missing_in_group) < len(group): missing_keys = [k for k in missing_keys if k not in missing_in_group] # Some models may have keys that are not in the state by design, removing them before needlessly warning # the user. if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if hf_quantizer is not None: missing_keys = hf_quantizer.update_missing_keys(model, missing_keys, prefix) # retrieve weights on meta device and put them back on CPU. 
# This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step if low_cpu_mem_usage: for key in missing_keys: if key in list(model_state_dict.keys()): key = key elif f"{prefix}.{key}" in list(model_state_dict.keys()): key = f"{prefix}.{key}" elif key.startswith(prefix) and ".".join(key.split(".")[1:]) in list(model_state_dict.keys()): key = ".".join(key.split(".")[1:]) param = model_state_dict[key] # upcast in fp32 if any target_dtype = dtype if ( keep_in_fp32_modules is not None and dtype == torch.float16 and any( module_to_keep_in_fp32 in key.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules ) ): target_dtype = torch.float32 if param.device == torch.device("meta"): value = torch.empty(*param.size(), dtype=target_dtype) if ( not is_quantized or (getattr(hf_quantizer, "requires_parameters_quantization", False)) or not hf_quantizer.check_quantized_param( model, param_value=value, param_name=key, state_dict={} ) ): set_module_tensor_to_device(model, key, "cpu", value) else: hf_quantizer.create_quantized_param(model, value, key, "cpu", state_dict, unexpected_keys) # retrieve uninitialized modules and initialize before maybe overriding that with the pretrained weights. if _fast_init: if not ignore_mismatched_sizes: if remove_prefix_from_model: _loaded_keys = [f"{prefix}.{k}" for k in loaded_keys] elif add_prefix_to_model: _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys] else: _loaded_keys = loaded_keys not_initialized_submodules = set_initialized_submodules(model, _loaded_keys) # If we're about to tie the output embeds to the input embeds we don't need to init them if ( hasattr(model.config.get_text_config(decoder=True), "tie_word_embeddings") and model.config.get_text_config(decoder=True).tie_word_embeddings ): output_embeddings = model.get_output_embeddings() if output_embeddings is not None: # Still need to initialize if there is a bias term since biases are not tied. if not hasattr(output_embeddings, "bias") or output_embeddings.bias is None: output_embeddings._is_hf_initialized = True else: not_initialized_submodules = dict(model.named_modules()) # This will only initialize submodules that are not marked as initialized by the line above. if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed not_initialized_parameters = list( set( itertools.chain.from_iterable( submodule.parameters(recurse=False) for submodule in not_initialized_submodules.values() ) ) ) with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0): model.apply(model._initialize_weights) else: model.apply(model._initialize_weights) # Set some modules to fp32 if any if keep_in_fp32_modules is not None: for name, param in model.named_parameters(): if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules): # param = param.to(torch.float32) does not work here as only in the local scope. param.data = param.data.to(torch.float32) # Make sure we are able to load base models as well as derived models (with heads) start_prefix = "" model_to_load = model if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module: start_prefix = cls.base_model_prefix + "." 
if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module: model_to_load = getattr(model, cls.base_model_prefix) base_model_expected_keys = list(model_to_load.state_dict().keys()) if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys): raise ValueError( "The state dictionary of the model you are trying to load is corrupted. Are you sure it was " "properly saved?" ) if device_map is not None: device_map = {k.replace(f"{cls.base_model_prefix}.", ""): v for k, v in device_map.items()} def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, original_loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes, ): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key, model_key in zip(original_loaded_keys, loaded_keys): # If the checkpoint is sharded, we may not have the key here. if checkpoint_key not in state_dict: continue if remove_prefix_from_model: # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it. model_key = f"{prefix}.{model_key}" elif add_prefix_to_model: # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it. model_key = ".".join(model_key.split(".")[1:]) if ( model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape ): if ( state_dict[checkpoint_key].shape[-1] == 1 and state_dict[checkpoint_key].numel() * 2 == model_state_dict[model_key].numel() ): # This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size differences. # Without matching with module type or paramter type it seems like a practical way to detect valid 4bit weights. pass else: mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys if resolved_archive_file is not None: folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1]) else: folder = None if device_map is not None and is_safetensors: param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32" if sharded_metadata is None: archive_file = ( resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file ) weight_map = {p: archive_file for p in original_loaded_keys} else: weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()} offload_index = { p[len(start_prefix) :]: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype} for p, f in weight_map.items() if p.startswith(start_prefix) and param_device_map[p[len(start_prefix) :]] == "disk" } else: offload_index = None if state_dict is not None: # Whole checkpoint mismatched_keys = _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, original_loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes, ) # For GGUF models `state_dict` is never set to None as the state dict is always small if gguf_path or low_cpu_mem_usage: fixed_state_dict = cls._fix_state_dict_keys_on_load(state_dict) error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( model_to_load, fixed_state_dict, start_prefix, expected_keys, device_map=device_map, offload_folder=offload_folder, offload_index=offload_index, state_dict_folder=state_dict_folder, state_dict_index=state_dict_index, 
dtype=dtype, hf_quantizer=hf_quantizer, is_safetensors=is_safetensors, keep_in_fp32_modules=keep_in_fp32_modules, unexpected_keys=unexpected_keys, ) else: # Sharded checkpoint or whole but low_cpu_mem_usage==True assign_to_params_buffers = check_support_param_buffer_assignment( model_to_load, state_dict, start_prefix ) fixed_state_dict = cls._fix_state_dict_keys_on_load(state_dict) error_msgs = _load_state_dict_into_model( model_to_load, fixed_state_dict, start_prefix, assign_to_params_buffers ) else: # This should always be a list, but just to be sure. if not isinstance(resolved_archive_file, list): resolved_archive_file = [resolved_archive_file] error_msgs = [] mismatched_keys = [] if not is_safetensors: offload_index = {} if device_map is not None and "disk" in device_map.values() else None if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} else: state_dict_folder = None state_dict_index = None if is_sharded_safetensors: disk_only_shard_files = get_disk_only_shard_files( device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix ) disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files] else: disk_only_shard_files = [] if len(resolved_archive_file) > 1: resolved_archive_file = logging.tqdm(resolved_archive_file, desc="Loading checkpoint shards") assign_to_params_buffers = None for shard_file in resolved_archive_file: # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload. if shard_file in disk_only_shard_files: continue map_location = None if ( device_map is not None and hf_quantizer is not None and hf_quantizer.quantization_config.quant_method == QuantizationMethod.TORCHAO and hf_quantizer.quantization_config.quant_type == "int4_weight_only" ): map_location = torch.device([d for d in device_map.values() if d not in ["cpu", "disk"]][0]) state_dict = load_state_dict( shard_file, is_quantized=is_quantized, map_location=map_location, weights_only=weights_only ) # Mismatched keys contain tuples key/shape1/shape2 of weights in the checkpoint that have a shape not # matching the weights in the model.
mismatched_keys += _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, original_loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes, ) if low_cpu_mem_usage: if is_fsdp_enabled() and not is_local_dist_rank_0() and not is_quantized: for key, param in model_to_load.state_dict().items(): if param.device == torch.device("meta"): set_module_tensor_to_device( model_to_load, key, "cpu", torch.empty(*param.size(), dtype=dtype) ) else: fixed_state_dict = cls._fix_state_dict_keys_on_load(state_dict) new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model( model_to_load, fixed_state_dict, start_prefix, expected_keys, device_map=device_map, offload_folder=offload_folder, offload_index=offload_index, state_dict_folder=state_dict_folder, state_dict_index=state_dict_index, dtype=dtype, hf_quantizer=hf_quantizer, is_safetensors=is_safetensors, keep_in_fp32_modules=keep_in_fp32_modules, unexpected_keys=unexpected_keys, ) error_msgs += new_error_msgs else: # Sharded checkpoint or whole but low_cpu_mem_usage==True if assign_to_params_buffers is None: assign_to_params_buffers = check_support_param_buffer_assignment( model_to_load, state_dict, start_prefix ) fixed_state_dict = cls._fix_state_dict_keys_on_load(state_dict) error_msgs += _load_state_dict_into_model( model_to_load, fixed_state_dict, start_prefix, assign_to_params_buffers ) # force memory release del state_dict gc.collect() if offload_index is not None and len(offload_index) > 0: if model != model_to_load: # We need to add the prefix of the base model prefix = cls.base_model_prefix if not is_safetensors: for weight_name in offload_index: shutil.move( os.path.join(offload_folder, f"{weight_name}.dat"), os.path.join(offload_folder, f"{prefix}.{weight_name}.dat"), ) offload_index = {f"{prefix}.{key}": value for key, value in offload_index.items()} if not is_safetensors: save_offload_index(offload_index, offload_folder) offload_index = None if offload_state_dict: # Load back temporarily offloaded state dict load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) if len(error_msgs) > 0: error_msg = "\n\t".join(error_msgs) if "size mismatch" in error_msg: error_msg += ( "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." ) raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") if len(unexpected_keys) > 0: archs = [] if model.config.architectures is None else model.config.architectures warner = logger.warning if model.__class__.__name__ in archs else logger.info warner( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." 
) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" " to use it for predictions and inference." ) return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): module_keys = {".".join(key.split(".")[:-1]) for key in names} # torch.nn.ParameterList is a special case where two parameter keywords # are appended to the module name, *e.g.* bert.special_embeddings.0 module_keys = module_keys.union( {".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()} ) retrieved_modules = [] # retrieve all modules that has at least one missing weight name for name, module in self.named_modules(): if remove_prefix: _prefix = f"{self.base_model_prefix}." name = name[len(_prefix) :] if name.startswith(_prefix) else name elif add_prefix: name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix if name in module_keys: retrieved_modules.append(module) return retrieved_modules @staticmethod def _load_pretrained_model_low_mem( model, loaded_state_dict_keys, resolved_archive_file, start_prefix="", hf_quantizer=None, pretrained_model_name_or_path=None, weights_only=True, ): """ This is an experimental function that loads the model using ~1.x model size CPU memory Before you call it do: 1. save which state_dict keys are available 2. drop state_dict before model is created, since the latter takes 1x model size memory Here then we continue: 3. switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict 4. load state_dict 2nd time 5. replace the params/buffers from the state_dict Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle deepspeed. To handle bitsandbytes, needs non-empty hf_quantizer argument. """ _move_model_to_meta(model, loaded_state_dict_keys, start_prefix) state_dict = load_state_dict(resolved_archive_file, weights_only=weights_only) expected_keys = loaded_state_dict_keys # plug for missing expected_keys. 
TODO: replace with proper keys fixed_state_dict = model._fix_state_dict_keys_on_load(state_dict) error_msgs = _load_state_dict_into_meta_model( model, fixed_state_dict, start_prefix, expected_keys=expected_keys, hf_quantizer=hf_quantizer, ) return error_msgs @classmethod def register_for_auto_class(cls, auto_class="AutoModel"): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class def to_bettertransformer(self) -> "PreTrainedModel": """ Converts the model to use [PyTorch's native attention implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated to Transformers through [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a subset of all Transformers models are supported. PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). Returns: [`PreTrainedModel`]: The model converted to BetterTransformer. """ if not is_optimum_available(): raise ImportError("The package `optimum` is required to use Better Transformer.") from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse("1.7.0"): raise ImportError( f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." ) from optimum.bettertransformer import BetterTransformer return BetterTransformer.transform(self) def reverse_bettertransformer(self): """ Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is used, for example in order to save the model. Returns: [`PreTrainedModel`]: The model converted back to the original modeling. """ if not is_optimum_available(): raise ImportError("The package `optimum` is required to use Better Transformer.") from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse("1.7.0"): raise ImportError( f"Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found." ) from optimum.bettertransformer import BetterTransformer return BetterTransformer.reverse(self) def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): """ Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. """ # Skip the check during tracing. if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): return if (attention_mask is not None) or (self.config.pad_token_id is None): return # Check only the first and last input IDs to reduce overhead. if self.config.pad_token_id in input_ids[:, [-1, 0]]: warn_string = ( "We strongly recommend passing in an `attention_mask` since your input_ids may be padded. 
See " "https://huggingface.co/docs/transformers/troubleshooting" "#incorrect-output-when-padding-tokens-arent-masked." ) # If the pad token is equal to either BOS, EOS, or SEP, we do not know whether the user should use an # attention_mask or not. In this case, we should still show a warning because this is a rare case. if ( (self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id) or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id) ): warn_string += ( f"\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical " f"to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), " f"or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded." ) logger.warning_once(warn_string) @property def supports_tp_plan(self): """ Returns whether the model has a tensor parallelism plan. """ if self._tp_plan is not None: return True # Check if base model has a TP plan if getattr(self.base_model, "_tp_plan", None) is not None: return True return False def tensor_parallel(self, device_mesh): """ Tensor parallelize the model across the given device mesh. Args: device_mesh (`torch.distributed.DeviceMesh`): The device mesh to use for tensor parallelism. """ if not is_torch_greater_or_equal("2.5"): raise EnvironmentError("tensor parallel is only supported for `torch>=2.5`.") # Tensor parallelize a nn.Module based on the `_tp_plan` attribute of the module. # No op if `_tp_plan` attribute does not exist under the module. # This is a helper function to be used with `model.apply` to recursively # parallelize a model. def tplize(mod: torch.nn.Module) -> None: tp_plan = getattr(mod, "_tp_plan", None) if tp_plan is None: return logger.debug(f"Applying tensor parallel to {mod.__class__.__name__}: {tp_plan}") # In model configs, we use a neutral type (string) to specify # parallel styles, here we translate them into torch TP types. # Using tree_map because `tp_plan` is a dict. tp_plan = torch.utils._pytree.tree_map( translate_to_torch_parallel_style, tp_plan, ) # Apply TP to current module. torch.distributed.tensor.parallel.parallelize_module( mod, device_mesh=device_mesh, parallelize_plan=tp_plan, ) # `apply` is a native method of `nn.Module` that recursively applies a # function to every submodule. self.apply(tplize) @property def loss_function(self): loss_type = getattr(self, "loss_type", None) if loss_type is None or loss_type not in LOSS_MAPPING: logger.warning_once( f"`loss_type={loss_type}` was set in the config but it is unrecognised." f"Using the default loss: `ForCausalLMLoss`." ) loss_type = "ForCausalLM" return LOSS_MAPPING[loss_type] def get_compiled_call(self, compile_config: CompileConfig): """Return a `torch.compile`'d version of `self.__call__`. 
This is useful to dynamically choose between non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding (where we want the speed-ups of compiled version with static shapes).""" # Only reset it if not present or different from previous config default_config = getattr(self.generation_config, "compile_config", CompileConfig()) if ( not hasattr(self, "_compiled_call") or getattr(self, "_last_compile_config", default_config) != compile_config ): self._last_compile_config = compile_config self._compiled_call = torch.compile(self.__call__, **compile_config.to_dict()) return self._compiled_call
class_definition
49,662
259,106
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
230
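The `_fix_state_dict_key_on_load` hook in the class above maps legacy checkpoint parameter names onto their modern equivalents before loading. Below is a minimal standalone sketch of the LayerNorm gamma/beta rename, mirroring the rules above; it is illustrative only, not the library API, and the dictionary keys are made up.

def rename_legacy_key(key):
    # Same rules as `_fix_state_dict_key_on_load` for checkpoints ported from old TensorFlow models.
    if key.endswith("LayerNorm.beta"):
        return key.replace("LayerNorm.beta", "LayerNorm.bias"), True
    if key.endswith("LayerNorm.gamma"):
        return key.replace("LayerNorm.gamma", "LayerNorm.weight"), True
    return key, False

legacy_state_dict = {
    "encoder.layer.0.output.LayerNorm.gamma": 1.0,  # hypothetical keys for illustration
    "encoder.layer.0.output.LayerNorm.beta": 2.0,
    "encoder.layer.0.output.dense.weight": 3.0,
}
fixed = {rename_legacy_key(k)[0]: v for k, v in legacy_state_dict.items()}
print(sorted(fixed))  # keys now end in LayerNorm.bias / LayerNorm.weight / dense.weight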
class PoolerStartLogits(nn.Module): """ Compute SQuAD start logits from sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. Returns: `torch.FloatTensor`: The start logits for SQuAD. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if get_parameter_dtype(self) == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x
class_definition
259,400
260,716
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
231
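To make the masking trick in `PoolerStartLogits.forward` concrete, here is a minimal sketch (shapes and values are made up): positions with `p_mask == 1.0` are pushed to a very large negative logit, so a subsequent softmax assigns them near-zero probability.

import torch

batch_size, seq_len, hidden_size = 2, 6, 8           # made-up shapes
hidden_states = torch.randn(batch_size, seq_len, hidden_size)
dense = torch.nn.Linear(hidden_size, 1)               # same projection as the module above

x = dense(hidden_states).squeeze(-1)                  # (batch_size, seq_len)
p_mask = torch.zeros(batch_size, seq_len)
p_mask[:, :2] = 1.0                                   # e.g. mask query/special tokens
x = x * (1 - p_mask) - 1e30 * p_mask                  # fp32 branch of the module above
probs = torch.softmax(x, dim=-1)
print(probs[:, :2])                                   # ~0 probability on the masked positions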
class PoolerEndLogits(nn.Module): """ Compute SQuAD end logits from sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. <Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The end logits for SQuAD. """ assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if get_parameter_dtype(self) == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x
class_definition
260,719
263,574
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
232
class PoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config ([`PretrainedConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model. """ def __init__(self, config): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): The hidden states of the first tokens for the labeled span. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the first token for the labeled span. cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Position of the CLS token for each sentence in the batch. If `None`, takes the last token. <Tip> One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides `start_states`. </Tip> Returns: `torch.FloatTensor`: The SQuAD 2.0 answer class. """ # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. hsz = hidden_states.shape[-1] assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x
class_definition
263,577
266,361
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
233
class SquadHeadOutput(ModelOutput): """ Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search). cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided): Log probabilities for the `is_impossible` label of the answers. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None
class_definition
266,375
268,500
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
234
class SQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Args: config ([`PretrainedConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use. """ def __init__(self, config): super().__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) def forward( self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, is_impossible: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, return_dict: bool = False, ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`): Final hidden states of the model on the sequence tokens. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Positions of the first token for the labeled span. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Positions of the last token for the labeled span. cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Position of the CLS token for each sentence in the batch. If `None`, takes the last token. is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Whether the question has a possible answer in the paragraph or not. p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. return_dict (`bool`, *optional*, defaults to `False`): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
Returns: """ start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) if not return_dict: return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) else: return SquadHeadOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, )
class_definition
268,503
274,587
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
235
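A hypothetical usage sketch for `SQuADHead` (config attributes and shapes are made up; the import path assumes the `transformers.modeling_utils` module shown in the row above). Passing `start_positions`/`end_positions` takes the training branch and returns only the loss, while omitting them takes the beam-search style inference branch.

import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import SQuADHead

config = PretrainedConfig(hidden_size=32, layer_norm_eps=1e-12, start_n_top=5, end_n_top=5)
head = SQuADHead(config)
hidden_states = torch.randn(2, 10, 32)                # (bsz, seq_len, hidden_size), made-up shapes

# Training branch: ground-truth spans are provided, only the loss is returned.
train_out = head(
    hidden_states,
    start_positions=torch.tensor([1, 3]),
    end_positions=torch.tensor([2, 5]),
    return_dict=True,
)
# Inference branch: top-k start/end candidates are returned instead.
eval_out = head(hidden_states, return_dict=True)
print(train_out.loss, eval_out.start_top_index.shape)  # start_top_index: (bsz, start_n_top)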
class SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: PretrainedConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else Identity() self.first_dropout = Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output
class_definition
274,590
279,614
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
null
236
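A short usage sketch for `SequenceSummary` (config values and shapes are made up; the import assumes the `transformers.modeling_utils` module shown in the row above):

import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import SequenceSummary

config = PretrainedConfig(
    hidden_size=16,
    summary_type="cls_index",
    summary_use_proj=True,
    summary_proj_to_labels=False,
    summary_activation="tanh",
    summary_first_dropout=0.1,
    summary_last_dropout=0.0,
)
summary = SequenceSummary(config)

hidden_states = torch.randn(4, 12, 16)                # (bsz, seq_len, hidden_size)
cls_index = torch.tensor([11, 3, 7, 0])               # summary token position per example
pooled = summary(hidden_states, cls_index=cls_index)
print(pooled.shape)                                   # torch.Size([4, 16])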
class AffineTransformed(TransformedDistribution): def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): self.scale = 1.0 if scale is None else scale self.loc = 0.0 if loc is None else loc super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) @property def mean(self): """ Returns the mean of the distribution. """ return self.base_dist.mean * self.scale + self.loc @property def variance(self): """ Returns the variance of the distribution. """ return self.base_dist.variance * self.scale**2 @property def stddev(self): """ Returns the standard deviation of the distribution. """ return self.variance.sqrt()
class_definition
1,002
1,849
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
237
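A quick sketch of `AffineTransformed` wrapping a base distribution (numbers are made up; the import path assumes the `transformers.time_series_utils` module shown in the row above):

import torch
from torch.distributions import Normal
from transformers.time_series_utils import AffineTransformed

base = Normal(loc=torch.zeros(3), scale=torch.ones(3))
scaled = AffineTransformed(base, loc=torch.tensor(2.0), scale=torch.tensor(5.0))

print(scaled.mean)            # base mean * scale + loc -> tensor([2., 2., 2.])
print(scaled.variance)        # base variance * scale**2 -> tensor([25., 25., 25.])
print(scaled.sample().shape)  # torch.Size([3])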
class ParameterProjection(nn.Module): def __init__( self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs ) -> None: super().__init__(**kwargs) self.args_dim = args_dim self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) self.domain_map = domain_map def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: params_unbounded = [proj(x) for proj in self.proj] return self.domain_map(*params_unbounded)
class_definition
1,852
2,410
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
238
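To illustrate `ParameterProjection`, the sketch below builds one linear head per distribution argument and maps the raw outputs into a valid domain. The dimensions and the softplus-based `domain_map` are made-up choices for illustration:

import torch
import torch.nn.functional as F
from transformers.time_series_utils import ParameterProjection

def domain_map(loc, scale):
    # keep loc unconstrained, force scale to be strictly positive
    return loc.squeeze(-1), F.softplus(scale).squeeze(-1)

proj = ParameterProjection(in_features=24, args_dim={"loc": 1, "scale": 1}, domain_map=domain_map)
features = torch.randn(8, 24)
loc, scale = proj(features)
print(loc.shape, scale.shape, bool((scale > 0).all()))  # torch.Size([8]) torch.Size([8]) True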
class LambdaLayer(nn.Module): def __init__(self, function): super().__init__() self.function = function def forward(self, x, *args): return self.function(x, *args)
class_definition
2,413
2,609
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
239
class DistributionOutput: distribution_class: type in_features: int args_dim: Dict[str, int] def __init__(self, dim: int = 1) -> None: self.dim = dim self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} def _base_distribution(self, distr_args): if self.dim == 1: return self.distribution_class(*distr_args) else: return Independent(self.distribution_class(*distr_args), 1) def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution: distr = self._base_distribution(distr_args) if loc is None and scale is None: return distr else: return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) @property def event_shape(self) -> Tuple: r""" Shape of each individual event contemplated by the distributions that this object constructs. """ return () if self.dim == 1 else (self.dim,) @property def event_dim(self) -> int: r""" Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object constructs. """ return len(self.event_shape) @property def value_in_support(self) -> float: r""" A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By default 0.0. This value will be used when padding data series. """ return 0.0 def get_parameter_projection(self, in_features: int) -> nn.Module: r""" Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. """ return ParameterProjection( in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), ) def domain_map(self, *args: torch.Tensor): r""" Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a distribution of the right event_shape. """ raise NotImplementedError() @staticmethod def squareplus(x: torch.Tensor) -> torch.Tensor: r""" Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: https://twitter.com/jon_barron/status/1387167648669048833 """ return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class_definition
2,612
5,334
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
240
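The `squareplus` helper above is the smooth map onto the positive reals that the concrete output classes below rely on; a quick numeric check (import path as in the row above):

import torch
from transformers.time_series_utils import DistributionOutput

x = torch.tensor([-3.0, 0.0, 3.0])
print(DistributionOutput.squareplus(x))  # all strictly positive, approximately x for large positive x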
class StudentTOutput(DistributionOutput): """ Student-T distribution output class. """ args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} distribution_class: type = StudentT @classmethod def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) df = 2.0 + cls.squareplus(df) return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class_definition
5,337
5,822
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
241
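An end-to-end sketch with `StudentTOutput`: project network features to `(df, loc, scale)`, build the distribution, and score observations. Dimensions are made up for illustration; the import path assumes `transformers.time_series_utils`.

import torch
from transformers.time_series_utils import StudentTOutput

output = StudentTOutput(dim=1)                         # univariate target
projection = output.get_parameter_projection(in_features=24)

features = torch.randn(8, 24)                          # e.g. decoder hidden states
distr_args = projection(features)                      # tuple (df, loc, scale), each of shape (8,)
distr = output.distribution(distr_args)                # Student-T with batch_shape (8,)

targets = torch.randn(8)
print(distr.log_prob(targets).shape)                   # torch.Size([8])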
class NormalOutput(DistributionOutput): """ Normal distribution output class. """ args_dim: Dict[str, int] = {"loc": 1, "scale": 1} distribution_class: type = Normal @classmethod def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) return loc.squeeze(-1), scale.squeeze(-1)
class_definition
5,825
6,222
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
242
class NegativeBinomialOutput(DistributionOutput): """ Negative Binomial distribution output class. """ args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} distribution_class: type = NegativeBinomial @classmethod def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): total_count = cls.squareplus(total_count) return total_count.squeeze(-1), logits.squeeze(-1) def _base_distribution(self, distr_args) -> Distribution: total_count, logits = distr_args if self.dim == 1: return self.distribution_class(total_count=total_count, logits=logits) else: return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) # Overwrites the parent class method. We cannot scale using the affine # transformation since negative binomial should return integers. Instead # we scale the parameters. def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None ) -> Distribution: total_count, logits = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits))
class_definition
6,225
7,520
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/time_series_utils.py
null
243
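Because the Negative Binomial has integer support, `NegativeBinomialOutput.distribution` scales the mean by adding `scale.log()` to the logits rather than applying an affine transform. A small sketch of the effect (values are made up; note that the logits tensor is modified in place, hence a fresh tensor per call):

import torch
from transformers.time_series_utils import NegativeBinomialOutput

output = NegativeBinomialOutput(dim=1)
total_count = torch.full((4,), 5.0)

unscaled = output.distribution((total_count, torch.zeros(4)))
scaled = output.distribution((total_count, torch.zeros(4)), scale=torch.full((4,), 10.0))
print(unscaled.mean, scaled.mean)                      # the scaled mean is 10x the unscaled mean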
class Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. Args: model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. <Tip> [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers models. </Tip> args ([`TrainingArguments`], *optional*): The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. data_collator (`DataCollator`, *optional*): The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will default to [`default_data_collator`] if no `processing_class` is provided, an instance of [`DataCollatorWithPadding`] otherwise if the processing_class is a feature extractor or tokenizer. train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*): The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally sets the seed of the RNGs used. eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`, `datasets.Dataset`]), *optional*): The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each dataset, prepending the dictionary key to the metric name. processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. This supersedes the `tokenizer` argument, which is now deprecated. model_init (`Callable[[], PreTrainedModel]`, *optional*): A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start from a new instance of the model as given by this function. The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyperparameters (such as layer count, sizes of inner layers, dropout probabilities, etc.). compute_loss_func (`Callable`, *optional*): A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618) used by [`Trainer`].
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and return a dictionary mapping strings to metric values. *Note* When passing TrainingArgs with `batch_eval_metrics` set to `True`, your compute_metrics function must take a boolean `compute_result` argument. This will be triggered after the last eval batch to signal that the function needs to calculate and return the global summary statistics rather than accumulating the batch-level statistics. callbacks (List of [`TrainerCallback`], *optional*): A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in [here](callback). If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`): A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. optimizer_cls_and_kwargs (`Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*): A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument. Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before initializing the Trainer. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): A function that preprocesses the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. The modifications made by this function will be reflected in the predictions received by `compute_metrics`. Note that the labels (second parameter) will be `None` if the dataset does not have them. Important attributes: - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] subclass. - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs). - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set to `False` if model parallel or deepspeed is used, or if the default `TrainingArguments.place_model_on_device` is overridden to return `False`. - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while in `train`) """ # Those are used as methods of the Trainer in examples.
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state @deprecate_kwarg("tokenizer", new_name="processing_class", version="5.0.0", raise_if_both_names=True) def __init__( self, model: Union[PreTrainedModel, nn.Module] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Union[Dataset, IterableDataset, "datasets.Dataset"]] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset], "datasets.Dataset"]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_loss_func: Optional[Callable] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None), optimizer_cls_and_kwargs: Optional[Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]] = None, preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ): if args is None: output_dir = "tmp_trainer" logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") args = TrainingArguments(output_dir=output_dir) if args.batch_eval_metrics and compute_metrics is not None: if "compute_result" not in inspect.signature(compute_metrics).parameters.keys(): raise ValueError( "When using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result`" " boolean argument which will be triggered after the last batch of the eval set to signal that the" " summary statistics should be returned by the function." ) if args.eval_strategy is not None and args.eval_strategy != "no" and eval_dataset is None: raise ValueError( f"You have set `args.eval_strategy` to {args.eval_strategy} but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. " ) if args.save_strategy == SaveStrategy.BEST or args.load_best_model_at_end: if args.metric_for_best_model is None: raise ValueError( "`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`." ) self.args = args self.compute_loss_func = compute_loss_func # Seed must be set before instantiating the model when using model enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.hp_name = None self.deepspeed = None self.is_in_train = False self.create_accelerator_and_postprocess() # memory metrics - must set up as early as possible self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() # set the correct log level depending on the node log_level = args.get_process_log_level() logging.set_verbosity(log_level) # force device and distributed setup init explicitly args._setup_devices if model is None: if model_init is not None: self.model_init = model_init model = self.call_model_init() else: raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument") else: if model_init is not None: warnings.warn( "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" " overwrite your model when calling the `train` method. 
This will become a fatal error in the next" " release.", FutureWarning, ) self.model_init = model_init if model.__class__.__name__ in MODEL_MAPPING_NAMES: raise ValueError( f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " "computes hidden states and does not accept any labels. You should choose a model with a head " "suitable for your task like any of the `AutoModelForXxx` listed at " "https://huggingface.co/docs/transformers/model_doc/auto" ) if getattr(model, "is_parallelizable", False) and getattr(model, "model_parallel", False): self.is_model_parallel = True else: self.is_model_parallel = False if getattr(model, "hf_device_map", None) is not None: devices = [device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]] if len(devices) > 1: self.is_model_parallel = True elif len(devices) == 1: self.is_model_parallel = self.args.device != torch.device(devices[0]) else: self.is_model_parallel = False # warn users if self.is_model_parallel: logger.info( "You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set" " to `True` to avoid any unexpected behavior such as device placement mismatching." ) if self.args.use_liger_kernel: if is_liger_kernel_available(): from liger_kernel.transformers import _apply_liger_kernel_to_instance if isinstance(model, PreTrainedModel): # Patch the model with liger kernels. Use the default kernel configurations. _apply_liger_kernel_to_instance(model=model) elif hasattr(model, "get_base_model") and isinstance(model.get_base_model(), PreTrainedModel): # Patch the base model with liger kernels where model is a PeftModel. Use the default kernel configurations. _apply_liger_kernel_to_instance(model=model.get_base_model()) else: logger.warning( "The model is not an instance of PreTrainedModel. No liger kernels will be applied." ) else: raise ImportError( "You have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. " "Please install it with `pip install liger-kernel`" ) _is_quantized_and_base_model = getattr(model, "is_quantized", False) and not getattr( model, "_hf_peft_config_loaded", False ) _quantization_method_supports_training = ( getattr(model, "hf_quantizer", None) is not None and model.hf_quantizer.is_trainable ) _is_model_quantized_and_qat_trainable = getattr(model, "hf_quantizer", None) is not None and getattr( model.hf_quantizer, "is_qat_trainable", False ) # Filter out quantized + compiled models if _is_quantized_and_base_model and hasattr(model, "_orig_mod"): raise ValueError( "You cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFT" ) # At this stage the model is already loaded if _is_quantized_and_base_model and not _is_peft_model(model) and not _is_model_quantized_and_qat_trainable: raise ValueError( "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of" " the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft" " for more details" ) elif _is_quantized_and_base_model and not _quantization_method_supports_training: raise ValueError( f"The model you are trying to fine-tune is quantized with {model.hf_quantizer.quantization_config.quant_method}" " but that quantization method do not support training. 
Please open an issue on GitHub: https://github.com/huggingface/transformers" f" to request the support for training support for {model.hf_quantizer.quantization_config.quant_method}" ) self.is_fsdp_xla_enabled = args.fsdp_config["xla"] if len(args.fsdp) > 0: if self.is_deepspeed_enabled: raise ValueError( "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags." ) if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED: raise ValueError("Using fsdp only works in distributed training.") # one place to sort out whether to place the model on device or not # postpone switching model to cuda when: # 1. MP - since we are trying to fit a much bigger than 1 gpu model # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway, # and we only use deepspeed for training at the moment # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first # 4. FSDP - same as MP self.place_model_on_device = args.place_model_on_device if ( self.is_model_parallel or self.is_deepspeed_enabled or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) or self.is_fsdp_xla_enabled or self.is_fsdp_enabled ): self.place_model_on_device = False default_collator = ( DataCollatorWithPadding(processing_class) if processing_class is not None and isinstance(processing_class, (PreTrainedTokenizerBase, SequenceFeatureExtractor)) else default_data_collator ) self.data_collator = data_collator if data_collator is not None else default_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.processing_class = processing_class # Bnb Quantized models doesn't support `.to` operation. if ( self.place_model_on_device and not getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ): self._move_model_to_device(model, args.device) # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs if self.is_model_parallel: self.args._n_gpu = 1 # later use `self.model is self.model_wrapped` to check if it's wrapped or not self.model_wrapped = model self.model = model # Just in case the model was wrapped outside of the `Trainer` unwrapped_model = self.accelerator.unwrap_model(model) model_forward = ( unwrapped_model.forward if not _is_peft_model(unwrapped_model) else unwrapped_model.get_base_model().forward ) forward_params = inspect.signature(model_forward).parameters # Check if the model has explicit setup for loss kwargs, # if not, check if `**kwargs` are in model.forward if hasattr(model, "accepts_loss_kwargs"): self.model_accepts_loss_kwargs = model.accepts_loss_kwargs else: self.model_accepts_loss_kwargs = any( k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values() ) self.neftune_noise_alpha = args.neftune_noise_alpha self.compute_metrics = compute_metrics self.preprocess_logits_for_metrics = preprocess_logits_for_metrics self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs if self.optimizer_cls_and_kwargs is not None and self.optimizer is not None: raise RuntimeError("Passing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.") if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError( "Passing a `model_init` is incompatible with providing the `optimizers` argument. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." 
) if is_torch_xla_available() and self.optimizer is not None: for param in self.model.parameters(): model_device = param.device break for param_group in self.optimizer.param_groups: if len(param_group["params"]) > 0: optimizer_device = param_group["params"][0].device break if model_device != optimizer_device: raise ValueError( "The model and the optimizer parameters are not on the same device, which probably means you" " created an optimizer around your model **before** putting on the device and passing it to the" " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." ) if (self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and ( self.optimizer is not None or self.lr_scheduler is not None ): raise RuntimeError( "Passing `optimizers` is not allowed if PyTorch FSDP is enabled. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. self._loggers_initialized = False # Create distant repo and output directory if needed self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") if args.max_steps > 0 and args.num_train_epochs > 0: logger.info("max_steps is given, it will override any value given in num_train_epochs") if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: raise ValueError( "The train_dataset does not implement __len__, max_steps has to be specified. " "The number of steps needs to be known in advance for the learning rate scheduler." ) if ( train_dataset is not None and isinstance(train_dataset, torch.utils.data.IterableDataset) and args.group_by_length ): raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") self._signature_columns = None # Mixed precision setup self.use_apex = False self.use_cpu_amp = False # Mixed precision setup for SageMaker Model Parallel if is_sagemaker_mp_enabled(): # BF16 + model parallelism in SageMaker: currently not supported, raise an error if args.bf16: raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ") if IS_SAGEMAKER_MP_POST_1_10: # When there's mismatch between SMP config and trainer argument, use SMP config as truth if args.fp16 != smp.state.cfg.fp16: logger.warning( f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " f"but FP16 provided in trainer argument is {args.fp16}, " f"setting to {smp.state.cfg.fp16}" ) args.fp16 = smp.state.cfg.fp16 else: # smp < 1.10 does not support fp16 in trainer. if hasattr(smp.state.cfg, "fp16"): logger.warning( f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer." 
) if (args.fp16 or args.bf16) and args.half_precision_backend == "auto": if args.device == torch.device("cpu"): if args.fp16: if not is_torch_greater_or_equal_than_2_3: raise ValueError("Tried to use `fp16` but it is not supported on cpu") else: args.half_precision_backend = "cpu_amp" logger.info(f"Using {args.half_precision_backend} half precision backend") if (args.fp16 or args.bf16) and not (self.is_deepspeed_enabled or is_sagemaker_mp_enabled()): # deepspeed and SageMaker Model Parallel manage their own half precision if args.half_precision_backend == "cpu_amp": self.use_cpu_amp = True self.amp_dtype = torch.bfloat16 elif args.half_precision_backend == "apex": if not is_apex_available(): raise ImportError( "Using FP16 with APEX but APEX is not installed, please refer to" " https://www.github.com/nvidia/apex." ) self.use_apex = True # Label smoothing if self.args.label_smoothing_factor != 0: self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) else: self.label_smoother = None self.control = TrainerControl() self.state = TrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ], ) # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then # returned to 0 every time flos need to be logged self.current_flos = 0 self.hp_search_backend = None default_label_names = find_labels(self.model.__class__) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.can_return_loss = can_return_loss(self.model.__class__) self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) # Internal variables to help with automatic batch size reduction self._train_batch_size = args.train_batch_size self._created_lr_scheduler = False # very last self._memory_tracker.stop_and_update_metrics() # torch.compile if args.torch_compile and not is_torch_compile_available(): raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.") self.is_fsdp_xla_v2_enabled = args.fsdp_config.get("xla_fsdp_v2", False) if self.is_fsdp_xla_v2_enabled: if not IS_XLA_FSDPV2_POST_2_2: raise ValueError("FSDPv2 requires `torch_xla` 2.2 or higher.") # Prepare the SPMD mesh that is going to be used by the data loader and the FSDPv2 wrapper. # Tensor axis is just a placeholder where it will not be used in FSDPv2. num_devices = xr.global_runtime_device_count() xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=("fsdp", "tensor"))) self.is_fsdp_xla_v1_enabled = self.is_fsdp_xla_enabled and not self.is_fsdp_xla_v2_enabled @property def tokenizer(self) -> Optional[PreTrainedTokenizerBase]: logger.warning("Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.") return self.processing_class @tokenizer.setter def tokenizer(self, processing_class) -> None: logger.warning( "Trainer.tokenizer is now deprecated. You should use `Trainer.processing_class = processing_class` instead." 
) self.processing_class = processing_class def _activate_neftune(self, model): r""" Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914 """ unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() del unwrapped_model embeddings.neftune_noise_alpha = self.neftune_noise_alpha hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) self.neftune_hook_handle = hook_handle return model def _deactivate_neftune(self, model): """ Deactivates the neftune method. Make sure to call `_activate_neftune` first. """ if not hasattr(self, "neftune_hook_handle"): raise ValueError("Neftune is not activated make sure to call `trainer._activate_neftune()` first") unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() self.neftune_hook_handle.remove() del embeddings.neftune_noise_alpha, unwrapped_model def add_callback(self, callback): """ Add a callback to the current list of [`~transformers.TrainerCallback`]. Args: callback (`type` or [`~transformers.TrainerCallback]`): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will instantiate a member of that class. """ self.callback_handler.add_callback(callback) def pop_callback(self, callback): """ Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it. If the callback is not found, returns `None` (and no error is raised). Args: callback (`type` or [`~transformers.TrainerCallback]`): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will pop the first member of that class found in the list of callbacks. Returns: [`~transformers.TrainerCallback`]: The callback removed, if found. """ return self.callback_handler.pop_callback(callback) def remove_callback(self, callback): """ Remove a callback from the current list of [`~transformers.TrainerCallback`]. Args: callback (`type` or [`~transformers.TrainerCallback]`): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will remove the first member of that class found in the list of callbacks. """ self.callback_handler.remove_callback(callback) def _move_model_to_device(self, model, device): model = model.to(device) # Moving a model to an XLA device disconnects the tied weights, so we have to retie them. if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"): model.tie_weights() def _set_signature_columns_if_needed(self): if self._signature_columns is None: # Inspect model forward signature to keep only the arguments it accepts. model_to_inspect = self.model if _is_peft_model(self.model): if hasattr(self.model, "get_base_model"): model_to_inspect = self.model.get_base_model() else: # PeftMixedModel do not provide a `get_base_model` method model_to_inspect = self.model.base_model.model signature = inspect.signature(model_to_inspect.forward) self._signature_columns = list(signature.parameters.keys()) # Labels may be named label or label_ids, the default data collator handles that. 
self._signature_columns += list(set(["label", "label_ids"] + self.label_names)) def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): if not self.args.remove_unused_columns: return dataset self._set_signature_columns_if_needed() signature_columns = self._signature_columns ignored_columns = list(set(dataset.column_names) - set(signature_columns)) if len(ignored_columns) > 0: dset_description = "" if description is None else f"in the {description} set" logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, " " you can safely ignore this message." ) columns = [k for k in signature_columns if k in dataset.column_names] if len(columns) == 0: raise ValueError( "No columns in the dataset match the model's forward method signature. " f"The following columns have been ignored: [{', '.join(ignored_columns)}]. " "Please check the dataset and model. You may need to set `remove_unused_columns=False` in `TrainingArguments`." ) if version.parse(datasets.__version__) < version.parse("1.4.0"): dataset.set_format( type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"] ) return dataset else: return dataset.remove_columns(ignored_columns) def _get_collator_with_removed_columns( self, data_collator: Callable, description: Optional[str] = None ) -> Callable: """Wrap the data collator in a callable removing unused columns.""" if not self.args.remove_unused_columns: return data_collator self._set_signature_columns_if_needed() signature_columns = self._signature_columns remove_columns_collator = RemoveColumnsCollator( data_collator=data_collator, signature_columns=signature_columns, logger=logger, description=description, model_name=self.model.__class__.__name__, ) return remove_columns_collator def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: if self.train_dataset is None or not has_length(self.train_dataset): return None # Build the sampler. if self.args.group_by_length: if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset): lengths = ( self.train_dataset[self.args.length_column_name] if self.args.length_column_name in self.train_dataset.column_names else None ) else: lengths = None model_input_name = ( self.processing_class.model_input_names[0] if self.processing_class is not None else None ) return LengthGroupedSampler( self.args.train_batch_size * self.args.gradient_accumulation_steps, dataset=self.train_dataset, lengths=lengths, model_input_name=model_input_name, ) else: return RandomSampler(self.train_dataset) def get_train_dataloader(self) -> DataLoader: """ Returns the training [`~torch.utils.data.DataLoader`]. Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed training if necessary) otherwise. Subclass and override this method if you want to inject some custom behavior. 
""" if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") train_dataset = self.train_dataset data_collator = self.data_collator if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): train_dataset = self._remove_unused_columns(train_dataset, description="training") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="training") dataloader_params = { "batch_size": self._train_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(train_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_train_sampler() dataloader_params["drop_last"] = self.args.dataloader_drop_last dataloader_params["worker_init_fn"] = seed_worker dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: if eval_dataset is None or not has_length(eval_dataset): return None # Build the sampler. # Deprecated code if self.args.use_legacy_prediction_loop: if is_torch_xla_available(): return SequentialDistributedSampler( eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() ) elif is_sagemaker_mp_enabled(): return SequentialDistributedSampler( eval_dataset, num_replicas=smp.dp_size(), rank=smp.dp_rank(), batch_size=self.args.per_device_eval_batch_size, ) else: return SequentialSampler(eval_dataset) if self.args.group_by_length: if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): lengths = ( eval_dataset[self.args.length_column_name] if self.args.length_column_name in eval_dataset.column_names else None ) else: lengths = None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None return LengthGroupedSampler( self.args.eval_batch_size, dataset=eval_dataset, lengths=lengths, model_input_name=model_input_name, ) if self.args.world_size <= 1: return SequentialSampler(eval_dataset) else: return None def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]] = None) -> DataLoader: """ Returns the evaluation [`~torch.utils.data.DataLoader`]. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*): If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. 
""" if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") # If we have persistent workers, don't do a fork bomb especially as eval datasets # don't change during training dataloader_key = eval_dataset if isinstance(eval_dataset, str) else "eval" if ( hasattr(self, "_eval_dataloaders") and dataloader_key in self._eval_dataloaders and self.args.dataloader_persistent_workers ): return self.accelerator.prepare(self._eval_dataloaders[dataloader_key]) eval_dataset = ( self.eval_dataset[eval_dataset] if isinstance(eval_dataset, str) else eval_dataset if eval_dataset is not None else self.eval_dataset ) data_collator = self.data_collator if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation") dataloader_params = { "batch_size": self.args.eval_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(eval_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_eval_sampler(eval_dataset) dataloader_params["drop_last"] = self.args.dataloader_drop_last dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor # accelerator.free_memory() will destroy the references, so # we need to store the non-prepared version eval_dataloader = DataLoader(eval_dataset, **dataloader_params) if self.args.dataloader_persistent_workers: if hasattr(self, "_eval_dataloaders"): self._eval_dataloaders[dataloader_key] = eval_dataloader else: self._eval_dataloaders = {dataloader_key: eval_dataloader} return self.accelerator.prepare(eval_dataloader) def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: """ Returns the test [`~torch.utils.data.DataLoader`]. Subclass and override this method if you want to inject some custom behavior. Args: test_dataset (`torch.utils.data.Dataset`, *optional*): The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement `__len__`. """ data_collator = self.data_collator if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): test_dataset = self._remove_unused_columns(test_dataset, description="test") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="test") dataloader_params = { "batch_size": self.args.eval_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(test_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_eval_sampler(test_dataset) dataloader_params["drop_last"] = self.args.dataloader_drop_last dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor # We use the same batch_size as for eval. return self.accelerator.prepare(DataLoader(test_dataset, **dataloader_params)) def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or `create_scheduler`) in a subclass. """ self.create_optimizer() if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer optimizer = self.optimizer.optimizer else: optimizer = self.optimizer self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) def get_decay_parameter_names(self, model) -> List[str]: """ Get all parameter names that weight decay will be applied to Note that some models implement their own layernorm instead of calling nn.LayerNorm, weight decay could still apply to those modules since this function only filter out instance of nn.LayerNorm """ decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS) decay_parameters = [name for name in decay_parameters if "bias" not in name] return decay_parameters def create_optimizer(self): """ Setup the optimizer. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through `optimizers`, or subclass and override this method in a subclass. """ opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.optimizer is None: decay_parameters = self.get_decay_parameter_names(opt_model) optimizer_grouped_parameters = [ { "params": [ p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) ], "weight_decay": self.args.weight_decay, }, { "params": [ p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) ], "weight_decay": 0.0, }, ] if self.optimizer_cls_and_kwargs is not None: optimizer_cls, optimizer_kwargs = self.optimizer_cls_and_kwargs else: optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args, opt_model) # Overwrite `params` in case it's created by `get_optimizer_cls_and_kwargs` # e.g. for GaLore optimizer. if "params" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("params") # Overwrite `model` in case it's created by `get_optimizer_cls_and_kwargs` # e.g. for LOMO optimizer. if "model" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("model") # For layer-wise dummy optimizers we overwrite optimizer_grouped_parameters with `optimizer_dict` # to avoid arguments conflicts. if "optimizer_dict" in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop("optimizer_dict") self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if optimizer_cls.__name__ == "Adam8bit": import bitsandbytes manager = bitsandbytes.optim.GlobalOptimManager.get_instance() skipped = 0 for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) logger.info(f"skipped {module}: {skipped/2**20}M params") manager.register_module_override(module, "weight", {"optim_bits": 32}) logger.debug(f"bitsandbytes: will optimize {module} in fp32") logger.info(f"skipped: {skipped/2**20}M params") if is_sagemaker_mp_enabled(): self.optimizer = smp.DistributedOptimizer(self.optimizer) return self.optimizer def get_num_trainable_parameters(self): """ Get the number of trainable parameters. """ return sum(p.numel() for p in self.model.parameters() if p.requires_grad) def get_learning_rates(self): """ Returns the learning rate of each parameter from self.optimizer. 
""" if self.optimizer is None: raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.") return [group["lr"] for group in self.optimizer.param_groups] def get_optimizer_group(self, param: Optional[Union[str, torch.nn.parameter.Parameter]] = None): """ Returns optimizer group for a parameter if given, else returns all optimizer groups for params. Args: param (`str` or `torch.nn.parameter.Parameter`, *optional*): The parameter for which optimizer group needs to be returned. """ if self.optimizer is None: raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.") if param is not None: for group in self.optimizer.param_groups: if param in group["params"]: return group return [group["params"] for group in self.optimizer.param_groups] @staticmethod def get_optimizer_cls_and_kwargs( args: TrainingArguments, model: Optional[PreTrainedModel] = None ) -> Tuple[Any, Any]: """ Returns the optimizer class and optimizer parameters based on the training arguments. Args: args (`transformers.training_args.TrainingArguments`): The training arguments for the training session. """ # parse args.optim_args optim_args = {} if args.optim_args: for mapping in args.optim_args.replace(" ", "").split(","): key, value = mapping.split("=") optim_args[key] = value optimizer_kwargs = {"lr": args.learning_rate} adam_kwargs = { "betas": (args.adam_beta1, args.adam_beta2), "eps": args.adam_epsilon, } if args.optim == OptimizerNames.ADAFACTOR: optimizer_cls = Adafactor optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) elif args.optim == OptimizerNames.ADAMW_HF: from .optimization import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: from torch.optim import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: optimizer_kwargs.update({"fused": True}) elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: try: from torch_xla.amp.syncfree import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED: try: from torch_npu.optim import NpuFusedAdamW optimizer_cls = NpuFusedAdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import FusedAdamW from torch_npu.") elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: try: from apex.optimizers import FusedAdam optimizer_cls = FusedAdam optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") elif args.optim in [ OptimizerNames.ADAMW_BNB, OptimizerNames.ADAMW_8BIT, OptimizerNames.PAGED_ADAMW, OptimizerNames.PAGED_ADAMW_8BIT, OptimizerNames.ADEMAMIX, OptimizerNames.ADEMAMIX_8BIT, OptimizerNames.PAGED_ADEMAMIX, OptimizerNames.PAGED_ADEMAMIX_8BIT, OptimizerNames.LION, OptimizerNames.LION_8BIT, OptimizerNames.PAGED_LION, OptimizerNames.PAGED_LION_8BIT, OptimizerNames.RMSPROP_BNB, OptimizerNames.RMSPROP_8BIT, OptimizerNames.RMSPROP_32BIT, ]: try: from bitsandbytes.optim import AdamW, Lion, RMSprop is_paged = False optim_bits = 32 optimizer_cls = None additional_optim_kwargs = adam_kwargs if "paged" in args.optim: is_paged = True if "8bit" in args.optim: optim_bits = 8 if "adam" in args.optim: optimizer_cls = AdamW elif "lion" in 
args.optim: optimizer_cls = Lion additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)} elif "rmsprop" in args.optim: optimizer_cls = RMSprop # Above we pass all `adam_kwargs` to the optimizer, here # we only pass `optim_args` which can be passed by the user. additional_optim_kwargs = optim_args elif "ademamix" in args.optim: if is_bitsandbytes_available() and version.parse( importlib.metadata.version("bitsandbytes") ) < version.parse("0.44.0"): raise ValueError( "The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. " "Please install `bitsandbytes` >= 0.44.0." ) from bitsandbytes.optim import AdEMAMix optimizer_cls = AdEMAMix additional_optim_kwargs = { "betas": ( float(optim_args.get("beta1", args.adam_beta1)), float(optim_args.get("beta2", args.adam_beta2)), float(optim_args.get("beta3", 0.9999)), ), "alpha": float(optim_args.get("alpha", 5.0)), "eps": float(optim_args.get("eps", args.adam_epsilon)), } if "t_alpha" in optim_args: additional_optim_kwargs["t_alpha"] = int(optim_args["t_alpha"]) if "t_beta3" in optim_args: additional_optim_kwargs["t_beta3"] = int(optim_args["t_beta3"]) bnb_kwargs = {"optim_bits": optim_bits} if "rmsprop" not in args.optim: bnb_kwargs["is_paged"] = is_paged optimizer_kwargs.update(additional_optim_kwargs) optimizer_kwargs.update(bnb_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!") if is_bitsandbytes_available() and version.parse( importlib.metadata.version("bitsandbytes") ) < version.parse("0.41.1"): logger.warning( "You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. " "It is recommended to update your version as a major bug has been fixed in 8-bit optimizers." ) elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: try: from torchdistx.optimizers import AnyPrecisionAdamW optimizer_cls = AnyPrecisionAdamW optimizer_kwargs.update(adam_kwargs) # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx. 
optimizer_kwargs.update( { "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")), "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")), "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")), "compensation_buffer_dtype": getattr( torch, optim_args.get("compensation_buffer_dtype", "bfloat16") ), } ) except ImportError: raise ValueError("Please install https://github.com/pytorch/torchdistx") elif args.optim == OptimizerNames.SGD: optimizer_cls = torch.optim.SGD elif args.optim == OptimizerNames.ADAGRAD: optimizer_cls = torch.optim.Adagrad elif args.optim == OptimizerNames.RMSPROP: optimizer_cls = torch.optim.RMSprop elif args.optim in [ OptimizerNames.GALORE_ADAMW, OptimizerNames.GALORE_ADAMW_8BIT, OptimizerNames.GALORE_ADAFACTOR, OptimizerNames.GALORE_ADAMW_LAYERWISE, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE, ]: if not is_galore_torch_available(): raise ImportError( "You need to install `galore_torch` in order to use GaLore optimizers" " install it with `pip install git+https://github.com/jiaweizzhao/GaLore`" ) from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit is_layerwise = args.optim.lower().endswith("layerwise") if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED: raise NotImplementedError("Layer-wise GaLore does not support DDP at this time") optimizer_mapping = { OptimizerNames.GALORE_ADAMW: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor, OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor, } optimizer_cls = optimizer_mapping[args.optim] if args.optim_target_modules is None: raise ValueError( "You need to define a `optim_target_modules` in order to properly use GaLore optimizers" ) if not isinstance(args.optim_target_modules, (list, str)): raise ValueError( f"`optim_target_modules` has to be a list of strings, a string corresponding to a regex, or a specific module or 'all-linear', you passed {args.optim_target_modules}" ) if model is None: raise ValueError("You need to pass a model in order to correctly initialize a GaLore optimizer.") logger.warning( "Activated GaLoRE fine-tuning, depending on your model size and hardware, the training might take a while before starting. Please be patient !" ) all_linear = ( isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace("_", "-") == "all-linear" ) galore_params = [] galore_params_names = [] for module_name, module in model.named_modules(): target_module_exists, is_regex = check_target_module_exists( args.optim_target_modules, module_name, return_is_regex=True ) if not isinstance(module, nn.Linear): # Warn in case we match but it's not a linear layer if target_module_exists and not is_regex: logger.warning( f"{module_name} has been matched but ignored as GaLore only supports linear layers. Please double check your `optim_target_modules`!" ) continue if not target_module_exists and not all_linear: continue galore_params.append(module.weight) galore_params_names.append(module_name + ".weight") if len(galore_params) == 0: raise ValueError( f"None of the target modules were found! ({args.optim_target_modules}). Please make sure to pass a valid `target_modules`." 
) non_galore_params = [p for n, p in model.named_parameters() if n not in galore_params_names] galore_optim_kwargs = { "rank": int(optim_args.pop("rank", 128)), "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)), "scale": float(optim_args.pop("scale", 0.25)), "proj_type": optim_args.pop("proj_type", "std"), } # The default args are from the official repository: https://github.com/jiaweizzhao/GaLore param_groups = [ {"params": non_galore_params}, {"params": galore_params, **galore_optim_kwargs}, ] if is_layerwise: # For layer-wise optimizers, the optimization step is done through post accumulation # gradient hooks. The trick is to first attach these hooks to the model parameters then # create a dummy optimizer that will perform no-ops in the Trainer. # See the original implementation or the nice implementation from @hiyouga # here: https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba if args.gradient_accumulation_steps != 1: raise ValueError("Layerwise GaLoRE optimizer do not support gradient accumulation !") optimizer_dict = {} for param in non_galore_params: param_groups = [{"params": [param]}] optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs) for param in galore_params: param_groups = [{"params": [param], **galore_optim_kwargs}] optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs) def optimizer_hook(param): if param.grad is not None: optimizer_dict[param].step() optimizer_dict[param].zero_grad() for param in model.parameters(): if param.requires_grad: param.register_post_accumulate_grad_hook(optimizer_hook) optimizer_cls = LayerWiseDummyOptimizer optimizer_kwargs.update({"optimizer_dict": optimizer_dict}) optimizer_kwargs.update({"params": param_groups}) if args.optim == OptimizerNames.GALORE_ADAFACTOR: optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: if not is_lomo_available(): raise ImportError( "You need to install `lomo_optim` in order to use LOMO optimizers" " install it with `pip install lomo-optim`" ) if not is_accelerate_available("0.30.0"): raise ImportError("You need to have `accelerate>=0.30.0` to be able to use LOMO optimizers") if model is None: raise ValueError("You need to pass a `model` in order to correctly initialize a LOMO optimizer.") from lomo_optim import AdaLomo, Lomo if "ada" in args.optim: optimizer_cls = AdaLomo else: optimizer_cls = Lomo optimizer_kwargs.update({"model": model}) elif args.optim == OptimizerNames.GROKADAMW: if not is_grokadamw_available(): raise ValueError("Please install grokadamw with `pip install grokadamw`") from grokadamw import GrokAdamW optimizer_cls = GrokAdamW optimizer_kwargs.update( { "alpha_init": float(optim_args.get("alpha_init", 0.98)), "lamb": float(optim_args.get("lamb", 2.0)), "gamma": float(optim_args.get("gamma", 0.1)), "grokking_signal_decay_rate": float(optim_args.get("grokking_signal_decay_rate", 0.1)), "gradient_clipping": float(optim_args.get("gradient_clipping", 1.0)), } ) elif args.optim == OptimizerNames.ADAMW_TORCH_4BIT: if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse( "0.4.0" ): raise ImportError( "You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers." 
"Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao" ) if version.parse(importlib.metadata.version("torch")) <= version.parse("2.4"): raise ImportError( "You need to have `torch>2.4` in order to use torch 4-bit optimizers. " "Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly." ) from torchao.prototype.low_bit_optim import AdamW4bit optimizer_cls = AdamW4bit optimizer_kwargs.update(adam_kwargs) elif args.optim in [ OptimizerNames.SCHEDULE_FREE_ADAMW, OptimizerNames.SCHEDULE_FREE_SGD, ]: if not is_schedulefree_available(): raise ImportError( "You need to install `schedulefree` in order to use schedulefree optimizers" " install it with `pip install schedulefree`" ) if not is_accelerate_available("0.30.0"): raise ImportError("You need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers") from schedulefree import AdamWScheduleFree, SGDScheduleFree additional_optim_kwargs = {} if args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW: optimizer_cls = AdamWScheduleFree additional_optim_kwargs = adam_kwargs elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD: optimizer_cls = SGDScheduleFree else: raise ValueError("Invalid schedulefree optimizer") additional_optim_kwargs["weight_decay"] = args.weight_decay additional_optim_kwargs["warmup_steps"] = args.warmup_steps additional_optim_kwargs.update( { "weight_lr_power": float(optim_args.get("weight_lr_power", 2.0)), "r": float(optim_args.get("r", 0.0)), } ) optimizer_kwargs.update(additional_optim_kwargs) else: raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}") return optimizer_cls, optimizer_kwargs def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None): """ Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument. Args: num_training_steps (int): The number of training steps to do. """ if self.lr_scheduler is None: self.lr_scheduler = get_scheduler( self.args.lr_scheduler_type, optimizer=self.optimizer if optimizer is None else optimizer, num_warmup_steps=self.args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, scheduler_specific_kwargs=self.args.lr_scheduler_kwargs, ) self._created_lr_scheduler = True return self.lr_scheduler def num_examples(self, dataloader: DataLoader) -> int: """ Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When dataloader.dataset does not exist or has no length, estimates as best it can """ try: dataset = dataloader.dataset # Special case for IterableDatasetShard, we need to dig deeper if isinstance(dataset, IterableDatasetShard): return len(dataloader.dataset.dataset) return len(dataloader.dataset) except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader return len(dataloader) * self.args.per_device_train_batch_size @staticmethod def num_tokens(train_dl: DataLoader, max_steps: Optional[int] = None) -> int: """ Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader. 
""" train_tokens = 0 try: for batch in train_dl: tokens = batch["input_ids"].numel() if max_steps is not None: return tokens * max_steps train_tokens += tokens except KeyError: logger.warning("Cannot get num_tokens from dataloader") return train_tokens def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): """HP search setup code""" self._trial = trial if self.hp_search_backend is None or trial is None: return if self.hp_search_backend == HPSearchBackend.OPTUNA: params = self.hp_space(trial) elif self.hp_search_backend == HPSearchBackend.RAY: params = trial params.pop("wandb", None) elif self.hp_search_backend == HPSearchBackend.SIGOPT: params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} elif self.hp_search_backend == HPSearchBackend.WANDB: params = trial for key, value in params.items(): if not hasattr(self.args, key): logger.warning( f"Trying to set {key} in the hyperparameter search but there is no corresponding field in" " `TrainingArguments`." ) continue old_attr = getattr(self.args, key, None) # Casting value to the proper type if old_attr is not None: value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: logger.info(f"Trial: {trial.params}") if self.hp_search_backend == HPSearchBackend.SIGOPT: logger.info(f"SigOpt Assignments: {trial.assignments}") if self.hp_search_backend == HPSearchBackend.WANDB: logger.info(f"W&B Sweep parameters: {trial}") if self.is_deepspeed_enabled: if self.args.deepspeed is None: raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set") self.accelerator.free_memory() # Rebuild the deepspeed config to reflect the updated training parameters from accelerate.utils import DeepSpeedPlugin from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) self.args.hf_deepspeed_config.trainer_config_process(self.args) self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config) # From 1.0 on, we need to fully wipe the DS plugin when doing sweeps. # Simply calling `_reset_state` is enough and doesn't need a version pin. 
AcceleratorState()._reset_state() self.create_accelerator_and_postprocess() def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]): if self.hp_search_backend is None or trial is None: return metrics = metrics.copy() self.objective = self.compute_objective(metrics) if self.hp_search_backend == HPSearchBackend.OPTUNA: import optuna if hasattr(trial, "study") and not trial.study._is_multi_objective(): trial.report(self.objective, step) if trial.should_prune(): self.callback_handler.on_train_end(self.args, self.state, self.control) raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train with tempfile.TemporaryDirectory() as temp_checkpoint_dir: checkpoint = None if self.control.should_save: self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) metrics["objective"] = self.objective ray.train.report(metrics, checkpoint=checkpoint) def _tune_save_checkpoint(self, checkpoint_dir: str): output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") self.save_model(output_dir, _internal_call=True) if self.args.should_save: # Update the `TrainerControl` state to where we are currently self.state.stateful_callbacks["TrainerControl"] = self.control.state() self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) def call_model_init(self, trial=None): model_init_argcount = number_of_arguments(self.model_init) if model_init_argcount == 0: model = self.model_init() elif model_init_argcount == 1: model = self.model_init(trial) else: raise RuntimeError("model_init should have 0 or 1 argument.") if model is None: raise RuntimeError("model_init should not return None.") return model def torch_jit_model_eval(self, model, dataloader, training=False): if not training: if dataloader is None: logger.warning("failed to use PyTorch jit mode due to current dataloader is none.") return model example_batch = next(iter(dataloader)) example_batch = self._prepare_inputs(example_batch) try: jit_model = copy.copy(model) jit_model.eval() original_forward = jit_model.__dict__.pop("_original_forward", None) # remove mixed precision hooks from the model if original_forward: jit_model.forward = original_forward autocast_handler = AutocastKwargs(cache_enabled=False) with self.accelerator.autocast(autocast_handler=autocast_handler), torch.no_grad(): if version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.0.0"): if isinstance(example_batch, dict): jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False) else: jit_model = torch.jit.trace( jit_model, example_kwarg_inputs={key: example_batch[key] for key in example_batch}, strict=False, ) else: jit_inputs = [] for key in example_batch: example_tensor = torch.ones_like(example_batch[key]) jit_inputs.append(example_tensor) jit_inputs = tuple(jit_inputs) jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False) jit_model = torch.jit.freeze(jit_model) with torch.no_grad(): jit_model(**example_batch) jit_model(**example_batch) model = jit_model self.use_cpu_amp = False except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e: logger.warning(f"failed to use PyTorch jit mode due to: {e}.") return model def 
ipex_optimize_model(self, model, training=False, dtype=torch.float32): if not is_ipex_available(): raise ImportError( "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer" " to https://github.com/intel/intel-extension-for-pytorch." ) import intel_extension_for_pytorch as ipex if not training: model.eval() dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train) else: if not model.training: model.train() model, self.optimizer = ipex.optimize( model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1" ) return model def compare_trainer_and_checkpoint_args(self, training_args, trainer_state): attributes_map = { "logging_steps": "logging_steps", "eval_steps": "eval_steps", "save_steps": "save_steps", } has_warning = False warning_str = "Warning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: " for arg_attr, state_attr in attributes_map.items(): arg_value = getattr(training_args, arg_attr, None) state_value = getattr(trainer_state, state_attr, None) if arg_value is not None and state_value is not None and arg_value != state_value: warning_str += f"\n\t{arg_attr}: {arg_value} (from args) != {state_value} (from trainer_state.json)" has_warning = True # train bs is special as we need to account for multi-GPU train_bs_args = training_args.per_device_train_batch_size train_bs_state = trainer_state.train_batch_size // max(1, training_args.n_gpu) if train_bs_args != train_bs_state: warning_str += f"\n\tper_device_train_batch_size: {train_bs_args} (from args) != {train_bs_state} (from trainer_state.json)" has_warning = True if has_warning: logger.warning_once(warning_str) def _wrap_model(self, model, training=True, dataloader=None): if self.args.use_ipex: dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32 model = self.ipex_optimize_model(model, training, dtype=dtype) if is_sagemaker_mp_enabled(): # Wrapping the base model twice in a DistributedModel will raise an error. if isinstance(self.model_wrapped, smp.model.DistributedModel): return self.model_wrapped return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again if self.accelerator.unwrap_model(model) is not model: return model # Mixed precision training with apex (torch < 1.6) if self.use_apex and training: model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) # Multi-gpu training (should be after apex fp16 initialization) / 8bit models does not support DDP if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False): model = nn.DataParallel(model) if self.args.jit_mode_eval: start_time = time.time() model = self.torch_jit_model_eval(model, dataloader, training) self.jit_compilation_time = round(time.time() - start_time, 4) # Note: in torch.distributed mode, there's no point in wrapping the model # inside a DistributedDataParallel as we'll be under `no_grad` anyways. 
if not training: return model # Distributed training (should be after apex fp16 initialization) # Distributed training using PyTorch FSDP if self.is_fsdp_xla_enabled: try: from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP from torch_xla.distributed.fsdp import checkpoint_module from torch_xla.distributed.fsdp.wrap import ( size_based_auto_wrap_policy, transformer_auto_wrap_policy, ) if self.is_fsdp_xla_v2_enabled: from torch_xla.experimental.spmd_fully_sharded_data_parallel import ( SpmdFullyShardedDataParallel as FSDPv2, ) except ImportError: raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.") auto_wrap_policy = None auto_wrapper_callable = None default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None) fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get( "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap ) if self.args.fsdp_config["min_num_params"] > 0: auto_wrap_policy = functools.partial( size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"] ) elif fsdp_transformer_layer_cls_to_wrap is not None: transformer_cls_to_wrap = set() for layer_class in fsdp_transformer_layer_cls_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception("Could not find the transformer layer class to wrap in the model.") else: transformer_cls_to_wrap.add(transformer_cls) auto_wrap_policy = functools.partial( transformer_auto_wrap_policy, # Transformer layer class to wrap transformer_layer_cls=transformer_cls_to_wrap, ) fsdp_kwargs = self.args.xla_fsdp_config if self.args.fsdp_config["xla_fsdp_grad_ckpt"]: if model.config.use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) model.config.use_cache = False # Apply gradient checkpointing to auto-wrapped sub-modules if specified def auto_wrapper_callable(m, *args, **kwargs): target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2 return target_cls(checkpoint_module(m), *args, **kwargs) # Wrap the base model with an outer FSDP wrapper if self.is_fsdp_xla_v2_enabled: def shard_output(output, mesh): from .modeling_outputs import CausalLMOutputWithPast real_output = None if isinstance(output, torch.Tensor): real_output = output elif isinstance(output, tuple): real_output = output[0] elif isinstance(output, CausalLMOutputWithPast): real_output = output.logits if real_output is None: raise ValueError("Something went wrong, the output of the model shouldn't be `None`") xs.mark_sharding(real_output, mesh, ("fsdp", None, None)) self.model = model = FSDPv2( model, shard_output=shard_output, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable, ) else: self.model = model = FSDP( model, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable, **fsdp_kwargs, ) # Patch `xm.optimizer_step` should not reduce gradients in this case, # as FSDP does not need gradient reduction over sharded parameters. 
def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): loss = optimizer.step(**optimizer_args) if barrier: xm.mark_step() return loss xm.optimizer_step = patched_optimizer_step elif is_sagemaker_dp_enabled(): model = nn.parallel.DistributedDataParallel( model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))] ) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: if is_torch_neuroncore_available(): return model kwargs = {} if self.args.ddp_find_unused_parameters is not None: kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters elif isinstance(model, PreTrainedModel): # find_unused_parameters breaks checkpointing as per # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021 kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing else: kwargs["find_unused_parameters"] = True if self.args.ddp_bucket_cap_mb is not None: kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb if self.args.ddp_broadcast_buffers is not None: kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs) return model def train( self, resume_from_checkpoint: Optional[Union[str, bool]] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, **kwargs, ): """ Main training entry point. Args: resume_from_checkpoint (`str` or `bool`, *optional*): If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here. trial (`optuna.Trial` or `Dict[str, Any]`, *optional*): The trial run or the hyperparameter dictionary for hyperparameter search. ignore_keys_for_eval (`List[str]`, *optional*) A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments used to hide deprecated arguments """ if resume_from_checkpoint is False: resume_from_checkpoint = None # memory metrics - must set up as early as possible self._memory_tracker.start() args = self.args self.is_in_train = True # Attach NEFTune hooks if necessary if self.neftune_noise_alpha is not None: self.model = self._activate_neftune(self.model) # do_train is not a reliable argument, as it might not be set and .train() still called, so # the following is a workaround: if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train and not self.is_model_parallel: self._move_model_to_device(self.model, args.device) if "model_path" in kwargs: resume_from_checkpoint = kwargs.pop("model_path") warnings.warn( "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` " "instead.", FutureWarning, ) if len(kwargs) > 0: raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") # This might change the seed so needs to run first. self._hp_search_setup(trial) self._train_batch_size = self.args.train_batch_size # Model re-init model_reloaded = False if self.model_init is not None: # Seed must be set before instantiating the model when using model_init. 
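# `model_init` must rebuild the model from scratch so every trial (or re-run) starts from freshly
# seeded weights. A minimal sketch, assuming optuna is installed and that `args` and `train_dataset`
# exist elsewhere; the checkpoint name and search space are illustrative, not taken from this file:
#
#     from transformers import AutoModelForSequenceClassification, Trainer
#
#     def model_init(trial=None):
#         return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#
#     trainer = Trainer(model_init=model_init, args=args, train_dataset=train_dataset)
#     best = trainer.hyperparameter_search(
#         hp_space=lambda trial: {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True)},
#         n_trials=10,
#         direction="minimize",
#     )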
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.model = self.call_model_init(trial) model_reloaded = True # Reinitializes optimizer and scheduler self.optimizer, self.lr_scheduler = None, None # Load potential model checkpoint if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: resume_from_checkpoint = get_last_checkpoint(args.output_dir) if resume_from_checkpoint is None: raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") if resume_from_checkpoint is not None: if not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint) # In case of repeating the find_executable_batch_size, set `self._train_batch_size` properly state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) if state.train_batch_size is not None: self._train_batch_size = state.train_batch_size # If model was re-initialized, put it on the right device and update self.model_wrapped if model_reloaded: if self.place_model_on_device: self._move_model_to_device(self.model, args.device) self.model_wrapped = self.model inner_training_loop = find_executable_batch_size( self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size ) if args.push_to_hub: try: # Disable progress bars when uploading models during checkpoints to avoid polluting stdout hf_hub_utils.disable_progress_bars() return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) finally: hf_hub_utils.enable_progress_bars() else: return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self.accelerator.free_memory() self._train_batch_size = batch_size if self.args.auto_find_batch_size: if self.state.train_batch_size != self._train_batch_size: from accelerate.utils import release_memory (self.model_wrapped,) = release_memory(self.model_wrapped) self.model_wrapped = self.model # Check for DeepSpeed *after* the intial pass and modify the config if self.is_deepspeed_enabled: # Temporarily unset `self.args.train_batch_size` original_bs = self.args.per_device_train_batch_size self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu) self.propagate_args_to_deepspeed(True) self.args.per_device_train_batch_size = original_bs self.state.train_batch_size = self._train_batch_size logger.debug(f"Currently training with a batch size of: {self._train_batch_size}") # Data loader and number of training steps train_dataloader = self.get_train_dataloader() if self.is_fsdp_xla_v2_enabled: train_dataloader = tpu_spmd_dataloader(train_dataloader) # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = self._train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None num_train_tokens = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = 
self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size if args.include_tokens_per_second: num_train_tokens = ( self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps ) else: max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs if args.include_tokens_per_second: num_train_tokens = self.num_tokens(train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size if args.include_tokens_per_second: num_train_tokens = self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torchrun or torch.distributed.launch (deprecated))." 
) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled # We need to reset the scheduler, as its parameters may be different on subsequent calls if self._created_lr_scheduler: self.lr_scheduler = None self._created_lr_scheduler = False if self.is_deepspeed_enabled: self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps) if not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState( stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ] ) self.state.is_hyper_param_search = trial is not None self.state.train_batch_size = self._train_batch_size # Compute absolute values for logging, eval, and save if given as ratio if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(max_steps * args.save_steps) else: self.state.save_steps = args.save_steps # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs) model = self._wrap_model(self.model_wrapped) # as the model is wrapped, don't use `accelerator.prepare` # this is for unhandled cases such as # FSDP-XLA, SageMaker MP/DP, DataParallel, IPEX use_accelerator_prepare = True if model is self.model else False if use_accelerator_prepare and self.is_fsdp_enabled: # In case of auto_find_batch_size=True # Remove FSDP wrapping from sub-models. self.model = unwrap_model(self.model, recursive=True) if delay_optimizer_creation: if use_accelerator_prepare: # configure fsdp plugin for qlora if any self._fsdp_qlora_plugin_updates() if self.accelerator.mixed_precision != "fp8": self.model = self.accelerator.prepare(self.model) self.create_optimizer_and_scheduler(num_training_steps=max_steps) # prepare using `accelerator` prepare if use_accelerator_prepare: self.model.train() if hasattr(self.lr_scheduler, "step"): if self.use_apex: model = self.accelerator.prepare(self.model) else: model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) else: # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config. 
model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) elif self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: # In this case we are in DDP + LOMO, which should be supported self.optimizer = self.accelerator.prepare(self.optimizer) if self.is_fsdp_enabled: self.model = self.model_wrapped = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # ckpt loading if resume_from_checkpoint is not None: if self.is_deepspeed_enabled: deepspeed_load_checkpoint( self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model) ) elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), # FSDP(Transformers Model), Dynamo Optimized Module(Transformers Model) etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples:,}") logger.info(f" Num Epochs = {num_train_epochs:,}") logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}") if self.args.per_device_train_batch_size != self._train_batch_size: logger.info(f" Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps:,}") logger.info(f" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}") self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) self.compare_trainer_and_checkpoint_args(self.args, self.state) self._load_callback_state() epochs_trained = int(self.state.global_step // num_update_steps_per_epoch) if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info(f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first" f" {steps_trained_in_current_epoch} batches in the first epoch." 
) # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() grad_norm: Optional[float] = None self.control = self.callback_handler.on_train_begin(args, self.state, self.control) if args.eval_on_start: self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True) for epoch in range(epochs_trained, num_train_epochs): epoch_dataloader = train_dataloader if hasattr(epoch_dataloader, "set_epoch"): epoch_dataloader.set_epoch(epoch) # Reset the past mems state at the beginning of each epoch if necessary. 
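# The loop below walks each epoch in chunks of `gradient_accumulation_steps` micro-batches, so a
# single optimizer update effectively sees
# per_device_train_batch_size * world_size * gradient_accumulation_steps samples.
# Illustrative arithmetic (example numbers, not defaults): with 1000 micro-batches per epoch and
# gradient_accumulation_steps=4, steps_in_epoch is 1000 but only 250 optimizer updates happen per
# epoch, and `state.global_step` counts those 250 updates rather than the 1000 forward/backward passes.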
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_dataloader) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False steps_skipped = 0 if steps_trained_in_current_epoch > 0: epoch_dataloader = skip_first_batches(epoch_dataloader, steps_trained_in_current_epoch) steps_skipped = steps_trained_in_current_epoch steps_trained_in_current_epoch = 0 rng_to_sync = True step = -1 epoch_iterator = iter(epoch_dataloader) # We chunkify the epoch iterator into gradient accumulation steps `n` batches remainder = num_examples % args.gradient_accumulation_steps if remainder == 0: remainder = args.gradient_accumulation_steps update_step = -1 total_updates = steps_in_epoch // args.gradient_accumulation_steps + 1 for _ in range(total_updates): update_step += 1 num_batches = args.gradient_accumulation_steps if update_step != (total_updates - 1) else remainder batch_samples, num_items_in_batch = self.get_batch_samples(epoch_iterator, num_batches) for i, inputs in enumerate(batch_samples): step += 1 do_sync_step = (step + 1) % args.gradient_accumulation_steps == 0 or (step + 1) == steps_in_epoch # Since we perform prefetching, we need to manually set sync_gradients if not do_sync_step: self.accelerator.gradient_state._set_sync_gradients(False) else: self.accelerator.gradient_state._set_sync_gradients(True) if self.args.include_num_input_tokens_seen: main_input_name = getattr(self.model, "main_input_name", "input_ids") if main_input_name not in inputs: logger.warning( "Tried to track the number of tokens seen, however the current model is " "not configured properly to know what item is the input. To fix this, add " "a `main_input_name` attribute to the model class you are using." 
) else: input_tokens = inputs[main_input_name].numel() input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64) self.state.num_input_tokens_seen += ( self.accelerator.gather(input_tokens).sum().cpu().item() ) if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # We explicitly want to avoid relying on `accelerator.accumulate` for generation training context = ( functools.partial(self.accelerator.no_sync, model=model) if i != len(batch_samples) - 1 and self.accelerator.distributed_type != DistributedType.DEEPSPEED else contextlib.nullcontext ) with context(): tr_loss_step = self.training_step(model, inputs, num_items_in_batch) if ( args.logging_nan_inf_filter and not is_torch_xla_available() and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) else: if tr_loss.device != tr_loss_step.device: raise ValueError( f"Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}" ) tr_loss = tr_loss + tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) if do_sync_step: # Since we perform prefetching, we need to manually set sync_gradients to True self.accelerator.gradient_state._set_sync_gradients(True) # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0: # deepspeed does its own clipping if is_sagemaker_mp_enabled() and args.fp16: _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm) elif self.use_apex: # Revert to normal clipping otherwise, handling Apex or full precision _grad_norm = nn.utils.clip_grad_norm_( amp.master_params(self.optimizer), args.max_grad_norm, ) else: _grad_norm = self.accelerator.clip_grad_norm_( model.parameters(), args.max_grad_norm, ) if ( is_accelerate_available() and self.accelerator.distributed_type == DistributedType.DEEPSPEED ): grad_norm = model.get_global_grad_norm() # In some cases the grad norm may not return a float if hasattr(grad_norm, "item"): grad_norm = grad_norm.item() else: grad_norm = _grad_norm self.control = self.callback_handler.on_pre_optimizer_step(args, self.state, self.control) self.optimizer.step() self.control = self.callback_handler.on_optimizer_step(args, self.state, self.control) optimizer_was_run = not self.accelerator.optimizer_step_was_skipped if optimizer_was_run: # Delay optimizer scheduling until metrics are generated if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate( tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time ) else: self.control = 
self.callback_handler.on_substep_end(args, self.state, self.control) # PyTorch/XLA relies on the data loader to insert the mark_step for # each step. Since we are breaking the loop early, we need to manually # insert the mark_step here. if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break # We also need to break out of the nested loop if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break if step < 0: logger.warning( "There seems not to be a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_xla_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sure the model has been saved by process 0. if is_torch_xla_available(): xm.rendezvous("load_best_model_at_end") elif args.parallel_mode == ParallelMode.DISTRIBUTED: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() effective_global_step = max(self.state.global_step, 0.001) # Avoid ZeroDivisionError train_loss = self._total_loss_scalar / effective_global_step metrics = speed_metrics( "train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps, num_tokens=num_train_tokens, ) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save. if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: for checkpoint in checkpoints_sorted: if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) self.control = self.callback_handler.on_train_end(args, self.state, self.control) # Wait for the checkpoint to be uploaded. self._finish_current_push() # After training we make sure to retrieve back the original forward pass method # for the embedding layer by removing the forward post hook. 
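# The best-model reload and checkpoint rotation above are controlled by `TrainingArguments`.
# A minimal sketch with illustrative values (assumes an eval dataset is provided so that
# `eval_loss` exists; `eval_strategy` was called `evaluation_strategy` in older releases):
#
#     args = TrainingArguments(
#         output_dir="out",
#         eval_strategy="steps",
#         eval_steps=500,
#         save_steps=500,
#         save_total_limit=1,             # older checkpoints are rotated away...
#         load_best_model_at_end=True,    # ...but the best one is kept and reloaded here
#         metric_for_best_model="eval_loss",
#         greater_is_better=False,
#     )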
if self.neftune_noise_alpha is not None: self._deactivate_neftune(self.model) return TrainOutput(self.state.global_step, train_loss, metrics) def _get_output_dir(self, trial): if self.hp_search_backend is not None and trial is not None: if self.hp_search_backend == HPSearchBackend.OPTUNA: run_id = trial.number elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train run_id = ray.train.get_context().get_trial_id() elif self.hp_search_backend == HPSearchBackend.SIGOPT: run_id = trial.id elif self.hp_search_backend == HPSearchBackend.WANDB: import wandb run_id = wandb.run.id run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" run_dir = os.path.join(self.args.output_dir, run_name) else: run_dir = self.args.output_dir return run_dir def _load_from_checkpoint(self, resume_from_checkpoint, model=None): if model is None: model = self.model config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and ( # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used any( FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) ) # this checks the FSDP state dict when `FULL_STATE_DICT` is used or os.path.isfile(os.path.join(resume_from_checkpoint, f"{FSDP_MODEL_NAME}.bin")) ) # if multiple adapters exist, they get saved in sub directories adapter_subdirs = ( [ folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) and ( os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_WEIGHTS_NAME)) or os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_SAFE_WEIGHTS_NAME)) ) ] if os.path.isdir(resume_from_checkpoint) else [] ) if is_fsdp_ckpt and not self.is_fsdp_enabled: raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP") if not ( any( os.path.isfile(f) for f in [ weights_file, safe_weights_file, weights_index_file, safe_weights_index_file, adapter_weights_file, adapter_safe_weights_file, ] ) or is_fsdp_ckpt or adapter_subdirs ): raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") logger.info(f"Loading model from {resume_from_checkpoint}.") if os.path.isfile(config_file): config = PretrainedConfig.from_json_file(config_file) checkpoint_version = config.transformers_version if checkpoint_version is not None and checkpoint_version != __version__: logger.warning( f"You are resuming training from a checkpoint trained with {checkpoint_version} of " f"Transformers but your current version is {__version__}. This is not recommended and could " "yield to errors or unwanted behaviors." ) if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt: weights_only_kwarg = {"weights_only": True} # If the model is on the GPU, it still works! 
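# A resume checkpoint directory typically looks like the sketch below (default file names listed
# for illustration; exact contents depend on how the checkpoint was saved). This method loads the
# model weights or adapters from it, while optimizer, scheduler and RNG states are restored separately:
#
#     checkpoint-500/
#         config.json
#         model.safetensors            (or pytorch_model.bin, or sharded shards plus an index file)
#         adapter_model.safetensors    (PEFT runs save adapters instead of full weights)
#         optimizer.pt  scheduler.pt  rng_state.pth  trainer_state.json
#
#     trainer.train(resume_from_checkpoint="out/checkpoint-500")   # routes through this loader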
if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): # If the 'user_content.pt' file exists, load with the new smp api. # Checkpoint must have been saved with the new smp api. smp.resume_from_checkpoint( path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False ) else: # If the 'user_content.pt' file does NOT exist, load with the old smp api. # Checkpoint must have been saved with the old smp api. if hasattr(self.args, "fp16") and self.args.fp16 is True: logger.warning( "Enabling FP16 and loading from smp < 1.10 checkpoint together is not supported." ) state_dict = torch.load( weights_file, map_location="cpu", **weights_only_kwarg, ) # Required for smp to not auto-translate state_dict from hf to smp (is already smp). state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) # release memory del state_dict elif self.is_fsdp_enabled: load_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, model, resume_from_checkpoint, **_get_fsdp_ckpt_kwargs(), ) else: # We load the model state dict on the CPU to avoid an OOM error. if self.args.save_safetensors and os.path.isfile(safe_weights_file): state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu") else: state_dict = torch.load( weights_file, map_location="cpu", **weights_only_kwarg, ) # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 # which takes *args instead of **kwargs load_result = model.load_state_dict(state_dict, False) # release memory del state_dict self._issue_warnings_after_load(load_result) # Load adapters following PR #24096 elif _is_peft_model(model): # If training a model with PEFT & LoRA, assume that the adapter has been saved properly. # TODO: in the future support only specific min PEFT versions if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr( model, "load_adapter" ): if os.path.exists(resume_from_checkpoint): # For backward compatibility with older PEFT versions if hasattr(model, "active_adapters"): active_adapters = model.active_adapters if len(active_adapters) > 1: logger.warning("Multiple active adapters detected, will only consider the first adapter") active_adapter = active_adapters[0] else: active_adapter = model.active_adapter if adapter_subdirs: for subdir_name in adapter_subdirs: peft_id = os.path.join(resume_from_checkpoint, subdir_name) model.load_adapter(peft_id, subdir_name, is_trainable=(subdir_name == active_adapter)) model.set_adapter(active_adapter) else: model.load_adapter(resume_from_checkpoint, active_adapter, is_trainable=True) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. 
" "Check some examples here: https://github.com/huggingface/peft/issues/96" ) else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") else: # We load the sharded checkpoint load_result = load_sharded_checkpoint( model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) def _load_best_model(self): logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).") best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME) best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME) best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.is_deepspeed_enabled: deepspeed_load_checkpoint( self.model_wrapped, self.state.best_model_checkpoint, load_module_strict=not _is_peft_model(self.model), ) elif self.is_fsdp_enabled: load_result = load_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, model, self.state.best_model_checkpoint, **_get_fsdp_ckpt_kwargs(), ) elif ( os.path.exists(best_model_path) or os.path.exists(best_safe_model_path) or os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path) ): has_been_loaded = True weights_only_kwarg = {"weights_only": True} if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")): # If the 'user_content.pt' file exists, load with the new smp api. # Checkpoint must have been saved with the new smp api. smp.resume_from_checkpoint( path=self.state.best_model_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False, ) else: # If the 'user_content.pt' file does NOT exist, load with the old smp api. # Checkpoint must have been saved with the old smp api. if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load( best_model_path, map_location="cpu", **weights_only_kwarg, ) state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) else: if _is_peft_model(model): # If train a model using PEFT & LoRA, assume that adapter have been saved properly. 
# TODO: in the future support only specific min PEFT versions if (hasattr(model, "active_adapter") or hasattr(model, "active_adapters")) and hasattr( model, "load_adapter" ): # For BC for older PEFT versions if hasattr(model, "active_adapters"): active_adapter = model.active_adapters[0] if len(model.active_adapters) > 1: logger.warning("Detected multiple active adapters, will only consider the first one") else: active_adapter = model.active_adapter if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): try: model.load_adapter(self.state.best_model_checkpoint, active_adapter) except RuntimeError as exc: if model.peft_config[active_adapter].is_prompt_learning: # for context: https://github.com/huggingface/peft/issues/2256 msg = ( "When using prompt learning PEFT methods such as " f"{model.peft_config[active_adapter].peft_type.value}, setting " "load_best_model_at_end=True can lead to errors, it is recommended " "to set this to False and to load the model manually from the checkpoint " "directory using PeftModel.from_pretrained(base_model, <path>) after training " "has finished." ) raise RuntimeError(msg) from exc else: raise # Load_adapter has no return value present, modify it when appropriate. from torch.nn.modules.module import _IncompatibleKeys load_result = _IncompatibleKeys([], []) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. " "Check some examples here: https://github.com/huggingface/peft/issues/96" ) has_been_loaded = False else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") has_been_loaded = False else: # We load the model state dict on the CPU to avoid an OOM error. if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load( best_model_path, map_location="cpu", **weights_only_kwarg, ) # If the model is on the GPU, it still works! # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 # which takes *args instead of **kwargs load_result = model.load_state_dict(state_dict, False) if not is_sagemaker_mp_enabled() and has_been_loaded: self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_INDEX_NAME)) or os.path.exists( os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME) ): load_result = load_sharded_checkpoint( model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) else: logger.warning( f"Could not locate the best model at {best_model_path}, if you are running a distributed training " "on multiple nodes, you should activate `--save_on_each_node`." ) def _issue_warnings_after_load(self, load_result): if len(load_result.missing_keys) != 0: if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( self.model._keys_to_ignore_on_save ): self.model.tie_weights() else: logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") if len(load_result.unexpected_keys) != 0: logger.warning( f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." 
) def _evaluate(self, trial, ignore_keys_for_eval, skip_scheduler=False): metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics) # Run delayed LR scheduler now that metrics are populated if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) and not skip_scheduler: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: self.lr_scheduler.step(metrics[metric_to_check]) except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', " f"which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. " f"Please ensure that the `compute_metrics` function returns a dictionary that includes '{metric_to_check}' or " f"consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc return metrics def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time): if self.control.should_log and self.state.global_step > self._globalstep_last_logged: if is_torch_xla_available(): xm.mark_step() logs: Dict[str, float] = {} # all_gather + mean() to get average loss over all processes tr_loss_scalar = self._nested_gather(tr_loss).mean().item() # reset tr_loss to zero tr_loss -= tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) if grad_norm is not None: logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm logs["learning_rate"] = self._get_learning_rate() self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos() self.log(logs, start_time) metrics = None if self.control.should_evaluate: metrics = self._evaluate(trial, ignore_keys_for_eval) is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial) if self.args.save_strategy == SaveStrategy.BEST: self.control.should_save = is_new_best_metric if self.control.should_save: self._save_checkpoint(model, trial) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _load_rng_state(self, checkpoint): # Load RNG states from `checkpoint` if checkpoint is None: return if self.args.world_size > 1: process_index = self.args.process_index rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(checkpoint, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." 
) return with safe_globals(): checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_xla_available(): xm.set_rng_state(checkpoint_rng_state["xla"]) if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.npu.random.set_rng_state_all(checkpoint_rng_state["npu"]) else: try: torch.npu.random.set_rng_state(checkpoint_rng_state["npu"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the NPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_mlu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.mlu.random.set_rng_state_all(checkpoint_rng_state["mlu"]) else: try: torch.mlu.random.set_rng_state(checkpoint_rng_state["mlu"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the MLU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_musa_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.musa.set_rng_state_all(checkpoint_rng_state["musa"]) else: try: torch.musa.set_rng_state(checkpoint_rng_state["musa"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the MUSA because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def _determine_best_metric(self, metrics, trial): """ Determine if the model should be saved based on the evaluation metrics. Returns: bool: True if a new best metric was found, else False """ is_new_best_metric = False if self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" try: metric_value = metrics[metric_to_check] except KeyError as exc: raise KeyError( f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. " f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments." ) from exc operator = np.greater if self.args.greater_is_better else np.less if self.state.best_metric is None: self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf") if operator(metric_value, self.state.best_metric): run_dir = self._get_output_dir(trial=trial) checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" output_dir = os.path.join(run_dir, checkpoint_folder) self.state.best_metric = metric_value self.state.best_model_checkpoint = output_dir is_new_best_metric = True return is_new_best_metric def _save_checkpoint(self, model, trial): # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we # want to save except FullyShardedDDP. 
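# A checkpoint is written here whenever `self.control.should_save` is set. Note on resume behaviour:
# the optimizer/scheduler and RNG states saved below are what make
# `train(resume_from_checkpoint=...)` fully resumable; with `TrainingArguments(save_only_model=True)`
# only the model weights are written, so such a checkpoint can be loaded but not fully resumed.
# Illustrative call, assuming a previous run wrote `out/checkpoint-500`:
#
#     trainer.train(resume_from_checkpoint="out/checkpoint-500")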
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model" # Save model checkpoint checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" if self.hp_search_backend is None and trial is None: self.store_flos() run_dir = self._get_output_dir(trial=trial) output_dir = os.path.join(run_dir, checkpoint_folder) self.save_model(output_dir, _internal_call=True) if not self.args.save_only_model: # Save optimizer and scheduler self._save_optimizer_and_scheduler(output_dir) # Save RNG state self._save_rng_state(output_dir) # Save the Trainer state if self.args.should_save: # Update `ExportableState` callbacks and `TrainerControl` state to where we are currently for cb in [ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ]: cb_name = cb.__class__.__name__ cb_state = cb.state() if isinstance(self.state.stateful_callbacks[cb_name], list): self.state.stateful_callbacks[cb_name].append(cb_state) else: self.state.stateful_callbacks[cb_name] = cb_state self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) if self.args.push_to_hub: self._push_from_checkpoint(output_dir) # Maybe delete some older checkpoints. if self.args.should_save: # Solely rely on numerical checkpoint id for rotation. # mtime is not reliable especially on some fuse fs in cloud environments. self._rotate_checkpoints(use_mtime=False, output_dir=run_dir) def _save_rng_state(self, output_dir): # Save RNG state in non-distributed training rng_states = { "python": random.getstate(), "numpy": np.random.get_state(), "cpu": torch.random.get_rng_state(), } if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) rng_states["cuda"] = torch.cuda.random.get_rng_state_all() else: rng_states["cuda"] = torch.cuda.random.get_rng_state() if is_torch_xla_available(): rng_states["xla"] = xm.get_rng_state() if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["npu"] = torch.npu.random.get_rng_state_all() else: rng_states["npu"] = torch.npu.random.get_rng_state() if is_torch_mlu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["mlu"] = torch.mlu.random.get_rng_state_all() else: rng_states["mlu"] = torch.mlu.random.get_rng_state() if is_torch_musa_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["musa"] = torch.musa.get_rng_state_all() else: rng_states["musa"] = torch.musa.get_rng_state() # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may # not yet exist. 
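# The `rng_states` dictionary assembled above is saved below as `rng_state.pth` (or
# `rng_state_{process_index}.pth` when world_size > 1) and is what `_load_rng_state` restores on
# resume, so dataloader shuffling and dropout continue from the same point instead of being
# re-randomized.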
os.makedirs(output_dir, exist_ok=True) if self.args.world_size <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) def _save_optimizer_and_scheduler(self, output_dir): if is_torch_xla_available(): xm.rendezvous("saving_optimizer_states") if self.is_fsdp_xla_v1_enabled: optm = { "optimizer": self.optimizer.state_dict(), "shard_metadata": self.model.get_shard_metadata(), } xm.save( optm, os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), master_only=False, ) else: xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) elif is_sagemaker_mp_enabled(): opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) smp.barrier() if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: smp.save( opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME), partial=True, v3=smp.state.cfg.shard_optimizer_state, ) elif self.is_deepspeed_enabled: # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed # config `stage3_gather_16bit_weights_on_model_save` is True accept_exclude_frozen_parameters = "exclude_frozen_parameters" in set( inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys() ) if accept_exclude_frozen_parameters and _is_peft_model(self.model): self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True) else: self.model_wrapped.save_checkpoint(output_dir) elif self.is_fsdp_enabled: # save fsdp specific ckpt for resuming from ckpt save_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs() ) save_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir ) elif self.args.should_save: # deepspeed.save_checkpoint above saves model/optim/sched torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) # Save SCHEDULER & SCALER is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( self.lr_scheduler, DeepSpeedSchedulerWrapper ) if ( self.args.should_save and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) and not is_torch_xla_available() ): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): """If optimizer and scheduler states exist, load them.""" if checkpoint is None: return if self.is_deepspeed_enabled: # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings) return checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") if is_sagemaker_mp_enabled() else ( os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) or ( os.path.isdir(checkpoint) and any( OPTIMIZER_NAME_BIN.split(".")[0] in folder_name for folder_name in 
os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) ) ) ) checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, f"rank*-of-{self.args.world_size}-{OPTIMIZER_NAME}")) if self.is_fsdp_xla_v1_enabled else checkpoint_file_exists ) if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): # Load in optimizer and scheduler states if is_torch_xla_available(): # On TPU we have to take some extra precautions to properly load the states on the right device. if self.is_fsdp_xla_v1_enabled: optimizer_state = torch.load( os.path.join( checkpoint, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}" ), map_location="cpu", ) # We only need `optimizer` when resuming from checkpoint optimizer_state = optimizer_state["optimizer"] else: optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") with warnings.catch_warnings(record=True) as caught_warnings: lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu") reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): # Optimizer checkpoint was saved with smp >= 1.10 def opt_load_hook(mod, opt): opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) else: # Optimizer checkpoint was saved with smp < 1.10 def opt_load_hook(mod, opt): if IS_SAGEMAKER_MP_POST_1_10: opt.load_state_dict( smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) ) else: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) self.model_wrapped.register_post_step_hook(opt_load_hook) else: # We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models. # In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more # likely to get OOM on CPU (since we load num_gpu times the optimizer state map_location = self.args.device if self.args.world_size > 1 else "cpu" if self.is_fsdp_enabled: load_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, checkpoint, **_get_fsdp_ckpt_kwargs(), ) else: self.optimizer.load_state_dict( torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) ) with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings) def _load_callback_state(self): """If callback states exist and were passed in, restore their states if enabled""" if not self.args.restore_callback_states_from_checkpoint: return # Callback states are stored in stateful_callbacks not_found = [] new_callbacks = [] original_callbacks = self.callback_handler.callbacks + [self.control] for stored_callback, data in self.state.stateful_callbacks.items(): if not isinstance(data, list): data = [data] if any(callback.__class__.__name__ == stored_callback for callback in original_callbacks): # We can load/restore from multiple callbacks of the same type. 
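# For a callback to be restored here it has to be exportable: its `state()` must return a dict with
# "args" (constructor kwargs) and "attributes" (plain attributes to set back), and
# `restore_callback_states_from_checkpoint=True` must be set. A minimal sketch of such a callback
# (illustrative, not from this file; import path may vary by version):
#
#     from transformers.trainer_callback import TrainerCallback, ExportableState
#
#     class CounterCallback(TrainerCallback, ExportableState):
#         def __init__(self, start=0):
#             self.start = start
#             self.seen = 0
#
#         def on_step_end(self, args, state, control, **kwargs):
#             self.seen += 1
#
#         def state(self):
#             return {"args": {"start": self.start}, "attributes": {"seen": self.seen}}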
duplicates = [ callback for callback in original_callbacks if callback.__class__.__name__ == stored_callback ] for callback, callback_data in zip(duplicates, data): args = callback_data.get("args", {}) attributes = callback_data.get("attributes", {}) new_callback = type(callback)(**args) for attribute, value in attributes.items(): setattr(new_callback, attribute, value) if isinstance(callback, TrainerControl): # Specifically for restoring the `control` state self.control = new_callback else: new_callbacks.append(new_callback) # We remove the existing callback and add it to the list of new callbacks self.callback_handler.remove_callback(type(new_callback)) logger.info("Continuing training from checkpoint, restoring any callbacks that were passed in") else: not_found.append(stored_callback) if len(not_found) > 0: logger.warning( f"Checkpoint included callbacks not included in current configuration. Ignoring. ({', '.join(not_found)})" ) for callback in new_callbacks: self.callback_handler.add_callback(callback) def hyperparameter_search( self, hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: Union[str, List[str]] = "minimize", backend: Optional[Union["str", HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs, ) -> Union[BestRun, List[BestRun]]: """ Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise. <Tip warning={true}> To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom optimizer/scheduler. </Tip> Args: hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): A function that defines the hyperparameter search space. Will default to [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or [`~trainer_utils.default_hp_space_sigopt`] depending on your backend. compute_objective (`Callable[[Dict[str, float]], float]`, *optional*): A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` method. Will default to [`~trainer_utils.default_compute_objective`]. n_trials (`int`, *optional*, defaults to 100): The number of trial runs to test. direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`): If it's single objective optimization, direction is `str`, can be `"minimize"` or `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. If it's multi objectives optimization, direction is `List[str]`, can be List of `"minimize"` and `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): A function that defines the trial/run name. 
Will default to None. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments for each backend: - `optuna`: parameters from [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run). If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available). If `progress_reporter` is not set in the `kwargs`, [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used. - `sigopt`: the parameter `proxies` from [sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy). Returns: [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray backend. """ if backend is None: backend = default_hp_search_backend() backend = HPSearchBackend(backend) backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]() backend_obj.ensure_available() self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective best_run = backend_obj.run(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: Dict[str, float], start_time: Optional[float] = None) -> None: """ Log `logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (`Dict[str, float]`): The values to log. start_time (`Optional[float]`): The start of training. """ if self.state.epoch is not None: logs["epoch"] = self.state.epoch if self.args.include_num_input_tokens_seen: logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen if start_time is not None: speed_metrics("train", start_time, num_tokens=self.state.num_input_tokens_seen) output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: """ Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. """ if isinstance(data, Mapping): return type(data)({k: self._prepare_input(v) for k, v in data.items()}) elif isinstance(data, (tuple, list)): return type(data)(self._prepare_input(v) for v in data) elif isinstance(data, torch.Tensor): kwargs = {"device": self.args.device} if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): # NLP models inputs are int/uint and those get adjusted to the right dtype of the # embedding. 
Other models such as wav2vec2's inputs are already float and thus # may need special handling to match the dtypes of the model kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) return data.to(**kwargs) return data def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ inputs = self._prepare_input(inputs) if len(inputs) == 0: raise ValueError( "The batch received was empty, your model won't be able to train on it. Double-check that your " f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." ) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past return inputs def compute_loss_context_manager(self): """ A helper wrapper to group together context managers. """ return self.autocast_smart_context_manager() def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): """ A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired arguments, depending on the situation. """ if self.use_cpu_amp: ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) else: ctx_manager = contextlib.nullcontext() return ctx_manager def training_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], num_items_in_batch=None ) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to train. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. Return: `torch.Tensor`: The tensor with training loss on this batch. 
""" model.train() if hasattr(self.optimizer, "train") and callable(self.optimizer.train): self.optimizer.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch) del inputs if ( self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0 ): if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_musa_available(): torch.musa.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() elif is_torch_mps_available(min_version="2.0"): torch.mps.empty_cache() else: torch.cuda.empty_cache() kwargs = {} # For LOMO optimizers you need to explicitly use the learnign rate if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs["learning_rate"] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: # Finally we need to normalize the loss for reporting if not self.model_accepts_loss_kwargs and self.compute_loss_func is None: loss = loss / self.args.gradient_accumulation_steps self.accelerator.backward(loss, **kwargs) return loss.detach() def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. """ if (self.label_smoother is not None or self.compute_loss_func is not None) and "labels" in inputs: labels = inputs.pop("labels") else: labels = None if self.model_accepts_loss_kwargs: loss_kwargs = {} if num_items_in_batch is not None: loss_kwargs["num_items_in_batch"] = num_items_in_batch inputs = {**inputs, **loss_kwargs} outputs = model(**inputs) # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): model_name = unwrapped_model.base_model.model._get_name() else: model_name = unwrapped_model._get_name() # User-defined compute_loss function if self.compute_loss_func is not None: loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch) elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. 
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] if self.args.average_tokens_across_devices and self.model_accepts_loss_kwargs: loss *= self.accelerator.num_processes return (loss, outputs) if return_outputs else loss def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ return self.args.local_process_index == 0 def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). """ # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global # process index. if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.args.process_index == 0 def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): """ Will save the model, so you can reload it using `from_pretrained()`. Will only save from the main process. """ if output_dir is None: output_dir = self.args.output_dir if is_torch_xla_available(): self._save_tpu(output_dir) elif is_sagemaker_mp_enabled(): # Calling the state_dict needs to be done on the wrapped model and on all processes. os.makedirs(output_dir, exist_ok=True) state_dict = self.model_wrapped.state_dict() if self.args.should_save: self._save(output_dir, state_dict=state_dict) if IS_SAGEMAKER_MP_POST_1_10: # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 Path(os.path.join(output_dir, "user_content.pt")).touch() elif self.is_fsdp_enabled: if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( version.parse(accelerate_version) > version.parse("0.24.1") ): state_dict = self.accelerator.get_state_dict(self.model) if self.args.should_save: self._save(output_dir, state_dict=state_dict) elif self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning( " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" " zero_to_fp32.py to recover weights" ) if self.args.should_save: self._save(output_dir, state_dict={}) # remove the dummy state_dict remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model_wrapped.save_checkpoint(output_dir) elif self.args.should_save: self._save(output_dir) # Push to the Hub when `save_model` is called by the user. if self.args.push_to_hub and not _internal_call: self.push_to_hub(commit_message="Model save") def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f"Saving model checkpoint to {output_dir}") model = self.model xm.mark_step() if xm.is_master_ordinal(local=False): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` supported_classes = (PushToHubMixin,) xm.rendezvous("saving_checkpoint") if self.is_fsdp_xla_v1_enabled: ckpt = { "model": model.state_dict(), "shard_metadata": model.get_shard_metadata(), } ckpt_path = os.path.join( output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{WEIGHTS_NAME}" ) # All ranks save sharded checkpoint xm.save(ckpt, ckpt_path, master_only=False) # Make sure all ranks have saved checkpoints xm.rendezvous("save_full_checkpoints") # Master save full checkpoint if self.args.should_save: from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints full_state_dict, _ = consolidate_sharded_model_checkpoints( ckpt_prefix=os.path.join(output_dir, ""), ckpt_suffix=f"rank*-of-*-{WEIGHTS_NAME}", save_model=False, ) model = model.module.module unwrapped_model = self.accelerator.unwrap_model(model) if isinstance(unwrapped_model, supported_classes): unwrapped_model.save_pretrained( output_dir, state_dict=full_state_dict, save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") xm.save(full_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) elif not isinstance(model, supported_classes): if isinstance(self.accelerator.unwrap_model(model), supported_classes): self.accelerator.unwrap_model(model).save_pretrained( output_dir, is_main_process=self.args.should_save, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), save_function=xm.save, safe_serialization=self.args.save_safetensors, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = xm._maybe_convert_to_cpu(model.state_dict()) xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: model.save_pretrained( output_dir, is_main_process=self.args.should_save, save_function=xm.save, safe_serialization=self.args.save_safetensors, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), ) if self.processing_class is not None and self.args.should_save: self.processing_class.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, supported_classes): if state_dict is None: state_dict = self.model.state_dict() if isinstance(self.accelerator.unwrap_model(self.model), supported_classes): self.accelerator.unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if self.args.save_safetensors: safetensors.torch.save_file( state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"} ) else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) if self.processing_class is not None: self.processing_class.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def store_flos(self): # Storing the number of floating-point operations that went into the model if self.args.parallel_mode == ParallelMode.DISTRIBUTED: self.state.total_flos += ( distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() ) self.current_flos = 0 else: self.state.total_flos += self.current_flos self.current_flos = 0 def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False ) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if ( self.state.best_model_checkpoint is not None and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted ): best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) for i in range(best_model_index, len(checkpoints_sorted) - 2): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
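# Illustrative example (hypothetical paths): with `save_total_limit=1`, sorted checkpoints
# ["checkpoint-500", "checkpoint-1000"] and `best_model_checkpoint="checkpoint-500"`, the
# effective limit is bumped to 2 below so that both the best and the most recent checkpoint
# survive rotation and training can still resume from the latest one.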
save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[-1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def evaluate( self, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior. Args: eval_dataset (Union[`Dataset`, Dict[str, `Dataset`]), *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the `__len__` method. <Tip> If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run separate evaluations on each dataset. This can be useful to monitor how training affects other datasets or simply to get a more fine-grained evaluation. When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`. </Tip> ignore_keys (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is "eval" (default) Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. 
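        Example (illustrative; assumes an already-constructed `trainer` and eval datasets):

        ```python
        metrics = trainer.evaluate()
        # With a dict of datasets, metric keys are prefixed per dataset, e.g. "eval_val_loss".
        metrics = trainer.evaluate(eval_dataset={"val": val_dataset, "test": test_dataset})
        ```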
""" # handle multipe eval datasets override = eval_dataset is not None eval_dataset = eval_dataset if override else self.eval_dataset if isinstance(eval_dataset, dict): metrics = {} for eval_dataset_name, _eval_dataset in eval_dataset.items(): dataset_metrics = self.evaluate( eval_dataset=_eval_dataset if override else eval_dataset_name, ignore_keys=ignore_keys, metric_key_prefix=f"{metric_key_prefix}_{eval_dataset_name}", ) metrics.update(dataset_metrics) return metrics # memory metrics - must set up as early as possible self._memory_tracker.start() eval_dataloader = self.get_eval_dataloader(eval_dataset) if self.is_fsdp_xla_v2_enabled: eval_dataloader = tpu_spmd_dataloader(eval_dataloader) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] if f"{metric_key_prefix}_model_preparation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.log(output.metrics) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return output.metrics def predict( self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test" ) -> PredictionOutput: """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`. Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the `model.forward()` method are automatically removed. Has to implement the method `__len__` ignore_keys (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"test"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "test_bleu" if the prefix is "test" (default) <Tip> If your predictions or labels have different sequence length (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). 
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). """ # memory metrics - must set up as early as possible self._memory_tracker.start() test_dataloader = self.get_test_dataloader(test_dataset) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] if f"{metric_key_prefix}_model_preparation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only # if eval is called w/o train, handle model prep here if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: start_time = time.time() model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled or (self.is_fsdp_enabled and self.accelerator.mixed_precision != "fp8") else self.accelerator.prepare_model(model, evaluation_mode=True) ) self.model_preparation_time = round(time.time() - start_time, 4) if self.is_fsdp_enabled: self.model = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f"\n***** Running {description} *****") if has_length(dataloader): logger.info(f" Num examples = {self.num_examples(dataloader)}") else: logger.info(" Num examples: Unknown") logger.info(f" Batch size = {batch_size}") model.eval() if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval): self.optimizer.eval() self.callback_handler.eval_dataloader = dataloader # Do this before wrapping. 
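# The loop below fills the EvalLoopContainers that are eventually handed to `self.compute_metrics`
# as an `EvalPrediction`. A minimal sketch of such a function (the metric choice is illustrative):
#   import numpy as np
#   def compute_metrics(eval_pred):
#       logits, labels = eval_pred.predictions, eval_pred.label_ids
#       preds = np.argmax(logits, axis=-1)
#       return {"accuracy": float((preds == labels).mean())}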
eval_dataset = getattr(dataloader, "dataset", None) if args.past_index >= 0: self._past = None # Initialize containers all_losses = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_preds = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_labels = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_inputs = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) metrics = None eval_set_kwargs = {} # Will be useful when we have an iterable dataset so don't know its length. observed_num_examples = 0 # Main evaluation loop for step, inputs in enumerate(dataloader): # Update the observed num examples observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size # For batch samplers, batch_size is not known by the dataloader in advance. if batch_size is None: batch_size = observed_batch_size # Prediction step losses, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = ( self._prepare_input(inputs[main_input_name]) if "inputs" in args.include_for_metrics else None ) if is_torch_xla_available(): xm.mark_step() # Update containers if losses is not None: losses = self.gather_function((losses.repeat(batch_size))) all_losses.add(losses) if inputs_decode is not None: inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) inputs_decode = self.gather_function((inputs_decode)) if not self.args.batch_eval_metrics or description == "Prediction": all_inputs.add(inputs_decode) if labels is not None: # Pad labels here, preparing for preprocess_logits_for_metrics in next logits block. labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) if logits is not None: logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) logits = self.gather_function((logits)) if not self.args.batch_eval_metrics or description == "Prediction": all_preds.add(logits) if labels is not None: labels = self.gather_function((labels)) if not self.args.batch_eval_metrics or description == "Prediction": all_labels.add(labels) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and logits is not None and labels is not None: is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs["losses"] = losses if "loss" in args.include_for_metrics else None batch_kwargs["inputs"] = inputs if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=logits, label_ids=labels, **batch_kwargs), compute_result=is_last_step, ) del losses, logits, labels, inputs torch.cuda.empty_cache() # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
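# Illustrative example (hypothetical numbers): with `eval_accumulation_steps=16` and a per-device
# eval batch size of 8, accumulated predictions/labels are offloaded to CPU/NumPy every 16 eval
# steps (~128 examples per device) instead of being kept on the accelerator for the whole loop,
# capping peak device memory at the cost of extra host transfers.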
elif args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: all_losses.to_cpu_and_numpy() all_preds.to_cpu_and_numpy() all_labels.to_cpu_and_numpy() all_inputs.to_cpu_and_numpy() del losses, logits, labels, inputs torch.cuda.empty_cache() # After all calls to `.gather_function`, reset to `gather_for_metrics`: self.gather_function = self.accelerator.gather_for_metrics if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU all_losses = all_losses.get_arrays() all_preds = all_preds.get_arrays() all_labels = all_labels.get_arrays() all_inputs = all_inputs.get_arrays() # Number of samples if has_length(eval_dataset): num_samples = len(eval_dataset) # The instance check is weird and does not actually check for the type, but whether the dataset has the right # methods. Therefore we need to make sure it also has the attribute. elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: # both len(dataloader.dataset) and len(dataloader) fail num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples # Metrics! if ( self.compute_metrics is not None and all_preds is not None and all_labels is not None and not self.args.batch_eval_metrics ): eval_set_kwargs["losses"] = all_losses if "loss" in args.include_for_metrics else None eval_set_kwargs["inputs"] = all_inputs if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=all_preds, label_ids=all_labels, **eval_set_kwargs) ) elif metrics is None: metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if isinstance(all_losses, list) and all_losses: metrics[f"{metric_key_prefix}_loss"] = np.concatenate(all_losses).mean().item() elif isinstance(all_losses, np.ndarray): metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() if hasattr(self, "jit_compilation_time"): metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time if hasattr(self, "model_preparation_time"): metrics[f"{metric_key_prefix}_model_preparation_time"] = self.model_preparation_time # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def _nested_gather(self, tensors, name=None): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): if name is None: name = "nested_gather" tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif (self.args.distributed_state is not None and self.args.distributed_state.distributed_type != "NO") or ( self.args.distributed_state is None and self.args.local_rank != -1 ): tensors = distributed_concat(tensors) return tensors def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> 
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. Return: Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) # For CLIP-like models capable of returning loss values. # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss` # is `True` in `model.forward`. return_loss = inputs.get("return_loss", None) if return_loss is None: return_loss = self.can_return_loss loss_without_labels = True if len(self.label_names) == 0 and return_loss else False inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. if has_labels or loss_without_labels: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None with torch.no_grad(): if is_sagemaker_mp_enabled(): raw_outputs = smp_forward_only(model, inputs) if has_labels or loss_without_labels: if isinstance(raw_outputs, dict): loss_mb = raw_outputs["loss"] logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) else: loss_mb = raw_outputs[0] logits_mb = raw_outputs[1:] loss = loss_mb.reduce_mean().detach().cpu() logits = smp_nested_concat(logits_mb) else: loss = None if isinstance(raw_outputs, dict): logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) else: logits_mb = raw_outputs logits = smp_nested_concat(logits_mb) else: if has_labels or loss_without_labels: with self.compute_loss_context_manager(): loss, outputs = self.compute_loss(model, inputs, return_outputs=True) loss = loss.mean().detach() if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) else: logits = outputs[1:] else: loss = None with self.compute_loss_context_manager(): outputs = model(**inputs) if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) else: logits = outputs # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index - 1] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] return (loss, logits, labels) def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): """ For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point operations for every backward + forward pass. 
If using another model, either implement such a method in the model or subclass and override this method. Args: inputs (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. Returns: `int`: The number of floating-point operations. """ if hasattr(self.model, "floating_point_ops"): return self.model.floating_point_ops(inputs) else: return 0 def init_hf_repo(self, token: Optional[str] = None): """ Initializes a git repo in `self.args.hub_model_id`. """ # Only on process zero if not self.is_world_process_zero(): return if self.args.hub_model_id is None: repo_name = Path(self.args.output_dir).absolute().name else: repo_name = self.args.hub_model_id token = token if token is not None else self.args.hub_token repo_url = create_repo(repo_name, token=token, private=self.args.hub_private_repo, exist_ok=True) self.hub_model_id = repo_url.repo_id self.push_in_progress = None def create_model_card( self, language: Optional[str] = None, license: Optional[str] = None, tags: Union[str, List[str], None] = None, model_name: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Union[str, List[str], None] = None, dataset_tags: Union[str, List[str], None] = None, dataset: Union[str, List[str], None] = None, dataset_args: Union[str, List[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: language (`str`, *optional*): The language of the model (if applicable) license (`str`, *optional*): The license of the model. Will default to the license of the pretrained model used, if the original model given to the `Trainer` comes from a repo on the Hub. tags (`str` or `List[str]`, *optional*): Some tags to be included in the metadata of the model card. model_name (`str`, *optional*): The name of the model. finetuned_from (`str`, *optional*): The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the `Trainer` (if it comes from the Hub). tasks (`str` or `List[str]`, *optional*): One or several task identifiers, to be included in the metadata of the model card. dataset_tags (`str` or `List[str]`, *optional*): One or several dataset tags, to be included in the metadata of the model card. dataset (`str` or `List[str]`, *optional*): One or several dataset identifiers, to be included in the metadata of the model card. dataset_args (`str` or `List[str]`, *optional*): One or several dataset arguments, to be included in the metadata of the model card. 
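        Example (illustrative values only):

        ```python
        trainer.create_model_card(
            model_name="my-finetuned-model",       # hypothetical name
            finetuned_from="bert-base-uncased",
            tasks="text-classification",
            dataset="imdb",
        )
        ```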
""" if not self.is_world_process_zero(): return model_card_filepath = os.path.join(self.args.output_dir, "README.md") is_peft_library = False if os.path.exists(model_card_filepath): library_name = ModelCard.load(model_card_filepath).data.get("library_name") is_peft_library = library_name == "peft" # Append existing tags in `tags` existing_tags = ModelCard.load(model_card_filepath).data.tags if tags is not None and existing_tags is not None: if isinstance(tags, str): tags = [tags] for tag in existing_tags: if tag not in tags: tags.append(tag) training_summary = TrainingSummary.from_trainer( self, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with open(model_card_filepath, "w") as f: f.write(model_card) if is_peft_library: self.accelerator.unwrap_model(self.model).create_or_update_model_card(self.args.output_dir) def _push_from_checkpoint(self, checkpoint_folder): # Only push from one node. if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: return # If we haven't finished the last push, we don't do this one unless args.hub_always_push=True. if not self.args.hub_always_push and self.push_in_progress is not None and not self.push_in_progress.is_done(): return output_dir = self.args.output_dir # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder modeling_files = [CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME] # Add sharded checkpoints if we have an index for index_file in [WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME]: index_path = os.path.join(checkpoint_folder, index_file) if os.path.isfile(index_path): modeling_files.append(index_file) with open(index_path) as f: index = json.loads(f.read()) shard_files = list(set(index["weight_map"].values())) modeling_files.extend(shard_files) if is_peft_available(): modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME]) for modeling_file in modeling_files: if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) # Saving the processing class is fast and we don't know how many files it may have spawned, so we resave it to be sure. 
if self.processing_class is not None: self.processing_class.save_pretrained(output_dir) # Same for the training arguments torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) if self.args.save_strategy == SaveStrategy.STEPS: commit_message = f"Training in progress, step {self.state.global_step}" else: commit_message = f"Training in progress, epoch {int(self.state.epoch)}" model_push_job = upload_folder( repo_id=self.hub_model_id, folder_path=output_dir, commit_message=commit_message, token=self.args.hub_token, run_as_future=True, ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], ) push_jobs = [model_push_job] if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]: path_in_repo = ( "last-checkpoint" if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name ) checkpoint_push = upload_folder( repo_id=self.hub_model_id, folder_path=checkpoint_folder, path_in_repo=path_in_repo, commit_message=commit_message + ", checkpoint", token=self.args.hub_token, run_as_future=True, ) push_jobs.append(checkpoint_push) if self.push_in_progress is None or self.push_in_progress.is_done(): self.push_in_progress = PushInProgress(push_jobs) else: self.push_in_progress.jobs.extend(push_jobs) def _finish_current_push(self): if not hasattr(self, "push_in_progress"): return if self.push_in_progress is not None and not self.push_in_progress.is_done(): logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.") self.push_in_progress.wait_until_done() def push_to_hub( self, commit_message: Optional[str] = "End of training", blocking: bool = True, token: Optional[str] = None, revision: Optional[str] = None, **kwargs, ) -> str: """ Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`. Parameters: commit_message (`str`, *optional*, defaults to `"End of training"`): Message to commit while pushing. blocking (`bool`, *optional*, defaults to `True`): Whether the function should return only when the `git push` has finished. token (`str`, *optional*, defaults to `None`): Token with write permission to overwrite Trainer's original args. revision (`str`, *optional*): The git revision to commit from. Defaults to the head of the "main" branch. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to [`~Trainer.create_model_card`]. Returns: The URL of the repository where the model was pushed if `blocking=False`, or a `Future` object tracking the progress of the commit if `blocking=True`. """ model_name = kwargs.pop("model_name", None) if model_name is None and self.args.should_save: if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split("/")[-1] token = token if token is not None else self.args.hub_token # In case the user calls this method with args.push_to_hub = False if self.hub_model_id is None: self.init_hf_repo(token=token) # Needs to be executed on all processes for TPU training, but will only save on the processed determined by # self.args.should_save. self.save_model(_internal_call=True) # Only push from one node. if not self.is_world_process_zero(): return # Add additional tags in the case the model has already some tags and users pass # "tags" argument to `push_to_hub` so that trainer automatically handles internal tags # from all models since Trainer does not call `model.push_to_hub`. 
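# Typical call sites (illustrative; the target repo is whatever `args.hub_model_id` resolves to):
#   trainer.push_to_hub(commit_message="End of training")            # blocks until the upload finishes
#   job = trainer.push_to_hub(commit_message="wip", blocking=False)  # returns a future tracking the upload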
if getattr(self.model, "model_tags", None) is not None: if "tags" not in kwargs: kwargs["tags"] = [] # If it is a string, convert it to a list if isinstance(kwargs["tags"], str): kwargs["tags"] = [kwargs["tags"]] for model_tag in self.model.model_tags: if model_tag not in kwargs["tags"]: kwargs["tags"].append(model_tag) self.create_model_card(model_name=model_name, **kwargs) # Wait for the current upload to be finished. self._finish_current_push() return upload_folder( repo_id=self.hub_model_id, folder_path=self.args.output_dir, commit_message=commit_message, token=token, run_as_future=not blocking, ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], revision=revision, ) # # Deprecated code # def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args if not has_length(dataloader): raise ValueError("dataloader must implement a working __len__") prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only # if eval is called w/o train, handle model prep here if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled or self.is_fsdp_enabled else self.accelerator.prepare_model(model, evaluation_mode=True) ) if self.is_fsdp_enabled: self.model = model # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called # while ``train`` is running, cast it to the right dtype first and then put on device if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = ( dataloader.total_batch_size if getattr(dataloader, "_is_accelerate_prepared", False) else dataloader.batch_size ) if batch_size is None: raise ValueError( "Batch size cannot be None. Ensure the dataloader has a valid batch_size or total_batch_size." 
) num_examples = self.num_examples(dataloader) logger.info(f"\n***** Running {description} *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Batch size = {batch_size}") losses_host: torch.Tensor = None preds_host: Union[torch.Tensor, List[torch.Tensor]] = None labels_host: Union[torch.Tensor, List[torch.Tensor]] = None inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None metrics: Optional[dict] = None eval_set_kwargs: dict = {} world_size = max(1, args.world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass # a batch size to the sampler) make_multiple_of = None if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler): make_multiple_of = dataloader.sampler.batch_size preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) model.eval() if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval): self.optimizer.eval() if args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = ( self._prepare_input(inputs[main_input_name]) if "inputs" in args.include_for_metrics else None ) if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and preds_host is not None and labels_host is not None: is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs["losses"] = losses_host if "loss" in args.include_for_metrics else None batch_kwargs["inputs"] = inputs_host if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics( EvalPrediction(predictions=preds_host, label_ids=labels_host, **batch_kwargs), compute_result=is_last_step, ) if self.args.batch_eval_metrics or ( args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0 ): # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
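# For reference, each `DistributedTensorGatherer` below writes every process's per-step arrays into a
# preallocated CPU buffer at that process's offset; `finalize()` later truncates the padding that a
# distributed sampler may have added to make the shards evenly sized.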
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) # Set back to None to begin a new accumulation del losses_host, preds_host, labels_host, inputs_host torch.cuda.empty_cache() losses_host, preds_host, labels_host, inputs_host = None, None, None, None if args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None if ( self.compute_metrics is not None and preds is not None and label_ids is not None and not self.args.batch_eval_metrics ): eval_set_kwargs["losses"] = eval_loss if "loss" in args.include_for_metrics else None eval_set_kwargs["inputs"] = inputs_ids if "inputs" in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids, **eval_set_kwargs)) elif metrics is None: metrics = {} # To be JSON-serializable, we need to remove numpy types or zero-d tensors metrics = denumpify_detensorize(metrics) if eval_loss is not None: metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) def _gather_and_numpify(self, tensors, name): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: tensors = distributed_concat(tensors) return nested_numpify(tensors) def _add_sm_patterns_to_gitignore(self) -> None: """Add SageMaker Checkpointing patterns to .gitignore file.""" # Make sure we only do this on the main process if not self.is_world_process_zero(): return patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] # Get current .gitignore content if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: current_content = f.read() else: current_content = "" # Add the patterns to .gitignore content = current_content for pattern in patterns: if pattern not in content: if content.endswith("\n"): content += pattern else: content += f"\n{pattern}" # Write the .gitignore file if it has changed if content != current_content: 
with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: logger.debug(f"Writing .gitignore file. Content: {content}") f.write(content) self.repo.git_add(".gitignore") # avoid race condition with git status time.sleep(0.5) if not self.repo.is_repo_clean(): self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") self.repo.git_push() def create_accelerator_and_postprocess(self): # We explicitly don't rely on the `Accelerator` to do gradient accumulation grad_acc_kwargs = {} if is_accelerate_available("0.28.0") and self.args.accelerator_config.gradient_accumulation_kwargs is not None: grad_acc_kwargs = self.args.accelerator_config.gradient_accumulation_kwargs # check if num_steps is attempted to be passed in gradient_accumulation_kwargs if "num_steps" in grad_acc_kwargs: if self.args.gradient_accumulation_steps > 1: # raise because we do not know which setting is intended. raise ValueError( "The `AcceleratorConfig`'s `num_steps` is set but `gradient_accumulation_steps` is greater than 1 in the passed `TrainingArguments`" "If using the passed `AcceleratorConfig` is desired, do not set the `TrainingArguments` `gradient_accumulation_steps`." ) else: self.args.gradient_accumulation_steps = grad_acc_kwargs["num_steps"] accelerator_config = self.args.accelerator_config.to_dict() if is_accelerate_available("0.28.0"): dataloader_config = DataLoaderConfiguration( split_batches=accelerator_config.pop("split_batches"), dispatch_batches=accelerator_config.pop("dispatch_batches"), even_batches=accelerator_config.pop("even_batches"), use_seedable_sampler=accelerator_config.pop("use_seedable_sampler"), ) if is_accelerate_available("1.1.0"): dataloader_config.data_seed = self.args.data_seed non_blocking = accelerator_config.pop("non_blocking") if not is_accelerate_available("0.30.0"): if non_blocking: raise ImportError( "`non_blocking` is only supported in accelerate v0.30.0 and above. Please upgrade accelerate to use this feature." ) else: if non_blocking and not self.args.dataloader_pin_memory: logger.warning( "`non_blocking` is enabled but `dataloader_pin_memory` is not. For the best performance, it's recommended to enable both." 
) dataloader_config.non_blocking = non_blocking # this would have been updated above, no need for it anymore accelerator_config.pop("gradient_accumulation_kwargs") args = { "deepspeed_plugin": self.args.deepspeed_plugin, } if is_accelerate_available("0.28.0"): args["dataloader_config"] = dataloader_config else: args.update(accelerator_config) # create accelerator object self.accelerator = Accelerator(**args) # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag self.gather_function = self.accelerator.gather_for_metrics if "use_gather_object" in inspect.signature(self.gather_function).parameters.keys(): self.gather_function = functools.partial( self.gather_function, use_gather_object=self.args.eval_use_gather_object ) # deepspeed and accelerate flags covering both trainer args and accelerate launcher self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None # post accelerator creation setup if self.is_fsdp_enabled: fsdp_plugin = self.accelerator.state.fsdp_plugin fsdp_plugin.limit_all_gathers = self.args.fsdp_config.get( "limit_all_gathers", fsdp_plugin.limit_all_gathers ) fsdp_plugin.activation_checkpointing = self.args.fsdp_config.get( "activation_checkpointing", fsdp_plugin.activation_checkpointing ) if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing: raise ValueError( "The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " "when using FSDP." ) if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None: self.propagate_args_to_deepspeed() # `save_only_model` can't be used with DeepSpeed/FSDP along with `load_best_model_at_end` if ( self.args.save_only_model and (self.is_deepspeed_enabled or self.is_fsdp_enabled) and self.args.load_best_model_at_end ): wrapper = "DeepSpeed" if self.is_deepspeed_enabled else "FSDP" raise ValueError(f"{wrapper} can't be used with `save_only_model` along with `load_best_model_at_end`.") # `auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3 if ( self.is_deepspeed_enabled and self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.args.auto_find_batch_size ): raise ValueError( "`auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3. 
Please consider using Zero-2, Zero-1, or FSDP" ) def propagate_args_to_deepspeed(self, auto_find_batch_size=False): """ Sets values in the deepspeed plugin based on the Trainer args """ from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig ds_plugin = self.accelerator.state.deepspeed_plugin ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config) ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size) def _fsdp_qlora_plugin_updates(self): if self.is_fsdp_enabled and _is_peft_model(self.model): from peft import LoraConfig from peft.utils.other import fsdp_auto_wrap_policy if isinstance(self.model.active_peft_config, LoraConfig): fsdp_plugin = self.accelerator.state.fsdp_plugin fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(self.model) if ( getattr(self.model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES and self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage.is_floating_point and version.parse(accelerate_version) > version.parse("0.27.0") ): fsdp_plugin.set_mixed_precision( self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage, override=True ) def get_batch_samples(self, epoch_iterator, num_batches): batch_samples = [] num_items_in_batch = None for _ in range(num_batches): try: batch_samples += [next(epoch_iterator)] except StopIteration: break if len(batch_samples) > 0 and "labels" in batch_samples[0]: # For now we don't support object detection try: num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples]) except (TypeError, AttributeError): pass if self.args.average_tokens_across_devices and num_items_in_batch is not None: num_items_in_batch = self.accelerator.gather(num_items_in_batch).sum().item() if torch.is_tensor(num_items_in_batch): num_items_in_batch = num_items_in_batch.item() return batch_samples, num_items_in_batch
class_definition
9,609
253,063
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
null
244
class HyperParamSearchBackendBase: name: str pip_package: str = None @staticmethod def is_available(): raise NotImplementedError def run(self, trainer, n_trials: int, direction: str, **kwargs): raise NotImplementedError def default_hp_space(self, trial): raise NotImplementedError def ensure_available(self): if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def pip_install(cls): return f"`pip install {cls.pip_package or cls.name}`"
class_definition
1,078
1,736
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/hyperparameter_search.py
null
245
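The base class above defines the contract each search backend fills in: `is_available`, `run`, and `default_hp_space`. As a minimal sketch (not part of the library), a hypothetical custom backend wrapping an imaginary `my_tuner` package could look like this; the names `MyTunerBackend` and `my_tuner` are illustrative only.

```python
from transformers.hyperparameter_search import HyperParamSearchBackendBase


class MyTunerBackend(HyperParamSearchBackendBase):
    # `name` is what users would select the backend by; `pip_package` feeds `pip_install()`
    name = "my_tuner"
    pip_package = "my-tuner"

    @staticmethod
    def is_available():
        try:
            import my_tuner  # noqa: F401  (hypothetical package)
            return True
        except ImportError:
            return False

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        # Would drive one `trainer.train()` per trial and return the best run.
        raise NotImplementedError("sketch only")

    def default_hp_space(self, trial):
        # Trivial search space used when the caller does not supply one.
        return {"learning_rate": 1e-4}
```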
class OptunaBackend(HyperParamSearchBackendBase): name = "optuna" @staticmethod def is_available(): return is_optuna_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_optuna(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_optuna(trial)
class_definition
1,739
2,120
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/hyperparameter_search.py
null
246
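The concrete backends (Optuna here, and the Ray Tune, SigOpt and W&B backends in the following records) are normally not instantiated directly; they are selected through `Trainer.hyperparameter_search`. A short sketch, assuming `model_init`, `training_args`, `train_ds` and `eval_ds` are defined elsewhere; switching `backend=` to `"ray"`, `"sigopt"` or `"wandb"` routes the same call through the other backends.

```python
from transformers import Trainer


def optuna_hp_space(trial):
    # Optuna passes a Trial object; we return TrainingArguments overrides per trial.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32]
        ),
    }


trainer = Trainer(
    model_init=model_init,  # search re-instantiates the model for every trial
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
)
best_trial = trainer.hyperparameter_search(
    hp_space=optuna_hp_space, backend="optuna", direction="minimize", n_trials=20
)
```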
class RayTuneBackend(HyperParamSearchBackendBase): name = "ray" pip_package = "'ray[tune]'" @staticmethod def is_available(): return is_ray_tune_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_ray(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_ray(trial)
class_definition
2,123
2,530
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/hyperparameter_search.py
null
247
class SigOptBackend(HyperParamSearchBackendBase): name = "sigopt" @staticmethod def is_available(): return is_sigopt_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_sigopt(trial)
class_definition
2,533
2,914
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/hyperparameter_search.py
null
248
class WandbBackend(HyperParamSearchBackendBase): name = "wandb" @staticmethod def is_available(): return is_wandb_available() def run(self, trainer, n_trials: int, direction: str, **kwargs): return run_hp_search_wandb(trainer, n_trials, direction, **kwargs) def default_hp_space(self, trial): return default_hp_space_wandb(trial)
class_definition
2,917
3,293
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/hyperparameter_search.py
null
249
class KerasMetricCallback(keras.callbacks.Callback): """ Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the `eval_dataset` before being passed to the `metric_fn` in `np.ndarray` format. The `metric_fn` should compute metrics and return a dict mapping metric names to metric values. We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is! ```py from datasets import load_metric rouge_metric = load_metric("rouge") def rouge_fn(predictions, labels): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) return {key: value.mid.fmeasure * 100 for key, value in result.items()} ``` The above function will return a dict containing values which will be logged like any other Keras metric: ``` {'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781} ``` Args: metric_fn (`Callable`): Metric function provided by the user. It will be called with two arguments - `predictions` and `labels`. These contain the model's outputs and matching labels from the dataset. It should return a dict mapping metric names to numerical values. eval_dataset (`tf.data.Dataset` or `dict` or `tuple` or `np.ndarray` or `tf.Tensor`): Validation data to be used to generate predictions for the `metric_fn`. output_cols (`List[str]`, *optional*): A list of columns to be retained from the model output as the predictions. Defaults to all. label_cols (`List[str]`, *optional*): A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not supplied. batch_size (`int`, *optional*): Batch size. Only used when the data is not a pre-batched `tf.data.Dataset`. predict_with_generate (`bool`, *optional*, defaults to `False`): Whether we should use `model.generate()` to get outputs for the model. use_xla_generation (`bool`, *optional*, defaults to `False`): If we're generating, whether to compile model generation with XLA. This can massively increase the speed of generation (up to 100X speedup) but will require a new XLA compilation for each input shape. When using XLA generation, it's a good idea to pad your inputs to the same size, or to use the `pad_to_multiple_of` argument in your `tokenizer` or `DataCollator`, which will reduce the number of unique input shapes and save a lot of compilation time. This option has no effect if `predict_with_generate` is `False`. generate_kwargs (`dict`, *optional*): Keyword arguments to pass to `model.generate()` when generating. Has no effect if `predict_with_generate` is `False`. 
""" def __init__( self, metric_fn: Callable, eval_dataset: Union[tf.data.Dataset, np.ndarray, tf.Tensor, tuple, dict], output_cols: Optional[List[str]] = None, label_cols: Optional[List[str]] = None, batch_size: Optional[int] = None, predict_with_generate: bool = False, use_xla_generation: bool = False, generate_kwargs: Optional[dict] = None, ): super().__init__() self.metric_fn = metric_fn self.batch_size = batch_size if not isinstance(eval_dataset, tf.data.Dataset): if batch_size is None: raise ValueError( "When passing data to KerasMetricCallback that is not a pre-batched tf.data.Dataset " "the batch_size argument must be set." ) # Wrap a tf.data.Dataset around it eval_dataset = tf.data.Dataset.from_tensor_slices(eval_dataset).batch(batch_size, drop_remainder=False) self.eval_dataset = eval_dataset self.predict_with_generate = predict_with_generate self.output_cols = output_cols # This next block attempts to parse out which elements of the dataset should be appended to the labels list # that is passed to the metric_fn if isinstance(eval_dataset.element_spec, tuple) and len(eval_dataset.element_spec) == 2: input_spec, label_spec = eval_dataset.element_spec else: input_spec = eval_dataset.element_spec label_spec = None if label_cols is not None: for label in label_cols: if label not in input_spec: raise ValueError(f"Label {label} is in label_cols but could not be found in the dataset inputs!") self.label_cols = label_cols self.use_keras_label = False elif label_spec is not None: # If the dataset inputs are split into a 2-tuple of inputs and labels, # assume the second element is the labels self.label_cols = None self.use_keras_label = True elif "labels" in input_spec: self.label_cols = ["labels"] self.use_keras_label = False logging.warning("No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.") elif "start_positions" in input_spec and "end_positions" in input_spec: self.label_cols = ["start_positions", "end_positions"] self.use_keras_label = False logging.warning( "No label_cols specified for KerasMetricCallback, assuming you want the " "start_positions and end_positions keys." ) else: raise ValueError("Could not autodetect label_cols for KerasMetricCallback, please specify them!") if parse(tf.__version__) < parse("2.7"): logging.warning("TF versions less than 2.7 may encounter issues with KerasMetricCallback!") self.use_xla_generation = use_xla_generation self.generate_kwargs = {} if generate_kwargs is None else generate_kwargs self.generation_function = None @staticmethod def _concatenate_batches(batches, padding_index=-100): # If all batches are unidimensional or same length, do a simple concatenation if batches[0].ndim == 1 or all(batch.shape[1] == batches[0].shape[1] for batch in batches): return np.concatenate(batches, axis=0) # Welp, they're not the same length. 
Let's do some padding max_len = max([batch.shape[1] for batch in batches]) num_samples = sum([batch.shape[0] for batch in batches]) output = np.full_like( batches[0], fill_value=padding_index, shape=[num_samples, max_len] + list(batches[0].shape[2:]) ) # i keeps track of which part of the concatenated array we're writing the next batch to i = 0 for batch in batches: output[i : i + len(batch), : batch.shape[1]] = batch i += len(batch) return output def _postprocess_predictions_or_labels(self, inputs): if isinstance(inputs[0], dict): outputs = {} for key in inputs[0].keys(): outputs[key] = self._concatenate_batches([batch[key] for batch in inputs]) # If it's a dict with only one key, just return the array if len(outputs) == 1: outputs = list(outputs.values())[0] elif isinstance(inputs[0], list) or isinstance(inputs[0], tuple): outputs = [] for input_list in zip(*inputs): outputs.append(self._concatenate_batches(input_list)) if len(outputs) == 1: outputs = outputs[0] # If it's a list with only one element, just return the array elif isinstance(inputs[0], np.ndarray): outputs = self._concatenate_batches(inputs) elif isinstance(inputs[0], tf.Tensor): outputs = self._concatenate_batches([tensor.numpy() for tensor in inputs]) else: raise TypeError(f"Couldn't handle batch of type {type(inputs[0])}!") return outputs def on_epoch_end(self, epoch, logs=None): if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] main_input_name = None if self.predict_with_generate: # This dense conditional recognizes the case where we have an encoder-decoder model, but # avoids getting tangled up when we just have a model with a layer called 'encoder' if hasattr(self.model, "encoder") and hasattr(self.model.encoder, "main_input_name"): main_input_name = self.model.encoder.main_input_name else: main_input_name = getattr(self.model, "main_input_name", "input_ids") if self.use_xla_generation and self.generation_function is None: def generation_function(inputs, attention_mask): return self.model.generate(inputs, attention_mask=attention_mask, **self.generate_kwargs) self.generation_function = tf.function(generation_function, jit_compile=True) prediction_list = [] label_list = [] # The whole predict/generate loop is handled inside this method for batch in self.eval_dataset: if isinstance(batch, tuple): batch, labels = batch else: labels = None if self.predict_with_generate: if isinstance(batch, dict): generation_inputs = batch[main_input_name] attention_mask = batch.get("attention_mask", None) else: generation_inputs = batch attention_mask = None if self.use_xla_generation: predictions = self.generation_function(generation_inputs, attention_mask=attention_mask) else: predictions = self.model.generate( generation_inputs, attention_mask=attention_mask, **self.generate_kwargs ) else: predictions = self.model.predict_on_batch(batch) if isinstance(predictions, dict): # This converts any dict-subclass to a regular dict # Keras REALLY doesn't like it when we pass around a BatchEncoding or other derived class predictions = dict(predictions) if self.output_cols is not None: predictions = {key: predictions[key] for key in self.output_cols} else: predictions = { key: val for key, val in predictions.items() if key not in ignore_keys + ["loss"] } prediction_list.append(predictions) if not self.use_keras_label: labels = {key: batch[key].numpy() for key in self.label_cols} elif isinstance(labels, dict): labels = {key: array.numpy() for key, array in 
labels.items()} elif isinstance(labels, list) or isinstance(labels, tuple): labels = [array.numpy() for array in labels] elif isinstance(labels, tf.Tensor): labels = labels.numpy() else: raise TypeError(f"Confused by labels of type {type(labels)}") label_list.append(labels) all_preds = self._postprocess_predictions_or_labels(prediction_list) all_labels = self._postprocess_predictions_or_labels(label_list) metric_output = self.metric_fn((all_preds, all_labels)) if not isinstance(metric_output, dict): raise TypeError( f"metric_fn should return a dict mapping metric names to values but instead returned {metric_output}" ) # This is the critical bit - Keras passes a dict containing the loss and standard metric values for this epoch # in the logs argument. Ordinarily, this is so the callback can read them, but in this case we write a bunch of # new keys in there, which will then get read by the History callback and treated like any other metric value. # I promise that I have it in writing from Chollet that this is okay. logs.update(metric_output)
class_definition
430
13,457
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/keras_callbacks.py
null
250
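A minimal sketch of wiring this callback into `model.fit`, assuming a compiled TF model and pre-batched `tf.data` datasets (`tf_train_dataset`, `tf_eval_dataset`) exist and that the model returns a single logits array:

```python
import numpy as np
from transformers.keras_callbacks import KerasMetricCallback


def accuracy_fn(eval_predictions):
    # The callback hands `metric_fn` a single (predictions, labels) tuple.
    predictions, labels = eval_predictions
    preds = np.argmax(predictions, axis=-1)
    return {"accuracy": float((preds == labels).mean())}


metric_callback = KerasMetricCallback(metric_fn=accuracy_fn, eval_dataset=tf_eval_dataset)
model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=3, callbacks=[metric_callback])
```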
class PushToHubCallback(keras.callbacks.Callback): """ Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the `save_strategy` argument. Pushed models can be accessed like any other model on the hub, such as with the `from_pretrained` method. ```py from transformers.keras_callbacks import PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir="./model_save", tokenizer=tokenizer, hub_model_id="gpt5-7xlarge", ) model.fit(train_dataset, callbacks=[push_to_hub_callback]) ``` Args: output_dir (`str`): The output directory where the model predictions and checkpoints will be written and synced with the repository on the Hub. save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"epoch"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: Save is done at the end of training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps` save_steps (`int`, *optional*): The number of steps between saves when using the "steps" `save_strategy`. tokenizer (`PreTrainedTokenizerBase`, *optional*): The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights. hub_model_id (`str`, *optional*): The name of the repository to keep in sync with the local `output_dir`. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`. Will default to the name of `output_dir`. hub_token (`str`, *optional*): The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `huggingface-cli login`. checkpoint (`bool`, *optional*, defaults to `False`): Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be resumed. Only usable when `save_strategy` is `"epoch"`. 
""" def __init__( self, output_dir: Union[str, Path], save_strategy: Union[str, IntervalStrategy] = "epoch", save_steps: Optional[int] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, hub_model_id: Optional[str] = None, hub_token: Optional[str] = None, checkpoint: bool = False, **model_card_args, ): super().__init__() if checkpoint and save_strategy != "epoch": raise ValueError("Cannot save checkpoints when save_strategy is not 'epoch'!") if isinstance(save_strategy, str): save_strategy = IntervalStrategy(save_strategy.lower()) self.save_strategy = save_strategy if self.save_strategy == IntervalStrategy.STEPS and (not isinstance(save_steps, int) or save_steps <= 0): raise ValueError("Please supply a positive integer argument for save_steps when save_strategy == 'steps'!") self.save_steps = save_steps output_dir = Path(output_dir) # Create repo and retrieve repo_id if hub_model_id is None: hub_model_id = output_dir.absolute().name self.hub_model_id = create_repo(repo_id=hub_model_id, exist_ok=True, token=hub_token).repo_id self.output_dir = output_dir self.repo = Repository(str(self.output_dir), clone_from=self.hub_model_id, token=hub_token) self.tokenizer = tokenizer self.last_job = None self.checkpoint = checkpoint self.training_history = None self.model_card_args = model_card_args def on_train_begin(self, logs=None): # Although we can access model.history, we have no guarantees that the History callback will fire before this # one, so we keep track of it here too self.training_history = [] def on_train_batch_end(self, batch, logs=None): if self.save_strategy == IntervalStrategy.STEPS and (batch + 1) % self.save_steps == 0: if self.last_job is not None and not self.last_job.is_done: return # The last upload is still running, don't start another self.model.save_pretrained(self.output_dir) if self.tokenizer is not None: self.tokenizer.save_pretrained(self.output_dir) _, self.last_job = self.repo.push_to_hub( commit_message=f"Training in progress steps {batch}", blocking=False ) def on_epoch_end(self, epoch, logs=None): logs = logs.copy() # Don't accidentally write things that Keras will read later if "epoch" not in logs: logs["epoch"] = epoch self.training_history.append(logs) if self.save_strategy == IntervalStrategy.EPOCH: if self.last_job is not None and not self.last_job.is_done: return # The last upload is still running, don't start another self.model.save_pretrained(self.output_dir) if self.tokenizer is not None: self.tokenizer.save_pretrained(self.output_dir) if self.checkpoint: checkpoint_dir = os.path.join(self.output_dir, "checkpoint") self.model._save_checkpoint(checkpoint_dir, epoch) train_summary = TrainingSummary.from_keras( model=self.model, model_name=self.hub_model_id, keras_history=self.training_history, **self.model_card_args, ) model_card = train_summary.to_model_card() with (self.output_dir / "README.md").open("w") as f: f.write(model_card) _, self.last_job = self.repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False ) def on_train_end(self, logs=None): # Makes sure the latest version of the model is uploaded if self.last_job is not None and not self.last_job.is_done: logging.info("Pushing the last epoch to the Hub, this may take a while...") while not self.last_job.is_done: sleep(1) else: self.model.save_pretrained(self.output_dir) if self.tokenizer is not None: self.tokenizer.save_pretrained(self.output_dir) train_summary = TrainingSummary.from_keras( model=self.model, model_name=self.hub_model_id, 
keras_history=self.training_history, **self.model_card_args, ) model_card = train_summary.to_model_card() with (self.output_dir / "README.md").open("w") as f: f.write(model_card) self.repo.push_to_hub(commit_message="End of training", blocking=True)
class_definition
13,460
20,674
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/keras_callbacks.py
null
251
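The docstring example uses the default per-epoch pushes; a step-based variant looks roughly like the sketch below (the `hub_model_id` is illustrative, and `tokenizer`, `model` and `tf_train_dataset` are assumed to exist):

```python
from transformers.keras_callbacks import PushToHubCallback

push_callback = PushToHubCallback(
    output_dir="./model_save",
    save_strategy="steps",            # push every `save_steps` training batches instead of every epoch
    save_steps=500,
    tokenizer=tokenizer,              # uploaded alongside the weights
    hub_model_id="my-user/my-model",  # illustrative repo id
)
model.fit(tf_train_dataset, callbacks=[push_callback])
```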
class TrainerState: """ A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): When tracking the inputs tokens, the number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`List[Dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard. stateful_callbacks (`List[StatefulTrainerCallback]`, *optional*): Callbacks attached to the `Trainer` that should have their states be saved or restored. Relevent callbacks should implement a `state` and `from_state` function. 
""" epoch: Optional[float] = None global_step: int = 0 max_steps: int = 0 logging_steps: int = 500 eval_steps: int = 500 save_steps: int = 500 train_batch_size: int = None num_train_epochs: int = 0 num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: List[Dict[str, float]] = None best_metric: Optional[float] = None best_model_checkpoint: Optional[str] = None is_local_process_zero: bool = True is_world_process_zero: bool = True is_hyper_param_search: bool = False trial_name: str = None trial_params: Dict[str, Union[str, float, int, bool]] = None stateful_callbacks: List["TrainerCallback"] = None def __post_init__(self): if self.log_history is None: self.log_history = [] if self.stateful_callbacks is None: self.stateful_callbacks = {} elif isinstance(self.stateful_callbacks, dict): # We are loading the callbacks in from the state file, no need to process them pass else: # Saveable callbacks get stored as dict of kwargs stateful_callbacks = {} for callback in self.stateful_callbacks: if not isinstance(callback, (ExportableState)): raise TypeError( f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}" ) name = callback.__class__.__name__ if name in stateful_callbacks: # We can have multiple versions of the same callback # if so, we store them as a list of states to restore if not isinstance(stateful_callbacks[name], list): stateful_callbacks[name] = [stateful_callbacks[name]] stateful_callbacks[name].append(callback.state()) else: stateful_callbacks[name] = callback.state() self.stateful_callbacks = stateful_callbacks def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n" with open(json_path, "w", encoding="utf-8") as f: f.write(json_string) @classmethod def load_from_json(cls, json_path: str): """Create an instance from the content of `json_path`.""" with open(json_path, "r", encoding="utf-8") as f: text = f.read() return cls(**json.loads(text))
class_definition
1,050
6,994
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
252
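Because `save_to_json`/`load_from_json` round-trip the dataclass, the state written into a checkpoint can be inspected offline. A small sketch, assuming a checkpoint directory produced by `Trainer` (the path below is illustrative):

```python
from transformers import TrainerState

# Every Trainer checkpoint directory contains a trainer_state.json file.
state = TrainerState.load_from_json("output/checkpoint-500/trainer_state.json")
print(state.global_step, state.epoch, state.best_metric)
print(state.log_history[-1])  # the most recently logged metrics dict
```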
class ExportableState: """ A class for objects that include the ability to have its state be saved during `Trainer._save_checkpoint` and loaded back in during `Trainer._load_from_checkpoint`. These must implement a `state` function that gets called during the respective Trainer function call. It should only include parameters and attributes needed to recreate the state at a particular time, to avoid utilizing pickle/maintain standard file IO writing. Example: ```python class EarlyStoppingCallback(TrainerCallback, ExportableState): def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. self.early_stopping_patience_counter = 0 def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, } } ```""" def state(self) -> dict: raise NotImplementedError("You must implement a `state` function to utilize this class.") @classmethod def from_state(cls, state): instance = cls(**state["args"]) for k, v in state["attributes"].items(): setattr(instance, k, v) return instance
class_definition
6,997
8,743
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
253
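The `state()`/`from_state()` pair is what lets the `Trainer` persist callback state inside a checkpoint. A minimal round-trip sketch using the `EarlyStoppingCallback` shown in the docstring above:

```python
from transformers import EarlyStoppingCallback

cb = EarlyStoppingCallback(early_stopping_patience=3)
cb.early_stopping_patience_counter = 2           # pretend two evaluations failed to improve

payload = cb.state()                             # dict of constructor args + mutable attributes
restored = EarlyStoppingCallback.from_state(payload)
assert restored.early_stopping_patience_counter == 2
```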
class TrainerControl(ExportableState): """ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. """ should_training_stop: bool = False should_epoch_stop: bool = False should_save: bool = False should_evaluate: bool = False should_log: bool = False def _new_training(self): """Internal method that resets the variable for a new training.""" self.should_training_stop = False def _new_epoch(self): """Internal method that resets the variable for a new epoch.""" self.should_epoch_stop = False def _new_step(self): """Internal method that resets the variable for a new step.""" self.should_save = False self.should_evaluate = False self.should_log = False def state(self) -> dict: return { "args": { "should_training_stop": self.should_training_stop, "should_epoch_stop": self.should_epoch_stop, "should_save": self.should_save, "should_evaluate": self.should_evaluate, "should_log": self.should_log, }, "attributes": {}, }
class_definition
8,757
11,209
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
254
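Callbacks steer the loop by flipping these flags and returning the control object. A hedged sketch of a callback that forces a checkpoint every `n` update steps (the class name is made up for illustration):

```python
from transformers import TrainerCallback


class ForceSaveCallback(TrainerCallback):
    def __init__(self, every_n_steps: int = 1000):
        self.every_n_steps = every_n_steps

    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step > 0 and state.global_step % self.every_n_steps == 0:
            control.should_save = True
        return control  # returning the (possibly modified) control propagates the change
```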
class TrainerCallback: # no-format """ A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. This is deprecated in favour of `processing_class`. processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]): The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a feature extractor. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. eval_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for evaluation. metrics (`Dict[str, float]`): The metrics computed by the last evaluation phase. Those are only accessible in the event `on_evaluate`. logs (`Dict[str, float]`): The values to log. Those are only accessible in the event `on_log`. The `control` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple [`~transformers.PrinterCallback`]. Example: ```python class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) ```""" def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of the initialization of the [`Trainer`]. """ pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of training. """ pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of training. """ pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of an epoch. """ pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an epoch. """ pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients. 
""" pass def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients. """ pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an substep during gradient accumulation. """ pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after an evaluation phase. """ pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs): """ Event called after a successful prediction. """ pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a checkpoint save. """ pass def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after logging the last logs. """ pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a prediction step. """ pass
class_definition
11,212
17,078
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
255
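Custom callbacks are registered either at construction time through the `callbacks` argument of `Trainer`, or afterwards with `trainer.add_callback`. A short sketch, assuming `model`, `training_args` and the datasets are defined elsewhere:

```python
from transformers import Trainer, TrainerCallback


class EvalLoggerCallback(TrainerCallback):
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if state.is_world_process_zero:
            print(f"step {state.global_step}: {metrics}")


trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    callbacks=[EvalLoggerCallback()],
)
# equivalently, after construction:
# trainer.add_callback(EvalLoggerCallback())
```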
class CallbackHandler(TrainerCallback): """Internal class that just calls the list of callbacks in order.""" def __init__(self, callbacks, model, processing_class, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.processing_class = processing_class self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_dataloader = None self.eval_dataloader = None if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks): logger.warning( "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n" + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of" + "callbacks is\n:" + self.callback_list ) def add_callback(self, callback): cb = callback() if isinstance(callback, type) else callback cb_class = callback if isinstance(callback, type) else callback.__class__ if cb_class in [c.__class__ for c in self.callbacks]: logger.warning( f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current" + "list of callbacks is\n:" + self.callback_list ) self.callbacks.append(cb) def pop_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return cb else: for cb in self.callbacks: if cb == callback: self.callbacks.remove(cb) return cb def remove_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return else: self.callbacks.remove(callback) @property def callback_list(self): return "\n".join(cb.__class__.__name__ for cb in self.callbacks) def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_init_end", args, state, control) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_training_stop = False return self.call_event("on_train_begin", args, state, control) def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_train_end", args, state, control) def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_epoch_stop = False return self.call_event("on_epoch_begin", args, state, control) def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_epoch_end", args, state, control) def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_log = False control.should_evaluate = False control.should_save = False return self.call_event("on_step_begin", args, state, control) def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_pre_optimizer_step", args, state, control) def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_optimizer_step", args, state, control) def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_substep_end", args, state, control) def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_step_end", args, state, control) def on_evaluate(self, args: TrainingArguments, state: TrainerState, 
control: TrainerControl, metrics): control.should_evaluate = False return self.call_event("on_evaluate", args, state, control, metrics=metrics) def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): return self.call_event("on_predict", args, state, control, metrics=metrics) def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_save = False return self.call_event("on_save", args, state, control) def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs): control.should_log = False return self.call_event("on_log", args, state, control, logs=logs) def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_prediction_step", args, state, control) def call_event(self, event, args, state, control, **kwargs): for callback in self.callbacks: result = getattr(callback, event)( args, state, control, model=self.model, processing_class=self.processing_class, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, eval_dataloader=self.eval_dataloader, **kwargs, ) # A Callback can skip the return of `control` if it doesn't change it. if result is not None: control = result return control
class_definition
17,081
23,033
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
256
class DefaultFlowCallback(TrainerCallback): """ A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints. """ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if state.global_step == 1 and args.logging_first_step: control.should_log = True if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0: control.should_log = True # Evaluate if ( args.eval_strategy == IntervalStrategy.STEPS and state.global_step % state.eval_steps == 0 and args.eval_delay <= state.global_step ): control.should_evaluate = True # Save if ( args.save_strategy == SaveStrategy.STEPS and state.save_steps > 0 and state.global_step % state.save_steps == 0 ): control.should_save = True # End training if state.global_step >= state.max_steps: control.should_training_stop = True # Save the model at the end if we have a save strategy if args.save_strategy not in [SaveStrategy.NO, SaveStrategy.BEST]: control.should_save = True return control def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if args.logging_strategy == IntervalStrategy.EPOCH: control.should_log = True # Evaluate if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch: control.should_evaluate = True # Save if args.save_strategy == SaveStrategy.EPOCH: control.should_save = True return control
class_definition
23,036
24,906
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
257
class ProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation. You can modify `max_str_len` to control how long strings are truncated when logging. """ def __init__(self, max_str_len: int = 100): """ Initialize the callback with optional max_str_len parameter to control string truncation length. Args: max_str_len (`int`): Maximum length of strings to display in logs. Longer strings will be truncated with a message. """ self.training_bar = None self.prediction_bar = None self.max_str_len = max_str_len def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm( total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True ) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: # make a shallow copy of logs so we can mutate the fields copied # but avoid doing any value pickling. shallow_logs = {} for k, v in logs.items(): if isinstance(v, str) and len(v) > self.max_str_len: shallow_logs[k] = ( f"[String too long to display, length: {len(v)} > {self.max_str_len}. " "Consider increasing `max_str_len` if needed.]" ) else: shallow_logs[k] = v _ = shallow_logs.pop("total_flos", None) # round numbers so that it looks better in console if "epoch" in shallow_logs: shallow_logs["epoch"] = round(shallow_logs["epoch"], 2) self.training_bar.write(str(shallow_logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.close() self.training_bar = None
class_definition
24,909
28,058
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
258
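`ProgressCallback` is normally installed by default when tqdm progress bars are enabled; to change its string truncation you can swap the instance, roughly as in this sketch (assuming an existing `trainer`):

```python
from transformers.trainer_callback import ProgressCallback

trainer.remove_callback(ProgressCallback)                # remove the default instance by class
trainer.add_callback(ProgressCallback(max_str_len=32))   # re-add with shorter string truncation in logs
```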
class PrinterCallback(TrainerCallback): """ A bare [`TrainerCallback`] that just prints the logs. """ def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs)
class_definition
28,061
28,345
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
259
class EarlyStoppingCallback(TrainerCallback, ExportableState): """ A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. """ def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. self.early_stopping_patience_counter = 0 def check_metric_value(self, args, state, control, metric_value): # best_metric is set by code for load_best_model operator = np.greater if args.greater_is_better else np.less if state.best_metric is None or ( operator(metric_value, state.best_metric) and abs(metric_value - state.best_metric) > self.early_stopping_threshold ): self.early_stopping_patience_counter = 0 else: self.early_stopping_patience_counter += 1 def on_train_begin(self, args, state, control, **kwargs): if not args.load_best_model_at_end: logger.warning( "Using EarlyStoppingCallback without load_best_model_at_end=True. " "Once training is finished, the best model will not be loaded automatically." ) assert ( args.metric_for_best_model is not None ), "EarlyStoppingCallback requires metric_for_best_model to be defined" assert ( args.eval_strategy != IntervalStrategy.NO ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch" def on_evaluate(self, args, state, control, metrics, **kwargs): metric_to_check = args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics.get(metric_to_check) if metric_value is None: logger.warning( f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping" " is disabled" ) return self.check_metric_value(args, state, control, metric_value) if self.early_stopping_patience_counter >= self.early_stopping_patience: control.should_training_stop = True def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, }, }
class_definition
28,348
31,866
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_callback.py
null
260
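A typical wiring, sketched under the assumption that evaluation and saving run on the same step interval so `best_metric` is refreshed when the callback checks it (`model`, `train_ds` and `eval_ds` are assumed to exist):

```python
from transformers import EarlyStoppingCallback, Trainer, TrainingArguments

training_args = TrainingArguments(
    output_dir="out",
    eval_strategy="steps",
    eval_steps=500,
    save_steps=500,
    load_best_model_at_end=True,        # required so TrainerState.best_metric is tracked
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0.0)],
)
```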
class DistributedSamplerWithLoop(DistributedSampler): """ Like a torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled samples to make each process have a round multiple of batch_size samples. Args: dataset (`torch.utils.data.Dataset`): Dataset used for sampling. batch_size (`int`): The batch size used with this sampler kwargs (`Dict[str, Any]`, *optional*): All other keyword arguments passed to `DistributedSampler`. """ def __init__(self, dataset, batch_size, **kwargs): super().__init__(dataset, **kwargs) self.batch_size = batch_size def __iter__(self): indices = list(super().__iter__()) remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size # DistributedSampler already added samples from the beginning to make the number of samples a round multiple # of the world size, so we skip those. start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0 indices += indices[start_remainder : start_remainder + remainder] return iter(indices)
class_definition
9,937
11,171
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
261
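The looping behaviour is easiest to see by constructing the sampler directly with explicit `num_replicas`/`rank` (no process group is needed in that case). A small sketch:

```python
import torch
from torch.utils.data import TensorDataset
from transformers.trainer_pt_utils import DistributedSamplerWithLoop

dataset = TensorDataset(torch.arange(10))
sampler = DistributedSamplerWithLoop(dataset, batch_size=4, num_replicas=2, rank=0, shuffle=False)
indices = list(iter(sampler))
# This rank's indices are looped back to a multiple of batch_size, e.g. [0, 2, 4, 6, 8, 0, 2, 4]
print(indices, len(indices))
```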
class EvalLoopContainer: """ Container to store intermediate results of evaluation loop Args: do_nested_concat (`bool`, *optional*, defaults to `True`): If set to `True`, each iteration will recursively concatenate a new object containing tensors to the existing stored tensors, provided that the structure of the existing object and the new one are identical. If set to `False`, all newly added tensors will be stored in a list. padding_index (`int`, *optional*, defaults to -100): Value used to pad tensors of different shapes when `do_nested_concat=True`. """ def __init__(self, do_nested_concat: bool = True, padding_index: int = -100): self.do_nested_concat = do_nested_concat self.padding_index = padding_index self.tensors = None self.arrays = None def add(self, tensors) -> None: """Add tensors to the stored objects. If `do_nested_concat=True`, the tensors will be concatenated recursively.""" if self.tensors is None: self.tensors = tensors if self.do_nested_concat else [tensors] elif self.do_nested_concat: self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index) else: self.tensors.append(tensors) def to_cpu_and_numpy(self) -> None: """Move tensors in stored objects to CPU and convert them to numpy arrays.""" # Check if we have something to add, if not just return if self.tensors is None: return new_arrays = nested_numpify(self.tensors) if self.arrays is None: self.arrays = new_arrays elif self.do_nested_concat: self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index) else: self.arrays.extend(new_arrays) # reset device tensors after adding to cpu self.tensors = None def get_arrays(self): """Returns the numpified and moved to CPU stored objects.""" self.to_cpu_and_numpy() return self.arrays
class_definition
11,174
13,295
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
262
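A minimal sketch of how the container accumulates ragged batches, padding them to a common width with `padding_index`:

```python
import torch
from transformers.trainer_pt_utils import EvalLoopContainer

container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
for logits in [torch.ones(2, 3), torch.ones(2, 5)]:  # batches with different sequence lengths
    container.add(logits)
    container.to_cpu_and_numpy()   # optionally offload to CPU/numpy after each step

all_logits = container.get_arrays()
print(all_logits.shape)            # (4, 5); shorter rows are padded with -100
```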
class SequentialDistributedSampler(Sampler): """ Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end. Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. """ def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None): warnings.warn( "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank num_samples = len(self.dataset) # Add extra samples to make num_samples a multiple of batch_size if passed if batch_size is not None: self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size else: self.num_samples = int(math.ceil(num_samples / num_replicas)) self.total_size = self.num_samples * self.num_replicas self.batch_size = batch_size def __iter__(self): indices = list(range(len(self.dataset))) # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert ( len(indices) == self.total_size ), f"Indices length {len(indices)} and total size {self.total_size} mismatched" # subsample indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] assert ( len(indices) == self.num_samples ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched" return iter(indices) def __len__(self): return self.num_samples
class_definition
13,298
15,757
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
263
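A standalone sketch, not from the source, of the index arithmetic this sampler performs: the dataset indices are padded to a multiple of the per-rank sample count, and each rank then receives one contiguous, sequential chunk.

import math

dataset_len, num_replicas = 10, 3
num_samples = math.ceil(dataset_len / num_replicas)       # 4 samples per rank
total_size = num_samples * num_replicas                   # 12 indices after padding
indices = list(range(dataset_len))
indices += indices[: total_size - len(indices)]           # pad with samples from the start of the dataset
shards = [indices[rank * num_samples : (rank + 1) * num_samples] for rank in range(num_replicas)]
print(shards)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 0, 1]]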
class DistributedTensorGatherer: """ A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks. If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices: `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]` to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and 2 will be responsible of making predictions for the following samples: - P0: `[0, 1, 2, 3, 4, 5]` - P1: `[6, 7, 8, 9, 10, 11]` - P2: `[12, 13, 14, 15, 0, 1]` The first batch treated on each process will be - P0: `[0, 1]` - P1: `[6, 7]` - P2: `[12, 13]` So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices: `[0, 1, 6, 7, 12, 13]` If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop: `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]` For some reason, that's not going to roll their boat. This class is there to solve that problem. Args: world_size (`int`): The number of processes used in the distributed training. num_samples (`int`): The number of samples in our dataset. make_multiple_of (`int`, *optional*): If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples). padding_index (`int`, *optional*, defaults to -100): The padding index to use if the arrays don't all have the same sequence length. """ def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100): warnings.warn( "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.world_size = world_size self.num_samples = num_samples total_size = world_size if make_multiple_of is None else world_size * make_multiple_of self.total_samples = int(np.ceil(num_samples / total_size)) * total_size self.process_length = self.total_samples // world_size self._storage = None self._offsets = None self.padding_index = padding_index def add_arrays(self, arrays): """ Add `arrays` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we're bound to get an OOM, it happens at the beginning. """ if arrays is None: return if self._storage is None: self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index) self._offsets = list(range(0, self.total_samples, self.process_length)) slice_len, self._storage = self._nested_set_tensors(self._storage, arrays) for i in range(self.world_size): self._offsets[i] += slice_len def _nested_set_tensors(self, storage, arrays): if isinstance(arrays, (list, tuple)): result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)] return result[0][0], type(arrays)(r[1] for r in result) assert ( arrays.shape[0] % self.world_size == 0 ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}." slice_len = arrays.shape[0] // self.world_size for i in range(self.world_size): if len(arrays.shape) == 1: storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len] else: # Expand the array on the fly if needed. 
if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]: storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index) storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[ i * slice_len : (i + 1) * slice_len ] return slice_len, storage def finalize(self): """ Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length). """ if self._storage is None: return if self._offsets[0] != self.process_length: logger.warning("Not all data has been set. Are you sure you passed all values?") return nested_truncate(self._storage, self.num_samples)
class_definition
17,124
22,050
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
264
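A sketch of the chunked-gathering flow described in the docstring (the class is deprecated, so instantiating it emits a FutureWarning). The arrays passed to `add_arrays` are assumed to already be the concatenation of every process's step predictions, which is why their first dimension is a multiple of the world size; the values themselves are made up.

import numpy as np
from transformers.trainer_pt_utils import DistributedTensorGatherer

world_size, num_samples = 3, 16                # 16 samples get padded to 18, i.e. 6 per process
gatherer = DistributedTensorGatherer(world_size, num_samples)
for step in range(3):                          # three eval steps of 2 samples per process
    step_preds = 100 * step + np.arange(6).reshape(6, 1)  # fake gathered predictions, shape (3 * 2, 1)
    gatherer.add_arrays(step_preds)
preds = gatherer.finalize()                    # re-ordered per process and truncated back to 16 samples
print(preds.shape)                             # (16, 1)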
class LabelSmoother: """ Adds label-smoothing on a pre-computed output from a Transformers model. Args: epsilon (`float`, *optional*, defaults to 0.1): The label smoothing factor. ignore_index (`int`, *optional*, defaults to -100): The index in the labels to ignore when computing the loss. """ epsilon: float = 0.1 ignore_index: int = -100 def __call__(self, model_output, labels, shift_labels=False): logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0] if shift_labels: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() log_probs = -nn.functional.log_softmax(logits, dim=-1) if labels.dim() == log_probs.dim() - 1: labels = labels.unsqueeze(-1) padding_mask = labels.eq(self.ignore_index) # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask # will ignore them in any case. labels = torch.clamp(labels, min=0) nll_loss = log_probs.gather(dim=-1, index=labels) # works for fp16 input tensor too, by internally upcasting it to fp32 smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32) nll_loss.masked_fill_(padding_mask, 0.0) smoothed_loss.masked_fill_(padding_mask, 0.0) # Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded): num_active_elements = padding_mask.numel() - padding_mask.long().sum() nll_loss = nll_loss.sum() / num_active_elements smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1]) return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
class_definition
22,064
23,900
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
265
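A minimal sketch of using `LabelSmoother` directly on dummy logits (inside `Trainer` it is normally wired up through the `label_smoothing_factor` training argument); the shapes and label values are arbitrary.

import torch
from transformers.trainer_pt_utils import LabelSmoother

smoother = LabelSmoother()                     # epsilon=0.1, ignore_index=-100
logits = torch.randn(2, 5, 8)                  # (batch, seq_len, vocab_size)
labels = torch.randint(0, 8, (2, 5))
labels[:, -1] = -100                           # padded positions must not contribute to the loss
loss = smoother({"logits": logits}, labels)
# loss = (1 - epsilon) * NLL(true labels) + epsilon * (mean NLL over the whole vocabulary),
# both averaged over the non-ignored positions only
print(loss)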
class LengthGroupedSampler(Sampler): r""" Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, generator=None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") self.batch_size = batch_size if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..." ) lengths = lengths.tolist() self.lengths = lengths self.generator = generator def __len__(self): return len(self.lengths) def __iter__(self): indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator) return iter(indices)
class_definition
25,750
27,460
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
266
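A hypothetical sketch: `features` stands in for a tokenized dataset whose items are dicts with an `input_ids` key, and the sampler is plugged into a plain `DataLoader` so that each batch groups examples of similar length.

from torch.utils.data import DataLoader
from transformers.trainer_pt_utils import LengthGroupedSampler

features = [{"input_ids": list(range(n))} for n in (5, 12, 7, 3, 11, 6, 9, 4)]
sampler = LengthGroupedSampler(batch_size=2, dataset=features, model_input_name="input_ids")
loader = DataLoader(features, batch_size=2, sampler=sampler, collate_fn=lambda batch: batch)
for batch in loader:
    print([len(item["input_ids"]) for item in batch])  # lengths within each batch are close together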
class DistributedLengthGroupedSampler(DistributedSampler): r""" Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ # Copied and adapted from PyTorch DistributedSampler. def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, num_replicas: Optional[int] = None, rank: Optional[int] = None, seed: int = 0, drop_last: bool = False, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.batch_size = batch_size self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.drop_last = drop_last if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to" " List[int]..." ) lengths = lengths.tolist() self.lengths = lengths # If the dataset length is evenly divisible by # of replicas, then there # is no need to drop any data, since the dataset will be split equally. if self.drop_last and len(self.lengths) % self.num_replicas != 0: # Split to nearest available length that is evenly divisible. # This is to ensure each rank receives the same amount of data when # using this Sampler. self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas) else: self.num_samples = math.ceil(len(self.lengths) / self.num_replicas) self.total_size = self.num_samples * self.num_replicas self.seed = seed def __iter__(self) -> Iterator: # Deterministically shuffle based on epoch and seed g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g) if not self.drop_last: # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] else: # remove tail of data to make it evenly divisible. indices = indices[: self.total_size] assert len(indices) == self.total_size # subsample indices = indices[self.rank : self.total_size : self.num_replicas] assert len(indices) == self.num_samples return iter(indices)
class_definition
27,463
31,148
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
267
class ShardSampler(Sampler): """ Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1. The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1. """ def __init__( self, dataset: Dataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.total_batch_size = total_batch_size = batch_size * num_processes num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size) self.total_num_samples = num_batches * total_batch_size def __iter__(self): indices = list(range(len(self.dataset))) # Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset # and it needs to be done several times. while len(indices) < self.total_num_samples: indices += indices[: (self.total_num_samples - len(indices))] result = [] for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size): result += indices[batch_start : batch_start + self.batch_size] return iter(result) def __len__(self): # Each shard only sees a fraction of total_num_samples. return self.total_num_samples // self.num_processes
class_definition
31,151
33,051
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
268
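A short sketch, not from the source, that reproduces the docstring's example of sharding 16 samples between two processes with a per-process batch size of 4.

from transformers.trainer_pt_utils import ShardSampler

dataset = list(range(16))
for process_index in range(2):
    sampler = ShardSampler(dataset, batch_size=4, num_processes=2, process_index=process_index)
    print(process_index, list(sampler))
# 0 [0, 1, 2, 3, 8, 9, 10, 11]
# 1 [4, 5, 6, 7, 12, 13, 14, 15]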
class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of 2: - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]` - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]` <Tip warning={true}> If your IterableDataset implements some randomization that needs to be applied the same way on all processes (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with this. </Tip> Args: dataset (`torch.utils.data.IterableDataset`): The batch sampler to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard. drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. seed (`int`, *optional*, defaults to 0): A random seed that will be used for the random number generation in [`~trainer_pt_utils.IterableDatasetShard.set_epoch`]. """ def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, seed: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.seed = seed self.epoch = 0 self.num_examples = 0 def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __iter__(self): self.num_examples = 0 if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.seed + self.epoch) real_batch_size = self.batch_size * self.num_processes process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size) first_batch = None current_batch = [] for element in self.dataset: self.num_examples += 1 current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. 
if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] def __len__(self): # Will raise an error if the underlying dataset is not sized. if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
class_definition
33,054
37,781
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
269
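A short sketch, not from the source, that reproduces the docstring's example with a tiny iterable dataset of 12 elements split across two processes with a per-process batch size of 2.

from torch.utils.data import IterableDataset
from transformers.trainer_pt_utils import IterableDatasetShard

class RangeDataset(IterableDataset):
    # Stand-in iterable dataset yielding 0..11.
    def __iter__(self):
        yield from range(12)

for process_index in range(2):
    shard = IterableDatasetShard(RangeDataset(), batch_size=2, num_processes=2, process_index=process_index)
    print(process_index, list(shard))
# 0 [0, 1, 4, 5, 8, 9]
# 1 [2, 3, 6, 7, 10, 11]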
class AcceleratorConfig: """ A subset of arguments relating to the underlying [`accelerate.Accelerator`] implementation utilized in the `Trainer` that can be customized. Mostly relating to data. Parameters: split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. use_seedable_sampler (`bool`, *optional*, defaults to `True`): Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducable using a different sampling technique. While seed-to-seed results may differ, on average the differences are neglible when using multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results. gradient_accumulation_kwargs (`dict`, *optional*): Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. Any of the following (optional) keys are acceptable: num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if the latter is set to 1, otherwise an exception will be raised. adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`. non_blocking (`bool`, *optional*, defaults to `False`): Whether to use non-blocking CUDA calls to help minimize synchronization during distributed training with prepared `DataLoader` inputs being moved to device. Best if used with `pin_memory=True` in the `TrainingArguments`. use_configured_state (`bool*, *optional*, defaults to `False`): Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning. """ # Data related arguments split_batches: bool = field( default=False, metadata={ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If" " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a" " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set" " in your script multiplied by the number of processes." 
}, ) dispatch_batches: bool = field( default=None, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process" " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose" " underlying dataset is an `IterableDataslet`, `False` otherwise." }, ) even_batches: bool = field( default=True, metadata={ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the" " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among" " all workers." }, ) use_seedable_sampler: bool = field( default=True, metadata={ "help": "Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`])." "Ensures training results are fully reproducable using a different sampling technique. " "While seed-to-seed results may differ, on average the differences are neglible when using" "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results." }, ) non_blocking: Optional[bool] = field( default=False, metadata={ "help": "Whether to use non-blocking CUDA calls to help minimize synchronization during " "distributed training with prepared `DataLoader` inputs being moved to device. " "Best if used with `pin_memory=True` in the `TrainingArguments`. Requires accelerate " "v0.30.0." }, ) gradient_accumulation_kwargs: Optional[Dict] = field( default=None, metadata={ "help": "Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. " "Any of the following (optional) keys are acceptable: " " num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if " " the latter is set to 1, otherwise an exception will be raised. " " adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. " " The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. " " sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. " " The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`." }, ) use_configured_state: bool = field( default=False, metadata={ "help": "Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`." "If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning." }, ) @classmethod def from_json_file(cls, json_file): # Check if exists open_file = io.open if os.path.exists(json_file) else open with open_file(json_file, "r", encoding="utf-8") as f: config_dict = json.load(f) # Check for keys and load sensible defaults extra_keys = sorted(key for key in config_dict.keys() if key not in cls.__dataclass_fields__.keys()) if len(extra_keys) > 0: raise ValueError( f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `transformers`" " version or fix (and potentially remove these keys) from your config file." ) return cls(**config_dict) def to_dict(self): return copy.deepcopy(self.__dict__) def pop(self, key, default=None): return self.__dict__.pop(key, default)
class_definition
50,573
58,541
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
270
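A hedged sketch of how such a config is usually consumed: recent versions of `TrainingArguments` accept an `accelerator_config` argument (this object, a plain dict, or a path to a JSON file). The `output_dir` and the chosen values below are illustrative, and `sync_each_batch` additionally requires a recent `accelerate` release.

from transformers import TrainingArguments
from transformers.trainer_pt_utils import AcceleratorConfig

accelerator_config = AcceleratorConfig(
    split_batches=False,
    even_batches=True,
    use_seedable_sampler=True,
    gradient_accumulation_kwargs={"sync_each_batch": True},
)
args = TrainingArguments(output_dir="tmp_trainer_output", accelerator_config=accelerator_config)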
class LayerWiseDummyOptimizer(torch.optim.Optimizer):
    """
    For layer-wise optimizers such as the GaLore optimizer, the optimization step is already performed through
    post-gradient hooks. The trick, therefore, is to create a dummy optimizer that can take arbitrary args and
    kwargs and performs a no-op during training.

    Initial idea from @hiyouga in LLaMA-Factory:
    https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba
    """

    def __init__(self, optimizer_dict=None, *args, **kwargs):
        dummy_tensor = torch.randn(1, 1)
        self.optimizer_dict = optimizer_dict
        super().__init__([dummy_tensor], {"lr": kwargs.get("lr", 1e-03)})

    def zero_grad(self, set_to_none: bool = True) -> None:
        pass

    def step(self, closure=None) -> Optional[float]:
        pass
class_definition
58,544
59,455
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
271
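A hedged sketch of the layer-wise pattern this dummy optimizer exists to support, assuming PyTorch >= 2.1 for `register_post_accumulate_grad_hook`: every parameter gets its own real optimizer that is stepped from a gradient hook, while the training loop only ever sees the no-op dummy. The model and learning rate below are illustrative.

import torch
from transformers.trainer_pt_utils import LayerWiseDummyOptimizer

model = torch.nn.Linear(4, 2)
optimizer_dict = {p: torch.optim.SGD([p], lr=0.1) for p in model.parameters()}

def make_hook(param):
    def hook(*_):
        # Step and reset the per-parameter optimizer as soon as this parameter's gradient is ready.
        optimizer_dict[param].step()
        optimizer_dict[param].zero_grad()
    return hook

for p in model.parameters():
    p.register_post_accumulate_grad_hook(make_hook(p))

dummy_optimizer = LayerWiseDummyOptimizer(optimizer_dict=optimizer_dict, lr=0.1)
loss = model(torch.randn(3, 4)).sum()
loss.backward()           # the hooks fire here and update the weights
dummy_optimizer.step()    # no-op, but keeps a standard training loop happy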
class LayerWiseDummyScheduler(LRScheduler):
    """
    For layer-wise optimizers such as the GaLore optimizer, the optimization and scheduling steps are already
    performed through post-gradient hooks. The trick, therefore, is to create a dummy scheduler that can take
    arbitrary args and kwargs and performs a no-op during training.
    """

    def __init__(self, *args, **kwargs):
        self.default_lr = kwargs["lr"]
        optimizer = LayerWiseDummyOptimizer(**kwargs)
        last_epoch = -1
        verbose = False
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        # default value
        lrs = [self.default_lr]

        # take each lr from the per-parameter optimizers if they exist; this assumes the optimizer is a
        # `LayerWiseDummyOptimizer`
        if self.optimizer is not None:
            param_wise_lrs = [
                [group["lr"] for group in optim.param_groups] for optim in self.optimizer.optimizer_dict.values()
            ]
            lrs = list(chain(*param_wise_lrs))

        return lrs

    def _get_closed_form_lr(self):
        return self.base_lrs
class_definition
59,458
60,566
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer_pt_utils.py
null
272
class TFModelUtilsMixin: """ A few utilities for `keras.Model`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters Returns: `int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params()
class_definition
3,632
4,300
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
273
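A brief, hedged usage sketch: any TensorFlow model class that inherits this mixin exposes `num_parameters()`. The checkpoint name is illustrative, and loading it requires TensorFlow plus network access.

from transformers import TFAutoModel

model = TFAutoModel.from_pretrained("bert-base-uncased")
print(model.num_parameters())                     # total parameter count
print(model.num_parameters(only_trainable=True))  # trainable parameters only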
class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_loss = loss_fn(tf.nn.relu(labels), logits) # make sure only labels that are not equal to -100 affect the loss loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,))
class_definition
6,775
8,159
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
274
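A self-contained sketch, not from the source, of the non-legacy masking trick used above: labels of -100 are clamped to a valid id so the sparse crossentropy gather cannot fail, and their per-token losses are then zeroed out and excluded from the mean. The label and logit values are arbitrary.

import tensorflow as tf

labels = tf.constant([[2, 5, -100], [1, -100, -100]])
logits = tf.random.normal((2, 3, 8))
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
unmasked_loss = loss_fn(tf.nn.relu(labels), logits)            # relu clamps -100 to label 0
loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype)
masked_mean = tf.reduce_sum(unmasked_loss * loss_mask) / tf.reduce_sum(loss_mask)
print(masked_mean)                                             # mean loss over the three real tokens only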
class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0
class_definition
8,162
8,599
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
275
class TFTokenClassificationLoss: """ Loss function suitable for token classification. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if tf.executing_eagerly(): # Data-dependent conditionals are forbidden in XLA if tf.math.reduce_any(labels == -1): tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_loss = loss_fn(tf.nn.relu(labels), logits) # make sure only labels that are not equal to -100 or -1 # are taken into account as loss loss_mask = tf.cast(labels >= 0, dtype=unmasked_loss.dtype) # Avoid possible division by zero later # Masked positions will have a loss of NaN because -100 and -1 are not valid labels masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,))
class_definition
8,602
10,607
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
276
class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. """ def hf_compute_loss(self, labels, logits): if logits.shape.rank == 1 or logits.shape[1] == 1: loss_fn = keras.losses.MeanSquaredError(reduction=keras.losses.Reduction.NONE) if labels.shape.rank == 1: # MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that labels = tf.expand_dims(labels, axis=-1) else: loss_fn = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE ) return loss_fn(labels, logits)
class_definition
10,610
11,310
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
277
class TFMultipleChoiceLoss: """Loss function suitable for multiple choice tasks.""" def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) return loss_fn(labels, logits)
class_definition
11,313
11,605
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
278
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """
class_definition
11,608
11,926
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
279
class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) # make sure only labels that are not equal to -100 # are taken into account as loss # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits) ns_loss_mask = tf.cast(labels != -100, dtype=unmasked_ns_loss.dtype) # Just zero out samples where label is -100, no reduction masked_ns_loss = unmasked_ns_loss * ns_loss_mask return masked_ns_loss
class_definition
11,929
13,440
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
280
class TFPreTrainedModel(keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): r""" Base class for all TF models. [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" _auto_class = None _using_dummy_loss = None _label_to_output_map = None # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ dummies = {} for key, spec in self.input_signature.items(): # 2 is the most correct arbitrary size. I will not be taking questions dummy_shape = [dim if dim is not None else 2 for dim in spec.shape] if spec.shape[0] is None: # But let's make the batch size 1 to save memory anyway dummy_shape[0] = 1 dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype) if key == "token_type_ids": # Some models have token_type_ids but with a vocab_size of 1 dummies[key] = tf.zeros_like(dummies[key]) if self.config.add_cross_attention and "encoder_hidden_states" in inspect.signature(self.call).parameters: if "encoder_hidden_states" not in dummies: if self.main_input_name == "input_ids": dummies["encoder_hidden_states"] = tf.ones( shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name="encoder_hidden_states" ) else: raise NotImplementedError( "Model has cross-attention but we couldn't infer the shape for the encoder hidden states. Please manually override dummy_inputs!" ) return dummies def build_in_name_scope(self): with tf.name_scope(self.name): self.build(input_shape=None) @property def framework(self) -> str: """ :str: Identifies that this is a TensorFlow model. """ return "tf" def build(self, input_shape=None): pass # This is just here to make sure we don't call the superclass build() def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise TypeError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. 
To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._set_save_spec(self.input_signature) def get_config(self): return self.config.to_dict() @functools.wraps(keras.Model.fit) def fit(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().fit(*args, **kwargs) @functools.wraps(keras.Model.train_on_batch) def train_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().train_on_batch(*args, **kwargs) @functools.wraps(keras.Model.test_on_batch) def test_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().test_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict_on_batch) def predict_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().predict_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict) def predict(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().predict(*args, **kwargs) @functools.wraps(keras.Model.evaluate) def evaluate(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().evaluate(*args, **kwargs) @classmethod def from_config(cls, config, **kwargs): if isinstance(config, PretrainedConfig): return cls._from_config(config, **kwargs) return cls._from_config(cls.config_class.from_dict(config, **kwargs)) @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor: """ Prepare the head mask if needed. Args: head_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. Returns: `tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.shape.rank == 1: head_mask = head_mask[None, None, :, None, None] head_mask = tf.repeat(head_mask, repeats=num_hidden_layers, axis=0) elif head_mask.shape.rank == 2: head_mask = head_mask[:, None, :, None, None] assert head_mask.shape.rank == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = tf.cast(head_mask, tf.float32) # switch to float if need + fp16 compatibility return head_mask @tf.function def serving(self, inputs): """ Args: Method used for serving the model. Does not have a specific signature, but will be specialized as concrete functions when saving with `save_pretrained`. inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. 
""" output = self.call(inputs) return self.serving_output(output) @property def input_signature(self) -> Dict[str, tf.TensorSpec]: """ This property should return a dict mapping input names to tf.TensorSpec objects, representing the expected shape and dtype for model inputs. It is used for both serving and for generating dummy inputs. """ model_inputs = list(inspect.signature(self.call).parameters) sig = {} if "input_ids" in model_inputs: if self.__class__.__name__.endswith("ForMultipleChoice"): text_dims = 3 else: text_dims = 2 for input_name in ( "input_ids", "attention_mask", "token_type_ids", "decoder_input_ids", "decoder_attention_mask", ): if input_name in model_inputs: sig[input_name] = tf.TensorSpec([None] * text_dims, tf.int32, name=input_name) if "pixel_values" in model_inputs: pixel_values_shape = [None, None, None, None] if hasattr(self.config, "vision_config"): vision_config = self.config.vision_config else: vision_config = self.config if hasattr(vision_config, "num_channels"): pixel_values_shape[1] = vision_config.num_channels else: raise NotImplementedError( "Could not infer number of channels from config, please override input_signature to specify input shapes." ) if hasattr(vision_config, "image_size"): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.image_size elif hasattr(vision_config, "input_size"): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.input_size else: raise NotImplementedError( "Could not infer input image shape from config, please override input_signature to specify input shapes." ) sig["pixel_values"] = tf.TensorSpec(pixel_values_shape, tf.float32, name="pixel_values") if "input_features" in model_inputs: raise NotImplementedError("Audio models need a manually defined input_signature") return sig def serving_output(self, output): """ Prepare the output of the saved model. Can be overridden if specific serving modifications are required. """ if not isinstance(output, ModelOutput): return output for key in output: if key.endswith("hidden_states") and not getattr(self.config, "output_hidden_states", False): output[key] = None elif key.endswith("attentions") and not getattr(self.config, "output_attentions", False): output[key] = None elif key == "past_key_values" and not getattr(self.config, "use_cache", False): output[key] = None elif key == "cross_attentions" and not ( getattr(self.config, "output_attentions", False) and getattr(self.config, "add_cross_attention", False) ): output[key] = None if isinstance(output[key], (tuple, list)): try: output[key] = tf.convert_to_tensor(output[key]) except (ValueError, tf.errors.InvalidArgumentError): pass # Layers may not have the same dimensions return output @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()`. Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. # Alternativelly, the model can also have a custom `generate` function. if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): return False return True def get_input_embeddings(self) -> keras.layers.Layer: """ Returns the model's input embeddings layer. Returns: `tf.Variable`: The embeddings layer mapping vocabulary to hidden states. 
""" main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def _save_checkpoint(self, checkpoint_dir, epoch): if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir) # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer # state for us, because it requires special handling for objects like custom losses, which we use # internally and which users are likely to use too weights_path = os.path.join(checkpoint_dir, "weights.h5") self.save_weights(weights_path) extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()} extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle") with open(extra_data_path, "wb") as f: pickle.dump(extra_data, f) def prepare_tf_dataset( self, dataset: "datasets.Dataset", # noqa:F821 batch_size: int = 8, shuffle: bool = True, tokenizer: Optional["PreTrainedTokenizerBase"] = None, collate_fn: Optional[Callable] = None, collate_fn_args: Optional[Dict[str, Any]] = None, drop_remainder: Optional[bool] = None, prefetch: bool = True, ): """ Wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching. This method is designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like `fit()` without further modification. The method will drop columns from the dataset if they don't match input names for the model. If you want to specify the column names to return rather than using the names that match this model, we recommend using `Dataset.to_tf_dataset()` instead. Args: dataset (`Any`): A [~`datasets.Dataset`] to be wrapped as a `tf.data.Dataset`. batch_size (`int`, *optional*, defaults to 8): The size of batches to return. shuffle (`bool`, defaults to `True`): Whether to return samples from the dataset in random order. Usually `True` for training datasets and `False` for validation/test datasets. tokenizer ([`PreTrainedTokenizerBase`], *optional*): A `PreTrainedTokenizer` that will be used to pad samples to create batches. Has no effect if a specific `collate_fn` is passed instead. collate_fn (`Callable`, *optional*): A function that collates samples from the dataset into a single batch. Defaults to `DefaultDataCollator` if no `tokenizer` is supplied or `DataCollatorWithPadding` if a `tokenizer` is passed. collate_fn_args (`Dict[str, Any]`, *optional*): A dict of arguments to pass to the `collate_fn` alongside the list of samples. drop_remainder (`bool`, *optional*): Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults to the same setting as `shuffle`. prefetch (`bool`, defaults to `True`): Whether to add prefetching to the end of the `tf.data` pipeline. This is almost always beneficial for performance, but can be disabled in edge cases. Returns: `Dataset`: A `tf.data.Dataset` which is ready to pass to the Keras API. 
""" requires_backends(self, ["datasets"]) import datasets if collate_fn is None: if tokenizer is None: collate_fn = DefaultDataCollator(return_tensors="np") else: collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="np") if collate_fn_args is None: collate_fn_args = {} if not isinstance(dataset, datasets.Dataset): raise TypeError("Dataset argument should be a datasets.Dataset!") model_inputs = list(inspect.signature(self.call).parameters) model_labels = find_labels(self.__class__) if "cols_to_retain" in list(inspect.signature(dataset._get_output_signature).parameters.keys()): output_signature, _ = dataset._get_output_signature( dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args, cols_to_retain=model_inputs, ) else: # TODO Matt: This is a workaround for older versions of datasets that are missing the `cols_to_retain` # argument. We should remove this once the minimum supported version of datasets is > 2.3.2 unwanted_columns = [ feature for feature in dataset.features if feature not in model_inputs and feature not in ("label_ids", "label") ] dataset = dataset.remove_columns(unwanted_columns) output_signature, _ = dataset._get_output_signature( dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args ) output_columns = list(output_signature.keys()) feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels] label_cols = [col for col in output_columns if col in model_labels] # Backwards compatibility for older versions of datasets. Previously, if `columns` or `label_cols` # were a single element list, the returned element spec would be a single element. Now, passing [feature] # will return a dict structure {"feature": feature}, and passing a single string will return a single element. feature_cols = feature_cols[0] if len(feature_cols) == 1 else feature_cols label_cols = label_cols[0] if len(label_cols) == 1 else label_cols if drop_remainder is None: drop_remainder = shuffle tf_dataset = dataset.to_tf_dataset( columns=feature_cols, label_cols=label_cols, batch_size=batch_size, shuffle=shuffle, drop_remainder=drop_remainder, collate_fn=collate_fn, collate_fn_args=collate_fn_args, prefetch=prefetch, ) return tf_dataset def compile( self, optimizer="rmsprop", loss="auto_with_warning", metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs, ): """ This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss function themselves. """ if loss in ("auto_with_warning", "passthrough"): # "passthrough" for workflow backward compatibility logger.info( "No loss specified in compile() - the model's internal loss computation will be used as the " "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! " "To disable this behaviour please pass a loss argument, or explicitly pass " "`loss=None` if you do not want your model to compute a loss. You can also specify `loss='auto'` to " "get the internal loss without printing this info string." 
) loss = "auto" if loss == "auto": loss = dummy_loss self._using_dummy_loss = True else: self._using_dummy_loss = False parent_args = list(inspect.signature(keras.Model.compile).parameters.keys()) # This argument got renamed, we need to support both versions if "steps_per_execution" in parent_args: super().compile( optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, steps_per_execution=steps_per_execution, **kwargs, ) else: super().compile( optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, experimental_steps_per_execution=steps_per_execution, **kwargs, ) def compute_loss(self, *args, **kwargs): if hasattr(keras.Model, "compute_loss"): # This will be true in TF 2.8 or greater return super().compute_loss(*args, **kwargs) else: warnings.warn( "The old compute_loss method is deprecated as it conflicts with the Keras compute_loss " "method added in TF 2.8. If you want the original HF compute_loss, please call " "hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, " "calling compute_loss() will get the Keras method instead.", FutureWarning, ) return self.hf_compute_loss(*args, **kwargs) def get_label_to_output_name_mapping(self): arg_names = list(inspect.signature(self.call).parameters) if self._label_to_output_map is not None: return self._label_to_output_map elif "start_positions" in arg_names: return {"start_positions": "start_logits", "end_positions": "end_logits"} elif "sentence_order_label" in arg_names: return {"labels": "prediction_logits", "sentence_order_label": "sop_logits"} elif "next_sentence_label" in arg_names: return {"labels": "prediction_logits", "next_sentence_label": "seq_relationship_logits"} elif "mc_labels" in arg_names: return {"labels": "logits", "mc_labels": "mc_logits"} else: return {} def train_step(self, data): """ A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass. """ # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map` arg_names = list(inspect.signature(self.call).parameters) label_kwargs = find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for key, val in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"): # Newer TF train steps leave this out data = expand_1d(data) x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data) # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify # them during input/label pre-processing. This avoids surprising the user by wrecking their data. # In addition, modifying mutable Python inputs makes XLA compilation impossible. 
if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments, # if those keys are not already present in the input dict if self._using_dummy_loss and y is not None: # If y is a tensor and the model only has one label-like input, map y to that input if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y # Otherwise, copy keys from y to x as long as they weren't already present in x elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for key, val in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for key, val in x.items() if key in label_kwargs} if not y and not self._using_dummy_loss: raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!") if isinstance(y, dict): # Rename labels at this point to match output heads y = {label_to_output.get(key, key): val for key, val in y.items()} # Run forward pass. with tf.GradientTape() as tape: if self._using_dummy_loss and "return_loss" in arg_names: y_pred = self(x, training=True, return_loss=True) else: y_pred = self(x, training=True) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None # This next block matches outputs to label keys. Tensorflow's standard method for doing this # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors) if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] _, y = y.popitem() elif isinstance(y, dict): # If the labels are a dict, match keys from the output by name y_pred = {key: val for key, val in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): # If the labels are a tuple/list, match keys to the output by order, skipping the loss. if list(y_pred.keys())[0] == "loss": y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems else: # If the labels are a single tensor, match them to the first non-loss tensor in the output if list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) # Run backwards pass. self.optimizer.minimize(loss, self.trainable_variables, tape=tape) self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def test_step(self, data): """ A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass. 
""" # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map` arg_names = list(inspect.signature(self.call).parameters) label_kwargs = find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for key, val in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"): # Newer versions leave this out data = expand_1d(data) x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data) # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify # them during input/label pre-processing. This avoids surprising the user by wrecking their data. # In addition, modifying mutable Python inputs makes XLA compilation impossible. if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments, # if those keys are not already present in the input dict if self._using_dummy_loss and y is not None: arg_names = list(inspect.signature(self.call).parameters) # If y is a tensor and the model only has one label-like input, map y to that input if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y # Otherwise, copy keys from y to x as long as they weren't already present in x elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for key, val in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for key, val in x.items() if key in label_kwargs} if not y and not self._using_dummy_loss: raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!") if isinstance(y, dict): # Rename labels at this point to match output heads y = {label_to_output.get(key, key): val for key, val in y.items()} # Run forward pass. if self._using_dummy_loss and "return_loss" in arg_names: y_pred = self(x, return_loss=True, training=False) else: y_pred = self(x, training=False) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None # This next block matches outputs to label keys. Tensorflow's standard method for doing this # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors) if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] _, y = y.popitem() elif isinstance(y, dict): # If the labels are a dict, match keys from the output by name y_pred = {key: val for key, val in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): # If the labels are a tuple/list, match keys to the output by order, skipping the loss. 
if list(y_pred.keys())[0] == "loss": y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems else: # If the labels are a single tensor, match them to the first non-loss tensor in the output if list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def create_model_card( self, output_dir, model_name: str, language: Optional[str] = None, license: Optional[str] = None, tags: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Optional[str] = None, dataset_tags: Optional[Union[str, List[str]]] = None, dataset: Optional[Union[str, List[str]]] = None, dataset_args: Optional[Union[str, List[str]]] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: output_dir (`str` or `os.PathLike`): The folder in which to create the model card. model_name (`str`, *optional*): The name of the model. language (`str`, *optional*): The language of the model (if applicable) license (`str`, *optional*): The license of the model. Will default to the license of the pretrained model used, if the original model given to the `Trainer` comes from a repo on the Hub. tags (`str` or `List[str]`, *optional*): Some tags to be included in the metadata of the model card. finetuned_from (`str`, *optional*): The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the `Trainer` (if it comes from the Hub). tasks (`str` or `List[str]`, *optional*): One or several task identifiers, to be included in the metadata of the model card. dataset_tags (`str` or `List[str]`, *optional*): One or several dataset tags, to be included in the metadata of the model card. dataset (`str` or `List[str]`, *optional*): One or several dataset identifiers, to be included in the metadata of the model card. dataset_args (`str` or `List[str]`, *optional*): One or several dataset arguments, to be included in the metadata of the model card. """ # Avoids a circular import by doing this when necessary. from .modelcard import TrainingSummary # tests_ignore training_summary = TrainingSummary.from_keras( self, keras_history=self.history, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with open(os.path.join(output_dir, "README.md"), "w") as f: f.write(model_card) def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. 
""" main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self.build_in_name_scope() main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, keras.layers.Layer]: """ Returns the model's output embeddings Returns: `tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_output_embeddings() except AttributeError: logger.info("Building the model") self.build_in_name_scope() return lm_head().get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self.build_in_name_scope() lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: `keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: `str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: `tf.Variable`: The weights representing the bias, None if not an LM model. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self.build_in_name_scope() return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self.build_in_name_scope() lm_head.set_bias(value) def get_lm_head(self) -> keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: `keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings( self, new_num_tokens: Optional[int] = None ) -> Union[keras.layers.Embedding, tf.Variable]: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens without doing anything. 
Return: `tf.Variable` or `keras.layers.Embedding`: Pointer to the input tokens of the model. """ # TODO (joao): flagged for replacement (by `_v2_resized_token_embeddings`) due to embeddings refactor # Run the new code path if the model has a keras embeddings layer if isinstance(self.get_input_embeddings(), keras.layers.Embedding): return self._v2_resized_token_embeddings(new_num_tokens) if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> keras.layers.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens without doing anything. Return: `keras.layers.Embedding`: Pointer to the input tokens of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self.get_input_embeddings() model_embeds = self._v2_resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): # TODO (joao): flagged for delection due to embeddings refactor # If the variable holds the weights themselves, return them if isinstance(embedding_layer, tf.Tensor): return embedding_layer # Otherwise, try to get them from the layer's attributes embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model.build_in_name_scope() embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): # TODO (joao): flagged for replacement (by `_v2_resize_token_embeddings`) due to embeddings refactor old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _v2_resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = 
self._v2_get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) # If word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # If word embeddings are not tied, make sure that lm head decoder is resized as well. tied_weights = self.get_input_embeddings() == self.get_output_embeddings() if self.get_output_embeddings() is not None and not tied_weights: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) # TODO (joao): this one probably needs a v2 version with other models new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`tf.Variable`): Old lm head bias to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized bias. """ # TODO (joao): flagged for replacement (by `_v2_get_resized_lm_head_bias`) due to embeddings refactor new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _v2_get_resized_lm_head_bias( self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int ) -> Dict[str, tf.Tensor]: """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`Dict[str, tf.Variable]`): Old lm head bias to be resized. new_num_tokens (`int`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. 
Return: `tf.Tensor`: Values for the resized bias. """ new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): # Determine the size difference (depending on the shape) first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens # Copy the old bias values to the new bias if old_num_tokens > new_num_tokens: new_bias = weight.value()[..., :new_num_tokens] else: padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape)) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`tf.Variable`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `tf.Variable` module of the model without doing anything. 
Return: `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is `None` """ # TODO (joao): flagged for replacement (by `_v2_get_resized_embeddings`) due to embeddings refactor old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight( name=old_embeddings.name.split(":")[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32, ) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def _v2_get_resized_embeddings( self, old_embeddings: keras.layers.Embedding, new_num_tokens: int ) -> keras.layers.Embedding: """ Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. Args: old_embeddings (`keras.layers.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Return: `keras.layers.Embedding`: Resized Embedding layer. """ # Get the initialization range for the embeddings init_range = 0.02 # default value potential_initialization_variable_names = [ "initializer_range", # most common "initializer_factor", # e.g. T5 "init_std", # e.g BART ] for var_name in potential_initialization_variable_names: if hasattr(self.config, var_name): init_range = getattr(self.config, var_name) # Get a new (initialized) embeddings layer new_embeddings = keras.layers.Embedding( input_dim=new_num_tokens, output_dim=old_embeddings.output_dim, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=init_range), name=old_embeddings.embeddings.name[:-13], # exact same scoped name except "/embeddings:0" ) new_embeddings(tf.constant([[0]])) # Copy the old embeddings to the new embeddings if old_embeddings.input_dim >= new_num_tokens: init_embeddings = old_embeddings.embeddings[:new_num_tokens] else: init_embeddings = tf.concat( [old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim :]], axis=0 ) new_embeddings.embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune (`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ raise NotImplementedError def save_pretrained( self, save_directory, saved_model=False, version=1, push_to_hub=False, signatures=None, max_shard_size: Union[int, str] = "5GB", create_pr: bool = False, safe_serialization: bool = False, token: Optional[Union[str, bool]] = None, **kwargs, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~TFPreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str`): Directory to which to save. Will be created if it doesn't exist. saved_model (`bool`, *optional*, defaults to `False`): If the model has to be saved in saved model format as well or not. version (`int`, *optional*, defaults to 1): The version of the saved model. 
A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation https://www.tensorflow.org/tfx/serving/serving_basic push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). signatures (`dict` or `tf.function`, *optional*): Model's signature used for serving. This will be passed to the `signatures` argument of model.save(). max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save the model using `safetensors` or the traditional TensorFlow way (that uses `h5`). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) if saved_model: # If `torch_dtype` is in the config with a torch dtype class as the value, we need to change it to string. # (Although TF doesn't care about this attribute, we can't just remove it or set it to `None`.) 
if getattr(self.config, "torch_dtype", None) is not None and not isinstance(self.config.torch_dtype, str): self.config.torch_dtype = str(self.config.torch_dtype).split(".")[1] if signatures is None: serving_default = self.serving.get_concrete_function(self.input_signature) if any(spec.dtype == tf.int32 for spec in self.input_signature.values()): int64_spec = { key: tf.TensorSpec( shape=spec.shape, dtype=tf.int64 if spec.dtype == tf.int32 else spec.dtype, name=spec.name ) for key, spec in self.input_signature.items() } int64_serving = self.serving.get_concrete_function(int64_spec) signatures = {"serving_default": serving_default, "int64_serving": int64_serving} else: signatures = serving_default saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=signatures) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.architectures = [self.__class__.__name__[2:]] # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) self.config.save_pretrained(save_directory) if self.can_generate(): self.generation_config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` weights_name = SAFE_WEIGHTS_NAME if safe_serialization else TF2_WEIGHTS_NAME output_model_file = os.path.join(save_directory, weights_name) shards, index = tf_shard_checkpoint(self.weights, max_shard_size, weights_name=weights_name) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") if ( filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() ): os.remove(full_filename) if index is None: if safe_serialization: state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in self.weights} safe_save_file(state_dict, output_model_file, metadata={"format": "tf"}) else: self.save_weights(output_model_file) logger.info(f"Model weights saved in {output_model_file}") else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else TF2_WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, save_index_file) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as index_file: content = json.dumps(index, indent=2, sort_keys=True) + "\n" index_file.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) for shard_file, shard in shards.items(): if safe_serialization: shard_state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in shard} safe_save_file( shard_state_dict, os.path.join(save_directory, shard_file), metadata={"format": "tf"} ) else: with h5py.File(os.path.join(save_directory, shard_file), mode="w") as shard_file: layers = [] for layer in sorted(shard, key=lambda x: x.name): if "model." 
in layer.name or len(layer.name.split("/")) == 1: layer_name = layer.name else: layer_name = "/".join(layer.name.split("/")[1:]) param_dset = shard_file.create_dataset( layer_name, layer.numpy().shape, dtype=layer.numpy().dtype ) param_dset[:] = layer.numpy() layers.append(layer_name.encode("utf8")) save_attributes_to_hdf5_group(shard_file, "layer_names", layers) if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token, ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, ignore_mismatched_sizes: bool = False, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", use_safetensors: bool = None, **kwargs, ): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. - `None` if you are both providing the configuration and state dictionary (resp. with keyword arguments `config` and `state_dict`). model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string valid as input to [`~PretrainedConfig.from_pretrained`]. Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch state_dict save file (see docstring of `pretrained_model_name_or_path` argument). ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). 
cache_dir (`str`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies: (`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. tf_to_pt_weight_rename (`Callable`, *optional*): A function that is called to transform the names of weights during the PyTorch to TensorFlow crossloading process. This is not necessary for most models, but is useful to allow composite models to be crossloaded correctly. use_safetensors (`bool`, *optional*, defaults to `None`): Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` is not installed, it will be set to `False`. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. 
Examples: ```python >>> from transformers import BertConfig, TFBertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = TFBertModel.from_pretrained("google-bert/bert-base-uncased") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model = TFBertModel.from_pretrained("./test/saved_model/") >>> # Update configuration during loading. >>> model = TFBertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). >>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config) ```""" from_pt = kwargs.pop("from_pt", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) use_auth_token = kwargs.pop("use_auth_token", None) trust_remote_code = kwargs.pop("trust_remote_code", None) _ = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) tf_to_pt_weight_rename = kwargs.pop("tf_to_pt_weight_rename", None) # Not relevant for TF models _ = kwargs.pop("adapter_kwargs", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if trust_remote_code is True: logger.warning( "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." ) user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if use_safetensors is None and not is_safetensors_available(): use_safetensors = False # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs, ) else: model_kwargs = kwargs if commit_hash is None: commit_hash = getattr(config, "_commit_hash", None) # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. 
is_sharded = False # Load model if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): # Load from a sharded PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) is_sharded = True elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) ): # Load from a safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) ): # Load from a sharded safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) is_sharded = True elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)): # Load from a sharded TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME) is_sharded = True # At this stage we don't have a weight file so we will raise an error. elif use_safetensors: raise EnvironmentError( f"Error no file named {SAFE_WEIGHTS_NAME} or {SAFE_WEIGHTS_INDEX_NAME} found in directory {pretrained_model_name_or_path}. " f"Please make sure that the model has been saved with `safe_serialization=True` or do not " f"set `use_safetensors=True`." ) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile( os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) ): raise EnvironmentError( f"Error no file named {TF2_WEIGHTS_NAME} or {SAFE_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " "weights." ) else: raise EnvironmentError( f"Error no file named {TF2_WEIGHTS_NAME}, {SAFE_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." 
) elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename if from_pt: filename = WEIGHTS_NAME elif use_safetensors is not False: filename = SAFE_WEIGHTS_NAME else: filename = TF2_WEIGHTS_NAME try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: # Did not find the safetensors file, let's fallback to TF. # No support for sharded safetensors yet, so we'll raise an error if that's all we find. filename = TF2_WEIGHTS_NAME resolved_archive_file = cached_file( pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs ) if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None and filename == WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision, "proxies": proxies, "token": token, "cache_dir": cache_dir, "local_files_only": local_files_only, } if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): is_sharded = True elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" " load this model from those weights." ) else: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}," f" {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. 
If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" ) if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, _commit_hash=commit_hash, ) safetensors_from_pt = False if filename == SAFE_WEIGHTS_NAME: with safe_open(resolved_archive_file, framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError( f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt" elif filename == SAFE_WEIGHTS_INDEX_NAME: with safe_open(resolved_archive_file[0], framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError( f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt" config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if tf_to_pt_weight_rename is None and hasattr(model, "tf_to_pt_weight_rename"): # TODO Matt: This is a temporary workaround to allow weight renaming, but requires a method # to be defined for each class that requires a rename. 
We can probably just have a class-level # dict and a single top-level method or something and cut down a lot of boilerplate code tf_to_pt_weight_rename = model.tf_to_pt_weight_rename if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model( model, resolved_archive_file, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model.build_in_name_scope() # build the network with dummy inputs else: model.build_in_name_scope() # build the network with dummy inputs if safetensors_from_pt and not is_sharded: from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: # Load from a PyTorch safetensors checkpoint # We load in TF format here because PT weights often need to be transposed, and this is much # faster on GPU. Loading as numpy and transposing on CPU adds several seconds to load times. return load_pytorch_state_dict_in_tf2_model( model, safetensors_archive, tf_inputs=False, # No need to build the model again allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) elif safetensors_from_pt: from .modeling_tf_pytorch_utils import load_sharded_pytorch_safetensors_in_tf2_model return load_sharded_pytorch_safetensors_in_tf2_model( model, resolved_archive_file, tf_inputs=False, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: if is_sharded: for file in resolved_archive_file: os.path.isfile(file), f"Error retrieving files {file}" if filename == SAFE_WEIGHTS_INDEX_NAME: missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights_from_safetensors( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) else: missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) else: # Handles both H5 and safetensors missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. 
" ) if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.warning( f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" " to use it for predictions and inference." ) # If it is a model with generation capabilities, attempt to load the generation config if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) except OSError: logger.info( "Generation config file not found, using a generation config created from the model config." 
) pass if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, } return model, loading_info return model def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, max_shard_size: Optional[Union[int, str]] = "10GB", token: Optional[Union[bool, str]] = None, # (`use_auth_token` is deprecated: we have to keep it here as we don't have **kwargs) use_auth_token: Optional[Union[bool, str]] = None, create_pr: bool = False, **base_model_card_args, ) -> str: """ Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`. Parameters: repo_id (`str`): The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload model"`. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. Examples: ```python from transformers import TFAutoModel model = TFAutoModel.from_pretrained("google-bert/bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". model.push_to_hub("huggingface/my-finetuned-bert") ``` """ if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if "repo_path_or_name" in base_model_card_args: warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead." 
) repo_id = base_model_card_args.pop("repo_path_or_name") # Deprecation warning will be sent after for repo_url and organization repo_url = base_model_card_args.pop("repo_url", None) organization = base_model_card_args.pop("organization", None) if os.path.isdir(repo_id): working_dir = repo_id repo_id = repo_id.split(os.path.sep)[-1] else: working_dir = repo_id.split("/")[-1] repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. self.save_pretrained(work_dir, max_shard_size=max_shard_size) if hasattr(self, "history") and hasattr(self, "create_model_card"): # This is a Keras model and we might be able to fish out its History and make a model card out of it base_model_card_args = { "output_dir": work_dir, "model_name": Path(repo_id).name, } base_model_card_args.update(base_model_card_args) self.create_model_card(**base_model_card_args) self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, ) @classmethod def register_for_auto_class(cls, auto_class="TFAutoModel"): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class
class_definition
47,939
154,641
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
281
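The `train_step` and `test_step` overrides above are what allow these models to be trained with plain Keras `fit()` while the labels travel inside the input dict and no loss is passed to `compile()` (the model's internal loss output is used instead). A minimal sketch of that flow; the checkpoint name, learning rate and toy batch are illustrative assumptions, not taken from this file:

```python
import tensorflow as tf

from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

# Keep the labels inside the input dict: train_step/test_step route them to the right model argument.
batch = dict(tokenizer(["a positive example", "a negative example"], padding=True, return_tensors="tf"))
batch["labels"] = tf.constant([1, 0])

# No loss is passed to compile(), so the model's own loss head is used (the "dummy loss" path above).
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5))
model.fit(batch, epochs=1)
```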
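The `resize_token_embeddings`, `save_pretrained` and `from_pretrained` methods above combine into the usual resize/save/reload round trip. A minimal sketch, assuming an illustrative checkpoint, added tokens and shard size:

```python
from transformers import AutoTokenizer, TFAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFAutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")

# Grow the vocabulary, then resize the input (and tied output) embeddings; config.vocab_size is updated too.
tokenizer.add_tokens(["<new_token_1>", "<new_token_2>"])
model.resize_token_embeddings(len(tokenizer))

# safe_serialization=True writes .safetensors files; max_shard_size controls checkpoint sharding.
model.save_pretrained("./resized_model", safe_serialization=True, max_shard_size="2GB")
reloaded = TFAutoModelForMaskedLM.from_pretrained("./resized_model")
```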
class TFConv1D(keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`):
            The number of output features.
        nx (`int`):
            The number of input features.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        if self.built:
            return
        self.built = True
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x
class_definition
154,644
156,043
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
282
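A short usage sketch for `TFConv1D`: despite the name it acts as a dense projection with a transposed weight layout, mapping the last axis from `nx` input features to `nf` output features. The shapes are illustrative only.

```python
import tensorflow as tf
from transformers.modeling_tf_utils import TFConv1D

layer = TFConv1D(nf=12, nx=4, initializer_range=0.02)

x = tf.random.normal((2, 7, 4))   # (batch, seq_len, nx)
y = layer(x)                      # projected to (batch, seq_len, nf)
print(y.shape)                    # (2, 7, 12)
```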
class TFSharedEmbeddings(keras.layers.Layer): r""" Construct shared token embeddings. The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling. Args: vocab_size (`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (`int`): The size of the embedding vectors. initializer_range (`float`, *optional*): The standard deviation to use when initializing the weights. If no value is provided, it will default to \\(1/\sqrt{hidden\_size}\\). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ # TODO (joao): flagged for delection due to embeddings refactor def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range warnings.warn( "`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.", DeprecationWarning, ) def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (`tf.Tensor`): In embedding mode, should be an int64 tensor with shape `[batch_size, length]`. In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`. mode (`str`, defaults to `"embedding"`): A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`. Raises: ValueError: if `mode` is not valid. Shared weights logic is adapted from [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24). """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size])
class_definition
156,046
160,275
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
283
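The two modes of `TFSharedEmbeddings` implement weight tying: `"embedding"` looks token ids up in the matrix, while `"linear"` reuses the same matrix as the output projection. A minimal sketch (the class is deprecated, so new code should prefer `keras.layers.Embedding` plus an explicit matmul):

```python
import tensorflow as tf
from transformers.modeling_tf_utils import TFSharedEmbeddings

emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)

input_ids = tf.constant([[1, 5, 7]])        # (batch, length)
hidden = emb(input_ids, mode="embedding")   # (1, 3, 16) token vectors
logits = emb(hidden, mode="linear")         # (1, 3, 100), tied to the same weight
```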
class TFSequenceSummary(keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = False activation_string = getattr(config, "summary_activation", None) if activation_string is not None: self.has_activation = True self.activation = get_tf_activation(activation_string) self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = keras.layers.Dropout(config.summary_last_dropout) self.hidden_size = config.hidden_size def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." 
else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims] if cls_index is None: cls_index = tf.fill( hidden_shape[:-2], hidden_shape[-2] - 1 ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = tf.expand_dims(cls_index, axis=-1) # else: # cls_index = cls_index[..., tf.newaxis] # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze( output, axis=len(hidden_shape) - 2 ) # shape of output: (batch, num choices, hidden_size) elif self.summary_type == "attn": raise NotImplementedError if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def build(self, input_shape): if self.built: return self.built = True if getattr(self, "summary", None) is not None: with tf.name_scope("summary"): self.summary.build(self.hidden_size)
class_definition
160,278
166,498
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
null
284
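A sketch of how `TFSequenceSummary` is driven purely by config attributes; here a bare `PretrainedConfig` is given only the `summary_*` fields this layer reads (real model configs such as GPT-2's set them already), and the tensor shapes are made up.

```python
import tensorflow as tf
from transformers import PretrainedConfig
from transformers.modeling_tf_utils import TFSequenceSummary

config = PretrainedConfig(
    hidden_size=32,
    summary_type="mean",           # average all token hidden states
    summary_use_proj=True,         # add a Dense projection on top
    summary_proj_to_labels=False,  # project back to hidden_size, not num_labels
    summary_activation="tanh",
    summary_first_dropout=0.0,
    summary_last_dropout=0.0,
)
summary = TFSequenceSummary(config)

hidden_states = tf.random.normal((2, 7, 32))   # (batch, seq_len, hidden)
pooled = summary(hidden_states)                # (2, 32)
```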
class DebugUnderflowOverflow: """ This debug class helps detect and understand where the model starts getting very large or very small, and more importantly `nan` or `inf` weight and activation elements. There are 2 working modes: 1. Underflow/overflow detection (default) 2. Specific batch absolute min/max tracing without detection Mode 1: Underflow/overflow detection To activate the underflow/overflow detection, initialize the object with the model : ```python debug_overflow = DebugUnderflowOverflow(model) ``` then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event, each frame reporting 1. the fully qualified module name plus the class name whose `forward` was run 2. the absolute min and max value of all elements for each module weights, and the inputs and output For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision : ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow. As you can see it's the previous frames that we need to look into when the numbers start going into very large for fp16 numbers. The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed. By default the last 21 frames are printed. You can change the default to adjust for your needs. For example : ```python debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section. Mode 2. Specific batch absolute min/max tracing without detection The second work mode is per-batch tracing with the underflow/overflow detection feature turned off. Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as : ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed. This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area. 
Early stopping: You can also specify the batch number after which to stop the training, with : ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) ``` This feature is mainly useful in the tracing mode, but you can use it for any mode. **Performance**: As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the training down. Therefore remember to turn it off once the debugging needs have been met. Args: model (`nn.Module`): The model to debug. max_frames_to_save (`int`, *optional*, defaults to 21): How many frames back to record trace_batch_nums(`List[int]`, *optional*, defaults to `[]`): Which batch numbers to trace (turns detection off) abort_after_batch_num (`int``, *optional*): Whether to abort after a certain batch number has finished """ def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None): self.model = model self.trace_batch_nums = trace_batch_nums self.abort_after_batch_num = abort_after_batch_num # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence self.frames = collections.deque([], max_frames_to_save) self.frame = [] self.batch_number = 0 self.total_calls = 0 self.detected_overflow = False self.prefix = " " self.analyse_model() self.register_forward_hook() def save_frame(self, frame=None): if frame is not None: self.expand_frame(frame) self.frames.append("\n".join(self.frame)) self.frame = [] # start a new frame def expand_frame(self, line): self.frame.append(line) def trace_frames(self): print("\n".join(self.frames)) self.frames = [] def reset_saved_frames(self): self.frames = [] def dump_saved_frames(self): print(f"\nDetected inf/nan during batch_number={self.batch_number}") print(f"Last {len(self.frames)} forward frames:") print(f"{'abs min':8} {'abs max':8} metadata") print("\n".join(self.frames)) print("\n\n") self.frames = [] def analyse_model(self): # extract the fully qualified module names, to be able to report at run time. 
e.g.: # encoder.block.2.layer.0.SelfAttention.o # # for shared weights only the first shared module name will be registered self.module_names = {m: name for name, m in self.model.named_modules()} # self.longest_module_name = max(len(v) for v in self.module_names.values()) def analyse_variable(self, var, ctx): if torch.is_tensor(var): self.expand_frame(get_abs_min_max(var, ctx)) if detect_overflow(var, ctx): self.detected_overflow = True elif var is None: self.expand_frame(f"{'None':>17} {ctx}") else: self.expand_frame(f"{'not a tensor':>17} {ctx}") def batch_start_frame(self): self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***") self.expand_frame(f"{'abs min':8} {'abs max':8} metadata") def batch_end_frame(self): self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n") def create_frame(self, module, input, output): self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}") # params for name, p in module.named_parameters(recurse=False): self.analyse_variable(p, name) # inputs if isinstance(input, tuple): for i, x in enumerate(input): self.analyse_variable(x, f"input[{i}]") else: self.analyse_variable(input, "input") # outputs if isinstance(output, tuple): for i, x in enumerate(output): # possibly a tuple of tuples if isinstance(x, tuple): for j, y in enumerate(x): self.analyse_variable(y, f"output[{i}][{j}]") else: self.analyse_variable(x, f"output[{i}]") else: self.analyse_variable(output, "output") self.save_frame() def register_forward_hook(self): self.model.apply(self._register_forward_hook) def _register_forward_hook(self, module): module.register_forward_hook(self.forward_hook) def forward_hook(self, module, input, output): # - input is a tuple of packed inputs (could be non-Tensors) # - output could be a Tensor or a tuple of Tensors and non-Tensors last_frame_of_batch = False trace_mode = True if self.batch_number in self.trace_batch_nums else False if trace_mode: self.reset_saved_frames() if self.total_calls == 0: self.batch_start_frame() self.total_calls += 1 # count batch numbers - the very first forward hook of the batch will be called when the # batch completes - i.e. it gets called very last - we know this batch has finished if module == self.model: self.batch_number += 1 last_frame_of_batch = True self.create_frame(module, input, output) # if last_frame_of_batch: # self.batch_end_frame() if trace_mode: self.trace_frames() if last_frame_of_batch: self.batch_start_frame() if self.detected_overflow and not trace_mode: self.dump_saved_frames() # now we can abort, as it's pointless to continue running raise ValueError( "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. " "Please scroll up above this traceback to see the activation values prior to this event." ) # abort after certain batch if requested to do so if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num: raise ValueError( f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to" f" `abort_after_batch_num={self.abort_after_batch_num}` arg" )
class_definition
774
11,162
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
null
285
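A minimal PyTorch sketch of wiring `DebugUnderflowOverflow` into a training loop; the toy model, data and hyper-parameters are placeholders. Each top-level forward pass counts as one batch for the detector, and it raises a `ValueError` with the saved frames as soon as an `inf`/`nan` is seen.

```python
import torch
from torch import nn
from transformers.debug_utils import DebugUnderflowOverflow

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=21)  # installs forward hooks

optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
loss_fn = nn.CrossEntropyLoss()

for step in range(4):
    x = torch.randn(4, 8)
    y = torch.randint(0, 2, (4,))
    loss = loss_fn(model(x), y)   # detection happens inside this forward call
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```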
class DebugOption(ExplicitEnum):
    UNDERFLOW_OVERFLOW = "underflow_overflow"
    TPU_METRICS_DEBUG = "tpu_metrics_debug"
class_definition
12,784
12,906
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
null
286
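`DebugOption` is an `ExplicitEnum`, so members can be recovered from their string values; this is how string flags such as `"underflow_overflow"` (used, for instance, by the `debug` field of `TrainingArguments`) map onto the enum. A quick sketch:

```python
from transformers.debug_utils import DebugOption

opt = DebugOption("underflow_overflow")
assert opt is DebugOption.UNDERFLOW_OVERFLOW

print([member.value for member in DebugOption])
# ['underflow_overflow', 'tpu_metrics_debug']
```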
class TFBaseModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None
class_definition
804
2,119
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
287
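Like every `ModelOutput` subclass, `TFBaseModelOutput` can be read by attribute, by key, or by position, and `None` fields are skipped when converting to a tuple. A sketch with a dummy tensor:

```python
import tensorflow as tf
from transformers.modeling_tf_outputs import TFBaseModelOutput

out = TFBaseModelOutput(last_hidden_state=tf.zeros((2, 5, 8)))

out.last_hidden_state       # attribute access
out["last_hidden_state"]    # dict-style access
out[0]                      # positional access
out.to_tuple()              # (last_hidden_state,) -- None fields are dropped
```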
class TFBaseModelOutputWithNoAttention(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`tf.Tensor` shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
class_definition
2,133
3,028
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
288
class TFBaseModelOutputWithPooling(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
3,042
5,000
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
289
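As the docstring notes, `pooler_output` is often a weaker sentence representation than pooling the token states yourself; a common alternative is an attention-mask-weighted mean over `last_hidden_state`. A sketch (the BERT checkpoint is just one example of a model whose TF class returns a pooling output):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
model = TFAutoModel.from_pretrained("google-bert/bert-base-cased")

inputs = tokenizer(["a short example"], return_tensors="tf")
outputs = model(**inputs)

cls_pooled = outputs.pooler_output                                 # (batch, hidden)
mask = tf.cast(inputs["attention_mask"], tf.float32)[..., None]    # (batch, seq, 1)
mean_pooled = tf.reduce_sum(outputs.last_hidden_state * mask, axis=1) / tf.reduce_sum(mask, axis=1)
```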
class TFBaseModelOutputWithPoolingAndNoAttention(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
class_definition
5,014
6,146
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
290
class TFBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
class_definition
6,160
9,186
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
291
class TFBaseModelOutputWithPast(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
9,200
11,228
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
292
class TFBaseModelOutputWithCrossAttentions(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
class_definition
11,242
13,094
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
293
class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
class_definition
13,108
15,677
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
294
class TFSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None
class_definition
15,691
19,600
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
295
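A sketch of where `TFSeq2SeqModelOutput` appears: bare TF encoder-decoder models (e.g. `TFT5Model`) return it, so decoder and encoder states are available on the same object. The checkpoint name is only an example.

```python
from transformers import AutoTokenizer, TFAutoModel

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = TFAutoModel.from_pretrained("google-t5/t5-small")   # resolves to TFT5Model

enc = tokenizer("translate English to German: hello", return_tensors="tf")
dec = tokenizer("hallo", return_tensors="tf")

outputs = model(input_ids=enc.input_ids, decoder_input_ids=dec.input_ids)
outputs.last_hidden_state           # decoder states, (batch, tgt_len, hidden)
outputs.encoder_last_hidden_state   # encoder states, (batch, src_len, hidden)
outputs.past_key_values             # decoder key/value cache (with use_cache=True)
```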
class TFCausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
19,614
21,157
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
296
class TFCausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
21,171
23,254
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
297
class TFCausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
class_definition
23,268
25,880
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
298
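A sketch of how the `past_key_values` carried by these causal-LM outputs speeds up incremental decoding: after the first forward pass, only the newly generated token is fed together with the cache. The GPT-2 checkpoint is just an example (its TF LM head model returns an output of this family); exact output class and argument handling can differ between architectures.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("The quick brown", return_tensors="tf")
out = model(**inputs, use_cache=True)

next_id = tf.argmax(out.logits[:, -1, :], axis=-1)[:, None]
# Reuse the cached keys/values instead of re-encoding the whole prefix.
out = model(input_ids=next_id, past_key_values=out.past_key_values, use_cache=True)
```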
class TFMaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
25,894
27,403
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
299
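A sketch of producing a `TFMaskedLMOutput` with a masked-LM head and reading its `logits`; the checkpoint and sentence are only examples.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
model = TFAutoModelForMaskedLM.from_pretrained("google-bert/bert-base-cased")

text = f"The capital of France is {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="tf")
outputs = model(**inputs)        # TFMaskedLMOutput; loss is None without labels

mask_pos = int(tf.where(inputs.input_ids[0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(outputs.logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))
```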