Fix: resize_token_embeddings's Interface for transformers v4.49.0 (#26)
Commit 9990a98b2725653cba66babb82fbd007ae958b95
Co-authored-by: YEN-FU LIN <[email protected]>
- modeling_florence2.py +4 -4
modeling_florence2.py
@@ -2078,8 +2078,8 @@ class Florence2LanguageForConditionalGeneration(Florence2LanguagePreTrainedModel
     def get_decoder(self):
         return self.model.get_decoder()
 
-    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
-        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, **kwargs) -> nn.Embedding:
+        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, **kwargs)
         self._resize_final_logits_bias(new_embeddings.weight.shape[0])
         return new_embeddings
 
@@ -2587,8 +2587,8 @@ class Florence2ForConditionalGeneration(Florence2PreTrainedModel):
     def get_input_embeddings(self):
         return self.language_model.get_input_embeddings()
 
-    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
-        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, **kwargs) -> nn.Embedding:
+        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, **kwargs)
         # update vocab size
         self.config.text_config.vocab_size = model_embeds.num_embeddings
         self.config.vocab_size = model_embeds.num_embeddings
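For context, the overrides broke because the base-class signature grew: recent transformers releases (v4.49.0 per this fix) pass an extra keyword such as mean_resizing through resize_token_embeddings, and an override with a fixed two-parameter signature raises a TypeError when it arrives. The following is a minimal runnable sketch of the failure mode and the fix; the Base class stands in for transformers' PreTrainedModel, and the mean_resizing keyword is used illustratively, not as the exact library internals.

    # Sketch only: `Base` stands in for PreTrainedModel, whose
    # resize_token_embeddings gained an extra keyword argument.
    from typing import Optional

    import torch.nn as nn


    class Base:
        def resize_token_embeddings(self, new_num_tokens=None, pad_to_multiple_of=None,
                                    mean_resizing=True):
            return nn.Embedding(new_num_tokens or 8, 4)


    class BeforeFix(Base):
        # Pre-fix override: the fixed signature rejects any keyword
        # the base class later introduces.
        def resize_token_embeddings(self, new_num_tokens: int,
                                    pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
            return super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)


    class AfterFix(Base):
        # Post-fix override, mirroring this commit: **kwargs absorbs
        # and forwards new keywords to the parent implementation.
        def resize_token_embeddings(self, new_num_tokens: int,
                                    pad_to_multiple_of: Optional[int] = None,
                                    **kwargs) -> nn.Embedding:
            return super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, **kwargs)


    AfterFix().resize_token_embeddings(10, mean_resizing=False)  # works
    try:
        BeforeFix().resize_token_embeddings(10, mean_resizing=False)
    except TypeError as err:
        print(err)  # unexpected keyword argument 'mean_resizing'

Accepting **kwargs keeps both overrides forward-compatible with any further keywords the base method gains, at the cost of deferring signature validation to the parent call.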