Upload MERaLiONForConditionalGeneration
Files changed:
- config.json (+6, -2)
- generation_config.json (+2, -0)
- modeling_meralion.py (+8, -794)
config.json
CHANGED
@@ -1,7 +1,10 @@
 {
-  "
+  "architectures": [
+    "MERaLiONForConditionalGeneration"
+  ],
   "auto_map": {
-    "AutoConfig": "configuration_meralion.MERaLiONConfig"
+    "AutoConfig": "configuration_meralion.MERaLiONConfig",
+    "AutoModelForSpeechSeq2Seq": "modeling_meralion.MERaLiONForConditionalGeneration"
   },
  "head_dim": 256,
  "hidden_size": 3584,
@@ -163,5 +166,6 @@
    "sliding_window_size": 4096,
    "torch_dtype": "bfloat16"
  },
+ "torch_dtype": "bfloat16",
 "transformers_version": "4.46.3"
 }
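Note: with "architectures" and the "AutoModelForSpeechSeq2Seq" entry registered in auto_map, the checkpoint can be loaded through the Auto classes instead of importing the custom module by hand. A minimal sketch, assuming a placeholder repository id (substitute the actual model repo):

    import torch
    from transformers import AutoModelForSpeechSeq2Seq

    repo_id = "MERaLiON/MERaLiON-AudioLLM"  # placeholder; use the actual repository id

    # trust_remote_code is required because configuration_meralion.py and
    # modeling_meralion.py live in the model repository, not in transformers itself.
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        repo_id,
        torch_dtype=torch.bfloat16,  # matches the top-level "torch_dtype" added above
        trust_remote_code=True,
    )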
generation_config.json
CHANGED
@@ -3,6 +3,8 @@
   "bos_token_id": 2,
   "cache_implementation": "hybrid",
   "eos_token_id": 107,
+  "no_repeat_ngram_size": 6,
   "pad_token_id": 0,
+  "repetition_penalty": 1.05,
   "transformers_version": "4.46.3"
 }
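Note: the two new entries become decoding defaults that generate() picks up automatically from generation_config.json. A short sketch of the effect; model and inputs are placeholders carried over from the loading example above:

    # With the updated generation_config.json, a plain call already applies
    # no_repeat_ngram_size=6 and repetition_penalty=1.05.
    output_ids = model.generate(**inputs, max_new_tokens=256)

    # Either default can still be overridden per call:
    output_ids = model.generate(
        **inputs,
        max_new_tokens=256,
        repetition_penalty=1.0,   # neutral value disables the penalty
        no_repeat_ngram_size=0,   # 0 turns the n-gram blocker off
    )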
modeling_meralion.py
CHANGED
@@ -1,6 +1,5 @@
 """PyTorch MERaLiON AudioLLM model."""
 
-import math
 from dataclasses import dataclass
 from typing import List, Optional, Tuple, Union
 
@@ -8,26 +7,20 @@ import torch
 import torch.utils.checkpoint
 from torch import nn
 
-from transformers
-from transformers.
+from transformers import Gemma2ForCausalLM
+from transformers.models.whisper.modeling_whisper import WhisperEncoder
+from transformers.cache_utils import HybridCache
 from transformers.generation import GenerationMixin
-from transformers.modeling_outputs import ModelOutput
+from transformers.modeling_outputs import ModelOutput
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
-    is_flash_attn_2_available,
-    is_flash_attn_greater_or_equal_2_10,
     logging,
     replace_return_docstrings,
 )
 
-from .configuration_meralion import MERaLiONConfig
-from .modeling_text_decoder import MERaLiONTextForCausalLM
-
-
-if is_flash_attn_2_available():
-    from transformers.modeling_flash_attention_utils import _flash_attention_forward
+from .configuration_meralion import MERaLiONConfig
 
 
 logger = logging.get_logger(__name__)
@@ -35,35 +28,6 @@ logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "MERaLiONConfig"
 
 
-def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor:
-    """Returns sinusoids for positional embedding"""
-    if channels % 2 != 0:
-        raise ValueError(
-            f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels."
-        )
-    log_timescale_increment = math.log(max_timescale) / (channels // 2 - 1)
-    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
-    scaled_time = torch.arange(length).view(-1, 1) * inv_timescales.view(1, -1)
-    return torch.cat([scaled_time.sin(), scaled_time.cos()], dim=1)
-
-
-# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
-def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
-    """
-    Shift input ids one token to the right.
-    """
-    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
-    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
-    shifted_input_ids[:, 0] = decoder_start_token_id
-
-    if pad_token_id is None:
-        raise ValueError("self.model.config.pad_token_id has to be defined.")
-    # replace possible -100 values in labels by `pad_token_id`
-    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
-
-    return shifted_input_ids
-
-
 # Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
 def _prepare_4d_causal_attention_mask_with_cache_position(
     attention_mask: torch.Tensor,
@@ -117,756 +81,6 @@ def _prepare_4d_causal_attention_mask_with_cache_position(
     return causal_mask
 
 
-class MERaLiONSpeechAttention(nn.Module):
-    """Multi-headed attention from 'Attention Is All You Need' paper"""
-
-    def __init__(
-        self,
-        embed_dim: int,
-        num_heads: int,
-        dropout: float = 0.0,
-        is_decoder: bool = False,
-        bias: bool = True,
-        is_causal: bool = False,
-        layer_idx: Optional[int] = None,
-        config: Optional[MERaLiONSpeechConfig] = None,
-    ):
-        super().__init__()
-        self.embed_dim = embed_dim
-        self.num_heads = num_heads
-        self.dropout = dropout
-        self.head_dim = embed_dim // num_heads
-        self.config = config
-
-        if (self.head_dim * num_heads) != self.embed_dim:
-            raise ValueError(
-                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
-                f" and `num_heads`: {num_heads})."
-            )
-        self.scaling = self.head_dim**-0.5
-        self.is_decoder = is_decoder
-        self.is_causal = is_causal
-
-        if layer_idx is None and is_decoder:
-            logger.warning_once(
-                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
-                "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
-                "when creating this class."
-            )
-        self.layer_idx = layer_idx
-
-        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
-        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-
-    # Copied from transformers.models.bart.modeling_bart.BartAttention._shape with BART->speech
-    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
-        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
-    def forward(
-        self,
-        hidden_states: torch.Tensor,
-        key_value_states: Optional[torch.Tensor] = None,
-        past_key_value: Optional[EncoderDecoderCache] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        layer_head_mask: Optional[torch.Tensor] = None,
-        output_attentions: bool = False,
-        cache_position: Optional[torch.LongTensor] = None,
-    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-        """Input shape: Batch x Time x Channel"""
-
-        # if key_value_states are provided this layer is used as a cross-attention layer
-        # for the decoder
-        is_cross_attention = key_value_states is not None
-        bsz, tgt_len, _ = hidden_states.size()
-
-        # get query proj
-        query_states = self._shape(self.q_proj(hidden_states) * self.scaling, tgt_len, bsz)
-
-        if past_key_value is not None:
-            is_updated = past_key_value.is_updated.get(self.layer_idx)
-            if is_cross_attention:
-                # after the first generated id, we can subsequently re-use all key/value_states from cache
-                past_key_value.is_updated[self.layer_idx] = True
-                past_key_value = past_key_value.cross_attention_cache
-            else:
-                past_key_value = past_key_value.self_attention_cache
-
-        # use key_value_states if cross attention
-        current_states = key_value_states if key_value_states is not None else hidden_states
-        if is_cross_attention and past_key_value and is_updated:
-            # reuse k,v, cross_attentions
-            key_states = past_key_value.key_cache[self.layer_idx]
-            value_states = past_key_value.value_cache[self.layer_idx]
-        else:
-            key_states = self._shape(self.k_proj(current_states), -1, bsz)
-            value_states = self._shape(self.v_proj(current_states), -1, bsz)
-            if past_key_value is not None:
-                # save all key/value_states to cache to be re-used for fast auto-regressive generation
-                cache_position = cache_position if not is_cross_attention else None
-                key_states, value_states = past_key_value.update(
-                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
-                )
-
-        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))
-
-        if attention_mask is not None:  # no matter the length, we just slice it
-            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
-            attn_weights = attn_weights + causal_mask
-
-        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
-
-        if layer_head_mask is not None:
-            if layer_head_mask.size() != (self.num_heads,):
-                raise ValueError(
-                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
-                    f" {layer_head_mask.size()}"
-                )
-            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights
-
-        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
-        attn_output = torch.matmul(attn_probs, value_states)
-
-        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
-            raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
-                f" {attn_output.size()}"
-            )
-
-        attn_output = attn_output.transpose(1, 2)
-        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned across GPUs when using tensor-parallelism.
-        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
-
-        attn_output = self.out_proj(attn_output)
-
-        return attn_output, attn_weights, past_key_value
-
-
-class MERaLiONSpeechFlashAttention2(MERaLiONSpeechAttention):
-    """
-    MERaLiONSpeech flash attention module. This module inherits from `MERaLiONSpeechAttention` as the weights of the module stays
-    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
-    flash attention and deal with padding tokens in case the input contains any of them.
-    """
-
-    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
-        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
-        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
-        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
-
-    def forward(
-        self,
-        hidden_states: torch.Tensor,
-        key_value_states: Optional[torch.Tensor] = None,
-        past_key_value: Optional[EncoderDecoderCache] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        layer_head_mask: Optional[torch.Tensor] = None,
-        output_attentions: bool = False,
-        cache_position: Optional[torch.LongTensor] = None,
-    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-        if isinstance(past_key_value, StaticCache):
-            raise ValueError(
-                "The `static` cache implementation is not compatible with `attn_implementation='flash_attention_2'`. "
-                "Use `attn_implementation='sdpa'` in the meantime, and open an issue at https://github.com/huggingface/transformers"
-            )
-        # SpeechFlashAttention2 attention does not support output_attentions
-        if output_attentions:
-            raise ValueError("SpeechFlashAttention2 attention does not support output_attentions")
-
-        # if key_value_states are provided this layer is used as a cross-attention layer
-        # for the decoder
-        is_cross_attention = key_value_states is not None
-        bsz, tgt_len, _ = hidden_states.size()
-
-        # get query proj
-        query_states = torch.reshape(self.q_proj(hidden_states), (bsz, tgt_len, self.num_heads, self.head_dim))
-
-        if past_key_value is not None:
-            is_updated = past_key_value.is_updated.get(self.layer_idx)
-            if is_cross_attention:
-                # after the first generated id, we can subsequently re-use all key/value_states from cache
-                past_key_value.is_updated[self.layer_idx] = True
-                past_key_value = past_key_value.cross_attention_cache
-            else:
-                past_key_value = past_key_value.self_attention_cache
-
-        # use key_value_states if cross attention
-        current_states = key_value_states if key_value_states is not None else hidden_states
-        if is_cross_attention and past_key_value and is_updated:
-            # reuse k,v, cross_attentions
-            key_states = past_key_value.key_cache[self.layer_idx]
-            value_states = past_key_value.value_cache[self.layer_idx]
-        else:
-            key_states = self._shape(self.k_proj(current_states), -1, bsz)
-            value_states = self._shape(self.v_proj(current_states), -1, bsz)
-            if past_key_value is not None:
-                # save all key/value_states to cache to be re-used for fast auto-regressive generation
-                cache_position = cache_position if not is_cross_attention else None
-                key_states, value_states = past_key_value.update(
-                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
-                )
-
-        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]
-        # We would need to refactor the KV cache to be able to avoid many of these transpose/reshape/view.
-        key_states = key_states.transpose(1, 2)
-        value_states = value_states.transpose(1, 2)
-
-        causal_mask = attention_mask
-        if attention_mask is not None:  # no matter the length, we just slice it
-            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
-
-        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
-        # therefore the input hidden states gets silently casted in float32. Hence, we need
-        # cast them back in the correct dtype just to be sure everything works as expected.
-        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
-        # in fp32. (LlamaRMSNorm handles it correctly)
-
-        input_dtype = query_states.dtype
-        if input_dtype == torch.float32:
-            if torch.is_autocast_enabled():
-                target_dtype = torch.get_autocast_gpu_dtype()
-            # Handle the case where the model is quantized
-            elif hasattr(self.config, "_pre_quantization_dtype"):
-                target_dtype = self.config._pre_quantization_dtype
-            else:
-                target_dtype = self.q_proj.weight.dtype
-
-            logger.warning_once(
-                f"The input hidden states seems to be silently casted in float32, this might be related to"
-                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
-                f" {target_dtype}."
-            )
-
-            query_states = query_states.to(target_dtype)
-            key_states = key_states.to(target_dtype)
-            value_states = value_states.to(target_dtype)
-
-        attn_output = _flash_attention_forward(
-            query_states,
-            key_states,
-            value_states,
-            causal_mask,
-            tgt_len,
-            dropout=self.dropout if self.training else 0.0,
-            is_causal=self.is_causal,
-            use_top_left_mask=self._flash_attn_uses_top_left_mask,
-        )
-
-        attn_output = attn_output.reshape(bsz, tgt_len, -1)
-        attn_output = self.out_proj(attn_output)
-
-        if not output_attentions:
-            attn_weights = None
-
-        return attn_output, attn_weights, past_key_value
-
-
-class MERaLiONSpeechSdpaAttention(MERaLiONSpeechAttention):
-    def forward(
-        self,
-        hidden_states: torch.Tensor,
-        key_value_states: Optional[torch.Tensor] = None,
-        past_key_value: Optional[EncoderDecoderCache] = None,
-        attention_mask: Optional[torch.Tensor] = None,
-        layer_head_mask: Optional[torch.Tensor] = None,
-        output_attentions: bool = False,
-        cache_position: Optional[torch.LongTensor] = None,
-    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-        """Input shape: Batch x Time x Channel"""
-        if output_attentions or layer_head_mask is not None:
-            # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
-            logger.warning_once(
-                "MERaLiONSpeechModel is using MERaLiONSpeechSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
-                ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
-            )
-            return super().forward(
-                hidden_states,
-                key_value_states=key_value_states,
-                past_key_value=past_key_value,
-                attention_mask=attention_mask,
-                layer_head_mask=layer_head_mask,
-                output_attentions=output_attentions,
-                cache_position=cache_position,
-            )
-
-        # if key_value_states are provided this layer is used as a cross-attention layer
-        # for the decoder
-        is_cross_attention = key_value_states is not None
-        bsz, tgt_len, _ = hidden_states.size()
-
-        # get query proj
-        query_states = self._shape(self.q_proj(hidden_states), tgt_len, bsz)
-
-        if past_key_value is not None:
-            is_updated = past_key_value.is_updated.get(self.layer_idx)
-            if is_cross_attention:
-                # after the first generated id, we can subsequently re-use all key/value_states from cache
-                past_key_value.is_updated[self.layer_idx] = True
-                past_key_value = past_key_value.cross_attention_cache
-            else:
-                past_key_value = past_key_value.self_attention_cache
-
-        # use key_value_states if cross attention
-        current_states = key_value_states if key_value_states is not None else hidden_states
-        if is_cross_attention and past_key_value and is_updated:
-            # reuse k,v, cross_attentions
-            key_states = past_key_value.key_cache[self.layer_idx]
-            value_states = past_key_value.value_cache[self.layer_idx]
-        else:
-            key_states = self._shape(self.k_proj(current_states), -1, bsz)
-            value_states = self._shape(self.v_proj(current_states), -1, bsz)
-            if past_key_value is not None:
-                # save all key/value_states to cache to be re-used for fast auto-regressive generation
-                cache_position = cache_position if not is_cross_attention else None
-                key_states, value_states = past_key_value.update(
-                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
-                )
-
-        causal_mask = attention_mask
-        if attention_mask is not None:  # no matter the length, we just slice it
-            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
-
-        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
-        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
-        # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
-        is_causal = True if self.is_causal and causal_mask is None and tgt_len > 1 else False
-
-        # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
-        # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
-        attn_output = torch.nn.functional.scaled_dot_product_attention(
-            query_states,
-            key_states,
-            value_states,
-            attn_mask=causal_mask,
-            dropout_p=self.dropout if self.training else 0.0,
-            is_causal=is_causal,
-        )
-
-        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
-            raise ValueError(
-                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
-                f" {attn_output.size()}"
-            )
-
-        attn_output = attn_output.transpose(1, 2)
-
-        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
-        # partitioned across GPUs when using tensor-parallelism.
-        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
-
-        attn_output = self.out_proj(attn_output)
-
-        return attn_output, None, past_key_value
-
-
-MERALION_SPEECH_ATTENTION_CLASSES = {
-    "eager": MERaLiONSpeechAttention,
-    "flash_attention_2": MERaLiONSpeechFlashAttention2,
-    "sdpa": MERaLiONSpeechSdpaAttention,
-}
-
-
-# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech, MBART->WHISPER
-class MERaLiONSpeechEncoderLayer(nn.Module):
-    def __init__(self, config: MERaLiONSpeechConfig):
-        super().__init__()
-        self.embed_dim = config.d_model
-
-        self.self_attn = MERALION_SPEECH_ATTENTION_CLASSES[config._attn_implementation](
-            embed_dim=self.embed_dim,
-            num_heads=config.encoder_attention_heads,
-            dropout=config.attention_dropout,
-            config=config,
-        )
-        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
-        self.dropout = config.dropout
-        self.activation_fn = ACT2FN[config.activation_function]
-        self.activation_dropout = config.activation_dropout
-        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
-        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
-        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
-
-    def forward(
-        self,
-        hidden_states: torch.Tensor,
-        attention_mask: torch.Tensor,
-        layer_head_mask: torch.Tensor,
-        output_attentions: bool = False,
-    ) -> torch.Tensor:
-        """
-        Args:
-            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
-            attention_mask (`torch.FloatTensor`): attention mask of size
-                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
-            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
-                `(encoder_attention_heads,)`.
-            output_attentions (`bool`, *optional*):
-                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
-                returned tensors for more detail.
-        """
-        residual = hidden_states
-        hidden_states = self.self_attn_layer_norm(hidden_states)
-        hidden_states, attn_weights, _ = self.self_attn(
-            hidden_states=hidden_states,
-            attention_mask=attention_mask,
-            layer_head_mask=layer_head_mask,
-            output_attentions=output_attentions,
-        )
-        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
-        hidden_states = residual + hidden_states
-
-        residual = hidden_states
-        hidden_states = self.final_layer_norm(hidden_states)
-        hidden_states = self.activation_fn(self.fc1(hidden_states))
-        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
-        hidden_states = self.fc2(hidden_states)
-        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
-        hidden_states = residual + hidden_states
-
-        if hidden_states.dtype == torch.float16 and (
-            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
-        ):
-            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
-            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
-
-        outputs = (hidden_states,)
-
-        if output_attentions:
-            outputs += (attn_weights,)
-
-        return outputs
-
-
-class MERaLiONSpeechPreTrainedModel(PreTrainedModel):
-    config_class = MERaLiONSpeechConfig
-    base_model_prefix = "model"
-    main_input_name = "input_features"
-    supports_gradient_checkpointing = True
-    _no_split_modules = ["MERaLiONSpeechEncoderLayer", "MERaLiONSpeechDecoderLayer"]
-    _supports_flash_attn_2 = True
-    _supports_sdpa = True
-    _supports_cache_class = True
-    _supports_static_cache = True
-
-    def _init_weights(self, module):
-        std = self.config.init_std
-        if isinstance(module, (nn.Linear, nn.Conv1d)):
-            module.weight.data.normal_(mean=0.0, std=std)
-            if module.bias is not None:
-                module.bias.data.zero_()
-        elif isinstance(module, nn.Embedding):
-            module.weight.data.normal_(mean=0.0, std=std)
-            if module.padding_idx is not None:
-                module.weight.data[module.padding_idx].zero_()
-        elif isinstance(module, MERaLiONSpeechEncoder):
-            with torch.no_grad():
-                embed_positions = module.embed_positions.weight
-                embed_positions.copy_(sinusoids(*embed_positions.shape))
-
-    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
-        """
-        Computes the output length of the convolutional layers
-        """
-        input_lengths = (input_lengths - 1) // 2 + 1
-
-        return input_lengths
-
-
-MERALION_SPEECH_START_DOCSTRING = r"""
-    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
-    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
-    etc.)
-
-    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
-    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
-    and behavior.
-
-    Parameters:
-        config ([`MERaLiONSpeechConfig`]):
-            Model configuration class with all the parameters of the model. Initializing with a config file does not
-            load the weights associated with the model, only the configuration. Check out the
-            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-MERALION_SPEECH_INPUTS_DOCSTRING = r"""
-    Args:
-        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
-            Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
-            loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
-            the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
-            [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
-            tensor of type `torch.FloatTensor`. See [`~SpeechFeatureExtractor.__call__`]
-        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-            Mask to avoid performing *SpecAugment* data augmentation on padding token indices. Mask values selected in
-            `[0, 1]`:
-
-            - 1 for tokens that are **not masked**,
-            - 0 for tokens that are **masked**.
-
-            [What are attention masks?](../glossary#attention-mask)
-        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
-            Indices of decoder input sequence tokens in the vocabulary.
-
-            Indices can be obtained using [`SpeechTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-            [`PreTrainedTokenizer.__call__`] for details.
-
-            [What are decoder input IDs?](../glossary#decoder-input-ids)
-
-            Speech uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
-            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
-            `past_key_values`).
-        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
-            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
-            be used by default.
-
-            If you want to change padding behavior, you should read
-            [`modeling_speech._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
-            paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
-        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
-            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
-
-            - 1 indicates the head is **not masked**,
-            - 0 indicates the head is **masked**.
-
-        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
-            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
-
-            - 1 indicates the head is **not masked**,
-            - 0 indicates the head is **masked**.
-
-        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
-            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
-
-            - 1 indicates the head is **not masked**,
-            - 0 indicates the head is **masked**.
-
-        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
-            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
-            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
-            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
-        past_key_values (`EncoderDecoderCache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
-            Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are
-            four sets of pre-computed hidden-states: key and values states in the self-attention blocks (2) and
-            in the cross-attention blocks (2). The `past_key_values` are returned when `use_cache=True` is passed or
-            when `config.use_cache=True`
-
-            Two formats are allowed:
-            - An [`~cache_utils.EncoderDecoderCache`] instance;
-            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
-              `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
-              `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
-
-            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
-            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
-            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
-        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
-            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
-            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
-            input (see `past_key_values`). This is useful if you want more control over how to convert
-            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
-        use_cache (`bool`, *optional*):
-            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
-            `past_key_values`).
-        output_attentions (`bool`, *optional*):
-            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-            tensors for more detail.
-        output_hidden_states (`bool`, *optional*):
-            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-            more detail.
-        return_dict (`bool`, *optional*):
-            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
-            Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache
-            in the correct position and to infer the complete sequence length.
-"""
-
-MERALION_SPEECH_ENCODER_INPUTS_DOCSTRING = r"""
-    Args:
-        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
-            Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
-            loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
-            the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
-            [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
-            tensor of type `torch.FloatTensor`. See [`~SpeechFeatureExtractor.__call__`]
-        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
-            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
-
-            - 1 indicates the head is **not masked**,
-            - 0 indicates the head is **masked**.
-        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
-            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
-            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
-            hidden-states at the output of the last layer of the encoder.
-        output_attentions (`bool`, *optional*):
-            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-            tensors for more detail.
-        output_hidden_states (`bool`, *optional*):
-            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-            more detail.
-        return_dict (`bool`, *optional*):
-            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-class MERaLiONSpeechEncoder(MERaLiONSpeechPreTrainedModel):
-    """
-    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
-    [`MERaLiONSpeechEncoderLayer`].
-
-    Args:
-        config: MERaLiONSpeechConfig
-    """
-
-    def __init__(self, config: MERaLiONSpeechConfig):
-        super().__init__(config)
-        self.dropout = config.dropout
-        self.layerdrop = config.encoder_layerdrop
-
-        embed_dim = config.d_model
-        self.num_mel_bins = config.num_mel_bins
-        self.padding_idx = config.pad_token_id
-        self.max_source_positions = config.max_source_positions
-        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
-
-        self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
-        self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
-
-        self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)
-        self.embed_positions.requires_grad_(False)
-
-        self.layers = nn.ModuleList([MERaLiONSpeechEncoderLayer(config) for _ in range(config.encoder_layers)])
-        self.layer_norm = nn.LayerNorm(config.d_model)
-
-        self.gradient_checkpointing = False
-        # Initialize weights and apply final processing
-        self.post_init()
-
-    def _freeze_parameters(self):
-        for param in self.parameters():
-            param.requires_grad = False
-        self._requires_grad = False
-
-    def get_input_embeddings(self) -> nn.Module:
-        return self.conv1
-
-    def set_input_embeddings(self, value: nn.Module):
-        self.conv1 = value
-
-    def forward(
-        self,
-        input_features,
-        attention_mask=None,
-        head_mask=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
-        r"""
-        Args:
-            input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
-                Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
-                obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
-                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
-                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
-                and conversion into a tensor of type `torch.FloatTensor`. See [`~SpeechFeatureExtractor.__call__`]
-            attention_mask (`torch.Tensor`)`, *optional*):
-                Speech does not support masking of the `input_features`, this argument is preserved for compatibility,
-                but it is not used. By default the silence in the input log mel spectrogram are ignored.
-            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
-                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
-
-                - 1 indicates the head is **not masked**,
-                - 0 indicates the head is **masked**.
-            output_attentions (`bool`, *optional*):
-                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
-                returned tensors for more detail.
-            output_hidden_states (`bool`, *optional*):
-                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
-                for more detail.
-            return_dict (`bool`, *optional*):
-                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-        """
-
-        expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0]
-        if input_features.shape[-1] != expected_seq_length:
-            raise ValueError(
-                f"Speech expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}."
-            )
-
-        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-        output_hidden_states = (
-            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-        )
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-        inputs_embeds = nn.functional.gelu(self.conv1(input_features))
-        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
-
-        inputs_embeds = inputs_embeds.permute(0, 2, 1)
-        embed_pos = self.embed_positions.weight
-
-        hidden_states = inputs_embeds + embed_pos
-        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
-
-        encoder_states = () if output_hidden_states else None
-        all_attentions = () if output_attentions else None
-
-        # check if head_mask has a correct number of layers specified if desired
-        if head_mask is not None:
-            assert head_mask.size()[0] == (
-                len(self.layers)
-            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
-
-        for idx, encoder_layer in enumerate(self.layers):
-            if output_hidden_states:
-                encoder_states = encoder_states + (hidden_states,)
-            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
-            to_drop = False
-            if self.training:
-                dropout_probability = torch.rand([])
-                if dropout_probability < self.layerdrop:  # skip the layer
-                    to_drop = True
-
-            if to_drop:
-                layer_outputs = (None, None)
-            else:
-                if self.gradient_checkpointing and self.training:
-                    layer_outputs = self._gradient_checkpointing_func(
-                        encoder_layer.__call__,
-                        hidden_states,
-                        None,
-                        (head_mask[idx] if head_mask is not None else None),
-                        output_attentions,
-                    )
-                else:
-                    layer_outputs = encoder_layer(
-                        hidden_states,
-                        None,
-                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
-                        output_attentions=output_attentions,
-                    )
-
-                hidden_states = layer_outputs[0]
-
-            if output_attentions:
-                all_attentions = all_attentions + (layer_outputs[1],)
-
-        hidden_states = self.layer_norm(hidden_states)
-        if output_hidden_states:
-            encoder_states = encoder_states + (hidden_states,)
-
-        if not return_dict:
-            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
-        return BaseModelOutput(
-            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
-        )
-
-
 # copied from Qwen2AudioCausalLMOutputWithPast
 @dataclass
 class MERaLiONOutputWithPast(ModelOutput):
@@ -932,7 +146,7 @@ class MERaLiONPreTrainedModel(PreTrainedModel):
     config_class = MERaLiONConfig
     base_model_prefix = "model"
     supports_gradient_checkpointing = True
-    _no_split_modules = ["
+    _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer", "Gemma2DecoderLayer"]
     _supports_flash_attn_2 = True
     _supports_sdpa = True
    _supports_cache_class = True
@@ -1090,13 +304,13 @@ class MERaLiONForConditionalGeneration(MERaLiONPreTrainedModel, GenerationMixin)
 
         super().__init__(config)
 
-        self.speech_encoder =
+        self.speech_encoder = WhisperEncoder(config.speech_config)
         # self.speech_encoder = AutoModel.from_config(config.audio_config, attn_implementation=config._attn_implementation)
 
         self.ln_speech = nn.LayerNorm(config.speech_config.d_model)
        self.speech_audio_adapter = MERaLiONSpeechAudioAdaper(config)
        self.vocab_size = config.text_config.vocab_size
-        self.text_decoder =
+        self.text_decoder = Gemma2ForCausalLM(config.text_config)
        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
        self._padding_side = "left"  # set it to left by default, user can use setter to change padding_sides
        self.post_init()
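Note: the net effect of this file's changes is that the bespoke MERaLiONSpeech* encoder stack is dropped in favour of the stock WhisperEncoder, and the text side becomes a stock Gemma2ForCausalLM, both built from the sub-configs. The sketch below is illustrative only (it is not the repository's forward() implementation, and the function name encode_speech_sketch is hypothetical); it shows how the modules wired up in __init__ above are expected to compose:

    # Illustrative sketch, assuming a loaded `model` as in the earlier example.
    import torch

    def encode_speech_sketch(model, input_features: torch.Tensor) -> torch.Tensor:
        # WhisperEncoder maps log-mel features to a sequence of hidden states.
        speech_hidden = model.speech_encoder(input_features).last_hidden_state
        # LayerNorm over the encoder width (config.speech_config.d_model).
        speech_hidden = model.ln_speech(speech_hidden)
        # The adapter projects/downsamples the audio sequence into the Gemma2
        # embedding space so it can be spliced into the text prompt.
        return model.speech_audio_adapter(speech_hidden)

    # The resulting audio embeddings replace the audio placeholder positions in the
    # prompt embeddings, and model.text_decoder (Gemma2ForCausalLM) generates from there.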