gsaon commited on
Commit
ee8e2c4
·
verified ·
1 Parent(s): 62be1ec

Upload 16 files

Browse files
adapter_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "ibm-granite/granite-speech-3.2-8b",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.0,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 64,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "q_proj",
24
+ "v_proj"
25
+ ],
26
+ "task_type": "CAUSAL_LM",
27
+ "use_dora": false,
28
+ "use_rslora": false
29
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5795a9e05dcf02b82e6e3c934ac68b7e5c1522bdc9c7c86fb7a61f846d5adf7
3
+ size 68178800
added_tokens.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "<|audio|>": 49155,
3
+ "<|end_of_role|>": 49153,
4
+ "<|start_of_role|>": 49152,
5
+ "<|tool_call|>": 49154
6
+ }
configuration_granite_speech.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers.configuration_utils import PretrainedConfig
2
+ from transformers.models.auto import CONFIG_MAPPING, AutoConfig
3
+
4
+
5
+ class GraniteSpeechEncoderConfig(PretrainedConfig):
6
+ model_type = "granite_speech_encoder"
7
+
8
+ def __init__(
9
+ self,
10
+ input_dim=160,
11
+ num_layers=10,
12
+ hidden_dim=1024,
13
+ feedforward_mult=4,
14
+ num_heads=8,
15
+ dim_head=128,
16
+ output_dim=42,
17
+ context_size=200,
18
+ dropout=0.1,
19
+ conv_kernel_size=15,
20
+ conv_expansion_factor=2,
21
+ **kwargs,
22
+ ):
23
+ super().__init__(**kwargs)
24
+ self.input_dim = input_dim
25
+ self.num_layers = num_layers
26
+ self.hidden_dim = hidden_dim
27
+ self.feedforward_mult = feedforward_mult
28
+ self.num_heads = num_heads
29
+ self.dim_head = dim_head
30
+ self.output_dim = output_dim
31
+ self.context_size = context_size
32
+ self.dropout = dropout
33
+ self.conv_kernel_size = conv_kernel_size
34
+ self.conv_expansion_factor = conv_expansion_factor
35
+
36
+
37
+ ## adapted from transformers.models.blip.configuration_blip_2.Blip2VisionConfig
38
+ class GraniteSpeechProjectorConfig(PretrainedConfig):
39
+ model_type = "granite_speech_qformer"
40
+
41
+ def __init__(
42
+ self,
43
+ llm_dim=4096,
44
+ downsample_rate=5,
45
+ window_size=15,
46
+ hidden_size=1024,
47
+ num_attention_heads=16,
48
+ intermediate_size=4096,
49
+ num_hidden_layers=2,
50
+ encoder_hidden_size=1024,
51
+ cross_attention_frequency=1,
52
+ max_position_embeddings=2048,
53
+ hidden_act="gelu",
54
+ hidden_dropout_prob=0.1,
55
+ attention_probs_dropout_prob=0.1,
56
+ initializer_range=0.02,
57
+ layer_norm_eps=1e-12,
58
+ pad_token_id=0,
59
+ position_embedding_type="absolute",
60
+ use_qformer_text_input=False,
61
+ **kwargs,
62
+ ):
63
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
64
+ self.hidden_size = hidden_size
65
+ self.num_hidden_layers = num_hidden_layers
66
+ self.num_attention_heads = num_attention_heads
67
+ self.hidden_act = hidden_act
68
+ self.intermediate_size = intermediate_size
69
+ self.hidden_dropout_prob = hidden_dropout_prob
70
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
71
+ self.max_position_embeddings = max_position_embeddings
72
+ self.initializer_range = initializer_range
73
+ self.layer_norm_eps = layer_norm_eps
74
+ self.position_embedding_type = position_embedding_type
75
+ self.cross_attention_frequency = cross_attention_frequency
76
+ self.encoder_hidden_size = encoder_hidden_size
77
+ self.use_qformer_text_input = use_qformer_text_input
78
+ self.downsample_rate = downsample_rate
79
+ self.window_size = window_size
80
+ self.llm_dim = llm_dim
81
+
82
+
83
+ class GraniteSpeechConfig(PretrainedConfig):
84
+ model_type = "granite_speech"
85
+ sub_configs = {
86
+ "text_config": AutoConfig,
87
+ "encoder_config": GraniteSpeechEncoderConfig,
88
+ "projector_config": GraniteSpeechProjectorConfig,
89
+ }
90
+
91
+ def __init__(
92
+ self,
93
+ encoder_config=None,
94
+ text_config=None,
95
+ projector_config=None,
96
+ audio_token_index=49155,
97
+ initializer_range=0.02,
98
+ has_lora_adapter=True,
99
+ **kwargs,
100
+ ):
101
+ if isinstance(text_config, dict):
102
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "granite"
103
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
104
+ elif text_config is None:
105
+ text_config = CONFIG_MAPPING["granite"]()
106
+
107
+ if isinstance(projector_config, dict):
108
+ # TODO - In the future, we should make this generic.
109
+ projector_config = GraniteSpeechProjectorConfig(**projector_config)
110
+ elif projector_config is None:
111
+ projector_config = GraniteSpeechProjectorConfig()
112
+
113
+ if not isinstance(encoder_config, GraniteSpeechEncoderConfig):
114
+ encoder_config = {} if encoder_config is None else encoder_config
115
+ encoder_config = GraniteSpeechEncoderConfig(**encoder_config)
116
+
117
+ self.text_config = text_config
118
+ self.encoder_config = encoder_config
119
+ self.projector_config = projector_config
120
+ self.audio_token_index = audio_token_index
121
+ self.initializer_range = initializer_range
122
+ self.has_lora_adapter = has_lora_adapter
123
+ super().__init__(**kwargs)
124
+
125
+
126
+ __all__ = ["GraniteSpeechEncoderConfig", "GraniteSpeechProjectorConfig", "GraniteSpeechConfig"]
feature_extraction_granite_speech.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2025 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Speech Granite
17
+ """
18
+
19
+ import math
20
+ from typing import List, Optional
21
+
22
+ from transformers.feature_extraction_utils import BatchFeature, FeatureExtractionMixin
23
+ from transformers.utils import is_torch_available, is_torchaudio_available, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ if is_torch_available():
29
+ import torch
30
+
31
+ if is_torchaudio_available():
32
+ import torchaudio
33
+
34
+
35
+ class GraniteSpeechFeatureExtractor(FeatureExtractionMixin):
36
+ model_input_names = ["input_features"]
37
+
38
+ def __init__(
39
+ self,
40
+ sampling_rate=16000,
41
+ n_fft=512,
42
+ win_length=400,
43
+ hop_length=160,
44
+ n_mels=80,
45
+ projector_window_size=15,
46
+ projector_downsample_rate=5,
47
+ **kwargs,
48
+ ):
49
+ super().__init__(**kwargs)
50
+ self.melspec_kwargs = {
51
+ "sample_rate": sampling_rate,
52
+ "n_fft": n_fft,
53
+ "win_length": win_length,
54
+ "hop_length": hop_length,
55
+ "n_mels": n_mels,
56
+ }
57
+ # HACK - for now, lazily initialize the mel spectrogram transform;
58
+ # the feature extractor mixin explodes otherwise because
59
+ # it tries to log the feature extractor, and the melspectrogram
60
+ # transform isn't json serializable...
61
+ self.melspec = None
62
+ self.projector_window_size = projector_window_size
63
+ self.projector_downsample_rate = projector_downsample_rate
64
+
65
+ def _ensure_melspec_transform_is_initialized(self):
66
+ if self.melspec is None:
67
+ self.melspec = torchaudio.transforms.MelSpectrogram(**self.melspec_kwargs)
68
+
69
+ def __call__(
70
+ self,
71
+ x: torch.Tensor,
72
+ device: Optional[str] = "cpu",
73
+ ) -> BatchFeature:
74
+ # TODO there is probably a better way to do both of these things...
75
+ self._ensure_melspec_transform_is_initialized()
76
+ if device is not None:
77
+ melspec = self.melspec.to(device)
78
+ x = x.to(device)
79
+ else:
80
+ melspec = self.melspec
81
+
82
+ B, _ = x.shape
83
+ with torch.no_grad():
84
+ mel = melspec(x.float())
85
+ logmel = mel.transpose(-1, -2).clip_(min=1e-10).log10_()
86
+ mx = logmel.amax(dim=(-2, -1), keepdim=True)
87
+ logmel = torch.maximum(logmel, mx - 8.0).div_(4).add_(1)
88
+ if logmel.shape[1] % 2 == 1:
89
+ logmel = logmel[:, :-1] # remove last frame if odd
90
+ x = logmel.reshape(B, -1, 2 * logmel.shape[-1]) # stacking and skipping by 2
91
+
92
+ if x.device != "cpu":
93
+ return x.detach().cpu()
94
+ return x
95
+
96
+ def _get_num_audio_features(self, audio_lengths: List[int]) -> List[int]:
97
+ """
98
+ Gets the (variable length) variable length number of features
99
+ (i.e., projector output) for the sequences being considered.
100
+ """
101
+ hop_length = self.melspec_kwargs["hop_length"]
102
+ effective_window_size = self.projector_window_size // self.projector_downsample_rate
103
+
104
+ projector_lengths = []
105
+ for raw_length in audio_lengths:
106
+ # mel sequence length computation
107
+ mel_length = raw_length // hop_length + 1
108
+ # encoder frame takes two mel features
109
+ encoder_length = mel_length // 2
110
+ nblocks = math.ceil(encoder_length / self.projector_window_size)
111
+ # projector output length
112
+ projector_length = nblocks * effective_window_size
113
+ projector_lengths.append(projector_length)
114
+
115
+ return projector_lengths
116
+
117
+
118
+ __all__ = ["GraniteSpeechFeatureExtractor"]
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 0,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.50.0.dev0"
7
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
@@ -0,0 +1,762 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 16966988452
4
+ },
5
+ "weight_map": {
6
+ "encoder.out.bias": "model-00009-of-00009.safetensors",
7
+ "encoder.out.weight": "model-00009-of-00009.safetensors",
8
+ "encoder.out_mid.bias": "model-00009-of-00009.safetensors",
9
+ "encoder.out_mid.weight": "model-00009-of-00009.safetensors",
10
+ "encoder.rnn_tr.0.bias": "model-00009-of-00009.safetensors",
11
+ "encoder.rnn_tr.0.weight": "model-00009-of-00009.safetensors",
12
+ "encoder.rnn_tr.1.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
13
+ "encoder.rnn_tr.1.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
14
+ "encoder.rnn_tr.1.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
15
+ "encoder.rnn_tr.1.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
16
+ "encoder.rnn_tr.1.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
17
+ "encoder.rnn_tr.1.attn.norm.bias": "model-00009-of-00009.safetensors",
18
+ "encoder.rnn_tr.1.attn.norm.weight": "model-00009-of-00009.safetensors",
19
+ "encoder.rnn_tr.1.conv.net.0.bias": "model-00009-of-00009.safetensors",
20
+ "encoder.rnn_tr.1.conv.net.0.weight": "model-00009-of-00009.safetensors",
21
+ "encoder.rnn_tr.1.conv.net.2.bias": "model-00009-of-00009.safetensors",
22
+ "encoder.rnn_tr.1.conv.net.2.weight": "model-00009-of-00009.safetensors",
23
+ "encoder.rnn_tr.1.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
24
+ "encoder.rnn_tr.1.conv.net.5.bias": "model-00009-of-00009.safetensors",
25
+ "encoder.rnn_tr.1.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
26
+ "encoder.rnn_tr.1.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
27
+ "encoder.rnn_tr.1.conv.net.5.running_var": "model-00009-of-00009.safetensors",
28
+ "encoder.rnn_tr.1.conv.net.5.weight": "model-00009-of-00009.safetensors",
29
+ "encoder.rnn_tr.1.conv.net.7.bias": "model-00009-of-00009.safetensors",
30
+ "encoder.rnn_tr.1.conv.net.7.weight": "model-00009-of-00009.safetensors",
31
+ "encoder.rnn_tr.1.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
32
+ "encoder.rnn_tr.1.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
33
+ "encoder.rnn_tr.1.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
34
+ "encoder.rnn_tr.1.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
35
+ "encoder.rnn_tr.1.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
36
+ "encoder.rnn_tr.1.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
37
+ "encoder.rnn_tr.1.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
38
+ "encoder.rnn_tr.1.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
39
+ "encoder.rnn_tr.1.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
40
+ "encoder.rnn_tr.1.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
41
+ "encoder.rnn_tr.1.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
42
+ "encoder.rnn_tr.1.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
43
+ "encoder.rnn_tr.1.post_norm.bias": "model-00009-of-00009.safetensors",
44
+ "encoder.rnn_tr.1.post_norm.weight": "model-00009-of-00009.safetensors",
45
+ "encoder.rnn_tr.10.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
46
+ "encoder.rnn_tr.10.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
47
+ "encoder.rnn_tr.10.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
48
+ "encoder.rnn_tr.10.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
49
+ "encoder.rnn_tr.10.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
50
+ "encoder.rnn_tr.10.attn.norm.bias": "model-00009-of-00009.safetensors",
51
+ "encoder.rnn_tr.10.attn.norm.weight": "model-00009-of-00009.safetensors",
52
+ "encoder.rnn_tr.10.conv.net.0.bias": "model-00009-of-00009.safetensors",
53
+ "encoder.rnn_tr.10.conv.net.0.weight": "model-00009-of-00009.safetensors",
54
+ "encoder.rnn_tr.10.conv.net.2.bias": "model-00009-of-00009.safetensors",
55
+ "encoder.rnn_tr.10.conv.net.2.weight": "model-00009-of-00009.safetensors",
56
+ "encoder.rnn_tr.10.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
57
+ "encoder.rnn_tr.10.conv.net.5.bias": "model-00009-of-00009.safetensors",
58
+ "encoder.rnn_tr.10.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
59
+ "encoder.rnn_tr.10.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
60
+ "encoder.rnn_tr.10.conv.net.5.running_var": "model-00009-of-00009.safetensors",
61
+ "encoder.rnn_tr.10.conv.net.5.weight": "model-00009-of-00009.safetensors",
62
+ "encoder.rnn_tr.10.conv.net.7.bias": "model-00009-of-00009.safetensors",
63
+ "encoder.rnn_tr.10.conv.net.7.weight": "model-00009-of-00009.safetensors",
64
+ "encoder.rnn_tr.10.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
65
+ "encoder.rnn_tr.10.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
66
+ "encoder.rnn_tr.10.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
67
+ "encoder.rnn_tr.10.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
68
+ "encoder.rnn_tr.10.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
69
+ "encoder.rnn_tr.10.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
70
+ "encoder.rnn_tr.10.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
71
+ "encoder.rnn_tr.10.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
72
+ "encoder.rnn_tr.10.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
73
+ "encoder.rnn_tr.10.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
74
+ "encoder.rnn_tr.10.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
75
+ "encoder.rnn_tr.10.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
76
+ "encoder.rnn_tr.10.post_norm.bias": "model-00009-of-00009.safetensors",
77
+ "encoder.rnn_tr.10.post_norm.weight": "model-00009-of-00009.safetensors",
78
+ "encoder.rnn_tr.2.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
79
+ "encoder.rnn_tr.2.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
80
+ "encoder.rnn_tr.2.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
81
+ "encoder.rnn_tr.2.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
82
+ "encoder.rnn_tr.2.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
83
+ "encoder.rnn_tr.2.attn.norm.bias": "model-00009-of-00009.safetensors",
84
+ "encoder.rnn_tr.2.attn.norm.weight": "model-00009-of-00009.safetensors",
85
+ "encoder.rnn_tr.2.conv.net.0.bias": "model-00009-of-00009.safetensors",
86
+ "encoder.rnn_tr.2.conv.net.0.weight": "model-00009-of-00009.safetensors",
87
+ "encoder.rnn_tr.2.conv.net.2.bias": "model-00009-of-00009.safetensors",
88
+ "encoder.rnn_tr.2.conv.net.2.weight": "model-00009-of-00009.safetensors",
89
+ "encoder.rnn_tr.2.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
90
+ "encoder.rnn_tr.2.conv.net.5.bias": "model-00009-of-00009.safetensors",
91
+ "encoder.rnn_tr.2.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
92
+ "encoder.rnn_tr.2.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
93
+ "encoder.rnn_tr.2.conv.net.5.running_var": "model-00009-of-00009.safetensors",
94
+ "encoder.rnn_tr.2.conv.net.5.weight": "model-00009-of-00009.safetensors",
95
+ "encoder.rnn_tr.2.conv.net.7.bias": "model-00009-of-00009.safetensors",
96
+ "encoder.rnn_tr.2.conv.net.7.weight": "model-00009-of-00009.safetensors",
97
+ "encoder.rnn_tr.2.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
98
+ "encoder.rnn_tr.2.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
99
+ "encoder.rnn_tr.2.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
100
+ "encoder.rnn_tr.2.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
101
+ "encoder.rnn_tr.2.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
102
+ "encoder.rnn_tr.2.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
103
+ "encoder.rnn_tr.2.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
104
+ "encoder.rnn_tr.2.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
105
+ "encoder.rnn_tr.2.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
106
+ "encoder.rnn_tr.2.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
107
+ "encoder.rnn_tr.2.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
108
+ "encoder.rnn_tr.2.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
109
+ "encoder.rnn_tr.2.post_norm.bias": "model-00009-of-00009.safetensors",
110
+ "encoder.rnn_tr.2.post_norm.weight": "model-00009-of-00009.safetensors",
111
+ "encoder.rnn_tr.3.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
112
+ "encoder.rnn_tr.3.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
113
+ "encoder.rnn_tr.3.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
114
+ "encoder.rnn_tr.3.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
115
+ "encoder.rnn_tr.3.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
116
+ "encoder.rnn_tr.3.attn.norm.bias": "model-00009-of-00009.safetensors",
117
+ "encoder.rnn_tr.3.attn.norm.weight": "model-00009-of-00009.safetensors",
118
+ "encoder.rnn_tr.3.conv.net.0.bias": "model-00009-of-00009.safetensors",
119
+ "encoder.rnn_tr.3.conv.net.0.weight": "model-00009-of-00009.safetensors",
120
+ "encoder.rnn_tr.3.conv.net.2.bias": "model-00009-of-00009.safetensors",
121
+ "encoder.rnn_tr.3.conv.net.2.weight": "model-00009-of-00009.safetensors",
122
+ "encoder.rnn_tr.3.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
123
+ "encoder.rnn_tr.3.conv.net.5.bias": "model-00009-of-00009.safetensors",
124
+ "encoder.rnn_tr.3.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
125
+ "encoder.rnn_tr.3.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
126
+ "encoder.rnn_tr.3.conv.net.5.running_var": "model-00009-of-00009.safetensors",
127
+ "encoder.rnn_tr.3.conv.net.5.weight": "model-00009-of-00009.safetensors",
128
+ "encoder.rnn_tr.3.conv.net.7.bias": "model-00009-of-00009.safetensors",
129
+ "encoder.rnn_tr.3.conv.net.7.weight": "model-00009-of-00009.safetensors",
130
+ "encoder.rnn_tr.3.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
131
+ "encoder.rnn_tr.3.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
132
+ "encoder.rnn_tr.3.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
133
+ "encoder.rnn_tr.3.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
134
+ "encoder.rnn_tr.3.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
135
+ "encoder.rnn_tr.3.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
136
+ "encoder.rnn_tr.3.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
137
+ "encoder.rnn_tr.3.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
138
+ "encoder.rnn_tr.3.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
139
+ "encoder.rnn_tr.3.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
140
+ "encoder.rnn_tr.3.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
141
+ "encoder.rnn_tr.3.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
142
+ "encoder.rnn_tr.3.post_norm.bias": "model-00009-of-00009.safetensors",
143
+ "encoder.rnn_tr.3.post_norm.weight": "model-00009-of-00009.safetensors",
144
+ "encoder.rnn_tr.4.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
145
+ "encoder.rnn_tr.4.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
146
+ "encoder.rnn_tr.4.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
147
+ "encoder.rnn_tr.4.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
148
+ "encoder.rnn_tr.4.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
149
+ "encoder.rnn_tr.4.attn.norm.bias": "model-00009-of-00009.safetensors",
150
+ "encoder.rnn_tr.4.attn.norm.weight": "model-00009-of-00009.safetensors",
151
+ "encoder.rnn_tr.4.conv.net.0.bias": "model-00009-of-00009.safetensors",
152
+ "encoder.rnn_tr.4.conv.net.0.weight": "model-00009-of-00009.safetensors",
153
+ "encoder.rnn_tr.4.conv.net.2.bias": "model-00009-of-00009.safetensors",
154
+ "encoder.rnn_tr.4.conv.net.2.weight": "model-00009-of-00009.safetensors",
155
+ "encoder.rnn_tr.4.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
156
+ "encoder.rnn_tr.4.conv.net.5.bias": "model-00009-of-00009.safetensors",
157
+ "encoder.rnn_tr.4.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
158
+ "encoder.rnn_tr.4.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
159
+ "encoder.rnn_tr.4.conv.net.5.running_var": "model-00009-of-00009.safetensors",
160
+ "encoder.rnn_tr.4.conv.net.5.weight": "model-00009-of-00009.safetensors",
161
+ "encoder.rnn_tr.4.conv.net.7.bias": "model-00009-of-00009.safetensors",
162
+ "encoder.rnn_tr.4.conv.net.7.weight": "model-00009-of-00009.safetensors",
163
+ "encoder.rnn_tr.4.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
164
+ "encoder.rnn_tr.4.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
165
+ "encoder.rnn_tr.4.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
166
+ "encoder.rnn_tr.4.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
167
+ "encoder.rnn_tr.4.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
168
+ "encoder.rnn_tr.4.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
169
+ "encoder.rnn_tr.4.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
170
+ "encoder.rnn_tr.4.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
171
+ "encoder.rnn_tr.4.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
172
+ "encoder.rnn_tr.4.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
173
+ "encoder.rnn_tr.4.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
174
+ "encoder.rnn_tr.4.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
175
+ "encoder.rnn_tr.4.post_norm.bias": "model-00009-of-00009.safetensors",
176
+ "encoder.rnn_tr.4.post_norm.weight": "model-00009-of-00009.safetensors",
177
+ "encoder.rnn_tr.5.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
178
+ "encoder.rnn_tr.5.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
179
+ "encoder.rnn_tr.5.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
180
+ "encoder.rnn_tr.5.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
181
+ "encoder.rnn_tr.5.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
182
+ "encoder.rnn_tr.5.attn.norm.bias": "model-00009-of-00009.safetensors",
183
+ "encoder.rnn_tr.5.attn.norm.weight": "model-00009-of-00009.safetensors",
184
+ "encoder.rnn_tr.5.conv.net.0.bias": "model-00009-of-00009.safetensors",
185
+ "encoder.rnn_tr.5.conv.net.0.weight": "model-00009-of-00009.safetensors",
186
+ "encoder.rnn_tr.5.conv.net.2.bias": "model-00009-of-00009.safetensors",
187
+ "encoder.rnn_tr.5.conv.net.2.weight": "model-00009-of-00009.safetensors",
188
+ "encoder.rnn_tr.5.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
189
+ "encoder.rnn_tr.5.conv.net.5.bias": "model-00009-of-00009.safetensors",
190
+ "encoder.rnn_tr.5.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
191
+ "encoder.rnn_tr.5.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
192
+ "encoder.rnn_tr.5.conv.net.5.running_var": "model-00009-of-00009.safetensors",
193
+ "encoder.rnn_tr.5.conv.net.5.weight": "model-00009-of-00009.safetensors",
194
+ "encoder.rnn_tr.5.conv.net.7.bias": "model-00009-of-00009.safetensors",
195
+ "encoder.rnn_tr.5.conv.net.7.weight": "model-00009-of-00009.safetensors",
196
+ "encoder.rnn_tr.5.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
197
+ "encoder.rnn_tr.5.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
198
+ "encoder.rnn_tr.5.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
199
+ "encoder.rnn_tr.5.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
200
+ "encoder.rnn_tr.5.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
201
+ "encoder.rnn_tr.5.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
202
+ "encoder.rnn_tr.5.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
203
+ "encoder.rnn_tr.5.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
204
+ "encoder.rnn_tr.5.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
205
+ "encoder.rnn_tr.5.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
206
+ "encoder.rnn_tr.5.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
207
+ "encoder.rnn_tr.5.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
208
+ "encoder.rnn_tr.5.post_norm.bias": "model-00009-of-00009.safetensors",
209
+ "encoder.rnn_tr.5.post_norm.weight": "model-00009-of-00009.safetensors",
210
+ "encoder.rnn_tr.6.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
211
+ "encoder.rnn_tr.6.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
212
+ "encoder.rnn_tr.6.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
213
+ "encoder.rnn_tr.6.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
214
+ "encoder.rnn_tr.6.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
215
+ "encoder.rnn_tr.6.attn.norm.bias": "model-00009-of-00009.safetensors",
216
+ "encoder.rnn_tr.6.attn.norm.weight": "model-00009-of-00009.safetensors",
217
+ "encoder.rnn_tr.6.conv.net.0.bias": "model-00009-of-00009.safetensors",
218
+ "encoder.rnn_tr.6.conv.net.0.weight": "model-00009-of-00009.safetensors",
219
+ "encoder.rnn_tr.6.conv.net.2.bias": "model-00009-of-00009.safetensors",
220
+ "encoder.rnn_tr.6.conv.net.2.weight": "model-00009-of-00009.safetensors",
221
+ "encoder.rnn_tr.6.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
222
+ "encoder.rnn_tr.6.conv.net.5.bias": "model-00009-of-00009.safetensors",
223
+ "encoder.rnn_tr.6.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
224
+ "encoder.rnn_tr.6.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
225
+ "encoder.rnn_tr.6.conv.net.5.running_var": "model-00009-of-00009.safetensors",
226
+ "encoder.rnn_tr.6.conv.net.5.weight": "model-00009-of-00009.safetensors",
227
+ "encoder.rnn_tr.6.conv.net.7.bias": "model-00009-of-00009.safetensors",
228
+ "encoder.rnn_tr.6.conv.net.7.weight": "model-00009-of-00009.safetensors",
229
+ "encoder.rnn_tr.6.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
230
+ "encoder.rnn_tr.6.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
231
+ "encoder.rnn_tr.6.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
232
+ "encoder.rnn_tr.6.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
233
+ "encoder.rnn_tr.6.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
234
+ "encoder.rnn_tr.6.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
235
+ "encoder.rnn_tr.6.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
236
+ "encoder.rnn_tr.6.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
237
+ "encoder.rnn_tr.6.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
238
+ "encoder.rnn_tr.6.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
239
+ "encoder.rnn_tr.6.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
240
+ "encoder.rnn_tr.6.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
241
+ "encoder.rnn_tr.6.post_norm.bias": "model-00009-of-00009.safetensors",
242
+ "encoder.rnn_tr.6.post_norm.weight": "model-00009-of-00009.safetensors",
243
+ "encoder.rnn_tr.7.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
244
+ "encoder.rnn_tr.7.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
245
+ "encoder.rnn_tr.7.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
246
+ "encoder.rnn_tr.7.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
247
+ "encoder.rnn_tr.7.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
248
+ "encoder.rnn_tr.7.attn.norm.bias": "model-00009-of-00009.safetensors",
249
+ "encoder.rnn_tr.7.attn.norm.weight": "model-00009-of-00009.safetensors",
250
+ "encoder.rnn_tr.7.conv.net.0.bias": "model-00009-of-00009.safetensors",
251
+ "encoder.rnn_tr.7.conv.net.0.weight": "model-00009-of-00009.safetensors",
252
+ "encoder.rnn_tr.7.conv.net.2.bias": "model-00009-of-00009.safetensors",
253
+ "encoder.rnn_tr.7.conv.net.2.weight": "model-00009-of-00009.safetensors",
254
+ "encoder.rnn_tr.7.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
255
+ "encoder.rnn_tr.7.conv.net.5.bias": "model-00009-of-00009.safetensors",
256
+ "encoder.rnn_tr.7.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
257
+ "encoder.rnn_tr.7.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
258
+ "encoder.rnn_tr.7.conv.net.5.running_var": "model-00009-of-00009.safetensors",
259
+ "encoder.rnn_tr.7.conv.net.5.weight": "model-00009-of-00009.safetensors",
260
+ "encoder.rnn_tr.7.conv.net.7.bias": "model-00009-of-00009.safetensors",
261
+ "encoder.rnn_tr.7.conv.net.7.weight": "model-00009-of-00009.safetensors",
262
+ "encoder.rnn_tr.7.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
263
+ "encoder.rnn_tr.7.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
264
+ "encoder.rnn_tr.7.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
265
+ "encoder.rnn_tr.7.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
266
+ "encoder.rnn_tr.7.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
267
+ "encoder.rnn_tr.7.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
268
+ "encoder.rnn_tr.7.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
269
+ "encoder.rnn_tr.7.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
270
+ "encoder.rnn_tr.7.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
271
+ "encoder.rnn_tr.7.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
272
+ "encoder.rnn_tr.7.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
273
+ "encoder.rnn_tr.7.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
274
+ "encoder.rnn_tr.7.post_norm.bias": "model-00009-of-00009.safetensors",
275
+ "encoder.rnn_tr.7.post_norm.weight": "model-00009-of-00009.safetensors",
276
+ "encoder.rnn_tr.8.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
277
+ "encoder.rnn_tr.8.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
278
+ "encoder.rnn_tr.8.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
279
+ "encoder.rnn_tr.8.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
280
+ "encoder.rnn_tr.8.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
281
+ "encoder.rnn_tr.8.attn.norm.bias": "model-00009-of-00009.safetensors",
282
+ "encoder.rnn_tr.8.attn.norm.weight": "model-00009-of-00009.safetensors",
283
+ "encoder.rnn_tr.8.conv.net.0.bias": "model-00009-of-00009.safetensors",
284
+ "encoder.rnn_tr.8.conv.net.0.weight": "model-00009-of-00009.safetensors",
285
+ "encoder.rnn_tr.8.conv.net.2.bias": "model-00009-of-00009.safetensors",
286
+ "encoder.rnn_tr.8.conv.net.2.weight": "model-00009-of-00009.safetensors",
287
+ "encoder.rnn_tr.8.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
288
+ "encoder.rnn_tr.8.conv.net.5.bias": "model-00009-of-00009.safetensors",
289
+ "encoder.rnn_tr.8.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
290
+ "encoder.rnn_tr.8.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
291
+ "encoder.rnn_tr.8.conv.net.5.running_var": "model-00009-of-00009.safetensors",
292
+ "encoder.rnn_tr.8.conv.net.5.weight": "model-00009-of-00009.safetensors",
293
+ "encoder.rnn_tr.8.conv.net.7.bias": "model-00009-of-00009.safetensors",
294
+ "encoder.rnn_tr.8.conv.net.7.weight": "model-00009-of-00009.safetensors",
295
+ "encoder.rnn_tr.8.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
296
+ "encoder.rnn_tr.8.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
297
+ "encoder.rnn_tr.8.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
298
+ "encoder.rnn_tr.8.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
299
+ "encoder.rnn_tr.8.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
300
+ "encoder.rnn_tr.8.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
301
+ "encoder.rnn_tr.8.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
302
+ "encoder.rnn_tr.8.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
303
+ "encoder.rnn_tr.8.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
304
+ "encoder.rnn_tr.8.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
305
+ "encoder.rnn_tr.8.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
306
+ "encoder.rnn_tr.8.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
307
+ "encoder.rnn_tr.8.post_norm.bias": "model-00009-of-00009.safetensors",
308
+ "encoder.rnn_tr.8.post_norm.weight": "model-00009-of-00009.safetensors",
309
+ "encoder.rnn_tr.9.attn.fn.rel_pos_emb.weight": "model-00009-of-00009.safetensors",
310
+ "encoder.rnn_tr.9.attn.fn.to_kv.weight": "model-00009-of-00009.safetensors",
311
+ "encoder.rnn_tr.9.attn.fn.to_out.bias": "model-00009-of-00009.safetensors",
312
+ "encoder.rnn_tr.9.attn.fn.to_out.weight": "model-00009-of-00009.safetensors",
313
+ "encoder.rnn_tr.9.attn.fn.to_q.weight": "model-00009-of-00009.safetensors",
314
+ "encoder.rnn_tr.9.attn.norm.bias": "model-00009-of-00009.safetensors",
315
+ "encoder.rnn_tr.9.attn.norm.weight": "model-00009-of-00009.safetensors",
316
+ "encoder.rnn_tr.9.conv.net.0.bias": "model-00009-of-00009.safetensors",
317
+ "encoder.rnn_tr.9.conv.net.0.weight": "model-00009-of-00009.safetensors",
318
+ "encoder.rnn_tr.9.conv.net.2.bias": "model-00009-of-00009.safetensors",
319
+ "encoder.rnn_tr.9.conv.net.2.weight": "model-00009-of-00009.safetensors",
320
+ "encoder.rnn_tr.9.conv.net.4.conv.weight": "model-00009-of-00009.safetensors",
321
+ "encoder.rnn_tr.9.conv.net.5.bias": "model-00009-of-00009.safetensors",
322
+ "encoder.rnn_tr.9.conv.net.5.num_batches_tracked": "model-00009-of-00009.safetensors",
323
+ "encoder.rnn_tr.9.conv.net.5.running_mean": "model-00009-of-00009.safetensors",
324
+ "encoder.rnn_tr.9.conv.net.5.running_var": "model-00009-of-00009.safetensors",
325
+ "encoder.rnn_tr.9.conv.net.5.weight": "model-00009-of-00009.safetensors",
326
+ "encoder.rnn_tr.9.conv.net.7.bias": "model-00009-of-00009.safetensors",
327
+ "encoder.rnn_tr.9.conv.net.7.weight": "model-00009-of-00009.safetensors",
328
+ "encoder.rnn_tr.9.ff1.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
329
+ "encoder.rnn_tr.9.ff1.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
330
+ "encoder.rnn_tr.9.ff1.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
331
+ "encoder.rnn_tr.9.ff1.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
332
+ "encoder.rnn_tr.9.ff1.fn.norm.bias": "model-00009-of-00009.safetensors",
333
+ "encoder.rnn_tr.9.ff1.fn.norm.weight": "model-00009-of-00009.safetensors",
334
+ "encoder.rnn_tr.9.ff2.fn.fn.net.0.bias": "model-00009-of-00009.safetensors",
335
+ "encoder.rnn_tr.9.ff2.fn.fn.net.0.weight": "model-00009-of-00009.safetensors",
336
+ "encoder.rnn_tr.9.ff2.fn.fn.net.3.bias": "model-00009-of-00009.safetensors",
337
+ "encoder.rnn_tr.9.ff2.fn.fn.net.3.weight": "model-00009-of-00009.safetensors",
338
+ "encoder.rnn_tr.9.ff2.fn.norm.bias": "model-00009-of-00009.safetensors",
339
+ "encoder.rnn_tr.9.ff2.fn.norm.weight": "model-00009-of-00009.safetensors",
340
+ "encoder.rnn_tr.9.post_norm.bias": "model-00009-of-00009.safetensors",
341
+ "encoder.rnn_tr.9.post_norm.weight": "model-00009-of-00009.safetensors",
342
+ "language_model.model.embed_tokens.weight": "model-00001-of-00009.safetensors",
343
+ "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
344
+ "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
345
+ "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
346
+ "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
347
+ "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
348
+ "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
349
+ "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
350
+ "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
351
+ "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
352
+ "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
353
+ "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
354
+ "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
355
+ "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
356
+ "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
357
+ "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
358
+ "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
359
+ "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
360
+ "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
361
+ "language_model.model.layers.10.input_layernorm.weight": "model-00003-of-00009.safetensors",
362
+ "language_model.model.layers.10.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
363
+ "language_model.model.layers.10.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
364
+ "language_model.model.layers.10.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
365
+ "language_model.model.layers.10.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
366
+ "language_model.model.layers.10.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
367
+ "language_model.model.layers.10.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
368
+ "language_model.model.layers.10.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
369
+ "language_model.model.layers.10.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
370
+ "language_model.model.layers.11.input_layernorm.weight": "model-00003-of-00009.safetensors",
371
+ "language_model.model.layers.11.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
372
+ "language_model.model.layers.11.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
373
+ "language_model.model.layers.11.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
374
+ "language_model.model.layers.11.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
375
+ "language_model.model.layers.11.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
376
+ "language_model.model.layers.11.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
377
+ "language_model.model.layers.11.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
378
+ "language_model.model.layers.11.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
379
+ "language_model.model.layers.12.input_layernorm.weight": "model-00003-of-00009.safetensors",
380
+ "language_model.model.layers.12.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
381
+ "language_model.model.layers.12.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
382
+ "language_model.model.layers.12.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
383
+ "language_model.model.layers.12.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
384
+ "language_model.model.layers.12.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
385
+ "language_model.model.layers.12.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
386
+ "language_model.model.layers.12.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
387
+ "language_model.model.layers.12.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
388
+ "language_model.model.layers.13.input_layernorm.weight": "model-00003-of-00009.safetensors",
389
+ "language_model.model.layers.13.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
390
+ "language_model.model.layers.13.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
391
+ "language_model.model.layers.13.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
392
+ "language_model.model.layers.13.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
393
+ "language_model.model.layers.13.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
394
+ "language_model.model.layers.13.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
395
+ "language_model.model.layers.13.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
396
+ "language_model.model.layers.13.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
397
+ "language_model.model.layers.14.input_layernorm.weight": "model-00004-of-00009.safetensors",
398
+ "language_model.model.layers.14.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
399
+ "language_model.model.layers.14.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
400
+ "language_model.model.layers.14.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
401
+ "language_model.model.layers.14.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
402
+ "language_model.model.layers.14.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
403
+ "language_model.model.layers.14.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
404
+ "language_model.model.layers.14.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
405
+ "language_model.model.layers.14.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
406
+ "language_model.model.layers.15.input_layernorm.weight": "model-00004-of-00009.safetensors",
407
+ "language_model.model.layers.15.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
408
+ "language_model.model.layers.15.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
409
+ "language_model.model.layers.15.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
410
+ "language_model.model.layers.15.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
411
+ "language_model.model.layers.15.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
412
+ "language_model.model.layers.15.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
413
+ "language_model.model.layers.15.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
414
+ "language_model.model.layers.15.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
415
+ "language_model.model.layers.16.input_layernorm.weight": "model-00004-of-00009.safetensors",
416
+ "language_model.model.layers.16.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
417
+ "language_model.model.layers.16.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
418
+ "language_model.model.layers.16.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
419
+ "language_model.model.layers.16.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
420
+ "language_model.model.layers.16.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
421
+ "language_model.model.layers.16.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
422
+ "language_model.model.layers.16.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
423
+ "language_model.model.layers.16.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
424
+ "language_model.model.layers.17.input_layernorm.weight": "model-00004-of-00009.safetensors",
425
+ "language_model.model.layers.17.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
426
+ "language_model.model.layers.17.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
427
+ "language_model.model.layers.17.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
428
+ "language_model.model.layers.17.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
429
+ "language_model.model.layers.17.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
430
+ "language_model.model.layers.17.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
431
+ "language_model.model.layers.17.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
432
+ "language_model.model.layers.17.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
433
+ "language_model.model.layers.18.input_layernorm.weight": "model-00004-of-00009.safetensors",
434
+ "language_model.model.layers.18.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
435
+ "language_model.model.layers.18.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
436
+ "language_model.model.layers.18.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
437
+ "language_model.model.layers.18.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
438
+ "language_model.model.layers.18.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
439
+ "language_model.model.layers.18.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
440
+ "language_model.model.layers.18.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
441
+ "language_model.model.layers.18.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
442
+ "language_model.model.layers.19.input_layernorm.weight": "model-00005-of-00009.safetensors",
443
+ "language_model.model.layers.19.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
444
+ "language_model.model.layers.19.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
445
+ "language_model.model.layers.19.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
446
+ "language_model.model.layers.19.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
447
+ "language_model.model.layers.19.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
448
+ "language_model.model.layers.19.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
449
+ "language_model.model.layers.19.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
450
+ "language_model.model.layers.19.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
451
+ "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00009.safetensors",
452
+ "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
453
+ "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
454
+ "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
455
+ "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
456
+ "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
457
+ "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
458
+ "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
459
+ "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
460
+ "language_model.model.layers.20.input_layernorm.weight": "model-00005-of-00009.safetensors",
461
+ "language_model.model.layers.20.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
462
+ "language_model.model.layers.20.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
463
+ "language_model.model.layers.20.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
464
+ "language_model.model.layers.20.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
465
+ "language_model.model.layers.20.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
466
+ "language_model.model.layers.20.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
467
+ "language_model.model.layers.20.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
468
+ "language_model.model.layers.20.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
469
+ "language_model.model.layers.21.input_layernorm.weight": "model-00005-of-00009.safetensors",
470
+ "language_model.model.layers.21.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
471
+ "language_model.model.layers.21.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
472
+ "language_model.model.layers.21.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
473
+ "language_model.model.layers.21.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
474
+ "language_model.model.layers.21.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
475
+ "language_model.model.layers.21.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
476
+ "language_model.model.layers.21.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
477
+ "language_model.model.layers.21.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
478
+ "language_model.model.layers.22.input_layernorm.weight": "model-00005-of-00009.safetensors",
479
+ "language_model.model.layers.22.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
480
+ "language_model.model.layers.22.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
481
+ "language_model.model.layers.22.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
482
+ "language_model.model.layers.22.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
483
+ "language_model.model.layers.22.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
484
+ "language_model.model.layers.22.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
485
+ "language_model.model.layers.22.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
486
+ "language_model.model.layers.22.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
487
+ "language_model.model.layers.23.input_layernorm.weight": "model-00005-of-00009.safetensors",
488
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
489
+ "language_model.model.layers.23.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
490
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
491
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
492
+ "language_model.model.layers.23.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
493
+ "language_model.model.layers.23.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
494
+ "language_model.model.layers.23.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
495
+ "language_model.model.layers.23.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
496
+ "language_model.model.layers.24.input_layernorm.weight": "model-00006-of-00009.safetensors",
497
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
498
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
499
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
500
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
501
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
502
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
503
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
504
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
505
+ "language_model.model.layers.25.input_layernorm.weight": "model-00006-of-00009.safetensors",
506
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
507
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
508
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
509
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
510
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
511
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
512
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
513
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
514
+ "language_model.model.layers.26.input_layernorm.weight": "model-00006-of-00009.safetensors",
515
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
516
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
517
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
518
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
519
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
520
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
521
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
522
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
523
+ "language_model.model.layers.27.input_layernorm.weight": "model-00006-of-00009.safetensors",
524
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
525
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
526
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
527
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
528
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
529
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
530
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
531
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
532
+ "language_model.model.layers.28.input_layernorm.weight": "model-00006-of-00009.safetensors",
533
+ "language_model.model.layers.28.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
534
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
535
+ "language_model.model.layers.28.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
536
+ "language_model.model.layers.28.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
537
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
538
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
539
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
540
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
541
+ "language_model.model.layers.29.input_layernorm.weight": "model-00007-of-00009.safetensors",
542
+ "language_model.model.layers.29.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
543
+ "language_model.model.layers.29.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
544
+ "language_model.model.layers.29.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
545
+ "language_model.model.layers.29.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
546
+ "language_model.model.layers.29.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
547
+ "language_model.model.layers.29.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
548
+ "language_model.model.layers.29.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
549
+ "language_model.model.layers.29.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
550
+ "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00009.safetensors",
551
+ "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
552
+ "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
553
+ "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
554
+ "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
555
+ "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
556
+ "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
557
+ "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
558
+ "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
559
+ "language_model.model.layers.30.input_layernorm.weight": "model-00007-of-00009.safetensors",
560
+ "language_model.model.layers.30.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
561
+ "language_model.model.layers.30.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
562
+ "language_model.model.layers.30.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
563
+ "language_model.model.layers.30.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
564
+ "language_model.model.layers.30.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
565
+ "language_model.model.layers.30.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
566
+ "language_model.model.layers.30.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
567
+ "language_model.model.layers.30.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
568
+ "language_model.model.layers.31.input_layernorm.weight": "model-00007-of-00009.safetensors",
569
+ "language_model.model.layers.31.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
570
+ "language_model.model.layers.31.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
571
+ "language_model.model.layers.31.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
572
+ "language_model.model.layers.31.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
573
+ "language_model.model.layers.31.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
574
+ "language_model.model.layers.31.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
575
+ "language_model.model.layers.31.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
576
+ "language_model.model.layers.31.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
577
+ "language_model.model.layers.32.input_layernorm.weight": "model-00007-of-00009.safetensors",
578
+ "language_model.model.layers.32.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
579
+ "language_model.model.layers.32.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
580
+ "language_model.model.layers.32.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
581
+ "language_model.model.layers.32.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
582
+ "language_model.model.layers.32.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
583
+ "language_model.model.layers.32.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
584
+ "language_model.model.layers.32.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
585
+ "language_model.model.layers.32.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
586
+ "language_model.model.layers.33.input_layernorm.weight": "model-00007-of-00009.safetensors",
587
+ "language_model.model.layers.33.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
588
+ "language_model.model.layers.33.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
589
+ "language_model.model.layers.33.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
590
+ "language_model.model.layers.33.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
591
+ "language_model.model.layers.33.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
592
+ "language_model.model.layers.33.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
593
+ "language_model.model.layers.33.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
594
+ "language_model.model.layers.33.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
595
+ "language_model.model.layers.34.input_layernorm.weight": "model-00008-of-00009.safetensors",
596
+ "language_model.model.layers.34.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
597
+ "language_model.model.layers.34.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
598
+ "language_model.model.layers.34.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
599
+ "language_model.model.layers.34.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
600
+ "language_model.model.layers.34.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
601
+ "language_model.model.layers.34.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
602
+ "language_model.model.layers.34.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
603
+ "language_model.model.layers.34.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
604
+ "language_model.model.layers.35.input_layernorm.weight": "model-00008-of-00009.safetensors",
605
+ "language_model.model.layers.35.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
606
+ "language_model.model.layers.35.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
607
+ "language_model.model.layers.35.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
608
+ "language_model.model.layers.35.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
609
+ "language_model.model.layers.35.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
610
+ "language_model.model.layers.35.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
611
+ "language_model.model.layers.35.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
612
+ "language_model.model.layers.35.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
613
+ "language_model.model.layers.36.input_layernorm.weight": "model-00008-of-00009.safetensors",
614
+ "language_model.model.layers.36.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
615
+ "language_model.model.layers.36.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
616
+ "language_model.model.layers.36.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
617
+ "language_model.model.layers.36.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
618
+ "language_model.model.layers.36.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
619
+ "language_model.model.layers.36.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
620
+ "language_model.model.layers.36.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
621
+ "language_model.model.layers.36.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
622
+ "language_model.model.layers.37.input_layernorm.weight": "model-00008-of-00009.safetensors",
623
+ "language_model.model.layers.37.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
624
+ "language_model.model.layers.37.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
625
+ "language_model.model.layers.37.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
626
+ "language_model.model.layers.37.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
627
+ "language_model.model.layers.37.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
628
+ "language_model.model.layers.37.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
629
+ "language_model.model.layers.37.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
630
+ "language_model.model.layers.37.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
631
+ "language_model.model.layers.38.input_layernorm.weight": "model-00008-of-00009.safetensors",
632
+ "language_model.model.layers.38.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
633
+ "language_model.model.layers.38.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
634
+ "language_model.model.layers.38.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
635
+ "language_model.model.layers.38.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
636
+ "language_model.model.layers.38.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
637
+ "language_model.model.layers.38.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
638
+ "language_model.model.layers.38.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
639
+ "language_model.model.layers.38.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
640
+ "language_model.model.layers.39.input_layernorm.weight": "model-00009-of-00009.safetensors",
641
+ "language_model.model.layers.39.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
642
+ "language_model.model.layers.39.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
643
+ "language_model.model.layers.39.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
644
+ "language_model.model.layers.39.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
645
+ "language_model.model.layers.39.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
646
+ "language_model.model.layers.39.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
647
+ "language_model.model.layers.39.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
648
+ "language_model.model.layers.39.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
649
+ "language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00009.safetensors",
650
+ "language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
651
+ "language_model.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
652
+ "language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
653
+ "language_model.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
654
+ "language_model.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
655
+ "language_model.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
656
+ "language_model.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
657
+ "language_model.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
658
+ "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00009.safetensors",
659
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
660
+ "language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
661
+ "language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
662
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
663
+ "language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
664
+ "language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
665
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
666
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
667
+ "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00009.safetensors",
668
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
669
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
670
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
671
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
672
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
673
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
674
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
675
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
676
+ "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00009.safetensors",
677
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
678
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
679
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
680
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
681
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
682
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
683
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
684
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
685
+ "language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00009.safetensors",
686
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
687
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
688
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
689
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
690
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
691
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
692
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
693
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
694
+ "language_model.model.layers.9.input_layernorm.weight": "model-00003-of-00009.safetensors",
695
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
696
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
697
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
698
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
699
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
700
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
701
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
702
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
703
+ "language_model.model.norm.weight": "model-00009-of-00009.safetensors",
704
+ "projector.linear.bias": "model-00009-of-00009.safetensors",
705
+ "projector.linear.weight": "model-00009-of-00009.safetensors",
706
+ "projector.qformer.encoder.layer.0.attention.attention.key.bias": "model-00009-of-00009.safetensors",
707
+ "projector.qformer.encoder.layer.0.attention.attention.key.weight": "model-00009-of-00009.safetensors",
708
+ "projector.qformer.encoder.layer.0.attention.attention.query.bias": "model-00009-of-00009.safetensors",
709
+ "projector.qformer.encoder.layer.0.attention.attention.query.weight": "model-00009-of-00009.safetensors",
710
+ "projector.qformer.encoder.layer.0.attention.attention.value.bias": "model-00009-of-00009.safetensors",
711
+ "projector.qformer.encoder.layer.0.attention.attention.value.weight": "model-00009-of-00009.safetensors",
712
+ "projector.qformer.encoder.layer.0.attention.output.LayerNorm.bias": "model-00009-of-00009.safetensors",
713
+ "projector.qformer.encoder.layer.0.attention.output.LayerNorm.weight": "model-00009-of-00009.safetensors",
714
+ "projector.qformer.encoder.layer.0.attention.output.dense.bias": "model-00009-of-00009.safetensors",
715
+ "projector.qformer.encoder.layer.0.attention.output.dense.weight": "model-00009-of-00009.safetensors",
716
+ "projector.qformer.encoder.layer.0.crossattention.attention.key.bias": "model-00009-of-00009.safetensors",
717
+ "projector.qformer.encoder.layer.0.crossattention.attention.key.weight": "model-00009-of-00009.safetensors",
718
+ "projector.qformer.encoder.layer.0.crossattention.attention.query.bias": "model-00009-of-00009.safetensors",
719
+ "projector.qformer.encoder.layer.0.crossattention.attention.query.weight": "model-00009-of-00009.safetensors",
720
+ "projector.qformer.encoder.layer.0.crossattention.attention.value.bias": "model-00009-of-00009.safetensors",
721
+ "projector.qformer.encoder.layer.0.crossattention.attention.value.weight": "model-00009-of-00009.safetensors",
722
+ "projector.qformer.encoder.layer.0.crossattention.output.LayerNorm.bias": "model-00009-of-00009.safetensors",
723
+ "projector.qformer.encoder.layer.0.crossattention.output.LayerNorm.weight": "model-00009-of-00009.safetensors",
724
+ "projector.qformer.encoder.layer.0.crossattention.output.dense.bias": "model-00009-of-00009.safetensors",
725
+ "projector.qformer.encoder.layer.0.crossattention.output.dense.weight": "model-00009-of-00009.safetensors",
726
+ "projector.qformer.encoder.layer.0.intermediate_query.dense.bias": "model-00009-of-00009.safetensors",
727
+ "projector.qformer.encoder.layer.0.intermediate_query.dense.weight": "model-00009-of-00009.safetensors",
728
+ "projector.qformer.encoder.layer.0.output_query.LayerNorm.bias": "model-00009-of-00009.safetensors",
729
+ "projector.qformer.encoder.layer.0.output_query.LayerNorm.weight": "model-00009-of-00009.safetensors",
730
+ "projector.qformer.encoder.layer.0.output_query.dense.bias": "model-00009-of-00009.safetensors",
731
+ "projector.qformer.encoder.layer.0.output_query.dense.weight": "model-00009-of-00009.safetensors",
732
+ "projector.qformer.encoder.layer.1.attention.attention.key.bias": "model-00009-of-00009.safetensors",
733
+ "projector.qformer.encoder.layer.1.attention.attention.key.weight": "model-00009-of-00009.safetensors",
734
+ "projector.qformer.encoder.layer.1.attention.attention.query.bias": "model-00009-of-00009.safetensors",
735
+ "projector.qformer.encoder.layer.1.attention.attention.query.weight": "model-00009-of-00009.safetensors",
736
+ "projector.qformer.encoder.layer.1.attention.attention.value.bias": "model-00009-of-00009.safetensors",
737
+ "projector.qformer.encoder.layer.1.attention.attention.value.weight": "model-00009-of-00009.safetensors",
738
+ "projector.qformer.encoder.layer.1.attention.output.LayerNorm.bias": "model-00009-of-00009.safetensors",
739
+ "projector.qformer.encoder.layer.1.attention.output.LayerNorm.weight": "model-00009-of-00009.safetensors",
740
+ "projector.qformer.encoder.layer.1.attention.output.dense.bias": "model-00009-of-00009.safetensors",
741
+ "projector.qformer.encoder.layer.1.attention.output.dense.weight": "model-00009-of-00009.safetensors",
742
+ "projector.qformer.encoder.layer.1.crossattention.attention.key.bias": "model-00009-of-00009.safetensors",
743
+ "projector.qformer.encoder.layer.1.crossattention.attention.key.weight": "model-00009-of-00009.safetensors",
744
+ "projector.qformer.encoder.layer.1.crossattention.attention.query.bias": "model-00009-of-00009.safetensors",
745
+ "projector.qformer.encoder.layer.1.crossattention.attention.query.weight": "model-00009-of-00009.safetensors",
746
+ "projector.qformer.encoder.layer.1.crossattention.attention.value.bias": "model-00009-of-00009.safetensors",
747
+ "projector.qformer.encoder.layer.1.crossattention.attention.value.weight": "model-00009-of-00009.safetensors",
748
+ "projector.qformer.encoder.layer.1.crossattention.output.LayerNorm.bias": "model-00009-of-00009.safetensors",
749
+ "projector.qformer.encoder.layer.1.crossattention.output.LayerNorm.weight": "model-00009-of-00009.safetensors",
750
+ "projector.qformer.encoder.layer.1.crossattention.output.dense.bias": "model-00009-of-00009.safetensors",
751
+ "projector.qformer.encoder.layer.1.crossattention.output.dense.weight": "model-00009-of-00009.safetensors",
752
+ "projector.qformer.encoder.layer.1.intermediate_query.dense.bias": "model-00009-of-00009.safetensors",
753
+ "projector.qformer.encoder.layer.1.intermediate_query.dense.weight": "model-00009-of-00009.safetensors",
754
+ "projector.qformer.encoder.layer.1.output_query.LayerNorm.bias": "model-00009-of-00009.safetensors",
755
+ "projector.qformer.encoder.layer.1.output_query.LayerNorm.weight": "model-00009-of-00009.safetensors",
756
+ "projector.qformer.encoder.layer.1.output_query.dense.bias": "model-00009-of-00009.safetensors",
757
+ "projector.qformer.encoder.layer.1.output_query.dense.weight": "model-00009-of-00009.safetensors",
758
+ "projector.qformer.layernorm.bias": "model-00009-of-00009.safetensors",
759
+ "projector.qformer.layernorm.weight": "model-00009-of-00009.safetensors",
760
+ "projector.query": "model-00009-of-00009.safetensors"
761
+ }
762
+ }
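As a point of reference (editorial note, not part of the uploaded files): the index above maps every parameter name to one of nine shards. A minimal sketch of resolving a single weight to its shard, assuming the standard `json` and `safetensors` libraries and that the index is saved locally as `model.safetensors.index.json`:

    import json
    from safetensors import safe_open

    # Load the sharded-checkpoint index (hypothetical local path).
    with open("model.safetensors.index.json") as f:
        index = json.load(f)

    # Look up one parameter name from the weight_map above.
    name = "language_model.model.layers.24.mlp.gate_proj.weight"
    shard_file = index["weight_map"][name]  # "model-00006-of-00009.safetensors"

    # Read only that tensor from its shard, without loading the whole file.
    with safe_open(shard_file, framework="pt") as shard:
        tensor = shard.get_tensor(name)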
modeling_granite_speech.py ADDED
@@ -0,0 +1,1393 @@
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional, Tuple, Union
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ import torch.utils.checkpoint
8
+ from torch import einsum, nn
9
+
10
+ from transformers.activations import ACT2FN
11
+ from transformers.generation import GenerationMixin
12
+ from transformers.modeling_outputs import (
13
+ BaseModelOutputWithPastAndCrossAttentions,
14
+ BaseModelOutputWithPoolingAndCrossAttentions,
15
+ ModelOutput,
16
+ )
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.models.auto import AutoModelForCausalLM
19
+ from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
20
+ from transformers.utils import (
21
+ add_start_docstrings,
22
+ add_start_docstrings_to_model_forward,
23
+ is_peft_available,
24
+ logging,
25
+ replace_return_docstrings,
26
+ )
27
+
28
+ from .configuration_granite_speech import (
29
+ GraniteSpeechConfig,
30
+ GraniteSpeechEncoderConfig,
31
+ GraniteSpeechProjectorConfig,
32
+ )
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CONFIG_FOR_DOC = "GraniteSpeechConfig"
38
+
39
+
40
+ @dataclass
41
+ class GraniteSpeechCausalLMOutputWithPast(ModelOutput):
42
+ """
43
+ Base class for GraniteSpeech causal language model (or autoregressive) outputs.
44
+
45
+ Args:
46
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
47
+ Language modeling loss (for next-token prediction).
48
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
49
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
50
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
51
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
52
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`
53
+
54
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
55
+ `past_key_values` input) to speed up sequential decoding.
56
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
57
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
58
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
59
+
60
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
61
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
62
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
63
+ sequence_length)`.
64
+
65
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
66
+ heads.
67
+ """
68
+
69
+ loss: Optional[torch.FloatTensor] = None
70
+ logits: torch.FloatTensor = None
71
+ past_key_values: Optional[List[torch.FloatTensor]] = None
72
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
73
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
74
+
75
+
76
+ ### Projector
77
+ # Currently, we copy the Qformer code directly to avoid depending on Blip2;
78
+ # it would be better to create the model from config, similar to the LLM,
79
+ # but to do this, we will need to register the QFormer model into an automodel,
80
+ # which should involve pulling it out into its own dir so that it is accessible
81
+ # under transformers.models.X.
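# Editorial sketch of the registration path described above (hypothetical, not part
# of this upload): once the projector is promoted to its own transformers.models.X
# directory, it could be wired into the auto classes roughly as
#
#     from transformers import AutoConfig, AutoModel
#     AutoConfig.register(GraniteSpeechProjectorConfig.model_type, GraniteSpeechProjectorConfig)
#     AutoModel.register(GraniteSpeechProjectorConfig, GraniteSpeechQFormerModel)
#
# after which the projector could be built with AutoModel.from_config(config)
# instead of the copied Q-Former classes below.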
82
+
83
+
84
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerMultiHeadAttention with Blip2->GraniteSpeech
85
+ class GraniteSpeechQFormerMultiHeadAttention(nn.Module):
86
+ def __init__(self, config, is_cross_attention=False):
87
+ super().__init__()
88
+ self.config = config
89
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
90
+ raise ValueError(
91
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
92
+ % (config.hidden_size, config.num_attention_heads)
93
+ )
94
+
95
+ self.num_attention_heads = config.num_attention_heads
96
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
97
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
98
+
99
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
100
+ if is_cross_attention:
101
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
102
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
103
+ else:
104
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
105
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
106
+
107
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
108
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
109
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
110
+ self.max_position_embeddings = config.max_position_embeddings
111
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
112
+ self.save_attention = False
113
+
114
+ def save_attn_gradients(self, attn_gradients):
115
+ self.attn_gradients = attn_gradients
116
+
117
+ def get_attn_gradients(self):
118
+ return self.attn_gradients
119
+
120
+ def save_attention_map(self, attention_map):
121
+ self.attention_map = attention_map
122
+
123
+ def get_attention_map(self):
124
+ return self.attention_map
125
+
126
+ def transpose_for_scores(self, x):
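# Reshapes (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
# so that attention scores can be computed independently per head.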
127
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
128
+ x = x.view(*new_x_shape)
129
+ return x.permute(0, 2, 1, 3)
130
+
131
+ def forward(
132
+ self,
133
+ hidden_states,
134
+ attention_mask=None,
135
+ head_mask=None,
136
+ encoder_hidden_states=None,
137
+ encoder_attention_mask=None,
138
+ past_key_value=None,
139
+ output_attentions=False,
140
+ ):
141
+ # If this is instantiated as a cross-attention module, the keys
142
+ # and values come from an encoder; the attention mask needs to be
143
+ # such that the encoder's padding tokens are not attended to.
144
+ is_cross_attention = encoder_hidden_states is not None
145
+
146
+ if is_cross_attention:
147
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
148
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
149
+ attention_mask = encoder_attention_mask
150
+ elif past_key_value is not None:
151
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
152
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
153
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
154
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
155
+ else:
156
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
157
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
158
+
159
+ mixed_query_layer = self.query(hidden_states)
160
+
161
+ query_layer = self.transpose_for_scores(mixed_query_layer)
162
+
163
+ past_key_value = (key_layer, value_layer)
164
+
165
+ # Take the dot product between "query" and "key" to get the raw attention scores.
166
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
167
+
168
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
169
+ seq_length = hidden_states.size()[1]
170
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
171
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
172
+ distance = position_ids_l - position_ids_r
173
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
174
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
175
+
176
+ if self.position_embedding_type == "relative_key":
177
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
178
+ attention_scores = attention_scores + relative_position_scores
179
+ elif self.position_embedding_type == "relative_key_query":
180
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
181
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
182
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
183
+
184
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
185
+
186
+ if attention_mask is not None:
187
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
188
+ attention_scores = attention_scores + attention_mask
189
+
190
+ # Normalize the attention scores to probabilities.
191
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
192
+
193
+ if is_cross_attention and self.save_attention:
194
+ self.save_attention_map(attention_probs)
195
+ attention_probs.register_hook(self.save_attn_gradients)
196
+
197
+ # This is actually dropping out entire tokens to attend to, which might
198
+ # seem a bit unusual, but is taken from the original Transformer paper.
199
+ attention_probs_dropped = self.dropout(attention_probs)
200
+
201
+ # Mask heads if we want to
202
+ if head_mask is not None:
203
+ attention_probs_dropped = attention_probs_dropped * head_mask
204
+
205
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
206
+
207
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
208
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
209
+ context_layer = context_layer.view(*new_context_layer_shape)
210
+
211
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
212
+
213
+ outputs = outputs + (past_key_value,)
214
+ return outputs
215
+
216
+
217
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->GraniteSpeechQFormer
218
+ class GraniteSpeechQFormerSelfOutput(nn.Module):
219
+ def __init__(self, config):
220
+ super().__init__()
221
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
222
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
223
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
224
+
225
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
226
+ hidden_states = self.dense(hidden_states)
227
+ hidden_states = self.dropout(hidden_states)
228
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
229
+ return hidden_states
230
+
231
+
232
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->GraniteSpeech
233
+ class GraniteSpeechQFormerAttention(nn.Module):
234
+ def __init__(self, config, is_cross_attention=False):
235
+ super().__init__()
236
+ self.attention = GraniteSpeechQFormerMultiHeadAttention(config, is_cross_attention)
237
+ self.output = GraniteSpeechQFormerSelfOutput(config)
238
+ self.pruned_heads = set()
239
+
240
+ def prune_heads(self, heads):
241
+ if len(heads) == 0:
242
+ return
243
+ heads, index = find_pruneable_heads_and_indices(
244
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
245
+ )
246
+
247
+ # Prune linear layers
248
+ self.attention.query = prune_linear_layer(self.attention.query, index)
249
+ self.attention.key = prune_linear_layer(self.attention.key, index)
250
+ self.attention.value = prune_linear_layer(self.attention.value, index)
251
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
252
+
253
+ # Update hyper params and store pruned heads
254
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
255
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
256
+ self.pruned_heads = self.pruned_heads.union(heads)
257
+
258
+ def forward(
259
+ self,
260
+ hidden_states: torch.Tensor,
261
+ attention_mask: Optional[torch.FloatTensor] = None,
262
+ head_mask: Optional[torch.FloatTensor] = None,
263
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
264
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
265
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
266
+ output_attentions: Optional[bool] = False,
267
+ ) -> Tuple[torch.Tensor]:
268
+ self_outputs = self.attention(
269
+ hidden_states,
270
+ attention_mask,
271
+ head_mask,
272
+ encoder_hidden_states,
273
+ encoder_attention_mask,
274
+ past_key_value,
275
+ output_attentions,
276
+ )
277
+ attention_output = self.output(self_outputs[0], hidden_states)
278
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
279
+ return outputs
280
+
281
+
282
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->GraniteSpeechQFormer
283
+ class GraniteSpeechQFormerIntermediate(nn.Module):
284
+ def __init__(self, config):
285
+ super().__init__()
286
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
287
+ if isinstance(config.hidden_act, str):
288
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
289
+ else:
290
+ self.intermediate_act_fn = config.hidden_act
291
+
292
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
293
+ hidden_states = self.dense(hidden_states)
294
+ hidden_states = self.intermediate_act_fn(hidden_states)
295
+ return hidden_states
296
+
297
+
298
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->GraniteSpeechQFormer
299
+ class GraniteSpeechQFormerOutput(nn.Module):
300
+ def __init__(self, config):
301
+ super().__init__()
302
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
303
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
304
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
305
+
306
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
307
+ hidden_states = self.dense(hidden_states)
308
+ hidden_states = self.dropout(hidden_states)
309
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
310
+ return hidden_states
311
+
312
+
313
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerLayer with Blip2->GraniteSpeech
314
+ class GraniteSpeechQFormerLayer(nn.Module):
315
+ def __init__(self, config, layer_idx):
316
+ super().__init__()
317
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
318
+ self.seq_len_dim = 1
319
+ self.attention = GraniteSpeechQFormerAttention(config)
320
+
321
+ self.layer_idx = layer_idx
322
+
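# Layers whose index is a multiple of config.cross_attention_frequency get a
# cross-attention block over the encoder hidden states (the speech encoder
# features in this model); the others are self-attention only.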
323
+ if layer_idx % config.cross_attention_frequency == 0:
324
+ self.crossattention = GraniteSpeechQFormerAttention(config, is_cross_attention=True)
325
+ self.has_cross_attention = True
326
+ else:
327
+ self.has_cross_attention = False
328
+
329
+ if config.use_qformer_text_input:
330
+ self.intermediate = GraniteSpeechQFormerIntermediate(config)
331
+ self.output = GraniteSpeechQFormerOutput(config)
332
+
333
+ self.intermediate_query = GraniteSpeechQFormerIntermediate(config)
334
+ self.output_query = GraniteSpeechQFormerOutput(config)
335
+
336
+ def forward(
337
+ self,
338
+ hidden_states,
339
+ attention_mask=None,
340
+ head_mask=None,
341
+ encoder_hidden_states=None,
342
+ encoder_attention_mask=None,
343
+ past_key_value=None,
344
+ output_attentions=False,
345
+ query_length=0,
346
+ ):
347
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
348
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
349
+ self_attention_outputs = self.attention(
350
+ hidden_states,
351
+ attention_mask,
352
+ head_mask,
353
+ output_attentions=output_attentions,
354
+ past_key_value=self_attn_past_key_value,
355
+ )
356
+ attention_output = self_attention_outputs[0]
357
+ outputs = self_attention_outputs[1:-1]
358
+
359
+ present_key_value = self_attention_outputs[-1]
360
+
361
+ if query_length > 0:
362
+ query_attention_output = attention_output[:, :query_length, :]
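# The first `query_length` positions are the learned query embeddings; only these
# are routed through cross-attention over the encoder states below, while any
# trailing text tokens go through the plain feed-forward branch.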
363
+
364
+ if self.has_cross_attention:
365
+ if encoder_hidden_states is None:
366
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
367
+ cross_attention_outputs = self.crossattention(
368
+ query_attention_output,
369
+ attention_mask,
370
+ head_mask,
371
+ encoder_hidden_states,
372
+ encoder_attention_mask,
373
+ output_attentions=output_attentions,
374
+ )
375
+ query_attention_output = cross_attention_outputs[0]
376
+ # add cross attentions if we output attention weights
377
+ outputs = outputs + cross_attention_outputs[1:-1]
378
+
379
+ layer_output = apply_chunking_to_forward(
380
+ self.feed_forward_chunk_query,
381
+ self.chunk_size_feed_forward,
382
+ self.seq_len_dim,
383
+ query_attention_output,
384
+ )
385
+
386
+ if attention_output.shape[1] > query_length:
387
+ layer_output_text = apply_chunking_to_forward(
388
+ self.feed_forward_chunk,
389
+ self.chunk_size_feed_forward,
390
+ self.seq_len_dim,
391
+ attention_output[:, query_length:, :],
392
+ )
393
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
394
+ else:
395
+ layer_output = apply_chunking_to_forward(
396
+ self.feed_forward_chunk,
397
+ self.chunk_size_feed_forward,
398
+ self.seq_len_dim,
399
+ attention_output,
400
+ )
401
+ outputs = (layer_output,) + outputs
402
+
403
+ outputs = outputs + (present_key_value,)
404
+
405
+ return outputs
406
+
407
+ def feed_forward_chunk(self, attention_output):
408
+ intermediate_output = self.intermediate(attention_output)
409
+ layer_output = self.output(intermediate_output, attention_output)
410
+ return layer_output
411
+
412
+ def feed_forward_chunk_query(self, attention_output):
413
+ intermediate_output = self.intermediate_query(attention_output)
414
+ layer_output = self.output_query(intermediate_output, attention_output)
415
+ return layer_output
416
+
417
+
418
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder with Blip2->GraniteSpeech
419
+ class GraniteSpeechQFormerEncoder(nn.Module):
420
+ def __init__(self, config):
421
+ super().__init__()
422
+ self.config = config
423
+ self.layer = nn.ModuleList(
424
+ [GraniteSpeechQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
425
+ )
426
+ self.gradient_checkpointing = False
427
+
428
+ def forward(
429
+ self,
430
+ hidden_states,
431
+ attention_mask=None,
432
+ head_mask=None,
433
+ encoder_hidden_states=None,
434
+ encoder_attention_mask=None,
435
+ past_key_values=None,
436
+ use_cache=None,
437
+ output_attentions=False,
438
+ output_hidden_states=False,
439
+ return_dict=True,
440
+ query_length=0,
441
+ ):
442
+ all_hidden_states = () if output_hidden_states else None
443
+ all_self_attentions = () if output_attentions else None
444
+ all_cross_attentions = () if output_attentions else None
445
+
446
+ next_decoder_cache = () if use_cache else None
447
+
448
+ for i in range(self.config.num_hidden_layers):
449
+ layer_module = self.layer[i]
450
+ if output_hidden_states:
451
+ all_hidden_states = all_hidden_states + (hidden_states,)
452
+
453
+ layer_head_mask = head_mask[i] if head_mask is not None else None
454
+ past_key_value = past_key_values[i] if past_key_values is not None else None
455
+
456
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
457
+ if use_cache:
458
+ logger.warning(
459
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
460
+ )
461
+ use_cache = False
462
+ layer_outputs = self._gradient_checkpointing_func(
463
+ layer_module.__call__,
464
+ hidden_states,
465
+ attention_mask,
466
+ layer_head_mask,
467
+ encoder_hidden_states,
468
+ encoder_attention_mask,
469
+ )
470
+ else:
471
+ layer_outputs = layer_module(
472
+ hidden_states,
473
+ attention_mask,
474
+ layer_head_mask,
475
+ encoder_hidden_states,
476
+ encoder_attention_mask,
477
+ past_key_value,
478
+ output_attentions,
479
+ query_length,
480
+ )
481
+
482
+ hidden_states = layer_outputs[0]
483
+ if use_cache:
484
+ next_decoder_cache += (layer_outputs[-1],)
485
+ if output_attentions:
486
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
487
+ if layer_module.has_cross_attention:
488
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
489
+
490
+ if output_hidden_states:
491
+ all_hidden_states = all_hidden_states + (hidden_states,)
492
+
493
+ if not return_dict:
494
+ return tuple(
495
+ v
496
+ for v in [
497
+ hidden_states,
498
+ next_decoder_cache,
499
+ all_hidden_states,
500
+ all_self_attentions,
501
+ all_cross_attentions,
502
+ ]
503
+ if v is not None
504
+ )
505
+ return BaseModelOutputWithPastAndCrossAttentions(
506
+ last_hidden_state=hidden_states,
507
+ past_key_values=next_decoder_cache,
508
+ hidden_states=all_hidden_states,
509
+ attentions=all_self_attentions,
510
+ cross_attentions=all_cross_attentions,
511
+ )
512
+
513
+
514
+ class GraniteSpeechEncoderProjectorPreTrainedModel(PreTrainedModel):
515
+ """
516
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
517
+ models.
518
+ """
519
+
520
+ config_class = GraniteSpeechProjectorConfig
521
+ base_model_prefix = "qformer"
522
+ supports_gradient_checkpointing = True
523
+
524
+ _no_split_modules = [
525
+ "GraniteSpeechQFormerMultiHeadAttention",
526
+ "T5Block",
527
+ "OPTDecoderLayer",
528
+ ]
529
+ _skip_keys_device_placement = "past_key_values"
530
+ _keep_in_fp32_modules = ["query_tokens"]
531
+
532
+ def _init_weights(self, module):
533
+ """Initialize the weights"""
534
+ factor = self.config.initializer_range
535
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
536
+ module.weight.data.normal_(mean=0.0, std=factor)
537
+ if hasattr(module, "bias") and module.bias is not None:
538
+ module.bias.data.zero_()
539
+
540
+ elif isinstance(module, nn.LayerNorm):
541
+ module.bias.data.zero_()
542
+ module.weight.data.fill_(1.0)
543
+ elif isinstance(module, nn.Linear) and module.bias is not None:
544
+ module.bias.data.zero_()
545
+
546
+
547
+ class GraniteSpeechQFormerModel(GraniteSpeechEncoderProjectorPreTrainedModel):
548
+ """
549
+ Querying Transformer (Q-Former), used in GraniteSpeech.
550
+ """
551
+
552
+ def __init__(self, config: GraniteSpeechProjectorConfig):
553
+ super().__init__(config)
554
+ self.config = config
555
+
556
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
557
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
558
+
559
+ self.encoder = GraniteSpeechQFormerEncoder(config)
560
+
561
+ self.post_init()
562
+
563
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel.get_input_embeddings
564
+ def get_input_embeddings(self):
565
+ return self.embeddings.word_embeddings
566
+
567
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel.set_input_embeddings
568
+ def set_input_embeddings(self, value):
569
+ self.embeddings.word_embeddings = value
570
+
571
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel._prune_heads
572
+ def _prune_heads(self, heads_to_prune):
573
+ """
574
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
575
+ class PreTrainedModel
576
+ """
577
+ for layer, heads in heads_to_prune.items():
578
+ self.encoder.layer[layer].attention.prune_heads(heads)
579
+
580
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel.get_extended_attention_mask
581
+ def get_extended_attention_mask(
582
+ self,
583
+ attention_mask: torch.Tensor,
584
+ input_shape: Tuple[int],
585
+ device: torch.device,
586
+ has_query: bool = False,
587
+ ) -> torch.Tensor:
588
+ """
589
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
590
+
591
+ Arguments:
592
+ attention_mask (`torch.Tensor`):
593
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
594
+ input_shape (`Tuple[int]`):
595
+ The shape of the input to the model.
596
+ device (`torch.device`):
597
+ The device of the input to the model.
598
+
599
+ Returns:
600
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
601
+ """
602
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
603
+ # ourselves in which case we just need to make it broadcastable to all heads.
604
+ if attention_mask.dim() == 3:
605
+ extended_attention_mask = attention_mask[:, None, :, :]
606
+ elif attention_mask.dim() == 2:
607
+ # Provided a padding mask of dimensions [batch_size, seq_length]
608
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
609
+ extended_attention_mask = attention_mask[:, None, None, :]
610
+ else:
611
+ raise ValueError(
612
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
613
+ input_shape, attention_mask.shape
614
+ )
615
+ )
616
+
617
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
618
+ # masked positions, this operation will create a tensor which is 0.0 for
619
+ # positions we want to attend and -10000.0 for masked positions.
620
+ # Since we are adding it to the raw scores before the softmax, this is
621
+ # effectively the same as removing these entirely.
622
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
623
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
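# e.g. an attention_mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0]: attended
# positions are left unchanged and masked positions are pushed far below zero
# before the softmax.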
624
+ return extended_attention_mask
625
+
626
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel.forward
627
+ def forward(
628
+ self,
629
+ query_embeds: torch.FloatTensor,
630
+ query_length: Optional[int] = None,
631
+ attention_mask: Optional[torch.FloatTensor] = None,
632
+ head_mask: Optional[torch.FloatTensor] = None,
633
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
634
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
635
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
636
+ use_cache: Optional[bool] = None,
637
+ output_attentions: Optional[bool] = None,
638
+ output_hidden_states: Optional[bool] = None,
639
+ return_dict: Optional[bool] = None,
640
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
641
+ r"""
642
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
643
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
644
+ the model is configured as a decoder.
645
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
646
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
647
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
648
+ - 1 for tokens that are **not masked**,
649
+ - 0 for tokens that are **masked**.
650
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
651
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
652
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
653
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
654
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
655
+ `(batch_size, sequence_length)`.
656
+ use_cache (`bool`, `optional`):
657
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
658
+ `past_key_values`).
659
+ """
660
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
661
+ output_hidden_states = (
662
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
663
+ )
664
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
665
+
666
+ # past_key_values_length
667
+ past_key_values_length = (
668
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
669
+ )
670
+
671
+ query_length = (
672
+ query_length if query_length is not None else query_embeds.shape[1] if query_embeds is not None else 0
673
+ )
674
+
675
+ embedding_output = self.layernorm(query_embeds)
676
+ embedding_output = self.dropout(embedding_output)
677
+
678
+ input_shape = embedding_output.size()[:-1]
679
+ batch_size, seq_length = input_shape
680
+ device = embedding_output.device
681
+
682
+ if attention_mask is None:
683
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
684
+
685
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
686
+ # ourselves in which case we just need to make it broadcastable to all heads.
687
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
688
+
689
+ # If a 2D or 3D attention mask is provided for the cross-attention
690
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
691
+ if encoder_hidden_states is not None:
692
+ if isinstance(encoder_hidden_states, list):
693
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
694
+ else:
695
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
696
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
697
+
698
+ if isinstance(encoder_attention_mask, list):
699
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
700
+ elif encoder_attention_mask is None:
701
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
702
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
703
+ else:
704
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
705
+ else:
706
+ encoder_extended_attention_mask = None
707
+
708
+ # Prepare head mask if needed
709
+ # 1.0 in head_mask indicate we keep the head
710
+ # attention_probs has shape bsz x n_heads x N x N
711
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
712
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
713
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
714
+
715
+ encoder_outputs = self.encoder(
716
+ embedding_output,
717
+ attention_mask=extended_attention_mask,
718
+ head_mask=head_mask,
719
+ encoder_hidden_states=encoder_hidden_states,
720
+ encoder_attention_mask=encoder_extended_attention_mask,
721
+ past_key_values=past_key_values,
722
+ use_cache=use_cache,
723
+ output_attentions=output_attentions,
724
+ output_hidden_states=output_hidden_states,
725
+ return_dict=return_dict,
726
+ query_length=query_length,
727
+ )
728
+ sequence_output = encoder_outputs[0]
729
+ pooled_output = sequence_output[:, 0, :]
730
+
731
+ if not return_dict:
732
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
733
+
734
+ return BaseModelOutputWithPoolingAndCrossAttentions(
735
+ last_hidden_state=sequence_output,
736
+ pooler_output=pooled_output,
737
+ past_key_values=encoder_outputs.past_key_values,
738
+ hidden_states=encoder_outputs.hidden_states,
739
+ attentions=encoder_outputs.attentions,
740
+ cross_attentions=encoder_outputs.cross_attentions,
741
+ )
742
+
743
+
744
+ # TODO (alex) - refactor GraniteSpeechQformer to be available under
745
+ # transformers.models.X, delete all of the code above, and
746
+ # create the model through AutoModel.
747
+
748
+
749
+ class GraniteSpeechEncoderProjectorQFormer(nn.Module):
750
+ def __init__(self, config: GraniteSpeechProjectorConfig):
751
+ super().__init__()
752
+ self.hidden_size = config.hidden_size
753
+ self.ds_rate = config.downsample_rate
754
+ self.window_size = config.window_size
755
+ self.num_queries = self.window_size // self.ds_rate
756
+ self.query = nn.Parameter(torch.zeros(1, self.num_queries, config.hidden_size))
757
+ self.query.data.normal_(mean=0.0, std=1.0)
758
+ # NOTE: It would be better to create this from config, similar to the LLM.
759
+ # To do this, we need to register the QFormer model into an automodel, which
760
+ # will require pulling it out into its own dir so that it's accessible under
761
+ # transformers.models.X
762
+ self.qformer = GraniteSpeechQFormerModel(config)
763
+ self.linear = nn.Linear(config.hidden_size, config.llm_dim)
764
+
765
+ def forward(self, x, atts):
766
+ batch_size, seq_len, dim = x.size()
767
+ nblocks = math.ceil(seq_len / self.window_size)
768
+ pad = nblocks * self.window_size - seq_len
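+ # e.g. with the default window_size=15 and downsample_rate=5, a 40-frame input gives
+ # nblocks=3 and pad=5, and each 15-frame block is summarized by 3 query vectors.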
769
+ x = nn.functional.pad(x, (0, 0, 0, pad), "constant", 0)
770
+ x = x.view(batch_size * nblocks, self.window_size, dim)
771
+
772
+ query_output = self.qformer(
773
+ query_embeds=self.query.data,
774
+ encoder_hidden_states=x,
775
+ encoder_attention_mask=atts,
776
+ return_dict=True,
777
+ )
778
+ query_proj = self.linear(
779
+ query_output.last_hidden_state.view(batch_size, nblocks * self.window_size // self.ds_rate, -1)
780
+ )
781
+ return query_proj
782
+
783
+
784
+ ### Encoder
785
+ class GraniteSpeechCTCModel(nn.Module):
786
+ def __init__(self, config: GraniteSpeechEncoderConfig):
787
+ super(GraniteSpeechCTCModel, self).__init__()
788
+
789
+ self.rnn_tr = nn.ModuleList(
790
+ [nn.Linear(config.input_dim, config.hidden_dim, bias=True)]
791
+ + [
792
+ GraniteSpeechConformerBlock(
793
+ dim=config.hidden_dim,
794
+ dim_head=config.dim_head,
795
+ heads=config.num_heads,
796
+ ff_mult=config.feedforward_mult,
797
+ conv_expansion_factor=config.conv_expansion_factor,
798
+ conv_kernel_size=config.conv_kernel_size,
799
+ context_size=config.context_size, # attention context size
800
+ attn_dropout=config.dropout,
801
+ ff_dropout=config.dropout,
802
+ conv_dropout=config.dropout,
803
+ )
804
+ for layer_idx in range(config.num_layers)
805
+ ]
806
+ )
807
+
808
+ self.out = nn.Linear(config.hidden_dim, config.output_dim, bias=True)
809
+ self.out_mid = nn.Linear(config.output_dim, config.hidden_dim, bias=True)
810
+ self.context_size = config.context_size
811
+ self.input_dim = config.input_dim
812
+ self.num_layers = config.num_layers
813
+ self.hidden_dim = config.hidden_dim
814
+ self.output_dim = config.output_dim
815
+
816
+ def forward(self, x: torch.Tensor):
817
+ x = self.rnn_tr[0](x)
818
+ for idx, layer in enumerate(self.rnn_tr[1:], start=1):
819
+ x = layer(x, self.context_size)
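+ # At the halfway point, project to the CTC output vocabulary and feed its softmax back
+ # into the hidden stream (an intermediate-CTC-style self-conditioning branch).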
820
+ if idx == self.num_layers // 2:
821
+ x_mid = x.clone()
822
+ x_mid = self.out(x_mid)
823
+ x += self.out_mid(nn.Softmax(dim=-1)(x_mid))
824
+ return x
825
+
826
+
827
+ # NOTE: Conformer adapted from: https://github.com/lucidrains/conformer.git
828
+ class GraniteSpeechConformerPermute(nn.Module):
829
+ def __init__(self, dims):
830
+ super().__init__()
831
+ self.dims = dims
832
+
833
+ def forward(self, x):
834
+ x = x.permute(self.dims)
835
+ return x
836
+
837
+
838
+ class GraniteSpeechConformerDepthWiseConv1d(nn.Module):
839
+ def __init__(self, chan_in, chan_out, kernel_size, padding):
840
+ super().__init__()
841
+ self.padding = padding
842
+ self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in, bias=False)
843
+
844
+ def forward(self, x):
845
+ x = F.pad(x, self.padding)
846
+ return self.conv(x)
847
+
848
+
849
+ class GraniteSpeechConformerScale(nn.Module):
850
+ def __init__(self, scale, fn):
851
+ super().__init__()
852
+ self.fn = fn
853
+ self.scale = scale
854
+
855
+ def forward(self, x, **kwargs):
856
+ return self.fn(x, **kwargs) * self.scale
857
+
858
+
859
+ class GraniteSpeechConformerPreNorm(nn.Module):
860
+ def __init__(self, dim, fn):
861
+ super().__init__()
862
+ self.fn = fn
863
+ self.norm = nn.LayerNorm(dim)
864
+
865
+ def forward(self, x, **kwargs):
866
+ x = self.norm(x)
867
+ return self.fn(x, **kwargs)
868
+
869
+
870
+ class GraniteSpeechConformerPreNormAttn(nn.Module):
871
+ def __init__(self, dim, fn):
872
+ super().__init__()
873
+ self.fn = fn
874
+ self.norm = nn.LayerNorm(dim)
875
+
876
+ def forward(self, x, context_size, **kwargs):
877
+ x = self.norm(x)
878
+ return self.fn(x, context_size, **kwargs)
879
+
880
+
881
+ class GraniteSpeechConformerAttention(nn.Module):
882
+ def __init__(
883
+ self,
884
+ dim,
885
+ heads=8,
886
+ dim_head=64,
887
+ dropout=0.0,
888
+ context_size=200,
889
+ max_pos_emb=512,
890
+ ):
891
+ super().__init__()
892
+ inner_dim = dim_head * heads
893
+ self.heads = heads
894
+ self.dim_head = dim_head
895
+ self.scale = dim_head**-0.5
896
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
897
+ self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
898
+ self.to_out = nn.Linear(inner_dim, dim)
899
+
900
+ self.max_pos_emb = max_pos_emb
901
+ self.rel_pos_emb = nn.Embedding(2 * max_pos_emb + 1, dim_head)
902
+
903
+ self.dropout = nn.Dropout(dropout)
904
+
905
+ def forward(self, x, context_size):
906
+ device, h, max_pos_emb = x.device, self.heads, self.max_pos_emb
907
+ bs, n, d = x.shape
908
+ assert context_size > 0 and context_size <= max_pos_emb
909
+
910
+ nb = math.ceil(n / context_size)
911
+ nr = n % context_size
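+ # e.g. with context_size=200 and n=450 frames: nb=3 blocks, and the last block holds
+ # nr=50 real frames plus 150 padded ones that are masked out below.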
912
+ if nr > 0:
913
+ # right padding to reach block size
914
+ x = torch.nn.functional.pad(x, (0, 0, 0, context_size - nr))
915
+
916
+ q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=-1))
917
+ q, k, v = [t.reshape(bs, nb, context_size, h, -1).transpose(2, 3) for t in (q, k, v)]
918
+
919
+ dots = einsum("b m h i d, b m h j d -> b m h i j", q, k) * self.scale
920
+
921
+ # shaw's relative positional embedding
922
+ seq = torch.arange(context_size, device=device)
923
+ dist = seq.view(-1, 1) - seq.view(1, -1)
924
+ dist = torch.clamp(dist, -context_size, context_size) + max_pos_emb
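+ # relative offsets in [-context_size, context_size] are shifted by max_pos_emb so they
+ # index into the (2 * max_pos_emb + 1)-row relative position embedding table.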
925
+ rel_pos_emb = self.rel_pos_emb(dist).to(q)
926
+ pos_attn = einsum("b m h c d, c r d -> b m h c r", q, rel_pos_emb) * self.scale
927
+ dots = dots + pos_attn
928
+
929
+ if nr > 0:
930
+ # masked attention in the extended block
931
+ mask = torch.ones(context_size, context_size, dtype=bool, device=device)
932
+ mask[:nr, :nr] = 0
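+ # only the nr real frames of the final block may attend to each other; every padded
+ # position is filled with a large negative value before the softmax.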
933
+ mask_value = -torch.finfo(dots.dtype).max
934
+ dots[:, -1, :].masked_fill_(mask, mask_value)
935
+
936
+ attn = dots.softmax(dim=-1)
937
+
938
+ out = einsum("b m h i j, b m h j d -> b m h i d", attn, v)
939
+ out = out.transpose(2, 3).reshape(bs, x.shape[1], -1)
940
+ out = self.to_out(out[:, :n, :])
941
+ return self.dropout(out)
942
+
943
+
944
+ class GraniteSpeechConformerFeedForward(nn.Module):
945
+ def __init__(self, dim, mult=4, dropout=0.0):
946
+ super().__init__()
947
+ self.net = nn.Sequential(
948
+ nn.Linear(dim, dim * mult), nn.SiLU(), nn.Dropout(dropout), nn.Linear(dim * mult, dim), nn.Dropout(dropout)
949
+ )
950
+
951
+ def forward(self, x):
952
+ return self.net(x)
953
+
954
+
955
+ class GraniteSpeechConformerConvModule(nn.Module):
956
+ def __init__(self, dim, causal=False, expansion_factor=2, kernel_size=31, dropout=0.0):
957
+ super().__init__()
958
+
959
+ inner_dim = dim * expansion_factor
960
+ padding = self.calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
961
+
962
+ self.net = nn.Sequential(
963
+ nn.LayerNorm(dim),
964
+ GraniteSpeechConformerPermute(dims=(0, 2, 1)),
965
+ nn.Conv1d(dim, inner_dim * 2, 1),
966
+ nn.GLU(dim=1),
967
+ GraniteSpeechConformerDepthWiseConv1d(inner_dim, inner_dim, kernel_size=kernel_size, padding=padding),
968
+ nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(),
969
+ nn.SiLU(),
970
+ nn.Conv1d(inner_dim, dim, 1),
971
+ GraniteSpeechConformerPermute(dims=(0, 2, 1)),
972
+ nn.Dropout(dropout),
973
+ )
974
+
975
+ def forward(self, x):
976
+ return self.net(x)
977
+
978
+ @staticmethod
979
+ def calc_same_padding(kernel_size: int):
980
+ pad = kernel_size // 2
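+ # e.g. kernel_size=15 -> (7, 7) and kernel_size=4 -> (2, 1), so the depthwise
+ # convolution preserves the sequence length.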
981
+ return (pad, pad - (kernel_size + 1) % 2)
982
+
983
+
984
+ class GraniteSpeechConformerBlock(nn.Module):
985
+ def __init__(
986
+ self,
987
+ *,
988
+ dim,
989
+ dim_head=64,
990
+ heads=8,
991
+ ff_mult=2,
992
+ conv_expansion_factor=2,
993
+ conv_kernel_size=31,
994
+ context_size=-1,
995
+ attn_dropout=0.0,
996
+ ff_dropout=0.0,
997
+ conv_dropout=0.0,
998
+ ):
999
+ super().__init__()
1000
+ self.ff1 = GraniteSpeechConformerFeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)
1001
+ self.attn = GraniteSpeechConformerAttention(
1002
+ dim=dim,
1003
+ dim_head=dim_head,
1004
+ heads=heads,
1005
+ dropout=attn_dropout,
1006
+ context_size=context_size,
1007
+ )
1008
+ self.conv = GraniteSpeechConformerConvModule(
1009
+ dim=dim,
1010
+ causal=False,
1011
+ expansion_factor=conv_expansion_factor,
1012
+ kernel_size=conv_kernel_size,
1013
+ dropout=conv_dropout,
1014
+ )
1015
+ self.ff2 = GraniteSpeechConformerFeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)
1016
+
1017
+ self.attn = GraniteSpeechConformerPreNormAttn(dim, self.attn)
1018
+ self.ff1 = GraniteSpeechConformerScale(0.5, GraniteSpeechConformerPreNorm(dim, self.ff1))
1019
+ self.ff2 = GraniteSpeechConformerScale(0.5, GraniteSpeechConformerPreNorm(dim, self.ff2))
1020
+
1021
+ self.post_norm = nn.LayerNorm(dim)
1022
+
1023
+ def forward(self, x, context_size):
1024
+ x = self.ff1(x) + x
1025
+ x = self.attn(x, context_size) + x
1026
+ x = self.conv(x) + x
1027
+ x = self.ff2(x) + x
1028
+ x = self.post_norm(x)
1029
+ return x
1030
+
1031
+
1032
+ GRANITE_SPEECH_START_DOCSTRING = r"""
1033
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1034
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1035
+ etc.)
1036
+
1037
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1038
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1039
+ and behavior.
1040
+
1041
+ Parameters:
1042
+ config (`GraniteSpeechConfig`):
1043
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1044
+ load the weights associated with the model, only the configuration. Check out the
1045
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1046
+ """
1047
+
1048
+
1049
+ @add_start_docstrings(
1050
+ "The bare Granite Speech Model outputting raw hidden-states without any specific head on top.",
1051
+ GRANITE_SPEECH_START_DOCSTRING,
1052
+ )
1053
+ class GraniteSpeechPreTrainedModel(PreTrainedModel):
1054
+ config_class = GraniteSpeechConfig
1055
+ _supports_cache_class = True
1056
+ _supports_flash_attn_2 = True
1057
+ _supports_sdpa = True
1058
+
1059
+ def _init_weights(self, module):
1060
+ std = self.config.initializer_range
1061
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
1062
+ module.weight.data.normal_(mean=0.0, std=std)
1063
+ if module.bias is not None:
1064
+ module.bias.data.zero_()
1065
+ elif isinstance(module, nn.Embedding):
1066
+ module.weight.data.normal_(mean=0.0, std=std)
1067
+ if module.padding_idx is not None:
1068
+ module.weight.data[module.padding_idx].zero_()
1069
+
1070
+
1071
+ GRANITE_SPEECH_INPUTS_DOCSTRING = r"""
1072
+ Args:
1073
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1074
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1075
+ it.
1076
+
1077
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1078
+ [`PreTrainedTokenizer.__call__`] for details.
1079
+
1080
+ [What are input IDs?](../glossary#input-ids)
1081
+ input_features (`torch.FloatTensor` of shape `(batch_size, audio seq len, mel feat dim)`):
1082
+ The tensors corresponding to the input audios. input features can be obtained using
1083
+ [`AutoFeatureExtractor`]. See [`GraniteSpeechFeatureExtractor.__call__`] for details.
1084
+ [`GraniteSpeechProcessor`] uses [`GraniteSpeechFeatureExtractor`] for processing audio.
1085
+ input_features_mask (`torch.Tensor`, *optional*):
1086
+ Mask for extracted audio features that should be ignored when creating the merged
1087
+ multimodal representation (i.e., due to padding).
1088
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1089
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1090
+
1091
+ - 1 for tokens that are **not masked**,
1092
+ - 0 for tokens that are **masked**.
1093
+
1094
+ [What are attention masks?](../glossary#attention-mask)
1095
+
1096
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1097
+ [`PreTrainedTokenizer.__call__`] for details.
1098
+
1099
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1100
+ `past_key_values`).
1101
+
1102
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1103
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1104
+ information on the default strategy.
1105
+
1106
+ - 1 indicates the head is **not masked**,
1107
+ - 0 indicates the head is **masked**.
1108
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1109
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1110
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
1111
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1112
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
1113
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
1114
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1115
+
1116
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1117
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1118
+
1119
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1120
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1121
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1122
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1123
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1124
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1125
+ model's internal embedding lookup matrix.
1126
+ use_cache (`bool`, *optional*):
1127
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1128
+ `past_key_values`).
1129
+ output_attentions (`bool`, *optional*):
1130
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1131
+ tensors for more detail.
1132
+ output_hidden_states (`bool`, *optional*):
1133
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1134
+ more detail.
1135
+ return_dict (`bool`, *optional*):
1136
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1137
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1138
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1139
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1140
+ the complete sequence length.
1141
+ """
1142
+
1143
+
1144
+ @add_start_docstrings(
1145
+ """The Granite Speech model, which consists of an audio encoder, projector, and language model.""",
1146
+ GRANITE_SPEECH_START_DOCSTRING,
1147
+ )
1148
+ class GraniteSpeechForConditionalGeneration(GraniteSpeechPreTrainedModel, GenerationMixin):
1149
+ def __init__(self, config: GraniteSpeechConfig):
1150
+ super().__init__(config)
1151
+ # NOTE: It doesn't matter when we initialize from config, but we should be careful
1152
+ # to make sure this does not pick up the adapter_config if in the future we use
1153
+ # from_pretrained or something similar, since that should be set by the composite
1154
+ # model; don't need to consider it twice
1155
+ self.language_model = AutoModelForCausalLM.from_config(config.text_config)
1156
+
1157
+ if self.language_model._tied_weights_keys is not None:
1158
+ self._tied_weights_keys = [f"language_model.{k}" for k in self.language_model._tied_weights_keys]
1159
+
1160
+ self.encoder = GraniteSpeechCTCModel(config.encoder_config)
1161
+ self.projector = GraniteSpeechEncoderProjectorQFormer(config.projector_config)
1162
+
1163
+ if config.has_lora_adapter and not is_peft_available():
1164
+ logger.warning(
1165
+ "Config indicates that a lora adapter should be present, but "
1166
+ "peft is not installed; this will cause the model to perform "
1167
+ "incorrectly when audio inputs are provided. Please install "
1168
+ "peft and reload the model!"
1169
+ )
1170
+
1171
+ self.post_init()
1172
+
1173
+ def set_input_embeddings(self, value):
1174
+ self.language_model.set_input_embeddings(value)
1175
+
1176
+ def set_output_embeddings(self, new_embeddings):
1177
+ self.language_model.set_output_embeddings(new_embeddings)
1178
+
1179
+ def get_input_embeddings(self):
1180
+ return self.language_model.get_input_embeddings()
1181
+
1182
+ def get_output_embeddings(self):
1183
+ return self.language_model.get_output_embeddings()
1184
+
1185
+ def get_audio_features(self, input_features):
1186
+ encoder_embeds = self.encoder(input_features)
1187
+ projected_embeds = self.projector(encoder_embeds, None)
1188
+ return projected_embeds
1189
+
1190
+ @add_start_docstrings_to_model_forward(GRANITE_SPEECH_INPUTS_DOCSTRING)
1191
+ @replace_return_docstrings(output_type=GraniteSpeechCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1192
+ def forward(
1193
+ self,
1194
+ input_ids: torch.LongTensor = None,
1195
+ input_features: torch.FloatTensor = None,
1196
+ input_features_mask: Optional[torch.Tensor] = None,
1197
+ attention_mask: Optional[torch.Tensor] = None,
1198
+ position_ids: Optional[torch.LongTensor] = None,
1199
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1200
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1201
+ labels: Optional[torch.LongTensor] = None,
1202
+ use_cache: Optional[bool] = None,
1203
+ output_attentions: Optional[bool] = None,
1204
+ output_hidden_states: Optional[bool] = None,
1205
+ return_dict: Optional[bool] = None,
1206
+ cache_position: Optional[torch.LongTensor] = None,
1207
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1208
+ **lm_kwargs,
1209
+ ) -> Union[Tuple[torch.Tensor], GraniteSpeechCausalLMOutputWithPast]:
1210
+ r"""
1211
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1212
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1213
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1214
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1215
+
1216
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
1217
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
1218
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1219
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1220
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
1221
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
1222
+
1223
+ Returns:
1224
+
1225
+ Example:
1226
+
1227
+ A minimal usage sketch is shown below; the checkpoint id, audio path, prompt, and generation settings are
+ illustrative placeholders, and loading through the Auto classes assumes the custom code shipped in this
+ repository is picked up via `trust_remote_code=True`.
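+ ```python
+ >>> import torchaudio
+ >>> from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
+
+ >>> model_id = "ibm-granite/granite-speech-3.2-8b"
+ >>> processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+ >>> model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, trust_remote_code=True)
+
+ >>> chat = [{"role": "user", "content": "<|audio|>can you transcribe the speech into written text?"}]
+ >>> prompt = processor.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+
+ >>> wav, sr = torchaudio.load("audio.wav", normalize=True)  # hypothetical 16 kHz mono file
+ >>> model_inputs = processor(prompt, wav, return_tensors="pt")
+ >>> generated = model.generate(**model_inputs, max_new_tokens=200)
+ >>> print(processor.tokenizer.decode(generated[0, model_inputs["input_ids"].shape[1] :], skip_special_tokens=True))
+ ```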
1228
+ """
1229
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1230
+ output_hidden_states = (
1231
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1232
+ )
1233
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1234
+
1235
+ if (input_ids is None) ^ (inputs_embeds is not None):
1236
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1237
+
1238
+ if input_features is not None and inputs_embeds is not None:
1239
+ raise ValueError(
1240
+ "You cannot specify both input_features and inputs_embeds at the same time, and must specify either one"
1241
+ )
1242
+
1243
+ if inputs_embeds is None:
1244
+ # Get the base embeddings; set all audio tokens to 0 index
1245
+ # to avoid out of vocabulary issues with the LLM embedding.
1246
+ # Audio features will be masked into is_audio_idx indices later.
1247
+ is_audio_idx = input_ids == self.config.audio_token_index
1248
+ llm_input_ids = input_ids.clone()
1249
+ llm_input_ids[is_audio_idx] = 0
1250
+ inputs_embeds = self.get_input_embeddings()(llm_input_ids)
1251
+
1252
+ if input_features is not None:
1253
+ if input_features.dtype != self.dtype:
1254
+ logger.warning(f"input features are casted to {self.dtype}")
1255
+ input_features = input_features.to(self.dtype)
1256
+ # Get the audio features from the encoder / projector
1257
+ audio_features = self.get_audio_features(input_features)
1258
+
1259
+ # Merge the audio features into the LLM embeddings
1260
+ inputs_embeds = self.get_merged_audio_embeddings(
1261
+ input_ids=input_ids, audio_features=audio_features, input_features_mask=input_features_mask
1262
+ )
1263
+
1264
+ outputs = self.language_model(
1265
+ attention_mask=attention_mask,
1266
+ position_ids=position_ids,
1267
+ past_key_values=past_key_values,
1268
+ inputs_embeds=inputs_embeds,
1269
+ use_cache=use_cache,
1270
+ output_attentions=output_attentions,
1271
+ output_hidden_states=output_hidden_states,
1272
+ return_dict=return_dict,
1273
+ cache_position=cache_position,
1274
+ logits_to_keep=logits_to_keep,
1275
+ **lm_kwargs,
1276
+ )
1277
+ logits = outputs[0]
1278
+
1279
+ loss = None
1280
+ if labels is not None:
1281
+ # Shift so that tokens < n predict n
1282
+ if attention_mask is not None:
1283
+ # we use the input attention mask to shift the logits and labels, because it is 2D.
1284
+ # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
1285
+ shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
1286
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
1287
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
1288
+ else:
1289
+ shift_logits = logits[..., :-1, :].contiguous()
1290
+ shift_labels = labels[..., 1:].contiguous()
1291
+ # Flatten the tokens
1292
+ loss_fct = nn.CrossEntropyLoss()
1293
+ loss = loss_fct(
1294
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
1295
+ )
1296
+
1297
+ if not return_dict:
1298
+ output = (logits,) + outputs[1:]
1299
+ return (loss,) + output if loss is not None else output
1300
+
1301
+ return GraniteSpeechCausalLMOutputWithPast(
1302
+ loss=loss,
1303
+ logits=logits,
1304
+ past_key_values=outputs.past_key_values,
1305
+ hidden_states=outputs.hidden_states,
1306
+ attentions=outputs.attentions,
1307
+ )
1308
+
1309
+ def prepare_inputs_for_generation(
1310
+ self,
1311
+ input_ids,
1312
+ past_key_values=None,
1313
+ inputs_embeds=None,
1314
+ input_features=None,
1315
+ attention_mask=None,
1316
+ cache_position=None,
1317
+ logits_to_keep=None,
1318
+ **kwargs,
1319
+ ):
1320
+ # Overwritten -- in specific circumstances we don't want to forward audio inputs to the model
1321
+
1322
+ model_inputs = self.language_model.prepare_inputs_for_generation(
1323
+ input_ids,
1324
+ past_key_values=past_key_values,
1325
+ inputs_embeds=inputs_embeds,
1326
+ attention_mask=attention_mask,
1327
+ cache_position=cache_position,
1328
+ logits_to_keep=logits_to_keep,
1329
+ **kwargs,
1330
+ )
1331
+
1332
+ # If we're in the cached decoding stage, input_features should be None because
1333
+ # the input ids no longer contain the special audio token. Otherwise, we need
1334
+ # the input feature values to be passed to the model.
1335
+ if cache_position[0] == 0:
1336
+ model_inputs["input_features"] = input_features
1337
+ return model_inputs
1338
+
1339
+ def get_merged_audio_embeddings(self, input_ids, audio_features, input_features_mask):
1340
+ """
1341
+ Merges the projected audio features into the LLM's input embeddings at the
1342
+ positions of the <|audio|> token; the placeholder embeddings at those
1343
+ positions are clobbered with the speech features.
1344
+
1345
+ TODO - This needs to be adapted to handle batches of variable length sequences
1346
+ and potentially labels.
1347
+ """
1348
+ is_audio_index = input_ids == self.config.audio_token_index
1349
+ llm_input_ids = torch.where(is_audio_index, 0, input_ids)
1350
+ inputs_embeds = self.language_model.get_input_embeddings()(llm_input_ids) # [bsz, # features, hidden size]
1351
+
1352
+ # Mask the audio features into the text embeddings
1353
+ special_audio_mask = is_audio_index.unsqueeze(-1)
1354
+ audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)[input_features_mask]
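+ # keep only the real (unpadded) audio features for each sample; the flattened result is
+ # scattered, in order, into the <|audio|> positions of the text embeddings below.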
1355
+ inputs_embeds = inputs_embeds.masked_scatter(
1356
+ special_audio_mask,
1357
+ audio_features,
1358
+ )
1359
+ return inputs_embeds
1360
+
1361
+ def generate(self, *args, **kwargs):
1362
+ """This model is expected to have a lora adapater, which is only
1363
+ enabled when considering audio inputs. As such, we override generate
1364
+ to conditionally enable / disable the lora adapter based on whether
1365
+ or not any input features were provided.
1366
+ """
1367
+ input_features = kwargs.pop("input_features", None)
1368
+ if is_peft_available() and self._hf_peft_config_loaded:
1369
+ if input_features is not None:
1370
+ self.enable_adapters()
1371
+ else:
1372
+ self.disable_adapters()
1373
+ return super().generate(*args, input_features=input_features, **kwargs)
1374
+
1375
+ def save_pretrained(self, *args, **kwargs):
1376
+ # overwrite save_pretrained to first save the adapter if we have one
1377
+ # NOTE - this will use the base model path we are exporting in the lora
1378
+ # adapter, which may not necessarily be the best behavior, but for now
1379
+ # we keep this for portability, since using the local dir causes problems
1380
+ # if the model is loaded from outside of the current working dir.
1381
+ if is_peft_available() and self._hf_peft_config_loaded:
1382
+ super().save_pretrained(*args, **kwargs)
1383
+ # Then save the base model afterwards
1384
+ self._hf_peft_config_loaded = False
1385
+ super().save_pretrained(*args, **kwargs)
1386
+
1387
+
1388
+ __all__ = [
1389
+ "GraniteSpeechForConditionalGeneration",
1390
+ "GraniteSpeechPreTrainedModel",
1391
+ "GraniteSpeechEncoderProjectorPreTrainedModel",
1392
+ "GraniteSpeechQFormerModel",
1393
+ ]
preprocessor_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {}
processing_granite_speech.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2025 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Granite Speech.
17
+ """
18
+
19
+ from collections.abc import Sequence
20
+ from typing import List, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+
25
+ from transformers.feature_extraction_utils import BatchFeature
26
+ from transformers.processing_utils import ProcessorMixin
27
+ from transformers.tokenization_utils import PreTokenizedInput, TextInput
28
+ from transformers.utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ # 🚨🚨🚨 HACK 🚨🚨🚨
34
+ # This is needed to avoid custom registration issues for now,
35
+ # since we have a custom subclass for the feature extractor as well.
36
+ import transformers
37
+ from .feature_extraction_granite_speech import GraniteSpeechFeatureExtractor
38
+ transformers.GraniteSpeechFeatureExtractor = GraniteSpeechFeatureExtractor
39
+ # The above code is the only change in the modeling code from the following
40
+ # commit on Alex's fork: 397e03a4d76c5f3d8a651e47ade9f27c635e1617
41
+
42
+ class GraniteSpeechProcessor(ProcessorMixin):
43
+ attributes = ["feature_extractor", "tokenizer"]
44
+ valid_kwargs = ["audio_token"]
45
+
46
+ feature_extractor_class = "GraniteSpeechFeatureExtractor"
47
+ tokenizer_class = "AutoTokenizer"
48
+
49
+ def __init__(
50
+ self,
51
+ feature_extractor,
52
+ tokenizer,
53
+ audio_token="<|audio|>",
54
+ ):
55
+ self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
56
+ super().__init__(feature_extractor, tokenizer)
57
+
58
+ def __call__(
59
+ self,
60
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
61
+ audios: Union[torch.Tensor, List[torch.Tensor]] = None,
62
+ device: str = "cpu",
63
+ **kwargs,
64
+ ) -> BatchFeature:
65
+ speech_inputs = {}
66
+ text_inputs = {}
67
+
68
+ text = self._get_validated_text(text)
69
+ expected_num_audios = sum(t.count(self.audio_token) for t in text)
70
+
71
+ if audios is not None:
72
+ audios, audio_lengths = self._get_validated_audios(audios)
73
+ if any(text.count(self.audio_token) != 1 for text in text):
74
+ raise ValueError("Only one audio sample is currently supported per input")
75
+ if len(audio_lengths) != expected_num_audios:
76
+ raise ValueError("Text/Audio mismatch. The number of audios and audio tokens do not match")
77
+
78
+ # Calculate Mel features & the number of placeholders we will need
79
+ speech_inputs["input_features"] = self.feature_extractor(
80
+ audios,
81
+ device=device,
82
+ )
83
+ num_audio_features = self.feature_extractor._get_num_audio_features(audio_lengths)
84
+ speech_inputs["input_features_mask"] = torch.arange(max(num_audio_features)).view(1, -1) <= torch.tensor(
85
+ num_audio_features
86
+ ).view(-1, 1)
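+ # per sample, this marks which of the max(num_audio_features) projector outputs correspond
+ # to real audio (True) rather than block padding (False).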
87
+
88
+ # duplicate the audio placeholders to match the feature dims
89
+ text = self._expand_audio_placeholders(text, num_audio_features)
90
+ else:
91
+ assert expected_num_audios == 0, "No audio was provided, so the text should contain no audio tokens"
92
+
93
+ text_inputs = self.tokenizer(text, padding=True, **kwargs)
94
+ return BatchFeature(data={**text_inputs, **speech_inputs})
95
+
96
+ def _expand_audio_placeholders(self, text: List[str], num_audio_features: List[int]):
97
+ """
98
+ Expands audio placeholders in the formatted text to match the number of
99
+ features of the corresponding embeddings; we can use the resulting text
100
+ to conveniently mask the audio features into the text embeddings.
101
+ """
102
+ prompt_strings = []
103
+ num_replaced = 0
104
+ for sample in text:
105
+ while self.audio_token in sample:
106
+ sample = sample.replace(
107
+ self.audio_token,
108
+ "<placeholder>" * num_audio_features[num_replaced],
109
+ 1,
110
+ )
111
+ num_replaced += 1
112
+ prompt_strings.append(sample)
113
+
114
+ prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
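+ # e.g. an utterance whose projector output has 24 feature vectors turns its single
+ # <|audio|> placeholder into 24 consecutive <|audio|> tokens, one per feature vector.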
115
+ return prompt_strings
116
+
117
+ ##### Validation
118
+ def _get_validated_text(self, text: Union[str, list]) -> List[str]:
119
+ if isinstance(text, str):
120
+ return [text]
121
+ elif isinstance(text, list) and isinstance(text[0], str):
122
+ return text
123
+ raise TypeError("Invalid text provided! Text should be a string or list of strings.")
124
+
125
+ def _get_validated_audios(self, audios):
126
+ # Coerce to PyTorch tensors if we have numpy arrays, since
127
+ # currently we have a dependency on torch/torchaudio anyway
128
+ if isinstance(audios, np.ndarray):
129
+ audios = torch.from_numpy(audios)
130
+ elif isinstance(audios, Sequence) and isinstance(audios[0], np.ndarray):
131
+ audios = [torch.from_numpy(arr) for arr in audios]
132
+
133
+ if isinstance(audios, torch.Tensor):
134
+ if audios.ndim == 1:
135
+ audios = audios.unsqueeze(0)
136
+ if not torch.is_floating_point(audios):
137
+ raise ValueError("Invalid audio provided. Audio should be a floating point between 0 and 1")
138
+
139
+ if audios.shape[0] > 1:
140
+ logger.warning("Audio samples are already collated; assuming they all have the same length")
141
+ lengths = [audios.shape[-1]] * audios.shape[0]
142
+ return audios, lengths
143
+
144
+ elif isinstance(audios, Sequence) and isinstance(audios[0], torch.Tensor):
145
+ if not torch.is_floating_point(audios[0]):
146
+ raise ValueError("Invalid audio provided. Audio should be a floating point between 0 and 1")
147
+ lengths = [audio.shape[-1] for audio in audios]
148
+ padding = [max(lengths) - length for length in lengths]
149
+ # ensure all audios have a batch dimension:
150
+ audios = [audio.view(1, -1) for audio in audios]
151
+ padded = [torch.nn.functional.pad(audio, (0, pad)) for audio, pad in zip(audios, padding)]
152
+ audios = torch.cat(padded, dim=0)
153
+ return audios, lengths
154
+
155
+ raise TypeError("Invalid audio provided. Audio should be a one or more torch tensors or numpy arrays")
156
+
157
+
158
+ __all__ = ["GraniteSpeechProcessor"]
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|start_of_role|>",
4
+ "<|end_of_role|>",
5
+ "<|tool_call|>"
6
+ ],
7
+ "bos_token": {
8
+ "content": "<|end_of_text|>",
9
+ "lstrip": false,
10
+ "normalized": false,
11
+ "rstrip": false,
12
+ "single_word": false
13
+ },
14
+ "eos_token": {
15
+ "content": "<|end_of_text|>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "pad_token": {
22
+ "content": "<|end_of_text|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false
27
+ },
28
+ "unk_token": {
29
+ "content": "<|end_of_text|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ }
35
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<|end_of_text|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<fim_prefix>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "<fim_middle>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "3": {
30
+ "content": "<fim_suffix>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "4": {
38
+ "content": "<fim_pad>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "5": {
46
+ "content": "<filename>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "6": {
54
+ "content": "<gh_stars>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "7": {
62
+ "content": "<issue_start>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "8": {
70
+ "content": "<issue_comment>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "9": {
78
+ "content": "<issue_closed>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "10": {
86
+ "content": "<jupyter_start>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "11": {
94
+ "content": "<jupyter_text>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "12": {
102
+ "content": "<jupyter_code>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "13": {
110
+ "content": "<jupyter_output>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "14": {
118
+ "content": "<empty_output>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": true
124
+ },
125
+ "15": {
126
+ "content": "<commit_before>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": true
132
+ },
133
+ "16": {
134
+ "content": "<commit_msg>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": true
140
+ },
141
+ "17": {
142
+ "content": "<commit_after>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": true
148
+ },
149
+ "18": {
150
+ "content": "<reponame>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": true
156
+ },
157
+ "49152": {
158
+ "content": "<|start_of_role|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": true
164
+ },
165
+ "49153": {
166
+ "content": "<|end_of_role|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": true
172
+ },
173
+ "49154": {
174
+ "content": "<|tool_call|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": true
180
+ },
181
+ "49155": {
182
+ "content": "<|audio|>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": true
188
+ }
189
+ },
190
+ "additional_special_tokens": [
191
+ "<|start_of_role|>",
192
+ "<|end_of_role|>",
193
+ "<|tool_call|>"
194
+ ],
195
+ "bos_token": "<|end_of_text|>",
196
+ "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"Knowledge Cutoff Date: April 2024.\nToday's Date: \" + strftime_now('%B %d, %Y') + \".\nYou are Granite, developed by IBM.\" %}\n {%- if tools and documents %}\n {%- set system_message = system_message + \" You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\n\nWrite the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif tools %}\n {%- set system_message = system_message + \" You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}\n {%- elif documents %}\n {%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif thinking %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts after 'Here is my thought process:' and write your response after 'Here is my response:' for each user query.\" %}\n {%- else %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\" %} \n {%- endif %}\n {%- if 'citations' in controls and documents %}\n {%- set system_message = system_message + '\n\nIn your response, use the symbols <co> and </co> to indicate when a fact comes from a document in the search result, e.g <co>0</co> for a fact from document 0. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}\n {%- endif %}\n {%- if 'hallucinations' in controls and documents %}\n {%- set system_message = system_message + '\n\nFinally, after the response is written, include a numbered list of sentences from the response that are potentially hallucinated and not based in the documents.' 
%}\n {%- endif %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|>\n' }}\n{%- if tools %}\n {{- '<|start_of_role|>tools<|end_of_role|>' }}\n {{- tools | tojson(indent=4) }}\n {{- '<|end_of_text|>\n' }}\n{%- endif %}\n{%- if documents %}\n {{- '<|start_of_role|>documents<|end_of_role|>' }}\n {%- for document in documents %}\n {{- 'Document ' + loop.index0 | string + '\n' }}\n {{- document['text'] }}\n {%- if not loop.last %}\n {{- '\n\n'}}\n {%- endif%}\n {%- endfor %}\n {{- '<|end_of_text|>\n' }}\n{%- endif %}\n{%- for message in loop_messages %}\n {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant' }}\n {%- if controls %}\n {{- ' ' + controls | tojson()}}\n {%- endif %}\n {{- '<|end_of_role|>' }}\n {%- endif %}\n{%- endfor %}",
197
+ "clean_up_tokenization_spaces": true,
198
+ "eos_token": "<|end_of_text|>",
199
+ "errors": "replace",
200
+ "extra_special_tokens": {},
201
+ "model_max_length": 9223372036854775807,
202
+ "pad_token": "<|end_of_text|>",
203
+ "padding_side": "left",
204
+ "tokenizer_class": "GPT2Tokenizer",
205
+ "unk_token": "<|end_of_text|>",
206
+ "vocab_size": 49152
207
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff