Audio-to-Audio
Safetensors
Japanese
xcodec2
speech
Not-For-All-Audiences
OmniAICreator committed on
Commit
9c49e5f
·
verified ·
1 Parent(s): 5a306f0

Add files using upload-large-folder tool

Browse files
ckpt/final.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57cabb38fafa6f376df8bc6c159425a8a3212363eb3d16ea56975119e8a4b510
3
+ size 6450664823
config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XCodec2Model"
4
+ ],
5
+ "codec_decoder_hidden_size": 1024,
6
+ "codec_encoder_hidden_size": 1024,
7
+ "model_type": "xcodec2",
8
+ "semantic_hidden_size": 1024,
9
+ "torch_dtype": "float32",
10
+ "transformers_version": "4.48.0",
11
+ "use_vocos": true
12
+ }
configuration_bigcodec.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import PretrainedConfig

class BigCodecConfig(PretrainedConfig):
    """Configuration for the BigCodec / XCodec2 speech codec model.

    Holds the hidden sizes of the semantic encoder and the acoustic codec
    encoder/decoder, plus a flag selecting the Vocos-style decoder.

    Args:
        semantic_hidden_size: Hidden size of the semantic-feature branch.
        codec_encoder_hidden_size: Hidden size of the acoustic codec encoder.
        codec_decoder_hidden_size: Hidden size of the acoustic codec decoder.
        use_vocos: Whether the Vocos-style decoder is used.
        **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``torch_dtype``).
    """

    # NOTE(review): config.json in this repo declares model_type "xcodec2",
    # not "bigcodec" — confirm which identifier is intended.
    model_type = "bigcodec"

    def __init__(
        self,
        # These are example hyper-parameters (original comment: 下面这些只是示例超参)
        semantic_hidden_size: int = 1024,
        codec_encoder_hidden_size: int = 1024,
        codec_decoder_hidden_size: int = 1024,
        use_vocos: bool = True,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.semantic_hidden_size = semantic_hidden_size
        self.codec_encoder_hidden_size = codec_encoder_hidden_size
        self.codec_decoder_hidden_size = codec_decoder_hidden_size
        self.use_vocos = use_vocos
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7fb997858dbb0e866a10db7f4b31d4e4006a0cb12188311459b34d7b9094253
3
+ size 3291106408
modeling_xcodec2.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from transformers import PreTrainedModel
4
+ from configuration_bigcodec import BigCodecConfig
5
+
6
+ # 请确保这些模块路径是正确的
7
+ from vq.codec_encoder import CodecEncoder_Transformer
8
+ from vq.codec_decoder_vocos import CodecDecoderVocos
9
+ from vq.module import SemanticEncoder
10
+ from transformers import AutoFeatureExtractor, Wav2Vec2BertModel
11
+
12
class XCodec2Model(PreTrainedModel):
    """Neural speech codec combining a Wav2Vec2-BERT semantic branch with a
    BigCodec acoustic encoder and a Vocos-style decoder.

    Pipeline: waveform -> (semantic features + acoustic features) -> channel
    concat -> ``fc_prior`` -> vector quantization -> ``fc_post_a`` -> decoder
    -> reconstructed waveform. ``forward`` runs the full round trip;
    ``encode_code`` / ``decode_code`` expose the two halves separately
    (both under ``torch.no_grad()``).
    """

    config_class = BigCodecConfig

    def __init__(self, config: BigCodecConfig):
        super().__init__(config)

        # 1) Semantic model (pretrained, put into eval mode at construction).
        self.semantic_model = Wav2Vec2BertModel.from_pretrained(
            "facebook/w2v-bert-2.0",
            output_hidden_states=True,
        )
        self.semantic_model.eval()

        self.SemanticEncoder_module = SemanticEncoder(
            config.semantic_hidden_size,
            config.semantic_hidden_size,
            config.semantic_hidden_size,
        )

        # 2) Acoustic codec encoder.
        self.CodecEnc = CodecEncoder_Transformer()

        # 3) Acoustic codec decoder (also hosts the vector quantizer).
        self.generator = CodecDecoderVocos()

        # 4) Projections around the quantizer.
        #    2048 = semantic (1024) + acoustic (1024) concatenated channels.
        self.fc_prior = nn.Linear(2048, 2048)
        self.fc_post_a = nn.Linear(2048, 1024)

        self.feature_extractor = AutoFeatureExtractor.from_pretrained(
            "facebook/w2v-bert-2.0"
        )

    def _encode_concat_emb(self, input_waveform, sample_rate):
        """Shared encoder front-end used by both ``forward`` and ``encode_code``.

        Extracts semantic and acoustic features, truncates both branches to a
        common frame count, concatenates them on the channel axis and applies
        ``fc_prior``.

        Args:
            input_waveform: [batch_size, waveform_length] audio tensor.
            sample_rate: Sampling rate handed to the feature extractor.

        Returns:
            concat_emb: [batch, 2048, frames] tensor, ready for quantization.
        """
        # 1) Feature extraction (padding could be applied here if needed).
        input_features = self.feature_extractor(
            input_waveform,
            sampling_rate=sample_rate,
            return_tensors="pt",
        ).input_features.to(self.device)  # [batch, frames, feat_dim]

        # 2) Semantic branch: hidden layer 16 of Wav2Vec2-BERT.
        semantic_output = self.semantic_model(input_features)
        semantic_hidden_16 = semantic_output.hidden_states[16]
        semantic_hidden_16 = semantic_hidden_16.transpose(1, 2)  # [batch, hidden_dim, frames]
        semantic_encoded = self.SemanticEncoder_module(semantic_hidden_16)

        # 3) Acoustic branch.
        wav = input_waveform.unsqueeze(1).to(self.device)  # [batch, 1, time]
        vq_emb = self.CodecEnc(wav)
        vq_emb = vq_emb.transpose(1, 2)  # -> [batch, 1024, frames]

        # Align the two branches' frame counts by truncating to the shorter
        # one (simple scheme; the original notes padding would also work).
        if vq_emb.shape[-1] != semantic_encoded.shape[-1]:
            min_len = min(vq_emb.shape[-1], semantic_encoded.shape[-1])
            vq_emb = vq_emb[:, :, :min_len]
            semantic_encoded = semantic_encoded[:, :, :min_len]

        # 4) Channel concat + pre-quantization projection.
        concat_emb = torch.cat([semantic_encoded, vq_emb], dim=1)  # [batch, 2048, frames]
        concat_emb = self.fc_prior(concat_emb.transpose(1, 2)).transpose(1, 2)
        return concat_emb

    def forward(self, input_waveform, sample_rate=16000):
        """Full analysis/synthesis round trip.

        This does not have to be named ``forward``, but keeping the core
        logic here makes the model pipeline-compatible.

        Args:
            input_waveform: [batch_size, waveform_length]
            sample_rate: Defaults to 16000.

        Returns:
            Reconstructed audio waveform (Tensor).
        """
        concat_emb = self._encode_concat_emb(input_waveform, sample_rate)

        # Quantize, then map indices back to embeddings.
        _, vq_code, _ = self.generator(concat_emb, vq=True)
        vq_post_emb = self.generator.quantizer.get_output_from_indices(
            vq_code.transpose(1, 2)
        )
        vq_post_emb = vq_post_emb.transpose(1, 2)

        # Post-quantization projection.
        vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)

        # Decode back to a waveform.
        recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]
        # recon_audio: [batch, time]
        return recon_audio

    def encode_code(self, input_waveform, sample_rate=16000):
        """Encode audio into quantized code indices (no gradients).

        Args:
            input_waveform: [batch_size, waveform_length]
            sample_rate: Defaults to 16000.

        Returns:
            vq_code: Quantized code indices (Tensor), [batch, frames].
        """
        with torch.no_grad():
            concat_emb = self._encode_concat_emb(input_waveform, sample_rate)
            _, vq_code, _ = self.generator(concat_emb, vq=True)
            return vq_code

    def decode_code(self, vq_code):
        """Decode quantized code indices back into audio (no gradients).

        Args:
            vq_code: Code indices (Tensor), [batch, frames].

        Returns:
            Decoded audio (Tensor), [batch, waveform_length].
        """
        with torch.no_grad():
            # Recover quantized embeddings from the indices.
            vq_post_emb = self.generator.quantizer.get_output_from_indices(
                vq_code.transpose(1, 2)
            )
            vq_post_emb = vq_post_emb.transpose(1, 2)

            # Post-quantization projection.
            vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)

            # Decode to a waveform.
            recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]
            return recon_audio