yangwang825 committed on
Commit
6796087
·
verified ·
1 Parent(s): bfdf73c

Upload EcapaTdnnForSequenceClassification

Browse files
Files changed (3) hide show
  1. config.json +6 -2
  2. model.safetensors +1 -1
  3. tdnn_attention.py +2 -2
config.json CHANGED
@@ -1,11 +1,14 @@
1
  {
2
- "_attn_implementation_autoset": true,
3
  "angular": true,
4
  "angular_margin": 0.2,
5
  "angular_scale": 30,
 
 
 
6
  "attention_channels": 128,
7
  "auto_map": {
8
- "AutoConfig": "configuration_ecapa_tdnn.EcapaTdnnConfig"
 
9
  },
10
  "bos_token_id": 1,
11
  "decoder_config": {
@@ -2577,6 +2580,7 @@
2577
  },
2578
  "time_masks": 5,
2579
  "time_width": 0.03,
 
2580
  "transformers_version": "4.48.3",
2581
  "use_torchaudio": true,
2582
  "use_vectorized_spec_augment": true,
 
1
  {
 
2
  "angular": true,
3
  "angular_margin": 0.2,
4
  "angular_scale": 30,
5
+ "architectures": [
6
+ "EcapaTdnnForSequenceClassification"
7
+ ],
8
  "attention_channels": 128,
9
  "auto_map": {
10
+ "AutoConfig": "configuration_ecapa_tdnn.EcapaTdnnConfig",
11
+ "AutoModelForAudioClassification": "modeling_ecapa_tdnn.EcapaTdnnForSequenceClassification"
12
  },
13
  "bos_token_id": 1,
14
  "decoder_config": {
 
2580
  },
2581
  "time_masks": 5,
2582
  "time_width": 0.03,
2583
+ "torch_dtype": "float32",
2584
  "transformers_version": "4.48.3",
2585
  "use_torchaudio": true,
2586
  "use_vectorized_spec_augment": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5e437b7e91fe5f7a7a3012f7afb06cb155c7a01cc0662e249cffb4f7a6cd6b52
3
  size 26039912
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4da89b0b6d405974f1e332bdc9945fae76222d7ddf0f955653fba9a00cca0339
3
  size 26039912
tdnn_attention.py CHANGED
@@ -273,8 +273,8 @@ class TdnnSeModule(nn.Module):
273
 
274
  self.apply(lambda x: init_weights(x, mode=init_mode))
275
 
276
- def forward(self, input, length=None):
277
- x = self.group_tdnn_block(input)
278
  x = self.se_layer(x, length)
279
  return x + input
280
 
 
273
 
274
  self.apply(lambda x: init_weights(x, mode=init_mode))
275
 
276
def forward(self, inputs, length=None):
    """Apply the grouped TDNN block and SE layer with a residual connection.

    Args:
        inputs: input activation tensor fed to the grouped TDNN block.
            (assumes the usual (batch, channels, time) layout of this
            module family — TODO confirm against callers)
        length: optional sequence lengths, forwarded to the SE layer.

    Returns:
        The SE-refined features plus the original ``inputs`` (residual add).
    """
    x = self.group_tdnn_block(inputs)
    x = self.se_layer(x, length)
    # BUG FIX: the parameter was renamed `input` -> `inputs`, but the
    # residual term was left as `input`, which now resolves to the Python
    # builtin function and would raise a TypeError on addition.
    return x + inputs
280