Ali Sartaz Khan committed
Commit 186927e · 1 Parent(s): 1658f1b
converted w2v2-large ckpt
Browse files
- config.json +108 -0
- dict.ltr.txt +28 -0
- facebook/wav2vec2-large/config.json +109 -0
- facebook/wav2vec2-large/model.safetensors +3 -0
- facebook/wav2vec2-large/preprocessor_config.json +8 -0
- libri960_big.pt +3 -0
- run_convert.sh +16 -0
- run_forward.py +152 -0
config.json
ADDED
@@ -0,0 +1,108 @@
+{
+  "activation_dropout": 0.1,
+  "adapter_attn_dim": null,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForPreTraining"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": false,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 0,
+  "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "transformers_version": "4.49.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
+}
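This is the stock Wav2Vec2 large pre-training configuration (24 transformer layers, hidden size 1024, 7-layer convolutional feature extractor, vocab_size 32). As a quick sanity check, it can be loaded with transformers; a minimal sketch, assuming the file sits at the repository root as ./config.json:

from transformers import Wav2Vec2Config

# Load the committed config from the current directory (reads ./config.json).
config = Wav2Vec2Config.from_pretrained("./")
assert config.model_type == "wav2vec2"
assert config.hidden_size == 1024 and config.num_hidden_layers == 24
print(config.architectures)  # ['Wav2Vec2ForPreTraining']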
dict.ltr.txt
ADDED
@@ -0,0 +1,28 @@
+| 94802
+E 51860
+T 38431
+A 33152
+O 31495
+N 28855
+I 28794
+H 27187
+S 26071
+R 23546
+D 18289
+L 16308
+U 12400
+M 10685
+W 10317
+C 9844
+F 9062
+G 8924
+Y 8226
+P 6890
+B 6339
+V 3936
+K 3456
+' 1023
+X 636
+J 598
+Q 437
+Z 213
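dict.ltr.txt is a fairseq letter dictionary: one token per line followed by its corpus count, with | marking word boundaries. A minimal sketch of turning it into an HF-style vocab, assuming the usual convention that four special tokens (<pad>, <s>, </s>, <unk>) occupy ids 0-3 ahead of the 28 letters, which is how the config arrives at vocab_size 32; the output filename vocab.json is illustrative:

import json

# Special tokens first, mirroring pad_token_id=0, bos_token_id=1, eos_token_id=2 in config.json.
vocab = {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3}
with open("dict.ltr.txt") as f:
    for line in f:
        if not line.strip():
            continue
        token = line.split()[0]  # drop the count column
        vocab[token] = len(vocab)

with open("vocab.json", "w") as f:
    json.dump(vocab, f)
print(len(vocab))  # 32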
facebook/wav2vec2-large/config.json
ADDED
@@ -0,0 +1,109 @@
+{
+  "activation_dropout": 0.1,
+  "adapter_attn_dim": null,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForPreTraining"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": false,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 0,
+  "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.49.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 32,
+  "xvector_output_dim": 512
+}
facebook/wav2vec2-large/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca232d88f02bf12c5e87575148aa99e8b1d11c98f2c9251a89bfd4d3e5512040
+size 1269574136
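Note: this is a Git LFS pointer; the stored weights are about 1.27 GB, consistent with roughly 317M parameters in float32 (1,269,574,136 bytes / 4 bytes per parameter ≈ 317M), which is the expected size of wav2vec2-large.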
facebook/wav2vec2-large/preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "do_normalize": true,
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000
+}
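With config.json, model.safetensors, and preprocessor_config.json in place, the facebook/wav2vec2-large/ folder is directly loadable with transformers. A minimal sketch of a forward pass, assuming a local checkout of this repository and 16 kHz mono audio; the bare Wav2Vec2Model is enough for feature extraction, and transformers may warn that the pre-training (quantizer) weights in the checkpoint go unused:

import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

path = "facebook/wav2vec2-large"  # the folder added in this commit
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(path)
model = Wav2Vec2Model.from_pretrained(path).eval()

waveform = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of silence at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(inputs.input_values).last_hidden_state
print(hidden_states.shape)  # torch.Size([1, 49, 1024])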
libri960_big.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c078e25708237c540e307b2687422792b17b8f0df8b63b8b07a4ddcbef66955c
+size 3173903620
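libri960_big.pt is the source fairseq checkpoint (about 3.2 GB, again an LFS pointer); the filename matches fairseq's wav2vec 2.0 Large model pre-trained on LibriSpeech 960h, which is what the commit message refers to as the converted w2v2-large ckpt.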
run_convert.sh
ADDED
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+hf_name=${1}
+ckpt=${2}
+dict=${3}
+
+curPath=$(pwd)
+
+cp ${dict} ${curPath}/dict.ltr.txt
+
+# load a config that is equal to the config of the model you wish to convert
+python -c "from transformers import Wav2Vec2Config; config = Wav2Vec2Config.from_pretrained('$hf_name'); config.save_pretrained('./');"
+
+# pretrained only
+eval "python /nlp/scr/askhan1/transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py --pytorch_dump_folder ${hf_name} --checkpoint_path ${ckpt} --config_path ./config.json --not_finetuned"
+# fine-tuned
+#eval "python ../transformers/src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py --pytorch_dump_folder ${hf_name} --checkpoint_path ${ckpt} --config_path ./config.json --dict_path ${curPath}/data/temp/dict.ltr.txt"
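Given the positional arguments above (HF model name, fairseq checkpoint, letter dictionary), the conversion in this commit was presumably run along the lines of: bash run_convert.sh facebook/wav2vec2-large libri960_big.pt dict.ltr.txt, using the active --not_finetuned branch since libri960_big.pt is a pre-training-only checkpoint.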
run_forward.py
ADDED
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+import datasets
+import fairseq
+import torch
+import os
+
+import soundfile as sf
+from datasets import load_dataset
+import sys
+from shutil import copyfile
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2Model, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
+
+hf_path = str(sys.argv[1])
+fairseq_wav2vec2_path = str(sys.argv[2])
+finetuned = bool(int(sys.argv[3]))
+
+
+if finetuned:
+    processor = Wav2Vec2Processor.from_pretrained(hf_path)
+    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
+        [fairseq_wav2vec2_path], arg_overrides={"data": "../add_wav2vec/data/temp"}
+    )
+    hf_model = Wav2Vec2ForCTC.from_pretrained(hf_path)
+else:
+    processor = Wav2Vec2FeatureExtractor.from_pretrained(hf_path)
+    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fairseq_wav2vec2_path])
+    hf_model = Wav2Vec2Model.from_pretrained(hf_path)
+
+model = model[0]
+model.eval()
+
+
+def test_feature_extractor(hf_feat_extractor, fsq_feat_extract, example_wav):
+    # set hf_feat_extractor.output to dummy
+    fsq_output = fsq_feat_extract(example_wav)
+    hf_output = hf_feat_extractor(example_wav)
+
+    assert (
+        hf_output.shape == fsq_output.shape
+    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
+    assert torch.allclose(hf_output, fsq_output, atol=1e-3)
+
+
+def test_full_encoder(hf_model, fsq_model, example_wav, attention_mask):
+    fsq_output = fsq_model(example_wav, padding_mask=attention_mask.ne(1), mask=False, features_only=True)["x"]
+    hf_output = hf_model(example_wav, attention_mask=attention_mask)[0]
+
+    assert (
+        hf_output.shape == fsq_output.shape
+    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
+    assert torch.allclose(hf_output, fsq_output, atol=1e-2)
+
+
+def test_full_model(hf_model, fsq_model, example_wav, attention_mask):
+    fsq_output = fsq_model(source=example_wav, padding_mask=attention_mask.ne(1))["encoder_out"]
+    hf_output = hf_model(example_wav, attention_mask=attention_mask)[0].transpose(0, 1)
+
+    assert (
+        hf_output.shape == fsq_output.shape
+    ), f"Shapes don't match. Got {hf_output.shape} for HF and {fsq_output.shape} for fsq"
+    assert torch.allclose(hf_output, fsq_output, atol=1e-2)
+
+
+def test_loss(hf_model, fsq_model, example_wav, attention_mask, target):
+    from fairseq.criterions.ctc import CtcCriterion, CtcCriterionConfig
+    from fairseq.tasks.audio_pretraining import AudioPretrainingConfig, AudioPretrainingTask
+    audio_cfg = AudioPretrainingConfig(labels="ltr", data="./data")
+    task = AudioPretrainingTask.setup_task(audio_cfg)
+    ctc = CtcCriterion(CtcCriterionConfig(), task)
+    fsq_model.train()
+
+    labels_dict = processor.tokenizer(target, padding="longest", return_tensors="pt")
+    labels = labels_dict.input_ids
+    target_lengths = labels_dict.attention_mask.sum(-1)
+
+    sample = {
+        "net_input": {
+            "source": example_wav,
+            "padding_mask": attention_mask.ne(1),
+        },
+        "target": labels,
+        "target_lengths": target_lengths,
+        "id": torch.zeros((1,)),
+    }
+
+    loss, _, _ = ctc(fsq_model, sample)
+
+    labels = labels_dict.attention_mask * labels + (1 - labels_dict.attention_mask) * -100
+
+    hf_model.config.ctc_loss_reduction = "mean"
+    hf_loss = hf_model(example_wav, attention_mask=attention_mask, labels=labels).loss
+
+    print("Loss", loss)
+    print("Hf loss", hf_loss)
+
+
+def test_all(example_wav, attention_mask):
+    with torch.no_grad():
+        if finetuned:
+            test_feature_extractor(
+                hf_model.wav2vec2.feature_extractor, model.w2v_encoder.w2v_model.feature_extractor, example_wav
+            )
+        else:
+            test_feature_extractor(
+                hf_model.feature_extractor, model.feature_extractor, example_wav
+            )
+    print("Succeeded feature extractor test")
+
+    with torch.no_grad():
+        # IMPORTANT: It is assumed that layer_norm_first is FALSE
+        # This is the case for `wav2vec_small_960h.pt`, but might not be for all models
+        # Adapt if necessary
+        if finetuned:
+            test_full_encoder(hf_model.wav2vec2, model.w2v_encoder.w2v_model, example_wav, attention_mask)
+        else:
+            test_full_encoder(hf_model, model, example_wav, attention_mask)
+    print("Succeeded full encoder test")
+
+    if finetuned:
+        with torch.no_grad():
+            # IMPORTANT: It is assumed that layer_norm_first is FALSE
+            # This is the case for `wav2vec_small_960h.pt`, but might not be for all models
+            # Adapt if necessary
+            test_full_model(hf_model, model, example_wav, attention_mask)
+        print("Succeeded full model test")
+
+
+dummy_speech_data = datasets.load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
+
+
+def map_to_array(batch):
+    speech_array, _ = sf.read(batch["file"])
+    batch["speech"] = speech_array
+    return batch
+
+def map_to_array_mp3(batch, i):
+    speech_array, sr = sf.read(f"/home/patrick/hugging_face/add_wav2vec/common_voice/cv-corpus-6.1-2020-12-11/nl/converted/sample_{i}.wav")
+    batch["speech"] = speech_array
+    batch["sampling_rate"] = sr
+    return batch
+
+
+dummy_speech_data = dummy_speech_data.map(map_to_array, remove_columns=["file"])
+inputs = processor(dummy_speech_data[:3]["speech"], return_tensors="pt", padding="longest", return_attention_mask=True)
+
+transcription = dummy_speech_data[:3]["text"]
+
+input_values = inputs.input_values
+attention_mask = inputs.attention_mask
+
+test_all(input_values, attention_mask)
+#test_loss(hf_model, model, input_values, attention_mask, transcription)
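run_forward.py is the parity check between the original fairseq model and the converted transformers model (feature extractor, full encoder, and, for fine-tuned checkpoints, the CTC head and loss). From the sys.argv reads at the top, it is presumably invoked as python run_forward.py <hf_path> <fairseq_checkpoint> <finetuned: 0 or 1>, e.g. python run_forward.py facebook/wav2vec2-large libri960_big.pt 0 for this pre-trained-only conversion.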