ayymen committed
Commit f65c04b · verified · 1 Parent(s): 1bfba97

Upload folder using huggingface_hub

best_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4c1f9b843f26a0ebcf81ebc2cfbc19ecd91c868e687e8273f63617b4403cd7
+ size 1042628926
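
The `.pth` entries in this commit are Git LFS pointer files: the diff records only each blob's `oid` (SHA-256) and `size` (~1 GB here), while the checkpoint bytes live in LFS storage. A minimal sketch for resolving a pointer to the actual checkpoint, assuming the `huggingface_hub` and `torch` packages and an illustrative repo id:

from huggingface_hub import hf_hub_download
import torch

# Downloads the real checkpoint behind the LFS pointer (repo_id is hypothetical).
ckpt_path = hf_hub_download(repo_id="<user>/<repo>", filename="best_model.pth")
state = torch.load(ckpt_path, map_location="cpu")
print(list(state.keys()))  # Coqui Trainer checkpoints typically include "model", "step", ...

Note that best_model.pth and best_model_188159.pth below share the same oid, so they resolve to identical content.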
best_model_188159.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4c1f9b843f26a0ebcf81ebc2cfbc19ecd91c868e687e8273f63617b4403cd7
+ size 1042628926
checkpoint_209000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c849b99e8cacbf5490f18a29115164d6711801968f0fa8b0b02a7e81aa77b549
+ size 1042628926
checkpoint_210000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86507207c3471aa7bab907703ad429de2c2b58d9931eb5f7bba91655120c1ff7
+ size 1042628926
config.json ADDED
@@ -0,0 +1,275 @@
+ {
+     "output_path": "yourtts_luo",
+     "logger_uri": null,
+     "run_name": "YourTTS-Luo",
+     "project_name": "YourTTS",
+     "run_description": "\n - YourTTS trained using the Luo OpenBible dataset.\n ",
+     "print_step": 50,
+     "plot_step": 100,
+     "model_param_stats": false,
+     "wandb_entity": null,
+     "dashboard_logger": "tensorboard",
+     "save_on_interrupt": true,
+     "log_model_step": 1000,
+     "save_step": 1000,
+     "save_n_checkpoints": 2,
+     "save_checkpoints": true,
+     "save_all_best": false,
+     "save_best_after": 0,
+     "target_loss": "loss_1",
+     "print_eval": true,
+     "test_delay_epochs": 0,
+     "run_eval": true,
+     "run_eval_steps": null,
+     "distributed_backend": "nccl",
+     "distributed_url": "tcp://localhost:54321",
+     "mixed_precision": true,
+     "precision": "fp16",
+     "epochs": 1000,
+     "batch_size": 20,
+     "eval_batch_size": 20,
+     "grad_clip": [
+         1000,
+         1000
+     ],
+     "scheduler_after_epoch": true,
+     "lr": 0.001,
+     "optimizer": "AdamW",
+     "optimizer_params": {
+         "betas": [
+             0.8,
+             0.99
+         ],
+         "eps": 1e-09,
+         "weight_decay": 0.01
+     },
+     "lr_scheduler": null,
+     "lr_scheduler_params": null,
+     "use_grad_scaler": false,
+     "allow_tf32": false,
+     "cudnn_enable": true,
+     "cudnn_deterministic": false,
+     "cudnn_benchmark": false,
+     "training_seed": 54321,
+     "model": "vits",
+     "num_loader_workers": 8,
+     "num_eval_loader_workers": 0,
+     "use_noise_augment": false,
+     "audio": {
+         "fft_size": 1024,
+         "sample_rate": 24000,
+         "win_length": 1024,
+         "hop_length": 256,
+         "num_mels": 80,
+         "mel_fmin": 0.0,
+         "mel_fmax": null
+     },
+     "use_phonemes": false,
+     "phonemizer": null,
+     "phoneme_language": null,
+     "compute_input_seq_cache": true,
+     "text_cleaner": "no_cleaners",
+     "enable_eos_bos_chars": false,
+     "test_sentences_file": "",
+     "phoneme_cache_path": null,
+     "characters": {
+         "characters_class": "TTS.tts.models.vits.VitsCharacters",
+         "vocab_dict": null,
+         "pad": "_",
+         "eos": "&",
+         "bos": "*",
+         "blank": null,
+         "characters": "abcdefghijklmnoprstuvwyz\u02bc",
+         "punctuations": " !',.:;?\u2019",
+         "phonemes": null,
+         "is_unique": true,
+         "is_sorted": true
+     },
+     "add_blank": true,
+     "batch_group_size": 4,
+     "loss_masking": null,
+     "min_audio_len": 19200,
+     "max_audio_len": 288000,
+     "min_text_len": 1,
+     "max_text_len": Infinity,
+     "compute_f0": false,
+     "compute_energy": false,
+     "compute_linear_spec": true,
+     "precompute_num_workers": 12,
+     "start_by_longest": true,
+     "shuffle": false,
+     "drop_last": false,
+     "datasets": [
+         {
+             "formatter": "",
+             "dataset_name": "luo_openbible",
+             "path": "data",
+             "meta_file_train": "manifest_train.jsonl",
+             "ignored_speakers": null,
+             "language": "luo",
+             "phonemizer": "",
+             "meta_file_val": "manifest_dev.jsonl",
+             "meta_file_attn_mask": ""
+         }
+     ],
+     "test_sentences": [
+         [
+             "jo kolosai achiel.",
+             "one",
+             null,
+             "luo"
+         ],
+         [
+             "magoyo erokamano ni wuoro ka un gi mor.",
+             "one",
+             null,
+             "luo"
+         ],
+         [
+             "epafra bende nonyisowa kuom hera ma roho maler osemiyou.",
+             "one",
+             null,
+             "luo"
+         ]
+     ],
+     "eval_split_max_size": null,
+     "eval_split_size": 0.01,
+     "use_speaker_weighted_sampler": false,
+     "speaker_weighted_sampler_alpha": 1.0,
+     "use_language_weighted_sampler": false,
+     "language_weighted_sampler_alpha": 1.0,
+     "use_length_weighted_sampler": false,
+     "length_weighted_sampler_alpha": 1.0,
+     "model_args": {
+         "num_chars": 36,
+         "out_channels": 513,
+         "spec_segment_size": 62,
+         "hidden_channels": 192,
+         "hidden_channels_ffn_text_encoder": 768,
+         "num_heads_text_encoder": 2,
+         "num_layers_text_encoder": 10,
+         "kernel_size_text_encoder": 3,
+         "dropout_p_text_encoder": 0.1,
+         "dropout_p_duration_predictor": 0.5,
+         "kernel_size_posterior_encoder": 5,
+         "dilation_rate_posterior_encoder": 1,
+         "num_layers_posterior_encoder": 16,
+         "kernel_size_flow": 5,
+         "dilation_rate_flow": 1,
+         "num_layers_flow": 4,
+         "resblock_type_decoder": "2",
+         "resblock_kernel_sizes_decoder": [
+             3,
+             7,
+             11
+         ],
+         "resblock_dilation_sizes_decoder": [
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ]
+         ],
+         "upsample_rates_decoder": [
+             8,
+             8,
+             2,
+             2
+         ],
+         "upsample_initial_channel_decoder": 512,
+         "upsample_kernel_sizes_decoder": [
+             16,
+             16,
+             4,
+             4
+         ],
+         "periods_multi_period_discriminator": [
+             2,
+             3,
+             5,
+             7,
+             11
+         ],
+         "use_sdp": true,
+         "noise_scale": 1.0,
+         "inference_noise_scale": 0.667,
+         "length_scale": 1,
+         "noise_scale_dp": 1.0,
+         "inference_noise_scale_dp": 1.0,
+         "max_inference_len": null,
+         "init_discriminator": true,
+         "use_spectral_norm_disriminator": false,
+         "use_speaker_embedding": false,
+         "num_speakers": 0,
+         "speakers_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/speakers.pth",
+         "d_vector_file": [
+             "data/speakers.pth"
+         ],
+         "speaker_embedding_channels": 256,
+         "use_d_vector_file": true,
+         "d_vector_dim": 512,
+         "detach_dp_input": true,
+         "use_language_embedding": true,
+         "embedded_language_dim": 4,
+         "num_languages": 0,
+         "language_ids_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/language_ids.json",
+         "use_speaker_encoder_as_loss": false,
+         "speaker_encoder_config_path": "yourtts_luo/checkpoints_yourtts_cml_tts_dataset/config_se.json",
+         "speaker_encoder_model_path": "yourtts_luo/checkpoints_yourtts_cml_tts_dataset/model_se.pth",
+         "condition_dp_on_speaker": true,
+         "freeze_encoder": false,
+         "freeze_DP": false,
+         "freeze_PE": false,
+         "freeze_flow_decoder": false,
+         "freeze_waveform_decoder": false,
+         "encoder_sample_rate": null,
+         "interpolate_z": true,
+         "reinit_DP": false,
+         "reinit_text_encoder": false
+     },
+     "lr_gen": 0.0002,
+     "lr_disc": 0.0002,
+     "lr_scheduler_gen": "ExponentialLR",
+     "lr_scheduler_gen_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "lr_scheduler_disc": "ExponentialLR",
+     "lr_scheduler_disc_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "kl_loss_alpha": 1.0,
+     "disc_loss_alpha": 1.0,
+     "gen_loss_alpha": 1.0,
+     "feat_loss_alpha": 1.0,
+     "mel_loss_alpha": 45.0,
+     "dur_loss_alpha": 1.0,
+     "speaker_encoder_loss_alpha": 9.0,
+     "return_wav": true,
+     "use_weighted_sampler": false,
+     "weighted_sampler_attrs": null,
+     "weighted_sampler_multipliers": null,
+     "r": 1,
+     "num_speakers": 0,
+     "use_speaker_embedding": false,
+     "speakers_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/speakers.pth",
+     "speaker_embedding_channels": 256,
+     "language_ids_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/language_ids.json",
+     "use_language_embedding": true,
+     "use_d_vector_file": true,
+     "d_vector_file": [
+         "data/speakers.pth"
+     ],
+     "d_vector_dim": 512
+ }
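
With a checkpoint and this config, synthesis is straightforward. A minimal sketch, assuming the Coqui TTS package (`pip install TTS`) and that `best_model.pth`, `config.json`, and the referenced `data/speakers.pth` and `language_ids.json` paths resolve locally; the speaker name "one" and language "luo" come from the test sentences above:

from TTS.api import TTS

# Load the trained YourTTS-Luo model from local files (paths are illustrative).
tts = TTS(model_path="best_model.pth", config_path="config.json")
tts.tts_to_file(
    text="jo kolosai achiel.",
    speaker="one",       # speaker key used in the d-vector file
    language="luo",
    file_path="out.wav",
)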
config.json.tmp ADDED
@@ -0,0 +1,275 @@
+ {
+     "output_path": "yourtts_luo",
+     "logger_uri": null,
+     "run_name": "YourTTS-Luo",
+     "project_name": "YourTTS",
+     "run_description": "\n - YourTTS trained using the Luo OpenBible dataset.\n ",
+     "print_step": 50,
+     "plot_step": 100,
+     "model_param_stats": false,
+     "wandb_entity": null,
+     "dashboard_logger": "tensorboard",
+     "save_on_interrupt": true,
+     "log_model_step": 1000,
+     "save_step": 1000,
+     "save_n_checkpoints": 2,
+     "save_checkpoints": true,
+     "save_all_best": false,
+     "save_best_after": 0,
+     "target_loss": "loss_1",
+     "print_eval": true,
+     "test_delay_epochs": 0,
+     "run_eval": true,
+     "run_eval_steps": null,
+     "distributed_backend": "nccl",
+     "distributed_url": "tcp://localhost:54321",
+     "mixed_precision": true,
+     "precision": "fp16",
+     "epochs": 1000,
+     "batch_size": 20,
+     "eval_batch_size": 20,
+     "grad_clip": [
+         1000,
+         1000
+     ],
+     "scheduler_after_epoch": true,
+     "lr": 0.001,
+     "optimizer": "AdamW",
+     "optimizer_params": {
+         "betas": [
+             0.8,
+             0.99
+         ],
+         "eps": 1e-09,
+         "weight_decay": 0.01
+     },
+     "lr_scheduler": null,
+     "lr_scheduler_params": null,
+     "use_grad_scaler": false,
+     "allow_tf32": false,
+     "cudnn_enable": true,
+     "cudnn_deterministic": false,
+     "cudnn_benchmark": false,
+     "training_seed": 54321,
+     "model": "vits",
+     "num_loader_workers": 8,
+     "num_eval_loader_workers": 0,
+     "use_noise_augment": false,
+     "audio": {
+         "fft_size": 1024,
+         "sample_rate": 24000,
+         "win_length": 1024,
+         "hop_length": 256,
+         "num_mels": 80,
+         "mel_fmin": 0.0,
+         "mel_fmax": null
+     },
+     "use_phonemes": false,
+     "phonemizer": null,
+     "phoneme_language": null,
+     "compute_input_seq_cache": true,
+     "text_cleaner": "no_cleaners",
+     "enable_eos_bos_chars": false,
+     "test_sentences_file": "",
+     "phoneme_cache_path": null,
+     "characters": {
+         "characters_class": "TTS.tts.models.vits.VitsCharacters",
+         "vocab_dict": null,
+         "pad": "_",
+         "eos": "&",
+         "bos": "*",
+         "blank": null,
+         "characters": "abcdefghijklmnoprstuvwyz\u02bc",
+         "punctuations": " !',.:;?\u2019",
+         "phonemes": null,
+         "is_unique": true,
+         "is_sorted": true
+     },
+     "add_blank": true,
+     "batch_group_size": 4,
+     "loss_masking": null,
+     "min_audio_len": 19200,
+     "max_audio_len": 288000,
+     "min_text_len": 1,
+     "max_text_len": Infinity,
+     "compute_f0": false,
+     "compute_energy": false,
+     "compute_linear_spec": true,
+     "precompute_num_workers": 12,
+     "start_by_longest": true,
+     "shuffle": false,
+     "drop_last": false,
+     "datasets": [
+         {
+             "formatter": "",
+             "dataset_name": "luo_openbible",
+             "path": "data",
+             "meta_file_train": "manifest_train.jsonl",
+             "ignored_speakers": null,
+             "language": "luo",
+             "phonemizer": "",
+             "meta_file_val": "manifest_dev.jsonl",
+             "meta_file_attn_mask": ""
+         }
+     ],
+     "test_sentences": [
+         [
+             "jo kolosai achiel.",
+             "one",
+             null,
+             "luo"
+         ],
+         [
+             "magoyo erokamano ni wuoro ka un gi mor.",
+             "one",
+             null,
+             "luo"
+         ],
+         [
+             "epafra bende nonyisowa kuom hera ma roho maler osemiyou.",
+             "one",
+             null,
+             "luo"
+         ]
+     ],
+     "eval_split_max_size": null,
+     "eval_split_size": 0.01,
+     "use_speaker_weighted_sampler": false,
+     "speaker_weighted_sampler_alpha": 1.0,
+     "use_language_weighted_sampler": false,
+     "language_weighted_sampler_alpha": 1.0,
+     "use_length_weighted_sampler": false,
+     "length_weighted_sampler_alpha": 1.0,
+     "model_args": {
+         "num_chars": 36,
+         "out_channels": 513,
+         "spec_segment_size": 62,
+         "hidden_channels": 192,
+         "hidden_channels_ffn_text_encoder": 768,
+         "num_heads_text_encoder": 2,
+         "num_layers_text_encoder": 10,
+         "kernel_size_text_encoder": 3,
+         "dropout_p_text_encoder": 0.1,
+         "dropout_p_duration_predictor": 0.5,
+         "kernel_size_posterior_encoder": 5,
+         "dilation_rate_posterior_encoder": 1,
+         "num_layers_posterior_encoder": 16,
+         "kernel_size_flow": 5,
+         "dilation_rate_flow": 1,
+         "num_layers_flow": 4,
+         "resblock_type_decoder": "2",
+         "resblock_kernel_sizes_decoder": [
+             3,
+             7,
+             11
+         ],
+         "resblock_dilation_sizes_decoder": [
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ]
+         ],
+         "upsample_rates_decoder": [
+             8,
+             8,
+             2,
+             2
+         ],
+         "upsample_initial_channel_decoder": 512,
+         "upsample_kernel_sizes_decoder": [
+             16,
+             16,
+             4,
+             4
+         ],
+         "periods_multi_period_discriminator": [
+             2,
+             3,
+             5,
+             7,
+             11
+         ],
+         "use_sdp": true,
+         "noise_scale": 1.0,
+         "inference_noise_scale": 0.667,
+         "length_scale": 1,
+         "noise_scale_dp": 1.0,
+         "inference_noise_scale_dp": 1.0,
+         "max_inference_len": null,
+         "init_discriminator": true,
+         "use_spectral_norm_disriminator": false,
+         "use_speaker_embedding": false,
+         "num_speakers": 0,
+         "speakers_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/speakers.pth",
+         "d_vector_file": [
+             "data/speakers.pth"
+         ],
+         "speaker_embedding_channels": 256,
+         "use_d_vector_file": true,
+         "d_vector_dim": 512,
+         "detach_dp_input": true,
+         "use_language_embedding": true,
+         "embedded_language_dim": 4,
+         "num_languages": 0,
+         "language_ids_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/language_ids.json",
+         "use_speaker_encoder_as_loss": false,
+         "speaker_encoder_config_path": "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json",
+         "speaker_encoder_model_path": "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar",
+         "condition_dp_on_speaker": true,
+         "freeze_encoder": false,
+         "freeze_DP": false,
+         "freeze_PE": false,
+         "freeze_flow_decoder": false,
+         "freeze_waveform_decoder": false,
+         "encoder_sample_rate": null,
+         "interpolate_z": true,
+         "reinit_DP": false,
+         "reinit_text_encoder": false
+     },
+     "lr_gen": 0.0002,
+     "lr_disc": 0.0002,
+     "lr_scheduler_gen": "ExponentialLR",
+     "lr_scheduler_gen_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "lr_scheduler_disc": "ExponentialLR",
+     "lr_scheduler_disc_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "kl_loss_alpha": 1.0,
+     "disc_loss_alpha": 1.0,
+     "gen_loss_alpha": 1.0,
+     "feat_loss_alpha": 1.0,
+     "mel_loss_alpha": 45.0,
+     "dur_loss_alpha": 1.0,
+     "speaker_encoder_loss_alpha": 9.0,
+     "return_wav": true,
+     "use_weighted_sampler": false,
+     "weighted_sampler_attrs": null,
+     "weighted_sampler_multipliers": null,
+     "r": 1,
+     "num_speakers": 0,
+     "use_speaker_embedding": false,
+     "speakers_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/speakers.pth",
+     "speaker_embedding_channels": 256,
+     "language_ids_file": "yourtts_luo/YourTTS-Luo-February-19-2025_07+16PM-0000000/language_ids.json",
+     "use_language_embedding": true,
+     "use_d_vector_file": true,
+     "d_vector_file": [
+         "data/speakers.pth"
+     ],
+     "d_vector_dim": 512
+ }
events.out.tfevents.1739992596.fbcaa2f54084.30.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50bf1402dd27ee6fd4af7928671be7317711d417a1954bc14300fc5f33fa591b
+ size 183689712
language_ids.json ADDED
@@ -0,0 +1,3 @@
+ {
+     "luo": 0
+ }
speakers.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13c9d3177b5b8d0e597035fb1a5080a8765823ece136f0a760cd855bdd98e3b8
+ size 864
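
speakers.pth holds the speaker mapping referenced by the config's `speakers_file`/`d_vector_file` entries. It can be inspected with the same manager helper the training script below imports; a sketch, assuming the Coqui TTS package:

from TTS.tts.utils.managers import load_file

# The exact payload depends on which speakers file this is: the trainer's
# speaker-id map, or a d-vector file mapping clip keys to {"name", "embedding"}.
speaker_mapping = load_file("speakers.pth")
print(type(speaker_mapping))
print(list(speaker_mapping)[:5])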
train_yourtts.py ADDED
@@ -0,0 +1,315 @@
+ import os
+
+ import torch
+ from trainer import Trainer, TrainerArgs
+
+ from TTS.config import load_config
+ from TTS.config.shared_configs import BaseDatasetConfig
+ from TTS.tts.configs.vits_config import VitsConfig
+ from TTS.tts.datasets import load_tts_samples
+ from TTS.tts.models.vits import CharactersConfig, Vits, VitsArgs, VitsAudioConfig
+ from TTS.tts.utils.speakers import SpeakerManager
+ from TTS.tts.utils.managers import save_file
+ from tqdm import tqdm
+ import json
+ import gdown
+ import tarfile
+
+ torch.set_num_threads(24)
+
+
+ def nemo(root_path, meta_file, **kwargs):
+     """
+     Normalizes NeMo-style JSON manifest files to the TTS format.
+     """
+     meta_path = os.path.join(root_path, meta_file)
+     items = []
+     with open(meta_path, "r", encoding="utf-8") as ttf:
+         for line in ttf:
+             cols = json.loads(line)
+             wav_file = cols["audio_filepath"]
+             text = cols["text"]
+             speaker_name = cols.get("speaker_name", "one")
+             language = cols.get("language", "")
+             items.append({"text": text, "audio_file": wav_file, "speaker_name": speaker_name, "language": language, "root_path": root_path})
+     return items
+
+
+ def compute_embeddings(
+     model_path,
+     config_path,
+     output_path,
+     old_speakers_file=None,
+     old_append=False,
+     config_dataset_path=None,
+     formatter=None,
+     dataset_name=None,
+     dataset_path=None,
+     meta_file_train=None,
+     meta_file_val=None,
+     disable_cuda=False,
+     no_eval=False,
+ ):
+     use_cuda = torch.cuda.is_available() and not disable_cuda
+
+     if config_dataset_path is not None:
+         c_dataset = load_config(config_dataset_path)
+         meta_data_train, meta_data_eval = load_tts_samples(c_dataset.datasets, eval_split=not no_eval)
+     else:
+         c_dataset = BaseDatasetConfig()
+         c_dataset.dataset_name = dataset_name
+         c_dataset.path = dataset_path
+         if meta_file_train is not None:
+             c_dataset.meta_file_train = meta_file_train
+         if meta_file_val is not None:
+             c_dataset.meta_file_val = meta_file_val
+         meta_data_train, meta_data_eval = load_tts_samples(c_dataset, eval_split=not no_eval, formatter=formatter)
+
+     if meta_data_eval is None:
+         samples = meta_data_train
+     else:
+         samples = meta_data_train + meta_data_eval
+
+     encoder_manager = SpeakerManager(
+         encoder_model_path=model_path,
+         encoder_config_path=config_path,
+         d_vectors_file_path=old_speakers_file,
+         use_cuda=use_cuda,
+     )
+
+     class_name_key = encoder_manager.encoder_config.class_name_key
+
+     # Compute the speaker embeddings
+     if old_speakers_file is not None and old_append:
+         speaker_mapping = encoder_manager.embeddings
+     else:
+         speaker_mapping = {}
+
+     for fields in tqdm(samples):
+         class_name = fields[class_name_key]
+         audio_file = fields["audio_file"]
+         embedding_key = fields["audio_unique_name"]
+
+         # Only update the speaker name when the embedding is already in the old file.
+         if embedding_key in speaker_mapping:
+             speaker_mapping[embedding_key]["name"] = class_name
+             continue
+
+         if old_speakers_file is not None and embedding_key in encoder_manager.clip_ids:
+             # Get the embedding from the old file
+             embedd = encoder_manager.get_embedding_by_clip(embedding_key)
+         else:
+             # Extract the embedding
+             embedd = encoder_manager.compute_embedding_from_clip(audio_file)
+
+         # Create the speaker_mapping entry for this clip
+         speaker_mapping[embedding_key] = {}
+         speaker_mapping[embedding_key]["name"] = class_name
+         speaker_mapping[embedding_key]["embedding"] = embedd
+
+     if speaker_mapping:
+         # Save the speaker_mapping
+         if os.path.isdir(output_path):
+             mapping_file_path = os.path.join(output_path, "speakers.pth")
+         else:
+             mapping_file_path = output_path
+
+         if os.path.dirname(mapping_file_path) != "":
+             os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)
+
+         save_file(speaker_mapping, mapping_file_path)
+         print("Speaker embeddings saved at:", mapping_file_path)
+
+
+ # Name of the run for the Trainer
+ RUN_NAME = "YourTTS-Luo"
+
+ # Path where you want to save the model's outputs (configs, checkpoints and tensorboard logs)
+ OUT_PATH = "yourtts_luo"
+
+ # To do transfer learning and speed up your training, set here the path to the CML-TTS checkpoint, which can be downloaded from: https://drive.google.com/u/2/uc?id=1yDCSJ1pFZQTHhL09GMbOrdjcPULApa0p
+ RESTORE_PATH = os.path.join(OUT_PATH, "checkpoints_yourtts_cml_tts_dataset/best_model.pth")
+
+ URL = "https://drive.google.com/u/2/uc?id=1yDCSJ1pFZQTHhL09GMbOrdjcPULApa0p"
+ OUTPUT_CHECKPOINTS_FILEPATH = os.path.join(OUT_PATH, "checkpoints_yourtts_cml_tts_dataset.tar.bz")
+
+ # Download the CML-TTS checkpoint if it does not exist
+ if not os.path.exists(RESTORE_PATH):
+     print(f"Downloading the CML-TTS checkpoint from {URL}")
+     os.makedirs(OUT_PATH, exist_ok=True)  # make sure the download target directory exists
+     gdown.download(url=URL, output=OUTPUT_CHECKPOINTS_FILEPATH, quiet=False, fuzzy=True)
+     with tarfile.open(OUTPUT_CHECKPOINTS_FILEPATH, "r:bz2") as tar:
+         tar.extractall(OUT_PATH)
+ else:
+     print(f"Checkpoint already exists at {RESTORE_PATH}")
+
+ # This parameter is useful for debugging: it skips the training epochs and only runs the evaluation and produces the test sentences
+ SKIP_TRAIN_EPOCH = False
+
+ # Set here the batch size to be used in training and evaluation
+ BATCH_SIZE = 4
+
+ # Note: If you add new datasets, please make sure that the dataset sampling rate matches this parameter; otherwise, resample your audios
+ SAMPLE_RATE = 24000
+
+ # Max audio length in seconds to be used in training
+ MAX_AUDIO_LEN_IN_SECONDS = 12
+ # Min audio length in seconds to be used in training
+ MIN_AUDIO_LEN_IN_SECONDS = 0.8
+
+ dataset_conf = BaseDatasetConfig(
+     dataset_name="luo_openbible",
+     meta_file_train="manifest_train.jsonl",
+     meta_file_val="manifest_dev.jsonl",
+     language="luo",
+     path="data",
+ )
+
+ ### Extract speaker embeddings
+ SPEAKER_ENCODER_CHECKPOINT_PATH = (
+     "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar"
+ )
+ SPEAKER_ENCODER_CONFIG_PATH = "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json"
+
+ D_VECTOR_FILES = []  # List of speaker embedding/d-vector files to be used during training
+
+ # Check if the speaker embeddings are already computed; if not, compute them
+ embeddings_file = os.path.join(dataset_conf.path, "speakers.pth")
+ if not os.path.isfile(embeddings_file):
+     print(f">>> Computing the speaker embeddings for the {dataset_conf.dataset_name} dataset")
+     compute_embeddings(
+         SPEAKER_ENCODER_CHECKPOINT_PATH,
+         SPEAKER_ENCODER_CONFIG_PATH,
+         embeddings_file,
+         formatter=nemo,
+         dataset_name=dataset_conf.dataset_name,
+         dataset_path=dataset_conf.path,
+         meta_file_train=dataset_conf.meta_file_train,
+         meta_file_val=dataset_conf.meta_file_val,
+     )
+ D_VECTOR_FILES.append(embeddings_file)
+
+ # Audio config used in training.
+ audio_config = VitsAudioConfig(
+     sample_rate=SAMPLE_RATE,
+     hop_length=256,
+     win_length=1024,
+     fft_size=1024,
+     mel_fmin=0.0,
+     mel_fmax=None,
+     num_mels=80,
+ )
+
+ # Init VitsArgs, setting the arguments that are needed for the YourTTS model
+ model_args = VitsArgs(
+     spec_segment_size=62,
+     hidden_channels=192,
+     hidden_channels_ffn_text_encoder=768,
+     num_heads_text_encoder=2,
+     num_layers_text_encoder=10,
+     kernel_size_text_encoder=3,
+     dropout_p_text_encoder=0.1,
+     d_vector_file=D_VECTOR_FILES,
+     use_d_vector_file=True,
+     d_vector_dim=512,
+     speaker_encoder_model_path=SPEAKER_ENCODER_CHECKPOINT_PATH,
+     speaker_encoder_config_path=SPEAKER_ENCODER_CONFIG_PATH,
+     resblock_type_decoder="2",  # In the paper, we accidentally trained YourTTS using ResNet blocks of type 2; if you like, you can use ResNet blocks of type 1 like the VITS model
+     # Useful parameters to enable the Speaker Consistency Loss (SCL) described in the paper
+     use_speaker_encoder_as_loss=False,
+     # Useful parameters to enable multilingual training
+     use_language_embedding=True,
+     embedded_language_dim=4,
+ )
+
+ CHARS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'ʼ']
+ PUNCT = [' ', '!', "'", ',', '.', ':', ';', '?', '’']
+
+ # General training config; here you can change the batch size and other useful parameters
+ config = VitsConfig(
+     output_path=OUT_PATH,
+     model_args=model_args,
+     run_name=RUN_NAME,
+     project_name="YourTTS",
+     run_description="""
+         - YourTTS trained using the Luo OpenBible dataset.
+     """,
+     dashboard_logger="tensorboard",
+     logger_uri=None,
+     audio=audio_config,
+     batch_size=BATCH_SIZE,
+     batch_group_size=4,
+     eval_batch_size=BATCH_SIZE,
+     num_loader_workers=8,
+     # eval_split_max_size=256,
+     print_step=50,
+     plot_step=100,
+     # log_model_step=1000,
+     save_step=1000,
+     save_n_checkpoints=2,
+     save_checkpoints=True,
+     target_loss="loss_1",
+     print_eval=True,
+     compute_input_seq_cache=True,
+     add_blank=True,
+     text_cleaner="no_cleaners",
+     characters=CharactersConfig(
+         characters_class="TTS.tts.models.vits.VitsCharacters",
+         pad="_",
+         eos="&",
+         bos="*",
+         blank=None,
+         characters="".join(CHARS),
+         punctuations="".join(PUNCT),
+     ),
+     phoneme_cache_path=None,
+     precompute_num_workers=12,
+     start_by_longest=True,
+     datasets=[dataset_conf],
+     cudnn_benchmark=False,
+     min_audio_len=int(SAMPLE_RATE * MIN_AUDIO_LEN_IN_SECONDS),
+     max_audio_len=SAMPLE_RATE * MAX_AUDIO_LEN_IN_SECONDS,
+     mixed_precision=True,
+     test_sentences=[
+         ["jo kolosai achiel.", "one", None, "luo"],
+         ["magoyo erokamano ni wuoro ka un gi mor.", "one", None, "luo"],
+         ["epafra bende nonyisowa kuom hera ma roho maler osemiyou.", "one", None, "luo"],
+     ],
+     # Enable the weighted sampler
+     # use_weighted_sampler=True,
+     # Ensures that all speakers are seen in the training batch equally, no matter how many samples each speaker has
+     # weighted_sampler_attrs={"language": 1.0, "speaker_name": 1.0},
+     # weighted_sampler_attrs={"language": 1.0},
+     # weighted_sampler_multipliers={
+     #     # "speaker_name": {
+     #     # You can force the batching scheme to give a higher weight to a certain speaker; that speaker will then appear more frequently in the batch.
+     #     # This will speed up the speaker adaptation process. Consider the CML train dataset and "new_speaker" as the speaker name of the speaker that you want to adapt.
+     #     # The line below makes the balancer treat "new_speaker" as 106 speakers, i.e. 1/4 of the number of speakers present in the CML dataset.
+     #     # 'new_speaker': 106,  # (CML tot. train speakers)/4 = (424/4) = 106
+     #     # }
+     # },
+     # Set the Speaker Consistency Loss (SCL) α to 9, as in the YourTTS paper
+     speaker_encoder_loss_alpha=9.0,
+ )
+
+ # Load all the dataset samples and split training and evaluation sets
+ train_samples, eval_samples = load_tts_samples(
+     config.datasets,
+     eval_split=True,
+     formatter=nemo,
+     eval_split_max_size=config.eval_split_max_size,
+     eval_split_size=config.eval_split_size,
+ )
+
+ # Init the model
+ model = Vits.init_from_config(config)
+
+ # Init the trainer and 🚀
+ trainer = Trainer(
+     TrainerArgs(restore_path=RESTORE_PATH, skip_train_epoch=SKIP_TRAIN_EPOCH),
+     config,
+     output_path=OUT_PATH,
+     model=model,
+     train_samples=train_samples,
+     eval_samples=eval_samples,
+ )
+ trainer.fit()
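
For reference, the `nemo` formatter at the top of this script consumes `data/manifest_train.jsonl` with one JSON object per line; a sketch of a valid line (the file name and field values are illustrative, not taken from the actual dataset):

import json

# Hypothetical manifest line in the shape nemo() expects:
example_line = '{"audio_filepath": "wavs/verse_0001.wav", "text": "jo kolosai achiel.", "speaker_name": "one", "language": "luo"}'
print(json.loads(example_line)["text"])

With the manifests and audio under `data/`, running `python train_yourtts.py` downloads and extracts the CML-TTS checkpoint on first use, computes the speaker d-vectors if `data/speakers.pth` is missing, and then fine-tunes from the restored checkpoint.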
trainer_0_log.txt ADDED
The diff for this file is too large to render. See raw diff
 
trainer_1_log.txt ADDED
File without changes