ayymen committed (verified)
Commit cd44c6c · Parent(s): 9368369

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/trainer_0_log.txt filter=lfs diff=lfs merge=lfs -text
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/best_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d34291d44dcf70c64b9355ec48ef5543975a7faacb92ee962bf9cc2f01bdbc90
+ size 5649899013
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/best_model_1546195.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d34291d44dcf70c64b9355ec48ef5543975a7faacb92ee962bf9cc2f01bdbc90
+ size 5649899013
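The .pth entries in this commit are Git LFS pointers, not the weights themselves: three text lines giving the spec version, the sha256 object ID, and the payload size in bytes. (Note that best_model.pth and best_model_1546195.pth carry the same oid, so they reference the same checkpoint object.) A minimal sketch of reading a pointer, assuming the file in your checkout is still the un-smudged pointer text:

```python
# Minimal sketch: parse a Git LFS pointer file into a dict of its fields.
# Assumes the file is still pointer text ("key value" lines), not the downloaded binary.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

ptr = parse_lfs_pointer("GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/best_model.pth")
print(ptr["oid"], int(ptr["size"]))  # sha256:d34291... 5649899013
```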
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/checkpoint_1570000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e17fcbcd2457073555761070dcddf95a3c04307fec9123606fb9e50022e5ddf
+ size 5649899013
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/checkpoint_1580000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bfd38ad9a2c74a7a53bf8793d84045217b805e7b5ad1ae7d7caed7b5397a689
+ size 5649899013
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/config.json ADDED
@@ -0,0 +1,215 @@
+ {
+ "output_path": "xtts_chichewa",
+ "logger_uri": null,
+ "run_name": "GPT_XTTS_CHICHEWA_FT",
+ "project_name": "XTTS_trainer",
+ "run_description": [
+ "\n GPT XTTS training\n "
+ ],
+ "print_step": 50,
+ "plot_step": 100,
+ "model_param_stats": false,
+ "wandb_entity": null,
+ "dashboard_logger": "tensorboard",
+ "save_on_interrupt": true,
+ "log_model_step": 100,
+ "save_step": 10000,
+ "save_n_checkpoints": 2,
+ "save_checkpoints": true,
+ "save_all_best": false,
+ "save_best_after": 0,
+ "target_loss": null,
+ "print_eval": false,
+ "test_delay_epochs": 0,
+ "run_eval": true,
+ "run_eval_steps": null,
+ "distributed_backend": "nccl",
+ "distributed_url": "tcp://localhost:54321",
+ "mixed_precision": true,
+ "precision": "bf16",
+ "epochs": 1000,
+ "batch_size": 1,
+ "eval_batch_size": 1,
+ "grad_clip": 0.0,
+ "scheduler_after_epoch": true,
+ "lr": 5e-06,
+ "optimizer": "AdamW",
+ "optimizer_params": {
+ "betas": [
+ 0.9,
+ 0.96
+ ],
+ "eps": 1e-08,
+ "weight_decay": 0.01
+ },
+ "lr_scheduler": "MultiStepLR",
+ "lr_scheduler_params": {
+ "milestones": [
+ 5000,
+ 150000,
+ 300000
+ ],
+ "gamma": 0.5,
+ "last_epoch": -1
+ },
+ "use_grad_scaler": false,
+ "allow_tf32": false,
+ "cudnn_enable": true,
+ "cudnn_deterministic": false,
+ "cudnn_benchmark": false,
+ "training_seed": 54321,
+ "model": "xtts",
+ "num_loader_workers": 8,
+ "num_eval_loader_workers": 0,
+ "use_noise_augment": false,
+ "audio": {
+ "sample_rate": 22050,
+ "output_sample_rate": 24000,
+ "dvae_sample_rate": 22050
+ },
+ "use_phonemes": false,
+ "phonemizer": null,
+ "phoneme_language": null,
+ "compute_input_seq_cache": false,
+ "text_cleaner": null,
+ "enable_eos_bos_chars": false,
+ "test_sentences_file": "",
+ "phoneme_cache_path": null,
+ "characters": null,
+ "add_blank": false,
+ "batch_group_size": 0,
+ "loss_masking": null,
+ "min_audio_len": 1,
+ "max_audio_len": Infinity,
+ "min_text_len": 1,
+ "max_text_len": Infinity,
+ "compute_f0": false,
+ "compute_energy": false,
+ "compute_linear_spec": false,
+ "precompute_num_workers": 0,
+ "start_by_longest": false,
+ "shuffle": false,
+ "drop_last": false,
+ "datasets": [
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "",
+ "meta_file_train": "",
+ "ignored_speakers": null,
+ "language": "",
+ "phonemizer": "",
+ "meta_file_val": "",
+ "meta_file_attn_mask": ""
+ }
+ ],
+ "test_sentences": [
+ {
+ "text": "umene unafika kwa inu.",
+ "speaker_wav": [
+ "/app/data/clips/JOS_004_030.wav"
+ ],
+ "language": "nya"
+ },
+ {
+ "text": "tukiko adzakuwuzani zonse za ine.",
+ "speaker_wav": [
+ "/app/data/clips/JOS_004_030.wav"
+ ],
+ "language": "nya"
+ },
+ {
+ "text": "iye anachita mtendere kudzera m\u02bcmagazi ake, wokhetsedwa pa mtanda.",
+ "speaker_wav": [
+ "/app/data/clips/JOS_004_030.wav"
+ ],
+ "language": "nya"
+ }
+ ],
+ "eval_split_max_size": null,
+ "eval_split_size": 0.01,
+ "use_speaker_weighted_sampler": false,
+ "speaker_weighted_sampler_alpha": 1.0,
+ "use_language_weighted_sampler": false,
+ "language_weighted_sampler_alpha": 1.0,
+ "use_length_weighted_sampler": false,
+ "length_weighted_sampler_alpha": 1.0,
+ "model_args": {
+ "gpt_batch_size": 1,
+ "enable_redaction": false,
+ "kv_cache": true,
+ "gpt_checkpoint": "",
+ "clvp_checkpoint": null,
+ "decoder_checkpoint": null,
+ "num_chars": 255,
+ "tokenizer_file": "xtts_chichewa/XTTS_v2.0_original_model_files/vocab.json",
+ "gpt_max_audio_tokens": 605,
+ "gpt_max_text_tokens": 402,
+ "gpt_max_prompt_tokens": 70,
+ "gpt_layers": 30,
+ "gpt_n_model_channels": 1024,
+ "gpt_n_heads": 16,
+ "gpt_number_text_tokens": 8388,
+ "gpt_start_text_token": 261,
+ "gpt_stop_text_token": 0,
+ "gpt_num_audio_tokens": 1026,
+ "gpt_start_audio_token": 1024,
+ "gpt_stop_audio_token": 1025,
+ "gpt_code_stride_len": 1024,
+ "gpt_use_masking_gt_prompt_approach": true,
+ "gpt_use_perceiver_resampler": true,
+ "input_sample_rate": 22050,
+ "output_sample_rate": 24000,
+ "output_hop_length": 256,
+ "decoder_input_dim": 1024,
+ "d_vector_dim": 512,
+ "cond_d_vector_in_each_upsampling_layer": true,
+ "duration_const": 102400,
+ "min_conditioning_length": 11025,
+ "max_conditioning_length": 132300,
+ "gpt_loss_text_ce_weight": 0.01,
+ "gpt_loss_mel_ce_weight": 1.0,
+ "debug_loading_failures": true,
+ "max_wav_length": 264600,
+ "max_text_length": 300,
+ "mel_norm_file": "xtts_chichewa/XTTS_v2.0_original_model_files/mel_stats.pth",
+ "dvae_checkpoint": "xtts_chichewa/XTTS_v2.0_original_model_files/dvae.pth",
+ "xtts_checkpoint": "xtts_chichewa/XTTS_v2.0_original_model_files/model.pth",
+ "vocoder": ""
+ },
+ "model_dir": null,
+ "languages": [
+ "en",
+ "es",
+ "fr",
+ "de",
+ "it",
+ "pt",
+ "pl",
+ "tr",
+ "ru",
+ "nl",
+ "cs",
+ "ar",
+ "zh-cn",
+ "hu",
+ "ko",
+ "ja",
+ "hi",
+ "nya"
+ ],
+ "temperature": 0.75,
+ "length_penalty": 1.0,
+ "repetition_penalty": 5.0,
+ "top_k": 50,
+ "top_p": 0.85,
+ "num_gpt_outputs": 1,
+ "gpt_cond_len": 30,
+ "gpt_cond_chunk_len": 4,
+ "max_ref_len": 30,
+ "sound_norm_refs": false,
+ "optimizer_wd_only_on_weights": true,
+ "weighted_loss_attrs": {},
+ "weighted_loss_multipliers": {},
+ "github_branch": "* main"
+ }
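This config.json is the training configuration that the Coqui Trainer saved alongside the run. For inference with the fine-tuned checkpoint, something along these lines should work; this is a hedged sketch against the Coqui TTS XttsConfig/Xtts API, and speaker_ref.wav is a placeholder for any clean reference clip of the target voice:

```python
# Hedged sketch: load the fine-tuned XTTS checkpoint from this repo for Chichewa inference.
# Assumes the Coqui TTS package is installed; "speaker_ref.wav" is a placeholder path.
import torch
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts

RUN_DIR = "GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3"

config = XttsConfig()
config.load_json(f"{RUN_DIR}/config.json")
model = Xtts.init_from_config(config)
model.load_checkpoint(
    config,
    checkpoint_path=f"{RUN_DIR}/best_model.pth",
    vocab_path="XTTS_v2.0_original_model_files/vocab.json",
    eval=True,
)
if torch.cuda.is_available():
    model.cuda()

out = model.synthesize(
    "umene unafika kwa inu.",  # one of the test sentences above
    config,
    speaker_wav="speaker_ref.wav",  # placeholder reference clip of the target voice
    language="nya",
)
# out["wav"] is the synthesized waveform at 24 kHz (output_sample_rate in the config).
```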
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/events.out.tfevents.1742716482.2336b9583558.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80ddece0f0d6e276aa65e097bf60d347ba211705b49bbb6c9f611ced4504313f
+ size 32369038
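This events.out.tfevents.* file holds the TensorBoard event log for the run (the config sets dashboard_logger to tensorboard); after fetching it through LFS, it can be browsed by pointing a TensorBoard instance at the run directory.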
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/train_gpt_xtts.py ADDED
@@ -0,0 +1,212 @@
+ import os
+
+ from trainer import Trainer, TrainerArgs
+
+ from TTS.config.shared_configs import BaseDatasetConfig
+ from TTS.tts.datasets import load_tts_samples
+ from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
+ from TTS.utils.manage import ModelManager
+ from math import ceil
+
+ LANG_TO_ISO = {
+     "hausa": "ha",
+     "luo": "luo",
+     "chichewa": "nya"
+ }
+
+ subdirs = [d for d in os.listdir() if os.path.isdir(d) and d.startswith('xtts')]
+ OUT_PATH = subdirs[0]
+ LANG_NAME = OUT_PATH.split('_')[1]
+
+ # Logging parameters
+ RUN_NAME = f"GPT_XTTS_{LANG_NAME.upper()}_FT"
+ PROJECT_NAME = "XTTS_trainer"
+ DASHBOARD_LOGGER = "tensorboard"
+ LOGGER_URI = None
+
+ # Training parameters
+ OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # set to False for multi-GPU training
+ START_WITH_EVAL = True  # if True, training starts with an evaluation pass
+ BATCH_SIZE = 1  # set the batch size here
+ GRAD_ACUMM_STEPS = ceil(252 / BATCH_SIZE)  # set the gradient accumulation steps here
+ # Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS be at least 252 for more efficient training. You can increase or decrease BATCH_SIZE, but then set GRAD_ACUMM_STEPS accordingly.
+
+ # Define the dataset that you want to fine-tune on.
+ config_dataset = BaseDatasetConfig(
+     formatter="coqui",
+     dataset_name="ft_dataset",
+     path="data/",
+     meta_file_train="manifest_train.csv",
+     meta_file_val="manifest_dev.csv",
+     language=LANG_TO_ISO[LANG_NAME],
+ )
+
+ # Add the dataset configs here
+ DATASETS_CONFIG_LIST = [config_dataset]
+
+ # Define the path where the XTTS v2.0.1 files will be downloaded
+ CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/")
+ os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)
+
+
+ # DVAE files
+ DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
+ MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"
+
+ # Set the paths to the downloaded files
+ DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
+ MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(MEL_NORM_LINK))
+
+ # Download the DVAE files if needed
+ if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE):
+     print(" > Downloading DVAE files!")
+     ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)
+
+
+ # Download the XTTS v2.0 checkpoint if needed
+ TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
+ XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
+ XTTS_CONFIG_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json"
+
+ # XTTS transfer-learning parameters: provide the paths of the XTTS checkpoint files that you want to fine-tune.
+ TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK))  # vocab.json file
+ XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CHECKPOINT_LINK))  # model.pth file
+ XTTS_CONFIG_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CONFIG_LINK))  # config.json file
+
+ # Download the XTTS v2.0 files if needed
+ if not os.path.isfile(TOKENIZER_FILE):
+     print(" > Downloading XTTS v2.0 tokenizer!")
+     ModelManager._download_model_files(
+         [TOKENIZER_FILE_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+     )
+ if not os.path.isfile(XTTS_CHECKPOINT):
+     print(" > Downloading XTTS v2.0 checkpoint!")
+     ModelManager._download_model_files(
+         [XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+     )
+ if not os.path.isfile(XTTS_CONFIG_FILE):
+     print(" > Downloading XTTS v2.0 config!")
+     ModelManager._download_model_files(
+         [XTTS_CONFIG_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True
+     )
+
+ # Load the training samples
+ train_samples, eval_samples = load_tts_samples(
+     DATASETS_CONFIG_LIST,
+     eval_split=True,
+ )
+ print(f"Train samples: {len(train_samples)}")
+ print(f"Eval samples: {len(eval_samples)}")
+
+ # Use the audio file with the longest text as the speaker reference
+ samples_len = [len(item["text"].split(" ")) for item in train_samples]
+ longest_text_idx = samples_len.index(max(samples_len))
+ SPEAKER_REFERENCE = [train_samples[longest_text_idx]["audio_file"]]  # speaker reference used in the training test sentences
+ print(f"Using speaker reference: {SPEAKER_REFERENCE}")
+ LANGUAGE = config_dataset.language
+
+ HAUSA_TEST_SENTENCES = [
+     "Umarnai don zaman tsarki.",
+     "wanda kuma ya faɗa mana ƙaunar da kuke yi cikin Ruhu.",
+     "Gama mun ji labarin bangaskiyarku a cikin Yesu Kiristi da kuma ƙaunar da kuke yi saboda dukan tsarkaka."
+ ]
+
+ LUO_TEST_SENTENCES = [
+     "jo kolosai achiel.",
+     "magoyo erokamano ni wuoro ka un gi mor.",
+     "epafra bende nonyisowa kuom hera ma roho maler osemiyou."
+ ]
+
+ CHICHEWA_TEST_SENTENCES = [
+     "umene unafika kwa inu.",
+     "tukiko adzakuwuzani zonse za ine.",
+     "iye anachita mtendere kudzera mʼmagazi ake, wokhetsedwa pa mtanda."
+ ]
+
+ TEST_SENTENCES = {
+     "hausa": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in HAUSA_TEST_SENTENCES],
+     "luo": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in LUO_TEST_SENTENCES],
+     "chichewa": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in CHICHEWA_TEST_SENTENCES]
+ }
+
+
+ def main():
+     # init args and config
+     model_args = GPTArgs(
+         max_conditioning_length=132300,  # 6 secs
+         min_conditioning_length=11025,  # 0.5 secs
+         debug_loading_failures=True,
+         max_wav_length=12 * 22050,  # 12 secs
+         max_text_length=300,
+         mel_norm_file=MEL_NORM_FILE,
+         dvae_checkpoint=DVAE_CHECKPOINT,
+         xtts_checkpoint=XTTS_CHECKPOINT,  # checkpoint path of the model that you want to fine-tune
+         tokenizer_file=TOKENIZER_FILE,
+         gpt_num_audio_tokens=1026,
+         gpt_start_audio_token=1024,
+         gpt_stop_audio_token=1025,
+         gpt_use_masking_gt_prompt_approach=True,
+         gpt_use_perceiver_resampler=True,
+     )
+     # define audio config
+     audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)
+     # training parameters config
+     config = GPTTrainerConfig()
+
+     config.load_json(XTTS_CONFIG_FILE)
+
+     config.mixed_precision = True
+     config.precision = "bf16"
+     config.epochs = 1000
+     config.output_path = OUT_PATH
+     config.model_args = model_args
+     config.run_name = RUN_NAME
+     config.project_name = PROJECT_NAME
+     config.run_description = """
+         GPT XTTS training
+         """
+     config.dashboard_logger = DASHBOARD_LOGGER
+     config.logger_uri = LOGGER_URI
+     config.audio = audio_config
+     config.batch_size = BATCH_SIZE
+     config.eval_batch_size = BATCH_SIZE
+     config.num_loader_workers = 8
+     config.print_step = 50
+     config.plot_step = 100
+     config.log_model_step = 100
+     config.save_step = 10000
+     config.save_n_checkpoints = 2
+     config.save_checkpoints = True
+     config.save_best_after = 0
+     config.print_eval = False
+     # Optimizer values like Tortoise: a PyTorch implementation modified so that weight decay is not applied to non-weight parameters.
+     config.optimizer = "AdamW"
+     config.optimizer_wd_only_on_weights = OPTIMIZER_WD_ONLY_ON_WEIGHTS
+     config.optimizer_params = {"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2}
+     config.lr = 5e-06  # learning rate
+     config.lr_scheduler = "MultiStepLR"
+     config.lr_scheduler_params = {"milestones": [5000, 150000, 300000], "gamma": 0.5, "last_epoch": -1}
+     config.test_sentences = TEST_SENTENCES[LANG_NAME]
+
+     # init the model from the config
+     model = GPTTrainer.init_from_config(config)
+
+     # init the trainer and 🚀
+     trainer = Trainer(
+         TrainerArgs(
+             restore_path=None,  # the XTTS checkpoint is restored via the xtts_checkpoint key, so there is no need to restore it with the Trainer's restore_path parameter
+             skip_train_epoch=False,
+             start_with_eval=START_WITH_EVAL,
+             grad_accum_steps=GRAD_ACUMM_STEPS,
+         ),
+         config,
+         output_path=OUT_PATH,
+         model=model,
+         train_samples=train_samples,
+         eval_samples=eval_samples,
+     )
+     trainer.fit()
+
+
+ if __name__ == "__main__":
+     main()
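As written, the script locates its output directory by searching for a local folder whose name starts with xtts (here xtts_chichewa), derives the language name from that folder, and expects a data/ directory containing manifest_train.csv and manifest_dev.csv in Coqui format; given that layout it should launch with a plain `python train_gpt_xtts.py`.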
GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/trainer_0_log.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddf5ec0490c371299222ce85a5cc3d06479533e7a87e7936e54578c619a72f08
+ size 11881298
XTTS_v2.0_original_model_files/config.json ADDED
@@ -0,0 +1,160 @@
+ {
+ "output_path": "output",
+ "logger_uri": null,
+ "run_name": "run",
+ "project_name": null,
+ "run_description": "\ud83d\udc38Coqui trainer run.",
+ "print_step": 25,
+ "plot_step": 100,
+ "model_param_stats": false,
+ "wandb_entity": null,
+ "dashboard_logger": "tensorboard",
+ "save_on_interrupt": true,
+ "log_model_step": null,
+ "save_step": 10000,
+ "save_n_checkpoints": 5,
+ "save_checkpoints": true,
+ "save_all_best": false,
+ "save_best_after": 10000,
+ "target_loss": null,
+ "print_eval": false,
+ "test_delay_epochs": 0,
+ "run_eval": true,
+ "run_eval_steps": null,
+ "distributed_backend": "nccl",
+ "distributed_url": "tcp://localhost:54321",
+ "mixed_precision": false,
+ "precision": "fp16",
+ "epochs": 1000,
+ "batch_size": 32,
+ "eval_batch_size": 16,
+ "grad_clip": 0.0,
+ "scheduler_after_epoch": true,
+ "lr": 0.001,
+ "optimizer": "radam",
+ "optimizer_params": null,
+ "lr_scheduler": null,
+ "lr_scheduler_params": {},
+ "use_grad_scaler": false,
+ "allow_tf32": false,
+ "cudnn_enable": true,
+ "cudnn_deterministic": false,
+ "cudnn_benchmark": false,
+ "training_seed": 54321,
+ "model": "xtts",
+ "num_loader_workers": 0,
+ "num_eval_loader_workers": 0,
+ "use_noise_augment": false,
+ "audio": {
+ "sample_rate": 22050,
+ "output_sample_rate": 24000
+ },
+ "use_phonemes": false,
+ "phonemizer": null,
+ "phoneme_language": null,
+ "compute_input_seq_cache": false,
+ "text_cleaner": null,
+ "enable_eos_bos_chars": false,
+ "test_sentences_file": "",
+ "phoneme_cache_path": null,
+ "characters": null,
+ "add_blank": false,
+ "batch_group_size": 0,
+ "loss_masking": null,
+ "min_audio_len": 1,
+ "max_audio_len": Infinity,
+ "min_text_len": 1,
+ "max_text_len": Infinity,
+ "compute_f0": false,
+ "compute_energy": false,
+ "compute_linear_spec": false,
+ "precompute_num_workers": 0,
+ "start_by_longest": false,
+ "shuffle": false,
+ "drop_last": false,
+ "datasets": [
+ {
+ "formatter": "",
+ "dataset_name": "",
+ "path": "",
+ "meta_file_train": "",
+ "ignored_speakers": null,
+ "language": "",
+ "phonemizer": "",
+ "meta_file_val": "",
+ "meta_file_attn_mask": ""
+ }
+ ],
+ "test_sentences": [],
+ "eval_split_max_size": null,
+ "eval_split_size": 0.01,
+ "use_speaker_weighted_sampler": false,
+ "speaker_weighted_sampler_alpha": 1.0,
+ "use_language_weighted_sampler": false,
+ "language_weighted_sampler_alpha": 1.0,
+ "use_length_weighted_sampler": false,
+ "length_weighted_sampler_alpha": 1.0,
+ "model_args": {
+ "gpt_batch_size": 1,
+ "enable_redaction": false,
+ "kv_cache": true,
+ "gpt_checkpoint": null,
+ "clvp_checkpoint": null,
+ "decoder_checkpoint": null,
+ "num_chars": 255,
+ "tokenizer_file": "",
+ "gpt_max_audio_tokens": 605,
+ "gpt_max_text_tokens": 402,
+ "gpt_max_prompt_tokens": 70,
+ "gpt_layers": 30,
+ "gpt_n_model_channels": 1024,
+ "gpt_n_heads": 16,
+ "gpt_number_text_tokens": 6681,
+ "gpt_start_text_token": null,
+ "gpt_stop_text_token": null,
+ "gpt_num_audio_tokens": 1026,
+ "gpt_start_audio_token": 1024,
+ "gpt_stop_audio_token": 1025,
+ "gpt_code_stride_len": 1024,
+ "gpt_use_masking_gt_prompt_approach": true,
+ "gpt_use_perceiver_resampler": true,
+ "input_sample_rate": 22050,
+ "output_sample_rate": 24000,
+ "output_hop_length": 256,
+ "decoder_input_dim": 1024,
+ "d_vector_dim": 512,
+ "cond_d_vector_in_each_upsampling_layer": true,
+ "duration_const": 102400
+ },
+ "model_dir": null,
+ "languages": [
+ "en",
+ "es",
+ "fr",
+ "de",
+ "it",
+ "pt",
+ "pl",
+ "tr",
+ "ru",
+ "nl",
+ "cs",
+ "ar",
+ "zh-cn",
+ "hu",
+ "ko",
+ "ja",
+ "hi",
+ "nya"
+ ],
+ "temperature": 0.75,
+ "length_penalty": 1.0,
+ "repetition_penalty": 5.0,
+ "top_k": 50,
+ "top_p": 0.85,
+ "num_gpt_outputs": 1,
+ "gpt_cond_len": 30,
+ "gpt_cond_chunk_len": 4,
+ "max_ref_len": 30,
+ "sound_norm_refs": false
+ }
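For comparison with the fine-tuning config above: this stock XTTS v2 config differs mainly in the trainer fields (optimizer, lr, precision, batch sizes) and in model_args, where gpt_number_text_tokens grows from 6681 to 8388, presumably after the vocabulary was extended for Chichewa. A quick sketch to enumerate the top-level differences between the two files in this commit:

```python
# Sketch: list top-level fields that differ between the stock and fine-tuned configs.
# json.load accepts the bare Infinity literals these files contain.
import json

with open("XTTS_v2.0_original_model_files/config.json") as f:
    base = json.load(f)
with open("GPT_XTTS_CHICHEWA_FT-March-23-2025_07+54AM-8e59ec3/config.json") as f:
    ft = json.load(f)

for key in sorted(set(base) | set(ft)):
    if base.get(key) != ft.get(key):
        print(f"{key}: {base.get(key)!r} -> {ft.get(key)!r}")
```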
XTTS_v2.0_original_model_files/dvae.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7c8d0fbfe32522d95070bb6e0b429ca7a67376dee05433a1bb7b2d09bfc1b93
+ size 210893114
XTTS_v2.0_original_model_files/mel_stats.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f69422a8a8f344c4fca2f0c6b8d41d2151d6615b7321e48e6bb15ae949b119c
+ size 1067
XTTS_v2.0_original_model_files/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7ea20001c6a0a841c77e252d8409f6a74fb423e79b3206a0771ba5989776187
+ size 1867929118
XTTS_v2.0_original_model_files/vocab.json ADDED
The diff for this file is too large to render. See raw diff