import os
from math import ceil

from trainer import Trainer, TrainerArgs

from TTS.config.shared_configs import BaseDatasetConfig
from TTS.tts.datasets import load_tts_samples
from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig
from TTS.utils.manage import ModelManager

LANG_TO_ISO = {
    "hausa": "ha",
    "luo": "luo",
    "chichewa": "nya",
}

# Pick the output directory (and thus the language) from the first "xtts*" subdirectory.
subdirs = [d for d in os.listdir() if os.path.isdir(d) and d.startswith("xtts")]
OUT_PATH = subdirs[0]
LANG_NAME = OUT_PATH.split("_")[1]

# Logging parameters
RUN_NAME = f"GPT_XTTS_{LANG_NAME.upper()}_FT"
PROJECT_NAME = "XTTS_trainer"
DASHBOARD_LOGGER = "tensorboard"
LOGGER_URI = None

# Training parameters
OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # for multi-GPU training, set this to False
START_WITH_EVAL = True  # if True, training starts with an evaluation pass
BATCH_SIZE = 1  # set the batch size here
GRAD_ACUMM_STEPS = ceil(252 / BATCH_SIZE)  # set the gradient accumulation steps here
# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS be at least 252 for more efficient
# training. You can increase/decrease BATCH_SIZE, but then set GRAD_ACUMM_STEPS accordingly.

# Define the dataset that you want to fine-tune on.
config_dataset = BaseDatasetConfig(
    formatter="coqui",
    dataset_name="ft_dataset",
    path="data/",
    meta_file_train="manifest_train.csv",
    meta_file_val="manifest_dev.csv",
    language=LANG_TO_ISO[LANG_NAME],
)

# Add the configs of the datasets here
DATASETS_CONFIG_LIST = [config_dataset]

# Define the path where the XTTS v2.0.1 files will be downloaded
CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/")
os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)

# DVAE files
DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"

# Set the paths to the downloaded files
DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(MEL_NORM_LINK))

# Download the DVAE files if needed
if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE):
    print(" > Downloading DVAE files!")
    ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)

# XTTS v2.0 checkpoint files
TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
XTTS_CONFIG_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json"
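
# The downloads above and below go through ModelManager._download_model_files, a private
# Coqui TTS helper that may change between releases. The helper below is an optional,
# minimal fallback sketch (an addition to the original recipe, assuming the `requests`
# package is installed); it is defined here but never called by default.
def _download_file_fallback(url: str, out_dir: str) -> str:
    import requests  # assumed available; only needed if the fallback is used

    out_path = os.path.join(out_dir, os.path.basename(url))
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as f:
            # Stream the file to disk in 1 MiB chunks to avoid loading it into memory.
            for chunk in resp.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    return out_path
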
# XTTS transfer learning parameters: provide the paths to the XTTS model checkpoint that
# you want to fine-tune.
TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK))  # vocab.json file
XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CHECKPOINT_LINK))  # model.pth file
XTTS_CONFIG_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CONFIG_LINK))  # config.json file

# Download the XTTS v2.0 files if needed
if not os.path.isfile(TOKENIZER_FILE):
    print(" > Downloading XTTS v2.0 tokenizer!")
    ModelManager._download_model_files([TOKENIZER_FILE_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)
if not os.path.isfile(XTTS_CHECKPOINT):
    print(" > Downloading XTTS v2.0 checkpoint!")
    ModelManager._download_model_files([XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)
if not os.path.isfile(XTTS_CONFIG_FILE):
    print(" > Downloading XTTS v2.0 config!")
    ModelManager._download_model_files([XTTS_CONFIG_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True)

# Load the training samples
train_samples, eval_samples = load_tts_samples(
    DATASETS_CONFIG_LIST,
    eval_split=True,
)
print(f"Train samples: {len(train_samples)}")
print(f"Eval samples: {len(eval_samples)}")

# Use the audio file with the longest text as the speaker reference
samples_len = [len(item["text"].split(" ")) for item in train_samples]
longest_text_idx = samples_len.index(max(samples_len))
SPEAKER_REFERENCE = [train_samples[longest_text_idx]["audio_file"]]  # speaker reference used in the training test sentences
print(f"Using speaker reference: {SPEAKER_REFERENCE}")

LANGUAGE = config_dataset.language

HAUSA_TEST_SENTENCES = [
    "Umarnai don zaman tsarki.",
    "wanda kuma ya faɗa mana ƙaunar da kuke yi cikin Ruhu.",
    "Gama mun ji labarin bangaskiyarku a cikin Yesu Kiristi da kuma ƙaunar da kuke yi saboda dukan tsarkaka.",
]
LUO_TEST_SENTENCES = [
    "jo kolosai achiel.",
    "magoyo erokamano ni wuoro ka un gi mor.",
    "epafra bende nonyisowa kuom hera ma roho maler osemiyou.",
]
CHICHEWA_TEST_SENTENCES = [
    "umene unafika kwa inu.",
    "tukiko adzakuwuzani zonse za ine.",
    "iye anachita mtendere kudzera mʼmagazi ake, wokhetsedwa pa mtanda.",
]
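
# Optional sanity check (an added sketch, not part of the original recipe): the GPT
# conditioning input configured in main() is cropped to 6 s (max_conditioning_length=132300
# samples at 22.05 kHz), so it is useful to know how long the chosen reference clip is.
# Uses only the stdlib `wave` module and assumes a PCM WAV file; skipped otherwise.
try:
    import contextlib
    import wave

    with contextlib.closing(wave.open(SPEAKER_REFERENCE[0], "rb")) as ref_wav:
        ref_secs = ref_wav.getnframes() / ref_wav.getframerate()
    print(f"Speaker reference duration: {ref_secs:.2f}s (conditioning uses at most 6s)")
except (wave.Error, OSError):
    print(" > Could not read speaker reference duration (not a PCM WAV file?)")
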
TEST_SENTENCES = {
    "hausa": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in HAUSA_TEST_SENTENCES],
    "luo": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in LUO_TEST_SENTENCES],
    "chichewa": [{"text": text, "speaker_wav": SPEAKER_REFERENCE, "language": LANGUAGE} for text in CHICHEWA_TEST_SENTENCES],
}


def main():
    # Init args and config
    model_args = GPTArgs(
        max_conditioning_length=132300,  # 6 secs
        min_conditioning_length=11025,  # 0.5 secs
        debug_loading_failures=True,
        max_wav_length=12 * 22050,  # 12 secs
        max_text_length=300,
        mel_norm_file=MEL_NORM_FILE,
        dvae_checkpoint=DVAE_CHECKPOINT,
        xtts_checkpoint=XTTS_CHECKPOINT,  # checkpoint path of the model that you want to fine-tune
        tokenizer_file=TOKENIZER_FILE,
        gpt_num_audio_tokens=1026,
        gpt_start_audio_token=1024,
        gpt_stop_audio_token=1025,
        gpt_use_masking_gt_prompt_approach=True,
        gpt_use_perceiver_resampler=True,
    )

    # Define the audio config
    audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000)

    # Training parameters config
    config = GPTTrainerConfig()
    config.load_json(XTTS_CONFIG_FILE)
    config.mixed_precision = True
    config.precision = "bf16"
    config.epochs = 1000
    config.output_path = OUT_PATH
    config.model_args = model_args
    config.run_name = RUN_NAME
    config.project_name = PROJECT_NAME
    config.run_description = "GPT XTTS training"
    config.dashboard_logger = DASHBOARD_LOGGER
    config.logger_uri = LOGGER_URI
    config.audio = audio_config
    config.batch_size = BATCH_SIZE
    config.eval_batch_size = BATCH_SIZE
    config.num_loader_workers = 8
    config.print_step = 50
    config.plot_step = 100
    config.log_model_step = 100
    config.save_step = 10000
    config.save_n_checkpoints = 2
    config.save_checkpoints = True
    config.save_best_after = 0
    config.print_eval = False
    # Optimizer values follow Tortoise: the PyTorch AdamW implementation, modified so that
    # weight decay is not applied to non-weight parameters.
    config.optimizer = "AdamW"
    config.optimizer_wd_only_on_weights = OPTIMIZER_WD_ONLY_ON_WEIGHTS
    config.optimizer_params = {"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2}
    config.lr = 5e-06  # learning rate
    config.lr_scheduler = "MultiStepLR"
    config.lr_scheduler_params = {"milestones": [5000, 150000, 300000], "gamma": 0.5, "last_epoch": -1}
    config.test_sentences = TEST_SENTENCES[LANG_NAME]

    # Init the model from the config
    model = GPTTrainer.init_from_config(config)

    # Init the trainer and 🚀
    trainer = Trainer(
        TrainerArgs(
            restore_path=None,  # the XTTS checkpoint is restored via the xtts_checkpoint key above, so there is no need to restore it with the Trainer's restore_path argument
            skip_train_epoch=False,
            start_with_eval=START_WITH_EVAL,
            grad_accum_steps=GRAD_ACUMM_STEPS,
        ),
        config,
        output_path=OUT_PATH,
        model=model,
        train_samples=train_samples,
        eval_samples=eval_samples,
    )
    trainer.fit()


if __name__ == "__main__":
    main()
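

# Optional: quick inference sketch (an addition, not part of the training recipe above).
# It follows the standard Coqui XTTS inference API and assumes `run_dir` points at a
# fine-tuning run folder containing config.json, model.pth, and vocab.json; `run_dir`
# and `out_wav` are placeholders you must point at your own run. Never called by default.
def synthesize_sample(run_dir: str, text: str, out_wav: str = "sample.wav") -> None:
    import torch
    import torchaudio
    from TTS.tts.configs.xtts_config import XttsConfig
    from TTS.tts.models.xtts import Xtts

    config = XttsConfig()
    config.load_json(os.path.join(run_dir, "config.json"))
    model = Xtts.init_from_config(config)
    model.load_checkpoint(config, checkpoint_dir=run_dir, eval=True)
    if torch.cuda.is_available():
        model.cuda()
    outputs = model.synthesize(
        text,
        config,
        speaker_wav=SPEAKER_REFERENCE[0],  # reuse the training speaker reference
        language=LANGUAGE,
    )
    # XTTS decodes audio at 24 kHz (see output_sample_rate in the audio config).
    torchaudio.save(out_wav, torch.tensor(outputs["wav"], dtype=torch.float32).unsqueeze(0), 24000)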