diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..b4adeeb5e105e13a0bb0211536d60b18551d0219 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0ea89b1e3b1b756f25d9a9995a9b5a137647ebf4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Sony Research Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/MeanAudio b/MeanAudio
deleted file mode 160000
index 5f221b4b30ba3f89e8711c54961461c48d4999b8..0000000000000000000000000000000000000000
--- a/MeanAudio
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 5f221b4b30ba3f89e8711c54961461c48d4999b8
diff --git a/config/__init__.py b/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/config/base_config.yaml b/config/base_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d50cb9c52cb351aeb694a85ced2734a16fb28c38
--- /dev/null
+++ b/config/base_config.yaml
@@ -0,0 +1,65 @@
+defaults:
+  - data: t5_clap # change here to load different data in testing (data.AudioCaps_test)
+  - override hydra/job_logging: custom-simplest
+  - _self_
+
+hydra:
+  run:
+    dir: ./exps/${exp_id}
+  output_subdir: ${now:%Y-%m-%d_%H-%M-%S}-hydra
+
+enable_email: False
+
+## model
+model: meanaudio_mf
+text_encoder_name: t5_clap # [t5, clip, t5_clap, t5_clap_cat]: change here for different feature utils (only for runner-FeatureUtils/infer, not used when using a pre-computed dataset)
+concat_text_fc: False
+
+exp_id: default
+debug: False
+cudnn_benchmark: True
+compile: False # set compile to false by default
+amp: True
+weights: null
+# weights: null
+
+checkpoint: null
+
+seed: 14159265
+num_workers: 10 # per-GPU
+pin_memory: False # set to True if your system can handle it, i.e., has enough memory
+
+# NOTE: these values DO NOT affect the model during inference in any way;
+# they are only used by the dataloader to fill in missing data in multi-modal loading.
+# to change the sequence length for the model, see networks.py
+data_dim:
+  text_seq_len: 77
+  text_dim: 1024
+  text_c_dim: 512 # 1024 for pooled T5, 512 for CLAP
+
+# ema
configuration +ema: + enable: True + sigma_rels: [0.05, 0.1] + update_every: 1 + checkpoint_every: 10_000 + checkpoint_folder: ${hydra:run.dir}/ema_ckpts + default_output_sigma: 0.05 + + +# sampling, only for flow matching +sampling: + mean: 0.0 + scale: 1.0 + min_sigma: 0.0 + method: euler + num_steps: 25 + +# classifier-free guidance +null_condition_probability: 0.1 +cfg_strength: 1 + +# checkpoint paths to external modules +vae_16k_ckpt: ./weights/v1-16.pth +vae_44k_ckpt: ./weights/v1-44.pth +bigvgan_vocoder_ckpt: ./weights/best_netG.pt \ No newline at end of file diff --git a/config/data/t5_clap.yaml b/config/data/t5_clap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..255e817739ead183885eedf844cb44f5afa5e81c --- /dev/null +++ b/config/data/t5_clap.yaml @@ -0,0 +1,58 @@ +# AudioCaps +AudioCaps_npz: + tag: train + tsv: data/audiocaps/train-memmap.tsv + npz_dir: data/audiocaps/train-npz-t5-clap + output_subdir: null + repa_npz_dir: null + +AudioCaps_val_npz: + tag: val + tsv: data/audiocaps/val-memmap.tsv + npz_dir: data/audiocaps/val-npz-t5-clap + output_subdir: null + repa_npz_dir: null + gt_cache: data/audiocaps/val-features + +AudioCaps_test_npz: + tag: test + tsv: data/audiocaps/test-memmap.tsv + npz_dir: data/audiocaps/test-npz-t5-clap + output_subdir: null + repa_npz_dir: null + gt_cache: data/audiocaps/test-features + +latent_mean: 'sets/latent_mean.pt' +latent_std: 'sets/latent_std.pt' + +# Clotho +Clotho_npz: + tsv: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/clotho/dev-memmap-t5-clap.tsv + npz_dir: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/clotho/dev-npz-t5-clap + repa_npz_dir: null + +# WavCaps +AudioSetSL_npz: + tsv: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/wavcaps/audioset-sl-memmap-t5-clap.tsv + npz_dir: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/wavcaps/audioset-sl-npz-t5-clap + repa_npz_dir: null + +BBCSound_npz: + tsv: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/wavcaps/bbc-sound-effects-memmap-t5-clap.tsv + npz_dir: /hpc_stor03/sjtu_home/xiquan.li/data/MMAudio/wavcaps/bbc-sound-effects-npz-t5-clap + repa_npz_dir: null + +FreeSound1_npz: + tsv: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-memmap-t5-clap-1.tsv + npz_dir: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-npz-t5-clap-1 + repa_npz_dir: null + +FreeSound2_npz: + tsv: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-memmap-t5-clap-2.tsv + npz_dir: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-npz-t5-clap-2 + repa_npz_dir: null + +FreeSound3_npz: + tsv: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-memmap-t5-clap-3.tsv + npz_dir: /hpc_stor03/sjtu_home/junxi.liu/shared/freesound-npz-t5-clap-3 + repa_npz_dir: null \ No newline at end of file diff --git a/config/eval_config.yaml b/config/eval_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b145e78858f8899b80dd46d829b17eb3460dbdb6 --- /dev/null +++ b/config/eval_config.yaml @@ -0,0 +1,23 @@ +## This config fire is no longer used +## We pass everything by train_config to ensure training/eval consistency + +defaults: + - base_config_at + - override hydra/job_logging: custom-simplest + - _self_ + +hydra: + run: + dir: ./exps/${exp_id} + output_subdir: eval-${now:%Y-%m-%d_%H-%M-%S}-hydra + +exp_id: ${model} +dataset: audiocaps +duration_s: 10.0 + +# for inference, this is the per-GPU batch size +batch_size: 16 # eval batch size + +output_name: null + +enable_grad_scaler: False \ No newline at end of file diff --git a/config/hydra/job_logging/custom-eval.yaml 
b/config/hydra/job_logging/custom-eval.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7905388da4bbeda974043d45027ed4aba21b3e3 --- /dev/null +++ b/config/hydra/job_logging/custom-eval.yaml @@ -0,0 +1,32 @@ +# python logging configuration for tasks +version: 1 +formatters: + simple: + format: '[%(asctime)s][%(levelname)s][r${oc.env:LOCAL_RANK}] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + colorlog: + '()': 'colorlog.ColoredFormatter' + format: '[%(cyan)s%(asctime)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red +handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + # absolute file path + filename: ${hydra.runtime.output_dir}/eval-${now:%Y-%m-%d_%H-%M-%S}-rank${oc.env:LOCAL_RANK}.log + mode: w +root: + level: INFO + handlers: [console, file] + +disable_existing_loggers: false \ No newline at end of file diff --git a/config/hydra/job_logging/custom-no-rank.yaml b/config/hydra/job_logging/custom-no-rank.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df790823dce19d7d65407ecd28f19c24cad9eb98 --- /dev/null +++ b/config/hydra/job_logging/custom-no-rank.yaml @@ -0,0 +1,32 @@ +# python logging configuration for tasks +version: 1 +formatters: + simple: + format: '[%(asctime)s][%(levelname)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + colorlog: + '()': 'colorlog.ColoredFormatter' + format: '[%(cyan)s%(asctime)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red +handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + # absolute file path + filename: ${hydra.runtime.output_dir}/${now:%Y-%m-%d_%H-%M-%S}-eval.log + mode: w +root: + level: INFO + handlers: [console, file] + +disable_existing_loggers: false \ No newline at end of file diff --git a/config/hydra/job_logging/custom-simplest.yaml b/config/hydra/job_logging/custom-simplest.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7fafe7d4b26d3ac01685df8567223928a97bcf3 --- /dev/null +++ b/config/hydra/job_logging/custom-simplest.yaml @@ -0,0 +1,26 @@ +# python logging configuration for tasks +version: 1 +formatters: + simple: + format: '[%(asctime)s][%(levelname)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + colorlog: + '()': 'colorlog.ColoredFormatter' + format: '[%(cyan)s%(asctime)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red +handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout +root: + level: INFO + handlers: [console] + +disable_existing_loggers: false \ No newline at end of file diff --git a/config/hydra/job_logging/custom.yaml b/config/hydra/job_logging/custom.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d2b8d730f4b50d18373e9ab3bc8cb196be107ce --- /dev/null +++ b/config/hydra/job_logging/custom.yaml @@ -0,0 +1,33 @@ +# @package hydra.job_logging +# python logging configuration for tasks +version: 1 +formatters: + simple: + format: 
'[%(asctime)s][%(levelname)s][r${oc.env:LOCAL_RANK}] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + colorlog: + '()': 'colorlog.ColoredFormatter' + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)sr${oc.env:LOCAL_RANK}%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s' + datefmt: '%Y-%m-%d %H:%M:%S' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red +handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + # absolute file path + filename: ${hydra.runtime.output_dir}/train-${now:%Y-%m-%d_%H-%M-%S}-rank${oc.env:LOCAL_RANK}.log + mode: w +root: + level: INFO + handlers: [console, file] + +disable_existing_loggers: false \ No newline at end of file diff --git a/config/train_config.yaml b/config/train_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..050df54bc7995fe937779c8739c6b6e4abbdc504 --- /dev/null +++ b/config/train_config.yaml @@ -0,0 +1,46 @@ +defaults: + - base_config + - override data: t5_clap # change here for loading different text features in training/evaluation + - override hydra/job_logging: custom + - _self_ + +hydra: + run: + dir: ./exps/${exp_id} + output_subdir: train-${now:%Y-%m-%d_%H-%M-%S}-hydra + +ema: + start: 0 + +mini_train: False +example_train: False +enable_grad_scaler: True +ac_oversample_rate: 5 + +log_text_interval: 50 +log_extra_interval: 10_000 +val_interval: 10_000 +eval_interval: 10_000 +save_eval_interval: 10_000 +save_weights_interval: 5_000 +save_checkpoint_interval: 10_000 +save_copy_iterations: [] + +batch_size: 128 +eval_batch_size: 4 + +num_iterations: 100_000 +learning_rate: 1e-4 +linear_warmup_steps: 1_000 + +lr_schedule: step +lr_schedule_steps: [40_000, 45_000] # this is not used, lr_schedule_steps will be determined by the number of iterations +lr_schedule_gamma: 0.1 + +clip_grad_norm: 1.0 +weight_decay: 1.0e-6 + +output_name: null # for eval + +use_meanflow: True +use_repa: False diff --git a/data/.gitkeep b/data/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/eval.py b/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..fac68b34f35b3b036faeb97cf008c84da0c88ad7 --- /dev/null +++ b/eval.py @@ -0,0 +1,151 @@ +import logging +from argparse import ArgumentParser +from pathlib import Path +import os +import torch +import torchaudio +import csv +from meanaudio.eval_utils import (ModelConfig, all_model_cfg, generate_fm, generate_mf, setup_eval_logging) +from meanaudio.model.flow_matching import FlowMatching +from meanaudio.model.mean_flow import MeanFlow +from meanaudio.model.networks import MeanAudio, get_mean_audio +from meanaudio.model.utils.features_utils import FeaturesUtils + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True + +from tqdm import tqdm +log = logging.getLogger() + + +@torch.inference_mode() +def main(): + setup_eval_logging() + + parser = ArgumentParser() + parser.add_argument('--variant', + type=str, + default='meanaudio_mf', + help='meanaudio_mf, fluxaudio_fm') + + parser.add_argument('--audio_path', type=str, help='Input audio', default='') + parser.add_argument('--duration', type=float, default=9.975) # for 312 latents, seq_config should has a duration of 9.975s + parser.add_argument('--cfg_strength', type=float, default=4.5, + help='If you use meanflow, CFG is integrated in model training. 
So simply set this <1 to avoid an additional unconditional infer.') + parser.add_argument('--num_steps', type=int, default=25) + parser.add_argument('--output', type=Path, help='Output directory', default='./output') + parser.add_argument('--seed', type=int, help='Random seed', default=42) + parser.add_argument('--full_precision', action='store_true') + parser.add_argument('--model_path', type=str, help='Ckpt path of trained model') + parser.add_argument('--encoder_name', choices=['clip', 't5', 't5_clap'], type=str, help='text encoder name') + parser.add_argument('--use_rope', action='store_true', help='Whether or not use position embedding for model') + parser.add_argument('--text_c_dim', type=int, default=512, + help='Dim of the text_features_c, 1024 for pooled T5 and 512 for CLAP') + parser.add_argument('--debug', action='store_true') + parser.add_argument('--use_meanflow', action='store_true', help='Whether or not use mean flow for inference') + args = parser.parse_args() + + if args.debug: + import debugpy + debugpy.listen(6665) + print("Waiting for debugger attach (rank 0)...") + debugpy.wait_for_client() + + if args.variant not in all_model_cfg: + raise ValueError(f'Unknown model variant: {args.variant}') + model: ModelConfig = all_model_cfg[args.variant] # model is just the model config + # model.download_if_needed() + seq_cfg = model.seq_cfg + + negative_prompt: str = '' + output_dir: str = args.output.expanduser() + seed: int = args.seed + num_steps: int = args.num_steps + duration: float = args.duration + cfg_strength: float = args.cfg_strength + + device = 'cpu' + if torch.cuda.is_available(): + device = 'cuda' + elif torch.backends.mps.is_available(): + device = 'mps' + else: + log.warning('CUDA/MPS are not available, running on CPU') + dtype = torch.float32 if args.full_precision else torch.bfloat16 + + output_dir.mkdir(parents=True, exist_ok=True) + print(model.model_name) + # load a pretrained model + net: MeanAudio = get_mean_audio(model.model_name, + use_rope=args.use_rope, + text_c_dim=args.text_c_dim).to(device, dtype).eval() + net.load_weights(torch.load(args.model_path, map_location=device, weights_only=True)) + log.info(f'Loaded weights from {args.model_path}') + + # misc setup + rng = torch.Generator(device=device) + rng.manual_seed(seed) + if args.use_meanflow: + mf = MeanFlow(steps=num_steps) + else: + fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps) + + feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path, + enable_conditions=True, + encoder_name=args.encoder_name, + mode=model.mode, + bigvgan_vocoder_ckpt=model.bigvgan_16k_path, + need_vae_encoder=False) + feature_utils = feature_utils.to(device, dtype).eval() + + seq_cfg.duration = duration + net.update_seq_lengths(seq_cfg.latent_seq_len) + + eval_file = './sets/test-audiocaps.tsv' + audio_ids=[] + text_prompts=[] + with open(eval_file, 'r') as f: + reader = csv.DictReader(f, delimiter='\t') + for row in reader: + audio_ids.append(row['id']) + text_prompts.append(row['caption']) + + for k in tqdm(range(0, len(text_prompts))): + prompt = text_prompts[k] + if args.use_meanflow: + log.info(f'Prompt: {prompt}') + log.info(f'Negative prompt: {negative_prompt}') + audios = generate_mf([prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + mf=mf, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + save_paths = output_dir / f'{audio_ids[k]}.wav' + torchaudio.save(save_paths, audio, seq_cfg.sampling_rate) + log.info(f'Audio saved 
to {save_paths}') + log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30)) + + else: + prompt = text_prompts[k] + log.info(f'Prompt: {prompt}') + log.info(f'Negative prompt: {negative_prompt}') + audios = generate_fm([prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + fm=fm, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + + save_paths = output_dir / f'{audio_ids[k]}.wav' + torchaudio.save(save_paths, audio, seq_cfg.sampling_rate) + log.info(f'Audio saved to {save_paths}') + log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30)) + + +if __name__ == '__main__': + main() diff --git a/infer.py b/infer.py new file mode 100644 index 0000000000000000000000000000000000000000..b92519547e43c6e974476573546270b82eff8f48 --- /dev/null +++ b/infer.py @@ -0,0 +1,143 @@ +import warnings +warnings.filterwarnings("ignore", category=FutureWarning) + +import logging +from argparse import ArgumentParser +from pathlib import Path +import torch +import torchaudio +from meanaudio.eval_utils import (ModelConfig, all_model_cfg, generate_mf, generate_fm, setup_eval_logging) +from meanaudio.model.flow_matching import FlowMatching +from meanaudio.model.mean_flow import MeanFlow +from meanaudio.model.networks import MeanAudio, get_mean_audio +from meanaudio.model.utils.features_utils import FeaturesUtils + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True +from tqdm import tqdm +log = logging.getLogger() + + +@torch.inference_mode() +def main(): + setup_eval_logging() + + parser = ArgumentParser() + parser.add_argument('--variant', + type=str, + default='small_16k_mf', + help='small_16k_mf, small_16k_fm') + + parser.add_argument('--prompt', type=str, help='Input prompt', default='') + parser.add_argument('--negative_prompt', type=str, help='Negative prompt', default='') + parser.add_argument('--duration', type=float, default=9.975) # for 312 latents, seq_config should has a duration of 9.975s + parser.add_argument('--cfg_strength', type=float, default=4.5) + parser.add_argument('--num_steps', type=int, default=25) + + parser.add_argument('--output', type=Path, help='Output directory', default='./output') + parser.add_argument('--seed', type=int, help='Random seed', default=42) + parser.add_argument('--full_precision', action='store_true') + parser.add_argument('--model_path', type=str, help='Ckpt path of trained model') + parser.add_argument('--encoder_name', choices=['clip', 't5', 't5_clap'], type=str, help='text encoder name') + parser.add_argument('--use_rope', action='store_true', help='Whether or not use position embedding for model') + parser.add_argument('--text_c_dim', type=int, default=512, + help='Dim of the text_features_c, 1024 for pooled T5 and 512 for CLAP') + parser.add_argument('--debug', action='store_true') + parser.add_argument('--use_meanflow', action='store_true', help='Whether or not use mean flow for inference') + args = parser.parse_args() + + if args.debug: + import debugpy + debugpy.listen(6666) + print("Waiting for debugger attach (rank 0)...") + debugpy.wait_for_client() + + if args.variant not in all_model_cfg: + raise ValueError(f'Unknown model variant: {args.variant}') + model: ModelConfig = all_model_cfg[args.variant] # model is just the model config + seq_cfg = model.seq_cfg + + negative_prompt: str = args.negative_prompt + output_dir: str = args.output.expanduser() + seed: int = args.seed + num_steps: int = args.num_steps + duration: float 
= args.duration + cfg_strength: float = args.cfg_strength + + device = 'cpu' + if torch.cuda.is_available(): + device = 'cuda' + elif torch.backends.mps.is_available(): + device = 'mps' + else: + log.warning('CUDA/MPS are not available, running on CPU') + dtype = torch.float32 if args.full_precision else torch.bfloat16 + + output_dir.mkdir(parents=True, exist_ok=True) + # load a pretrained model + net: MeanAudio = get_mean_audio(model.model_name, + use_rope=args.use_rope, + text_c_dim=args.text_c_dim).to(device, dtype).eval() + net.load_weights(torch.load(args.model_path, map_location=device, weights_only=True)) + log.info(f'Loaded weights from {args.model_path}') + + # misc setup + rng = torch.Generator(device=device) + rng.manual_seed(seed) + if args.use_meanflow: + mf = MeanFlow(steps=num_steps) + else: + fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps) + + feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path, + enable_conditions=True, + encoder_name=args.encoder_name, + mode=model.mode, + bigvgan_vocoder_ckpt=model.bigvgan_16k_path, + need_vae_encoder=False) + feature_utils = feature_utils.to(device, dtype).eval() + + seq_cfg.duration = duration + net.update_seq_lengths(seq_cfg.latent_seq_len) + prompts: str = [args.prompt] + + + if args.use_meanflow: + for prompt in tqdm(prompts): + log.info(f'Prompt: {prompt}') + log.info(f'Negative prompt: {negative_prompt}') + audios = generate_mf([prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + mf=mf, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + safe_filename = prompt.replace(' ', '_').replace('/', '_').replace('.', '') + save_path = output_dir / f'{safe_filename}--numsteps{num_steps}--seed{args.seed}.wav' + torchaudio.save( save_path, audio, seq_cfg.sampling_rate) + log.info(f'Audio saved to {save_path}') + log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30)) + else: + for prompt in tqdm(prompts): + log.info(f'Prompt: {prompt}') + log.info(f'Negative prompt: {negative_prompt}') + audios = generate_fm([prompt], + negative_text=[negative_prompt], + feature_utils=feature_utils, + net=net, + fm=fm, + rng=rng, + cfg_strength=cfg_strength) + audio = audios.float().cpu()[0] + safe_filename = prompt.replace(' ', '_').replace('/', '_').replace('.', '') + save_path = output_dir / f'{safe_filename}--numsteps{num_steps}--seed{args.seed}.wav' + torchaudio.save(save_path, audio, seq_cfg.sampling_rate) + + log.info(f'Audio saved to {save_path}') + log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30)) + + +if __name__ == '__main__': + main() diff --git a/meanaudio/__init__.py b/meanaudio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/data/__init__.py b/meanaudio/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/data/av_utils.py b/meanaudio/data/av_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e4998d9f43543259a77dda0490c9996b16e037f0 --- /dev/null +++ b/meanaudio/data/av_utils.py @@ -0,0 +1,162 @@ +from dataclasses import dataclass +from fractions import Fraction +from pathlib import Path +from typing import Optional + +import av +import numpy as np +import torch +from av import AudioFrame + + +@dataclass +class VideoInfo: + duration_sec: float + fps: Fraction + clip_frames: 
torch.Tensor + sync_frames: torch.Tensor + all_frames: Optional[list[np.ndarray]] + + @property + def height(self): + return self.all_frames[0].shape[0] + + @property + def width(self): + return self.all_frames[0].shape[1] + + @classmethod + def from_image_info(cls, image_info: 'ImageInfo', duration_sec: float, + fps: Fraction) -> 'VideoInfo': + num_frames = int(duration_sec * fps) + all_frames = [image_info.original_frame] * num_frames + return cls(duration_sec=duration_sec, + fps=fps, + clip_frames=image_info.clip_frames, + sync_frames=image_info.sync_frames, + all_frames=all_frames) + + +@dataclass +class ImageInfo: + clip_frames: torch.Tensor + sync_frames: torch.Tensor + original_frame: Optional[np.ndarray] + + @property + def height(self): + return self.original_frame.shape[0] + + @property + def width(self): + return self.original_frame.shape[1] + + +def read_frames(video_path: Path, list_of_fps: list[float], start_sec: float, end_sec: float, + need_all_frames: bool) -> tuple[list[np.ndarray], list[np.ndarray], Fraction]: + output_frames = [[] for _ in list_of_fps] + next_frame_time_for_each_fps = [0.0 for _ in list_of_fps] + time_delta_for_each_fps = [1 / fps for fps in list_of_fps] + all_frames = [] + + # container = av.open(video_path) + with av.open(video_path) as container: + stream = container.streams.video[0] + fps = stream.guessed_rate + stream.thread_type = 'AUTO' + for packet in container.demux(stream): + for frame in packet.decode(): + frame_time = frame.time + if frame_time < start_sec: + continue + if frame_time > end_sec: + break + + frame_np = None + if need_all_frames: + frame_np = frame.to_ndarray(format='rgb24') + all_frames.append(frame_np) + + for i, _ in enumerate(list_of_fps): + this_time = frame_time + while this_time >= next_frame_time_for_each_fps[i]: + if frame_np is None: + frame_np = frame.to_ndarray(format='rgb24') + + output_frames[i].append(frame_np) + next_frame_time_for_each_fps[i] += time_delta_for_each_fps[i] + + output_frames = [np.stack(frames) for frames in output_frames] + return output_frames, all_frames, fps + + +def reencode_with_audio(video_info: VideoInfo, output_path: Path, audio: torch.Tensor, + sampling_rate: int): + container = av.open(output_path, 'w') + output_video_stream = container.add_stream('h264', video_info.fps) + output_video_stream.codec_context.bit_rate = 10 * 1e6 # 10 Mbps + output_video_stream.width = video_info.width + output_video_stream.height = video_info.height + output_video_stream.pix_fmt = 'yuv420p' + + output_audio_stream = container.add_stream('aac', sampling_rate) + + # encode video + for image in video_info.all_frames: + image = av.VideoFrame.from_ndarray(image) + packet = output_video_stream.encode(image) + container.mux(packet) + + for packet in output_video_stream.encode(): + container.mux(packet) + + # convert float tensor audio to numpy array + audio_np = audio.numpy().astype(np.float32) + audio_frame = AudioFrame.from_ndarray(audio_np, format='flt', layout='mono') + audio_frame.sample_rate = sampling_rate + + for packet in output_audio_stream.encode(audio_frame): + container.mux(packet) + + for packet in output_audio_stream.encode(): + container.mux(packet) + + container.close() + + +def remux_with_audio(video_path: Path, audio: torch.Tensor, output_path: Path, sampling_rate: int): + """ + NOTE: I don't think we can get the exact video duration right without re-encoding + so we are not using this but keeping it here for reference + """ + video = av.open(video_path) + output = av.open(output_path, 'w') + 
input_video_stream = video.streams.video[0] + output_video_stream = output.add_stream(template=input_video_stream) + output_audio_stream = output.add_stream('aac', sampling_rate) + + duration_sec = audio.shape[-1] / sampling_rate + + for packet in video.demux(input_video_stream): + # We need to skip the "flushing" packets that `demux` generates. + if packet.dts is None: + continue + # We need to assign the packet to the new stream. + packet.stream = output_video_stream + output.mux(packet) + + # convert float tensor audio to numpy array + audio_np = audio.numpy().astype(np.float32) + audio_frame = av.AudioFrame.from_ndarray(audio_np, format='flt', layout='mono') + audio_frame.sample_rate = sampling_rate + + for packet in output_audio_stream.encode(audio_frame): + output.mux(packet) + + for packet in output_audio_stream.encode(): + output.mux(packet) + + video.close() + output.close() + + output.close() diff --git a/meanaudio/data/data_setup.py b/meanaudio/data/data_setup.py new file mode 100644 index 0000000000000000000000000000000000000000..498ce9ed8875749110d1993d986f9a7a65de8529 --- /dev/null +++ b/meanaudio/data/data_setup.py @@ -0,0 +1,137 @@ +import logging +import random + +import numpy as np +import torch +from omegaconf import DictConfig +from torch.utils.data import DataLoader, Dataset +from torch.utils.data.dataloader import default_collate +from torch.utils.data.distributed import DistributedSampler + +from meanaudio.data.extracted_audio import ExtractedAudio +from meanaudio.data.mm_dataset import MultiModalDataset +from meanaudio.utils.dist_utils import local_rank + +log = logging.getLogger() + + +# Re-seed randomness every time we start a worker +def worker_init_fn(worker_id: int): + worker_seed = torch.initial_seed() % (2**31) + worker_id + local_rank * 1000 + np.random.seed(worker_seed) + random.seed(worker_seed) + log.debug(f'Worker {worker_id} re-seeded with seed {worker_seed} in rank {local_rank}') + + +def load_audio_data(cfg: DictConfig, data_cfg: DictConfig) -> Dataset: + dataset = ExtractedAudio(tsv_path=data_cfg.tsv, + concat_text_fc=cfg.concat_text_fc, # FIX here we determine usage of concat based on global config + data_dim=cfg.data_dim, + npz_dir=data_cfg.npz_dir, + repa_npz_dir=data_cfg.repa_npz_dir, + exclude_cls=cfg.get('exclude_cls', False), + repa_version=cfg.get('repa_version', 1)) + return dataset + + +def setup_training_datasets(cfg: DictConfig) -> tuple[Dataset, DistributedSampler, DataLoader]: + + if cfg.mini_train: + audiocaps_mini = load_audio_data(cfg, cfg.data.AudioCaps_val_npz) # use val set as the miniset + dataset = MultiModalDataset([], + [audiocaps_mini]) + + else: + + audiocaps_npz = load_audio_data(cfg, cfg.data.AudioCaps_npz) + # !TODO: think of a better way to handle different datasets + + # freesound1_npz = load_audio_data_npz(cfg, cfg.data.FreeSound1_npz) + # freesound2_npz = load_audio_data_npz(cfg, cfg.data.FreeSound2_npz) + # freesound3_npz = load_audio_data_npz(cfg, cfg.data.FreeSound3_npz) + + # audioset_sl_npz = load_audio_data_npz(cfg, cfg.data.AudioSetSL_npz) + # bbcsound_npz = load_audio_data_npz(cfg, cfg.data.BBCSound_npz) + # clotho_npz = load_audio_data_npz(cfg, cfg.data.Clotho_npz) + + dataset = MultiModalDataset([], [audiocaps_npz]) + # dataset = MultiModalDataset([], [audiocaps_npz]*cfg.ac_oversample_rate + [audioset_sl_npz, bbcsound_npz, clotho_npz, + # freesound1_npz, freesound2_npz, freesound3_npz]) + + + batch_size = cfg.batch_size # per-gpu batch size + num_workers = cfg.num_workers + pin_memory = cfg.pin_memory + 
sampler, loader = construct_loader(dataset, + batch_size, + num_workers, + shuffle=True, + drop_last=True, + pin_memory=pin_memory) + + return dataset, sampler, loader + + +def setup_test_datasets(cfg): # used in sample + dataset = load_audio_data(cfg, cfg.data.AudioCaps_test_npz) # ALL with NPZ format + + batch_size = cfg.eval_batch_size # FIX: from train config + num_workers = cfg.num_workers + pin_memory = cfg.pin_memory + sampler, loader = construct_loader(dataset, + batch_size, + num_workers, + shuffle=False, + drop_last=False, + pin_memory=pin_memory) + + return dataset, sampler, loader + + +def setup_val_datasets(cfg: DictConfig) -> tuple[Dataset, DataLoader, DataLoader]: + dataset = load_audio_data(cfg, cfg.data.AudioCaps_val_npz) + + val_batch_size = cfg.batch_size + val_eval_batch_size = cfg.eval_batch_size + num_workers = cfg.num_workers + pin_memory = cfg.pin_memory + _, val_loader = construct_loader(dataset, + val_batch_size, + num_workers, + shuffle=False, + drop_last=False, + pin_memory=pin_memory) + _, eval_loader = construct_loader(dataset, + val_eval_batch_size, + num_workers, + shuffle=False, + drop_last=False, + pin_memory=pin_memory) + + return dataset, val_loader, eval_loader + + +def error_avoidance_collate(batch): + batch = list(filter(lambda x: x is not None, batch)) # batch = [x for x in batch if x is not None] + return default_collate(batch) + + +def construct_loader(dataset: Dataset, + batch_size: int, + num_workers: int, + *, + shuffle: bool = True, + drop_last: bool = True, + pin_memory: bool = False, + error_avoidance: bool = False) -> tuple[DistributedSampler, DataLoader]: + train_sampler = DistributedSampler(dataset, rank=local_rank, shuffle=shuffle) + train_loader = DataLoader(dataset, + batch_size, + sampler=train_sampler, + num_workers=num_workers, + worker_init_fn=worker_init_fn, + drop_last=drop_last, + persistent_workers=num_workers > 0, + pin_memory=pin_memory, + collate_fn=error_avoidance_collate if error_avoidance else None) + return train_sampler, train_loader diff --git a/meanaudio/data/eval/__init__.py b/meanaudio/data/eval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/data/eval/audiocaps.py b/meanaudio/data/eval/audiocaps.py new file mode 100644 index 0000000000000000000000000000000000000000..35f4fd9e1e300503b0100825e698f82edfd735d1 --- /dev/null +++ b/meanaudio/data/eval/audiocaps.py @@ -0,0 +1,39 @@ +import logging +import os +from collections import defaultdict +from pathlib import Path +from typing import Union + +import pandas as pd +import torch +from torch.utils.data.dataset import Dataset + +log = logging.getLogger() + + +class AudioCapsData(Dataset): + + def __init__(self, audio_path: Union[str, Path], csv_path: Union[str, Path]): + df = pd.read_csv(csv_path).to_dict(orient='records') + + audio_files = sorted(os.listdir(audio_path)) + audio_files = set( + [Path(f).stem for f in audio_files if f.endswith('.wav') or f.endswith('.flac')]) + + self.data = [] + for row in df: + self.data.append({ + 'name': row['name'], + 'caption': row['caption'], + }) + + self.audio_path = Path(audio_path) + self.csv_path = Path(csv_path) + + log.info(f'Found {len(self.data)} matching audio files in {self.audio_path}') + + def __getitem__(self, idx: int) -> torch.Tensor: + return self.data[idx] + + def __len__(self): + return len(self.data) diff --git a/meanaudio/data/eval/moviegen.py b/meanaudio/data/eval/moviegen.py new file mode 100644 index 
0000000000000000000000000000000000000000..97969d68385f70eb49e8eb25fc6c3733a0cedda8 --- /dev/null +++ b/meanaudio/data/eval/moviegen.py @@ -0,0 +1,131 @@ +import json +import logging +import os +from pathlib import Path +from typing import Union + +import torch +from torch.utils.data.dataset import Dataset +from torchvision.transforms import v2 +from torio.io import StreamingMediaDecoder + +from mmaudio.utils.dist_utils import local_rank + +log = logging.getLogger() + +_CLIP_SIZE = 384 +_CLIP_FPS = 8.0 + +_SYNC_SIZE = 224 +_SYNC_FPS = 25.0 + + +class MovieGenData(Dataset): + + def __init__( + self, + video_root: Union[str, Path], + sync_root: Union[str, Path], + jsonl_root: Union[str, Path], + *, + duration_sec: float = 10.0, + read_clip: bool = True, + ): + self.video_root = Path(video_root) + self.sync_root = Path(sync_root) + self.jsonl_root = Path(jsonl_root) + self.read_clip = read_clip + + videos = sorted(os.listdir(self.video_root)) + videos = [v[:-4] for v in videos] # remove extensions + self.captions = {} + + for v in videos: + with open(self.jsonl_root / (v + '.jsonl')) as f: + data = json.load(f) + self.captions[v] = data['audio_prompt'] + + if local_rank == 0: + log.info(f'{len(videos)} videos found in {video_root}') + + self.duration_sec = duration_sec + + self.clip_expected_length = int(_CLIP_FPS * self.duration_sec) + self.sync_expected_length = int(_SYNC_FPS * self.duration_sec) + + self.clip_augment = v2.Compose([ + v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + ]) + + self.sync_augment = v2.Compose([ + v2.Resize((_SYNC_SIZE, _SYNC_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.CenterCrop(_SYNC_SIZE), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + + self.videos = videos + + def sample(self, idx: int) -> dict[str, torch.Tensor]: + video_id = self.videos[idx] + caption = self.captions[video_id] + + reader = StreamingMediaDecoder(self.video_root / (video_id + '.mp4')) + reader.add_basic_video_stream( + frames_per_chunk=int(_CLIP_FPS * self.duration_sec), + frame_rate=_CLIP_FPS, + format='rgb24', + ) + reader.add_basic_video_stream( + frames_per_chunk=int(_SYNC_FPS * self.duration_sec), + frame_rate=_SYNC_FPS, + format='rgb24', + ) + + reader.fill_buffer() + data_chunk = reader.pop_chunks() + + clip_chunk = data_chunk[0] + sync_chunk = data_chunk[1] + if clip_chunk is None: + raise RuntimeError(f'CLIP video returned None {video_id}') + if clip_chunk.shape[0] < self.clip_expected_length: + raise RuntimeError(f'CLIP video too short {video_id}') + + if sync_chunk is None: + raise RuntimeError(f'Sync video returned None {video_id}') + if sync_chunk.shape[0] < self.sync_expected_length: + raise RuntimeError(f'Sync video too short {video_id}') + + # truncate the video + clip_chunk = clip_chunk[:self.clip_expected_length] + if clip_chunk.shape[0] != self.clip_expected_length: + raise RuntimeError(f'CLIP video wrong length {video_id}, ' + f'expected {self.clip_expected_length}, ' + f'got {clip_chunk.shape[0]}') + clip_chunk = self.clip_augment(clip_chunk) + + sync_chunk = sync_chunk[:self.sync_expected_length] + if sync_chunk.shape[0] != self.sync_expected_length: + raise RuntimeError(f'Sync video wrong length {video_id}, ' + f'expected {self.sync_expected_length}, ' + f'got {sync_chunk.shape[0]}') + sync_chunk = self.sync_augment(sync_chunk) + + data = { + 'name': video_id, + 'caption': caption, + 
'clip_video': clip_chunk, + 'sync_video': sync_chunk, + } + + return data + + def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: + return self.sample(idx) + + def __len__(self): + return len(self.captions) diff --git a/meanaudio/data/eval/video_dataset.py b/meanaudio/data/eval/video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0b84a963e6da0c31984a3105dc87a6e9a1918c62 --- /dev/null +++ b/meanaudio/data/eval/video_dataset.py @@ -0,0 +1,197 @@ +import json +import logging +import os +from pathlib import Path +from typing import Union + +import pandas as pd +import torch +from torch.utils.data.dataset import Dataset +from torchvision.transforms import v2 +from torio.io import StreamingMediaDecoder + +from mmaudio.utils.dist_utils import local_rank + +log = logging.getLogger() + +_CLIP_SIZE = 384 +_CLIP_FPS = 8.0 + +_SYNC_SIZE = 224 +_SYNC_FPS = 25.0 + + +class VideoDataset(Dataset): + + def __init__( + self, + video_root: Union[str, Path], + *, + duration_sec: float = 8.0, + ): + self.video_root = Path(video_root) + + self.duration_sec = duration_sec + + self.clip_expected_length = int(_CLIP_FPS * self.duration_sec) + self.sync_expected_length = int(_SYNC_FPS * self.duration_sec) + + self.clip_transform = v2.Compose([ + v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + ]) + + self.sync_transform = v2.Compose([ + v2.Resize(_SYNC_SIZE, interpolation=v2.InterpolationMode.BICUBIC), + v2.CenterCrop(_SYNC_SIZE), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + + # to be implemented by subclasses + self.captions = {} + self.videos = sorted(list(self.captions.keys())) + + def sample(self, idx: int) -> dict[str, torch.Tensor]: + video_id = self.videos[idx] + caption = self.captions[video_id] + + reader = StreamingMediaDecoder(self.video_root / (video_id + '.mp4')) + reader.add_basic_video_stream( + frames_per_chunk=int(_CLIP_FPS * self.duration_sec), + frame_rate=_CLIP_FPS, + format='rgb24', + ) + reader.add_basic_video_stream( + frames_per_chunk=int(_SYNC_FPS * self.duration_sec), + frame_rate=_SYNC_FPS, + format='rgb24', + ) + + reader.fill_buffer() + data_chunk = reader.pop_chunks() + + clip_chunk = data_chunk[0] + sync_chunk = data_chunk[1] + if clip_chunk is None: + raise RuntimeError(f'CLIP video returned None {video_id}') + if clip_chunk.shape[0] < self.clip_expected_length: + raise RuntimeError( + f'CLIP video too short {video_id}, expected {self.clip_expected_length}, got {clip_chunk.shape[0]}' + ) + + if sync_chunk is None: + raise RuntimeError(f'Sync video returned None {video_id}') + if sync_chunk.shape[0] < self.sync_expected_length: + raise RuntimeError( + f'Sync video too short {video_id}, expected {self.sync_expected_length}, got {sync_chunk.shape[0]}' + ) + + # truncate the video + clip_chunk = clip_chunk[:self.clip_expected_length] + if clip_chunk.shape[0] != self.clip_expected_length: + raise RuntimeError(f'CLIP video wrong length {video_id}, ' + f'expected {self.clip_expected_length}, ' + f'got {clip_chunk.shape[0]}') + clip_chunk = self.clip_transform(clip_chunk) + + sync_chunk = sync_chunk[:self.sync_expected_length] + if sync_chunk.shape[0] != self.sync_expected_length: + raise RuntimeError(f'Sync video wrong length {video_id}, ' + f'expected {self.sync_expected_length}, ' + f'got {sync_chunk.shape[0]}') + sync_chunk = self.sync_transform(sync_chunk) + + data = { + 'name': 
video_id, + 'caption': caption, + 'clip_video': clip_chunk, + 'sync_video': sync_chunk, + } + + return data + + def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: + try: + return self.sample(idx) + except Exception as e: + log.error(f'Error loading video {self.videos[idx]}: {e}') + return None + + def __len__(self): + return len(self.captions) + + +class VGGSound(VideoDataset): + + def __init__( + self, + video_root: Union[str, Path], + csv_path: Union[str, Path], + *, + duration_sec: float = 8.0, + ): + super().__init__(video_root, duration_sec=duration_sec) + self.video_root = Path(video_root) + self.csv_path = Path(csv_path) + + videos = sorted(os.listdir(self.video_root)) + if local_rank == 0: + log.info(f'{len(videos)} videos found in {video_root}') + self.captions = {} + + df = pd.read_csv(csv_path, header=None, names=['id', 'sec', 'caption', + 'split']).to_dict(orient='records') + + videos_no_found = [] + for row in df: + if row['split'] == 'test': + start_sec = int(row['sec']) + video_id = str(row['id']) + # this is how our videos are named + video_name = f'{video_id}_{start_sec:06d}' + if video_name + '.mp4' not in videos: + videos_no_found.append(video_name) + continue + + self.captions[video_name] = row['caption'] + + if local_rank == 0: + log.info(f'{len(videos)} videos found in {video_root}') + log.info(f'{len(self.captions)} useable videos found') + if videos_no_found: + log.info(f'{len(videos_no_found)} found in {csv_path} but not in {video_root}') + log.info( + 'A small amount is expected, as not all videos are still available on YouTube') + + self.videos = sorted(list(self.captions.keys())) + + +class MovieGen(VideoDataset): + + def __init__( + self, + video_root: Union[str, Path], + jsonl_root: Union[str, Path], + *, + duration_sec: float = 10.0, + ): + super().__init__(video_root, duration_sec=duration_sec) + self.video_root = Path(video_root) + self.jsonl_root = Path(jsonl_root) + + videos = sorted(os.listdir(self.video_root)) + videos = [v[:-4] for v in videos] # remove extensions + self.captions = {} + + for v in videos: + with open(self.jsonl_root / (v + '.jsonl')) as f: + data = json.load(f) + self.captions[v] = data['audio_prompt'] + + if local_rank == 0: + log.info(f'{len(videos)} videos found in {video_root}') + + self.videos = videos diff --git a/meanaudio/data/extracted_audio.py b/meanaudio/data/extracted_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..f508e0414a1bfe126011c3e5cc648679f5776dff --- /dev/null +++ b/meanaudio/data/extracted_audio.py @@ -0,0 +1,175 @@ +import logging +from pathlib import Path +from typing import Union, Optional + +import pandas as pd +import torch +from tensordict import TensorDict +from torch.utils.data.dataset import Dataset +from torch.utils.data import DataLoader + +from meanaudio.utils.dist_utils import local_rank +import numpy as np +import glob +import torch.nn.functional as F +log = logging.getLogger() + + +class ExtractedAudio(Dataset): + def __init__( + self, + tsv_path: Union[str, Path], + *, + concat_text_fc: bool, + npz_dir: Union[str, Path], + data_dim: dict[str, int], + repa_npz_dir: Optional[Union[str, Path]], # if passed, repa features (zs) would be returned + exclude_cls: Optional[bool], + repa_version: Optional[int], + ): + super().__init__() + self.data_dim = data_dim + self.df_list = pd.read_csv(tsv_path, sep='\t').to_dict('records') # id, caption + self.ids = [str(d['id']) for d in self.df_list] + npz_files = glob.glob(f"{npz_dir}/*.npz") + self.concat_text_fc = 
concat_text_fc + self.exclude_cls = exclude_cls + self.repa_version = repa_version + + if self.concat_text_fc: + log.info(f'We will concat the pooled text_features and text_features_c for text condition') + + # dimension check + sample = np.load(f'{npz_dir}/0.npz') + mean_s = [len(npz_files)] + list(sample['mean'].shape) + std_s = [len(npz_files)] + list(sample['std'].shape) + text_features_s = [len(npz_files)] + list(sample['text_features'].shape) + text_features_c_s = [len(npz_files)] + list(sample['text_features_c'].shape) + if self.concat_text_fc: + text_features_c_s[-1] = text_features_c_s[-1] + text_features_s[-1] + + log.info(f'Loading {len(npz_files)} npz files from {npz_dir}') + log.info(f'Loaded mean: {mean_s}.') + log.info(f'Loaded std: {std_s}.') + log.info(f'Loaded text features: {text_features_s}.') + log.info(f'Loaded text features_c: {text_features_c_s}.') + + assert len(npz_files) == len(self.df_list), 'Number mismatch between npz files and tsv items' + assert mean_s[1] == self.data_dim['latent_seq_len'], \ + f'{mean_s[1]} != {self.data_dim["latent_seq_len"]}' + assert std_s[1] == self.data_dim['latent_seq_len'], \ + f'{std_s[1]} != {self.data_dim["latent_seq_len"]}' + assert text_features_s[1] == self.data_dim['text_seq_len'], \ + f'{text_features_s[1]} != {self.data_dim["text_seq_len"]}' + assert text_features_s[-1] == self.data_dim['text_dim'], \ + f'{text_features_s[-1]} != {self.data_dim["text_dim"]}' + assert text_features_c_s[-1] == self.data_dim['text_c_dim'], \ + f'{text_features_c_s[-1]} != {self.data_dim["text_c_dim"]}' + + self.npz_dir = npz_dir + if repa_npz_dir != None: + self.repa_npz_dir = repa_npz_dir + sample = np.load(f'{repa_npz_dir}/0.npz') + repa_npz_files = glob.glob(f"{repa_npz_dir}/*.npz") + log.info(f'Loading {len(repa_npz_files)} npz representations from {repa_npz_dir}') + es_s = [len(repa_npz_files)] + list(sample['es'].shape) + if self.repa_version == 2: + es_s[1] = 65 # ad-hoc 8x downsampling for EAT + elif self.repa_version == 3: + es_s[1] = 1 # we only use cls token for alignment + else: + if self.exclude_cls: + es_s[1] = es_s[1] - 1 + + log.info(f'Loaded es: {es_s}') + assert len(repa_npz_files) == len(npz_files), 'Number mismatch between repa npz files and latent npz files' + assert es_s[1] == self.data_dim['repa_seq_len'], \ + f'{es_s[1]} != {self.data_dim["repa_seq_len"]}' + assert es_s[-1] == self.data_dim['repa_seq_dim'], \ + f'{es_s[-1]} != {self.data_dim["repa_seq_dim"]}' + else: + self.repa_npz_dir = None + + def compute_latent_stats(self) -> tuple[torch.Tensor, torch.Tensor]: + # !TODO here we may consider load pre-computed latent mean & std + raise NotImplementedError('Please manually compute latent stats outside. 
') + + def __getitem__(self, idx): + npz_path = f'{self.npz_dir}/{idx}.npz' + np_data = np.load(npz_path) + text_features = torch.from_numpy(np_data['text_features']) + text_features_c = torch.from_numpy(np_data['text_features_c']) + if self.concat_text_fc: + text_features_c = torch.cat([text_features.mean(dim=-2), + text_features_c], dim=-1) # [b, d+d_c] + + out_dict = { + 'id': str(self.df_list[idx]['id']), + 'a_mean': torch.from_numpy(np_data['mean']), + 'a_std': torch.from_numpy(np_data['std']), + 'text_features': text_features, + 'text_features_c': text_features_c, + 'caption': self.df_list[idx]['caption'], + } + if self.repa_npz_dir != None: + repa_npz_path = f'{self.repa_npz_dir}/{idx}.npz' + repa_np_data = np.load(repa_npz_path) + zs = torch.from_numpy(repa_np_data['es']) + + if self.repa_version == 1: + if self.exclude_cls: + zs = zs[1:,:] + if self.repa_version == 2: + z_cls = zs[0] # (dim) + # zs = zs[1:,:].view(64, 8, 768) + zs = F.avg_pool2d(zs[1:,:].unsqueeze(0), + kernel_size=(8, 1), + stride=(8, 1)).squeeze() # (64, 768) + zs = torch.cat((z_cls.unsqueeze(0), zs), dim=0) + elif self.repa_version == 3: # cls token + zs = zs[0].unsqueeze(0) + + out_dict['zs'] = zs #!TODO Here field is WRONG for eat features (should be zs) + + return out_dict + + def __len__(self): + return len(self.ids) + + +if __name__ == '__main__': + + from meanaudio.utils.dist_utils import info_if_rank_zero, local_rank, world_size + import torch.distributed as distributed + from datetime import timedelta + from torch.utils.data.distributed import DistributedSampler + + + def distributed_setup(): + distributed.init_process_group(backend="nccl", timeout=timedelta(hours=2)) + log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}') + return local_rank, world_size + + distributed_setup() + + tsv_path = '/hpc_stor03/sjtu_home/xiquan.li/TTA/MMAudio/training/audiocaps/train-memmap-t5-clap.tsv' + + data_dim = {'latent_seq_len': 312, + 'text_seq_len': 77, + 'text_dim': 1024, + 'text_c_dim': 512} + + dataset = ExtractedAudio(tsv_path=tsv_path, + npz_dir=npz_dir, + data_dim=data_dim) + loader = DataLoader(dataset, + 16, + num_workers=8, + persistent_workers=8, + pin_memory=False) + train_sampler = DistributedSampler(dataset, rank=local_rank, shuffle=True) + + + for b in loader: + print(b['a_mean'].shape) + break \ No newline at end of file diff --git a/meanaudio/data/extraction/__init__.py b/meanaudio/data/extraction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/data/extraction/vgg_sound.py b/meanaudio/data/extraction/vgg_sound.py new file mode 100644 index 0000000000000000000000000000000000000000..0e074fc107c3c3c67d2488e5b909f85627f4eb9a --- /dev/null +++ b/meanaudio/data/extraction/vgg_sound.py @@ -0,0 +1,195 @@ +import logging +import os +from pathlib import Path +from typing import Optional, Union + +import pandas as pd +import torch +import torchaudio +from torch.utils.data.dataset import Dataset +from torchvision.transforms import v2 +from torio.io import StreamingMediaDecoder + +from mmaudio.utils.dist_utils import local_rank + +log = logging.getLogger() + +_CLIP_SIZE = 384 +_CLIP_FPS = 8.0 + +_SYNC_SIZE = 224 +_SYNC_FPS = 25.0 + + +class VGGSound(Dataset): + + def __init__( + self, + root: Union[str, Path], + *, + tsv_path: Union[str, Path] = 'sets/vgg3-train.tsv', + sample_rate: int = 16_000, + duration_sec: float = 8.0, + audio_samples: Optional[int] = None, + normalize_audio: bool = False, + 
): + self.root = Path(root) + self.normalize_audio = normalize_audio + if audio_samples is None: + self.audio_samples = int(sample_rate * duration_sec) + else: + self.audio_samples = audio_samples + effective_duration = audio_samples / sample_rate + # make sure the duration is close enough, within 15ms + assert abs(effective_duration - duration_sec) < 0.015, \ + f'audio_samples {audio_samples} does not match duration_sec {duration_sec}' + + print("Loading videos started") + videos = sorted(os.listdir(self.root)) + videos = set([Path(v).stem for v in videos]) # remove extensions + print("Loading videos ended") + self.labels = {} + self.videos = [] + missing_videos = [] + + # read the tsv for subset information + df_list = pd.read_csv(tsv_path, sep='\t', dtype={'id': str}).to_dict('records') + for record in df_list: + id = record['id'] + label = record['label'] + if id in videos: + self.labels[id] = label + self.videos.append(id) + else: + missing_videos.append(id) + + if local_rank == 0: + log.info(f'{len(videos)} videos found in {root}') + log.info(f'{len(self.videos)} videos found in {tsv_path}') + log.info(f'{len(missing_videos)} videos missing in {root}') + + self.sample_rate = sample_rate + self.duration_sec = duration_sec + + self.expected_audio_length = audio_samples + self.clip_expected_length = int(_CLIP_FPS * self.duration_sec) + self.sync_expected_length = int(_SYNC_FPS * self.duration_sec) + + self.clip_transform = v2.Compose([ + v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + ]) + + self.sync_transform = v2.Compose([ + v2.Resize(_SYNC_SIZE, interpolation=v2.InterpolationMode.BICUBIC), + v2.CenterCrop(_SYNC_SIZE), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + + self.resampler = {} + + def sample(self, idx: int) -> dict[str, torch.Tensor]: + video_id = self.videos[idx] + label = self.labels[video_id] + + reader = StreamingMediaDecoder(self.root / (video_id + '.mp4')) + reader.add_basic_video_stream( + frames_per_chunk=int(_CLIP_FPS * self.duration_sec), + frame_rate=_CLIP_FPS, + format='rgb24', + ) + reader.add_basic_video_stream( + frames_per_chunk=int(_SYNC_FPS * self.duration_sec), + frame_rate=_SYNC_FPS, + format='rgb24', + ) + reader.add_basic_audio_stream(frames_per_chunk=2**30, ) + + reader.fill_buffer() + data_chunk = reader.pop_chunks() + + clip_chunk = data_chunk[0] + sync_chunk = data_chunk[1] + audio_chunk = data_chunk[2] + + if clip_chunk is None: + raise RuntimeError(f'CLIP video returned None {video_id}') + if clip_chunk.shape[0] < self.clip_expected_length: + raise RuntimeError( + f'CLIP video too short {video_id}, expected {self.clip_expected_length}, got {clip_chunk.shape[0]}' + ) + + if sync_chunk is None: + raise RuntimeError(f'Sync video returned None {video_id}') + if sync_chunk.shape[0] < self.sync_expected_length: + raise RuntimeError( + f'Sync video too short {video_id}, expected {self.sync_expected_length}, got {sync_chunk.shape[0]}' + ) + + # process audio + sample_rate = int(reader.get_out_stream_info(2).sample_rate) + audio_chunk = audio_chunk.transpose(0, 1) + audio_chunk = audio_chunk.mean(dim=0) # mono + if self.normalize_audio: + abs_max = audio_chunk.abs().max() + audio_chunk = audio_chunk / abs_max * 0.95 + if abs_max <= 1e-6: + raise RuntimeError(f'Audio is silent {video_id}') + + # resample + if sample_rate == self.sample_rate: + audio_chunk = audio_chunk + else: + if 
sample_rate not in self.resampler: + # https://pytorch.org/audio/stable/tutorials/audio_resampling_tutorial.html#kaiser-best + self.resampler[sample_rate] = torchaudio.transforms.Resample( + sample_rate, + self.sample_rate, + lowpass_filter_width=64, + rolloff=0.9475937167399596, + resampling_method='sinc_interp_kaiser', + beta=14.769656459379492, + ) + audio_chunk = self.resampler[sample_rate](audio_chunk) + + if audio_chunk.shape[0] < self.expected_audio_length: + raise RuntimeError(f'Audio too short {video_id}') + audio_chunk = audio_chunk[:self.expected_audio_length] + + # truncate the video + clip_chunk = clip_chunk[:self.clip_expected_length] + if clip_chunk.shape[0] != self.clip_expected_length: + raise RuntimeError(f'CLIP video wrong length {video_id}, ' + f'expected {self.clip_expected_length}, ' + f'got {clip_chunk.shape[0]}') + clip_chunk = self.clip_transform(clip_chunk) + + sync_chunk = sync_chunk[:self.sync_expected_length] + if sync_chunk.shape[0] != self.sync_expected_length: + raise RuntimeError(f'Sync video wrong length {video_id}, ' + f'expected {self.sync_expected_length}, ' + f'got {sync_chunk.shape[0]}') + sync_chunk = self.sync_transform(sync_chunk) + + data = { + 'id': video_id, + 'caption': label, + 'audio': audio_chunk, + 'clip_video': clip_chunk, + 'sync_video': sync_chunk, + } + + return data + + def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: + try: + return self.sample(idx) + except Exception as e: + log.error(f'Error loading video {self.videos[idx]}: {e}') + return None + + def __len__(self): + return len(self.labels) diff --git a/meanaudio/data/extraction/wav_dataset.py b/meanaudio/data/extraction/wav_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5d8699eea2722c534b190cc2944f16c6a1a5fa21 --- /dev/null +++ b/meanaudio/data/extraction/wav_dataset.py @@ -0,0 +1,153 @@ +import logging +import os +from pathlib import Path +from typing import Union + +import open_clip +import pandas as pd +import torch +import torchaudio +from torch.utils.data.dataset import Dataset +import torch.nn.functional as F + +log = logging.getLogger() + + +class WavTextClipsDataset(Dataset): + + def __init__( + self, + root: Union[str, Path], + *, + captions_tsv: Union[str, Path], + clips_tsv: Union[str, Path], + sample_rate: int, + num_samples: int, + duration: int = 10, + normalize_audio: bool = False, + reject_silent: bool = False, + tokenizer_id: str = 'ViT-H-14-378-quickgelu', + multi_caption: bool = False + ): + self.root = Path(root) + self.sample_rate = sample_rate + self.num_samples = num_samples + self.normalize_audio = normalize_audio + self.reject_silent = reject_silent + self.duration = duration + self.tokenizer = open_clip.get_tokenizer(tokenizer_id) # only for clip, for t5 and clap we will get caption embeddings outside + + audios = sorted(os.listdir(self.root)) + audios = set([ + Path(audio).stem for audio in audios # file name w/o extension + if audio.endswith('.wav') or audio.endswith('.flac') + ]) + self.captions = {} + + # read the caption tsv + df_list = pd.read_csv(captions_tsv, sep='\t', dtype={'id': str}).to_dict('records') + for record in df_list: + id = record['id'] # file name + caption = record['caption'] + if not multi_caption: + self.captions[id] = caption # captions: {name(no partition index): caption} !Only ONE caption will be selected for an audio clip + else: + if id not in self.captions.keys(): + self.captions[id] = [caption] + else: + self.captions[id].append(caption) + + # read the clip tsv + df_list = 
pd.read_csv(clips_tsv, sep='\t', dtype={ + 'id': str, + 'name': str + }).to_dict('records') + self.clips = [] + for record in df_list: # partition + name = record['name'] + if name not in self.captions: + log.warning(f'Audio {name} not found in {captions_tsv}') + continue + + if not multi_caption: + record['caption'] = self.captions[name] + self.clips.append(record) # add caption to partition csv + else: + for caption in self.captions[name]: + r = record.copy() + r['caption'] = caption + self.clips.append(r) # add caption to partition csv + + log.info(f'Found {len(self.clips)} audio files in {self.root}') + + self.resampler = {} + + def __getitem__(self, idx: int) -> torch.Tensor: + try: + clip = self.clips[idx] + audio_name = clip['name'] + audio_id = clip['id'] + caption = clip['caption'] + start_sample = clip['start_sample'] + end_sample = clip['end_sample'] + + audio_path = self.root / f'{audio_name}.flac' + if not audio_path.exists(): + audio_path = self.root / f'{audio_name}.wav' + assert audio_path.exists() + + audio_chunk, sample_rate = torchaudio.load(audio_path) + audio_chunk = audio_chunk.mean(dim=0) # mono + abs_max = audio_chunk.abs().max() + if self.normalize_audio: + audio_chunk = audio_chunk / abs_max * 0.95 + + if self.reject_silent and abs_max < 1e-6: + log.warning(f'Rejecting silent audio') + return None + if audio_chunk.size(0) < end_sample: + audio_chunk = F.pad( + audio_chunk, + (0, end_sample - audio_chunk.size(0)), + mode='constant', + value=0 + ) + else: + audio_chunk = audio_chunk[start_sample:end_sample] + + # resample + if sample_rate == self.sample_rate: + audio_chunk = audio_chunk + else: + if sample_rate not in self.resampler: + # https://pytorch.org/audio/stable/tutorials/audio_resampling_tutorial.html#kaiser-best + self.resampler[sample_rate] = torchaudio.transforms.Resample( + sample_rate, + self.sample_rate, + lowpass_filter_width=64, + rolloff=0.9475937167399596, + resampling_method='sinc_interp_kaiser', + beta=14.769656459379492, + ) + audio_chunk = self.resampler[sample_rate](audio_chunk) + + if audio_chunk.shape[0] < self.num_samples: + raise ValueError('Audio is too short') + audio_chunk = audio_chunk[:self.num_samples] + + tokens = self.tokenizer([caption])[0] + + output = { + 'waveform': audio_chunk, + 'id': audio_id, + 'caption': caption, + 'tokens': tokens, + } + + return output + except Exception as e: + log.error(f'Error reading {audio_path}: {e}') + return None + + def __len__(self): + return len(self.clips) diff --git a/meanaudio/data/mm_dataset.py b/meanaudio/data/mm_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..63ac4209ffad076cf28d9ade8aeab73c5d74444c --- /dev/null +++ b/meanaudio/data/mm_dataset.py @@ -0,0 +1,50 @@ +import bisect + +import torch +from torch.utils.data.dataset import Dataset + + +# modified from https://pytorch.org/docs/stable/_modules/torch/utils/data/dataset.html#ConcatDataset +class MultiModalDataset(Dataset): + datasets: list[Dataset] + cumulative_sizes: list[int] + + @staticmethod + def cumsum(sequence): + r, s = [], 0 + for e in sequence: + l = len(e) + r.append(l + s) + s += l + return r + + def __init__(self, video_datasets: list[Dataset], audio_datasets: list[Dataset]): + super().__init__() + self.video_datasets = list(video_datasets) if video_datasets else [] + self.audio_datasets = list(audio_datasets) if audio_datasets else [] + self.datasets = self.video_datasets + self.audio_datasets + + self.cumulative_sizes = self.cumsum(self.datasets) + + def __len__(self): + return 
self.cumulative_sizes[-1] + + def __getitem__(self, idx): + if idx < 0: + if -idx > len(self): + raise ValueError("absolute value of index should not exceed dataset length") + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) # which dataset idx falls into + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx][sample_idx] + + def compute_latent_stats(self) -> tuple[torch.Tensor, torch.Tensor]: + if self.video_datasets == []: + raise NotImplementedError(f'This function should not be called for audio-text dataset', + 'Please load latents stats manually instead') + return self.audio_datasets[0].compute_latent_stats() # audio-text training + else: + return self.video_datasets[0].compute_latent_stats() # video-text training \ No newline at end of file diff --git a/meanaudio/data/utils.py b/meanaudio/data/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1f8865add7194cafe58c52c90c64adbb8ecb214a --- /dev/null +++ b/meanaudio/data/utils.py @@ -0,0 +1,148 @@ +import logging +import os +import random +import tempfile +from pathlib import Path +from typing import Any, Optional, Union + +import torch +import torch.distributed as dist +from tensordict import MemoryMappedTensor +from torch.utils.data import DataLoader +from torch.utils.data.dataset import Dataset +from tqdm import tqdm + +from meanaudio.utils.dist_utils import local_rank, world_size + +scratch_path = Path(os.environ['SLURM_SCRATCH'] if 'SLURM_SCRATCH' in os.environ else '/dev/shm') +shm_path = Path('/dev/shm') + +log = logging.getLogger() + + +def reseed(seed): + random.seed(seed) + torch.manual_seed(seed) + + +def local_scatter_torch(obj: Optional[Any]): + if world_size == 1: + # Just one worker. Do nothing. + return obj + + array = [obj] * world_size + target_array = [None] + if local_rank == 0: + dist.scatter_object_list(target_array, scatter_object_input_list=array, src=0) + else: + dist.scatter_object_list(target_array, scatter_object_input_list=None, src=0) + return target_array[0] + + +class ShardDataset(Dataset): + + def __init__(self, root): + self.root = root + self.shards = sorted(os.listdir(root)) + + def __len__(self): + return len(self.shards) + + def __getitem__(self, idx): + return torch.load(os.path.join(self.root, self.shards[idx]), weights_only=True) + + +def get_tmp_dir(in_memory: bool) -> Path: + return shm_path if in_memory else scratch_path + + +def load_shards_and_share(data_path: Union[str, Path], ids: list[int], + in_memory: bool) -> MemoryMappedTensor: + if local_rank == 0: + with tempfile.NamedTemporaryFile(prefix='shared-tensor-', dir=get_tmp_dir(in_memory)) as f: + log.info(f'Loading shards from {data_path} into {f.name}...') + data = load_shards(data_path, ids=ids, tmp_file_path=f.name) + data = share_tensor_to_all(data) + torch.distributed.barrier() + f.close() # why does the context manager not close the file for me? 
+ else: + log.info('Waiting for the data to be shared with me...') + data = share_tensor_to_all(None) + torch.distributed.barrier() + + return data + + +def load_shards( + data_path: Union[str, Path], + ids: list[int], + *, + tmp_file_path: str, +) -> Union[torch.Tensor, dict[str, torch.Tensor]]: + + id_set = set(ids) + shards = sorted(os.listdir(data_path)) + log.info(f'Found {len(shards)} shards in {data_path}.') + first_shard = torch.load(os.path.join(data_path, shards[0]), weights_only=True) + + log.info(f'Rank {local_rank} created file {tmp_file_path}') + first_item = next(iter(first_shard.values())) + log.info(f'First item shape: {first_item.shape}') + mm_tensor = MemoryMappedTensor.empty(shape=(len(ids), *first_item.shape), + dtype=torch.float32, + filename=tmp_file_path, + existsok=True) + total_count = 0 + used_index = set() + id_indexing = {i: idx for idx, i in enumerate(ids)} + # faster with no workers; otherwise we need to set_sharing_strategy('file_system') + loader = DataLoader(ShardDataset(data_path), batch_size=1, num_workers=0) + for data in tqdm(loader, desc='Loading shards'): + for i, v in data.items(): + if i not in id_set: + continue + + # tensor_index = ids.index(i) + tensor_index = id_indexing[i] + if tensor_index in used_index: + raise ValueError(f'Duplicate id {i} found in {data_path}.') + used_index.add(tensor_index) + mm_tensor[tensor_index] = v + total_count += 1 + + assert total_count == len(ids), f'Expected {len(ids)} tensors, got {total_count}.' + log.info(f'Loaded {total_count} tensors from {data_path}.') + + return mm_tensor + + +def share_tensor_to_all(x: Optional[MemoryMappedTensor]) -> MemoryMappedTensor: + """ + x: the tensor to be shared; None if local_rank != 0 + return: the shared tensor + """ + + # there is no need to share your stuff with anyone if you are alone; must be in memory + if world_size == 1: + return x + + if local_rank == 0: + assert x is not None, 'x must not be None if local_rank == 0' + else: + assert x is None, 'x must be None if local_rank != 0' + + if local_rank == 0: + filename = x.filename + meta_information = (filename, x.shape, x.dtype) + else: + meta_information = None + + filename, data_shape, data_type = local_scatter_torch(meta_information) + if local_rank == 0: + data = x + else: + data = MemoryMappedTensor.from_filename(filename=filename, + dtype=data_type, + shape=data_shape) + + return data diff --git a/meanaudio/eval_utils.py b/meanaudio/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c32c8fd062d0668b9a3c73e37f26d9c535de80df --- /dev/null +++ b/meanaudio/eval_utils.py @@ -0,0 +1,167 @@ +import dataclasses +import logging +from pathlib import Path +from typing import Optional + +import numpy as np +import torch +from colorlog import ColoredFormatter +from PIL import Image +from torchvision.transforms import v2 + +from meanaudio.data.av_utils import ImageInfo, VideoInfo, read_frames, reencode_with_audio +from meanaudio.model.flow_matching import FlowMatching +from meanaudio.model.mean_flow import MeanFlow +from meanaudio.model.networks import MeanAudio, FluxAudio +from meanaudio.model.sequence_config import CONFIG_16K, CONFIG_44K, SequenceConfig +from meanaudio.model.utils.features_utils import FeaturesUtils +from meanaudio.utils.download_utils import download_model_if_needed + +log = logging.getLogger() + + +@dataclasses.dataclass +class ModelConfig: + model_name: str + model_path: Path + vae_path: Path + bigvgan_16k_path: Optional[Path] + mode: str + + @property + def seq_cfg(self) 
-> SequenceConfig: + if self.mode == '16k': + return CONFIG_16K # get sequence config when calling cfg.seq_cfgs + elif self.mode == '44k': + return CONFIG_44K + + def download_if_needed(self): + raise NotImplementedError("Downloading models is not supported") + download_model_if_needed(self.model_path) + download_model_if_needed(self.vae_path) + if self.bigvgan_16k_path is not None: + download_model_if_needed(self.bigvgan_16k_path) + + +fluxaudio_fm = ModelConfig(model_name='fluxaudio_fm', + model_path=Path('./weights/fluxaudio_fm.pth'), + vae_path=Path('./weights/v1-16.pth'), + bigvgan_16k_path=Path('./weights/best_netG.pt'), + mode='16k') +meanaudio_mf = ModelConfig(model_name='meanaudio_mf', + model_path=Path('./weights/meanaudio_mf.pth'), + vae_path=Path('./weights/v1-16.pth'), + bigvgan_16k_path=Path('./weights/best_netG.pt'), + mode='16k') + +all_model_cfg: dict[str, ModelConfig] = { + 'fluxaudio_fm': fluxaudio_fm, + 'meanaudio_mf': meanaudio_mf, +} + + +def generate_fm( + text: Optional[list[str]], + *, + negative_text: Optional[list[str]] = None, + feature_utils: FeaturesUtils, + net: FluxAudio, + fm: FlowMatching, + rng: torch.Generator, + cfg_strength: float, +) -> torch.Tensor: + # generate audio with vanilla flow matching + + device = feature_utils.device + dtype = feature_utils.dtype + + bs = len(text) + + if text is not None: + text_features, text_features_c = feature_utils.encode_text(text) + else: + text_features, text_features_c = net.get_empty_string_sequence(bs) + + if negative_text is not None: + assert len(negative_text) == bs + negative_text_features = feature_utils.encode_text(negative_text) + else: + negative_text_features = net.get_empty_string_sequence(bs) + + x0 = torch.randn(bs, + net.latent_seq_len, + net.latent_dim, + device=device, + dtype=dtype, + generator=rng) + preprocessed_conditions = net.preprocess_conditions(text_features, text_features_c) + empty_conditions = net.get_empty_conditions( + bs, negative_text_features=negative_text_features if negative_text is not None else None) + + cfg_ode_wrapper = lambda t, x: net.ode_wrapper(t, x, preprocessed_conditions, empty_conditions, + cfg_strength) + x1 = fm.to_data(cfg_ode_wrapper, x0) + x1 = net.unnormalize(x1) + spec = feature_utils.decode(x1) + audio = feature_utils.vocode(spec) + return audio + + +def generate_mf( + text: Optional[list[str]], + *, + negative_text: Optional[list[str]] = None, + feature_utils: FeaturesUtils, + net: MeanAudio, + mf: MeanFlow, + rng: torch.Generator, + cfg_strength: float, +) -> torch.Tensor: + # generate audio with mean flow + device = feature_utils.device + dtype = feature_utils.dtype + + bs = len(text) + + if text is not None: + text_features, text_features_c = feature_utils.encode_text(text) + else: + text_features, text_features_c = net.get_empty_string_sequence(bs) + + if negative_text is not None: + assert len(negative_text) == bs + negative_text_features = feature_utils.encode_text(negative_text) + else: + negative_text_features = net.get_empty_string_sequence(bs) + + x0 = torch.randn(bs, + net.latent_seq_len, + net.latent_dim, + device=device, + dtype=dtype, + generator=rng) + preprocessed_conditions = net.preprocess_conditions(text_features, text_features_c) + empty_conditions = net.get_empty_conditions( + bs, negative_text_features=negative_text_features if negative_text is not None else None) + + cfg_ode_wrapper = lambda t, r, x: net.ode_wrapper(t, r, x, preprocessed_conditions, empty_conditions, + cfg_strength) + x1 = mf.to_data(cfg_ode_wrapper, x0) + x1 = 
net.unnormalize(x1) + spec = feature_utils.decode(x1) + audio = feature_utils.vocode(spec) + return audio + + +LOGFORMAT = "[%(log_color)s%(levelname)-8s%(reset)s]: %(log_color)s%(message)s%(reset)s" + + +def setup_eval_logging(log_level: int = logging.INFO): + logging.root.setLevel(log_level) # set up root logger <=> logging.getLogger().setLevel(log_level) + formatter = ColoredFormatter(LOGFORMAT) + stream = logging.StreamHandler() # to Console + stream.setLevel(log_level) + stream.setFormatter(formatter) + log = logging.getLogger() + log.setLevel(log_level) + log.addHandler(stream) \ No newline at end of file diff --git a/meanaudio/ext/__init__.py b/meanaudio/ext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/meanaudio/ext/__init__.py @@ -0,0 +1 @@ + diff --git a/meanaudio/ext/autoencoder/__init__.py b/meanaudio/ext/autoencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a876391c1e48970e93ff45f212f21f86d4d0c9 --- /dev/null +++ b/meanaudio/ext/autoencoder/__init__.py @@ -0,0 +1 @@ +from .autoencoder import AutoEncoderModule diff --git a/meanaudio/ext/autoencoder/autoencoder.py b/meanaudio/ext/autoencoder/autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..25ff41dae3ab2e748170eaf668ef646d6c5c31a1 --- /dev/null +++ b/meanaudio/ext/autoencoder/autoencoder.py @@ -0,0 +1,52 @@ +from typing import Literal, Optional + +import torch +import torch.nn as nn + +from meanaudio.ext.autoencoder.vae import VAE, get_my_vae +from meanaudio.ext.bigvgan import BigVGAN +from meanaudio.ext.bigvgan_v2.bigvgan import BigVGAN as BigVGANv2 +from meanaudio.model.utils.distributions import DiagonalGaussianDistribution + + +class AutoEncoderModule(nn.Module): + + def __init__(self, + *, + vae_ckpt_path, + vocoder_ckpt_path: Optional[str] = None, + mode: Literal['16k', '44k'], + need_vae_encoder: bool = True): + super().__init__() + self.vae: VAE = get_my_vae(mode).eval() + vae_state_dict = torch.load(vae_ckpt_path, weights_only=True, map_location='cpu') + self.vae.load_state_dict(vae_state_dict) + self.vae.remove_weight_norm() + + if mode == '16k': + assert vocoder_ckpt_path is not None + self.vocoder = BigVGAN(vocoder_ckpt_path).eval() + elif mode == '44k': + self.vocoder = BigVGANv2.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', + use_cuda_kernel=False) + self.vocoder.remove_weight_norm() + else: + raise ValueError(f'Unknown mode: {mode}') + + for param in self.parameters(): + param.requires_grad = False + + if not need_vae_encoder: + del self.vae.encoder + + @torch.inference_mode() + def encode(self, x: torch.Tensor) -> DiagonalGaussianDistribution: + return self.vae.encode(x) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + return self.vae.decode(z) + + @torch.inference_mode() + def vocode(self, spec: torch.Tensor) -> torch.Tensor: + return self.vocoder(spec) diff --git a/meanaudio/ext/autoencoder/edm2_utils.py b/meanaudio/ext/autoencoder/edm2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a18ffba5cc42214fddf1300034be2eff2760025c --- /dev/null +++ b/meanaudio/ext/autoencoder/edm2_utils.py @@ -0,0 +1,168 @@ +# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# This work is licensed under a Creative Commons +# Attribution-NonCommercial-ShareAlike 4.0 International License. +# You should have received a copy of the license along with this +# work. 
If not, see http://creativecommons.org/licenses/by-nc-sa/4.0/ +"""Improved diffusion model architecture proposed in the paper +"Analyzing and Improving the Training Dynamics of Diffusion Models".""" + +import numpy as np +import torch + +#---------------------------------------------------------------------------- +# Variant of constant() that inherits dtype and device from the given +# reference tensor by default. + +_constant_cache = dict() + + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + + +def const_like(ref, value, shape=None, dtype=None, device=None, memory_format=None): + if dtype is None: + dtype = ref.dtype + if device is None: + device = ref.device + return constant(value, shape=shape, dtype=dtype, device=device, memory_format=memory_format) + + +#---------------------------------------------------------------------------- +# Normalize given tensor to unit magnitude with respect to the given +# dimensions. Default = all dimensions except the first. + + +def normalize(x, dim=None, eps=1e-4): + if dim is None: + dim = list(range(1, x.ndim)) + norm = torch.linalg.vector_norm(x, dim=dim, keepdim=True, dtype=torch.float32) + norm = torch.add(eps, norm, alpha=np.sqrt(norm.numel() / x.numel())) + return x / norm.to(x.dtype) + + +class Normalize(torch.nn.Module): + + def __init__(self, dim=None, eps=1e-4): + super().__init__() + self.dim = dim + self.eps = eps + + def forward(self, x): + return normalize(x, dim=self.dim, eps=self.eps) + + +#---------------------------------------------------------------------------- +# Upsample or downsample the given tensor with the given filter, +# or keep it as is. + + +def resample(x, f=[1, 1], mode='keep'): + if mode == 'keep': + return x + f = np.float32(f) + assert f.ndim == 1 and len(f) % 2 == 0 + pad = (len(f) - 1) // 2 + f = f / f.sum() + f = np.outer(f, f)[np.newaxis, np.newaxis, :, :] + f = const_like(x, f) + c = x.shape[1] + if mode == 'down': + return torch.nn.functional.conv2d(x, + f.tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + assert mode == 'up' + return torch.nn.functional.conv_transpose2d(x, (f * 4).tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving SiLU (Equation 81). + + +def mp_silu(x): + return torch.nn.functional.silu(x) / 0.596 + + +class MPSiLU(torch.nn.Module): + + def forward(self, x): + return mp_silu(x) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving sum (Equation 88). + + +def mp_sum(a, b, t=0.5): + return a.lerp(b, t) / np.sqrt((1 - t)**2 + t**2) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving concatenation (Equation 103). 
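+# Given roughly unit-magnitude inputs a and b, the weights below rescale each
+# half by (1 - t) / sqrt(Na) and t / sqrt(Nb), and the shared factor
+# C = sqrt((Na + Nb) / ((1 - t)**2 + t**2)) restores the overall scale, so the
+# concatenated tensor stays approximately unit-magnitude
+# (e.g. mp_cat(torch.randn(4, 8, 16), torch.randn(4, 24, 16)).std() ≈ 1).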
+ + +def mp_cat(a, b, dim=1, t=0.5): + Na = a.shape[dim] + Nb = b.shape[dim] + C = np.sqrt((Na + Nb) / ((1 - t)**2 + t**2)) + wa = C / np.sqrt(Na) * (1 - t) + wb = C / np.sqrt(Nb) * t + return torch.cat([wa * a, wb * b], dim=dim) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving convolution or fully-connected layer (Equation 47) +# with force weight normalization (Equation 66). + + +class MPConv1D(torch.nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size): + super().__init__() + self.out_channels = out_channels + self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels, kernel_size)) + + self.weight_norm_removed = False + + def forward(self, x, gain=1): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + + w = self.weight * gain + if w.ndim == 2: + return x @ w.t() + assert w.ndim == 3 + return torch.nn.functional.conv1d(x, w, padding=(w.shape[-1] // 2, )) + + def remove_weight_norm(self): + w = self.weight.to(torch.float32) + w = normalize(w) # traditional weight normalization + w = w / np.sqrt(w[0].numel()) + w = w.to(self.weight.dtype) + self.weight.data.copy_(w) + + self.weight_norm_removed = True + return self diff --git a/meanaudio/ext/autoencoder/vae.py b/meanaudio/ext/autoencoder/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..a5203c7070566588de0e3c12d5c0117f0ac03115 --- /dev/null +++ b/meanaudio/ext/autoencoder/vae.py @@ -0,0 +1,369 @@ +import logging +from typing import Optional + +import torch +import torch.nn as nn + +from meanaudio.ext.autoencoder.edm2_utils import MPConv1D +from meanaudio.ext.autoencoder.vae_modules import (AttnBlock1D, Downsample1D, ResnetBlock1D, + Upsample1D, nonlinearity) +from meanaudio.model.utils.distributions import DiagonalGaussianDistribution + +log = logging.getLogger() + +DATA_MEAN_80D = [ + -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927, + -1.3170, -1.3543, -1.3401, -1.3836, -1.3907, -1.3912, -1.4313, -1.4152, -1.4527, -1.4728, + -1.4568, -1.5101, -1.5051, -1.5172, -1.5623, -1.5373, -1.5746, -1.5687, -1.6032, -1.6131, + -1.6081, -1.6331, -1.6489, -1.6489, -1.6700, -1.6738, -1.6953, -1.6969, -1.7048, -1.7280, + -1.7361, -1.7495, -1.7658, -1.7814, -1.7889, -1.8064, -1.8221, -1.8377, -1.8417, -1.8643, + -1.8857, -1.8929, -1.9173, -1.9379, -1.9531, -1.9673, -1.9824, -2.0042, -2.0215, -2.0436, + -2.0766, -2.1064, -2.1418, -2.1855, -2.2319, -2.2767, -2.3161, -2.3572, -2.3954, -2.4282, + -2.4659, -2.5072, -2.5552, -2.6074, -2.6584, -2.7107, -2.7634, -2.8266, -2.8981, -2.9673 +] + +DATA_STD_80D = [ + 1.0291, 1.0411, 1.0043, 0.9820, 0.9677, 0.9543, 0.9450, 0.9392, 0.9343, 0.9297, 0.9276, 0.9263, + 0.9242, 0.9254, 0.9232, 0.9281, 0.9263, 0.9315, 0.9274, 0.9247, 0.9277, 0.9199, 0.9188, 0.9194, + 0.9160, 0.9161, 0.9146, 0.9161, 0.9100, 0.9095, 0.9145, 0.9076, 0.9066, 0.9095, 0.9032, 0.9043, + 0.9038, 0.9011, 0.9019, 0.9010, 0.8984, 0.8983, 0.8986, 0.8961, 0.8962, 0.8978, 0.8962, 0.8973, + 0.8993, 0.8976, 0.8995, 0.9016, 0.8982, 0.8972, 0.8974, 0.8949, 0.8940, 0.8947, 0.8936, 0.8939, + 0.8951, 0.8956, 0.9017, 0.9167, 0.9436, 0.9690, 1.0003, 1.0225, 1.0381, 1.0491, 1.0545, 1.0604, + 1.0761, 1.0929, 1.1089, 1.1196, 1.1176, 1.1156, 1.1117, 1.1070 +] + +DATA_MEAN_128D = [ + -3.3462, -2.6723, -2.4893, -2.3143, -2.2664, -2.3317, -2.1802, -2.4006, -2.2357, -2.4597, + -2.3717, -2.4690, -2.5142, -2.4919, -2.6610, -2.5047, -2.7483, -2.5926, -2.7462, -2.7033, + -2.7386, -2.8112, 
-2.7502, -2.9594, -2.7473, -3.0035, -2.8891, -2.9922, -2.9856, -3.0157, + -3.1191, -2.9893, -3.1718, -3.0745, -3.1879, -3.2310, -3.1424, -3.2296, -3.2791, -3.2782, + -3.2756, -3.3134, -3.3509, -3.3750, -3.3951, -3.3698, -3.4505, -3.4509, -3.5089, -3.4647, + -3.5536, -3.5788, -3.5867, -3.6036, -3.6400, -3.6747, -3.7072, -3.7279, -3.7283, -3.7795, + -3.8259, -3.8447, -3.8663, -3.9182, -3.9605, -3.9861, -4.0105, -4.0373, -4.0762, -4.1121, + -4.1488, -4.1874, -4.2461, -4.3170, -4.3639, -4.4452, -4.5282, -4.6297, -4.7019, -4.7960, + -4.8700, -4.9507, -5.0303, -5.0866, -5.1634, -5.2342, -5.3242, -5.4053, -5.4927, -5.5712, + -5.6464, -5.7052, -5.7619, -5.8410, -5.9188, -6.0103, -6.0955, -6.1673, -6.2362, -6.3120, + -6.3926, -6.4797, -6.5565, -6.6511, -6.8130, -6.9961, -7.1275, -7.2457, -7.3576, -7.4663, + -7.6136, -7.7469, -7.8815, -8.0132, -8.1515, -8.3071, -8.4722, -8.7418, -9.3975, -9.6628, + -9.7671, -9.8863, -9.9992, -10.0860, -10.1709, -10.5418, -11.2795, -11.3861 +] + +DATA_STD_128D = [ + 2.3804, 2.4368, 2.3772, 2.3145, 2.2803, 2.2510, 2.2316, 2.2083, 2.1996, 2.1835, 2.1769, 2.1659, + 2.1631, 2.1618, 2.1540, 2.1606, 2.1571, 2.1567, 2.1612, 2.1579, 2.1679, 2.1683, 2.1634, 2.1557, + 2.1668, 2.1518, 2.1415, 2.1449, 2.1406, 2.1350, 2.1313, 2.1415, 2.1281, 2.1352, 2.1219, 2.1182, + 2.1327, 2.1195, 2.1137, 2.1080, 2.1179, 2.1036, 2.1087, 2.1036, 2.1015, 2.1068, 2.0975, 2.0991, + 2.0902, 2.1015, 2.0857, 2.0920, 2.0893, 2.0897, 2.0910, 2.0881, 2.0925, 2.0873, 2.0960, 2.0900, + 2.0957, 2.0958, 2.0978, 2.0936, 2.0886, 2.0905, 2.0845, 2.0855, 2.0796, 2.0840, 2.0813, 2.0817, + 2.0838, 2.0840, 2.0917, 2.1061, 2.1431, 2.1976, 2.2482, 2.3055, 2.3700, 2.4088, 2.4372, 2.4609, + 2.4731, 2.4847, 2.5072, 2.5451, 2.5772, 2.6147, 2.6529, 2.6596, 2.6645, 2.6726, 2.6803, 2.6812, + 2.6899, 2.6916, 2.6931, 2.6998, 2.7062, 2.7262, 2.7222, 2.7158, 2.7041, 2.7485, 2.7491, 2.7451, + 2.7485, 2.7233, 2.7297, 2.7233, 2.7145, 2.6958, 2.6788, 2.6439, 2.6007, 2.4786, 2.2469, 2.1877, + 2.1392, 2.0717, 2.0107, 1.9676, 1.9140, 1.7102, 0.9101, 0.7164 +] + + +class VAE(nn.Module): + + def __init__( + self, + *, + data_dim: int, + embed_dim: int, + hidden_dim: int, + ): + super().__init__() + + if data_dim == 80: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_80D, dtype=torch.float32)) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_80D, dtype=torch.float32)) + elif data_dim == 128: + self.data_mean = nn.Buffer(torch.tensor(DATA_MEAN_128D, dtype=torch.float32)) + self.data_std = nn.Buffer(torch.tensor(DATA_STD_128D, dtype=torch.float32)) + + self.data_mean = self.data_mean.view(1, -1, 1) + self.data_std = self.data_std.view(1, -1, 1) + + self.encoder = Encoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + embed_dim=embed_dim, + ) + self.decoder = Decoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + out_dim=data_dim, + embed_dim=embed_dim, + ) + + self.embed_dim = embed_dim + # self.quant_conv = nn.Conv1d(2 * embed_dim, 2 * embed_dim, 1) + # self.post_quant_conv = nn.Conv1d(embed_dim, embed_dim, 1) + + self.initialize_weights() + + def initialize_weights(self): + pass + + def encode(self, x: torch.Tensor, normalize: bool = True) -> DiagonalGaussianDistribution: + if normalize: + x = self.normalize(x) + moments = self.encoder(x) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z: torch.Tensor, unnormalize: bool = True) -> torch.Tensor: 
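+        # Map latents back to mel features; with unnormalize=True the decoder
+        # output is rescaled from the normalized space back to the original
+        # data statistics (data_mean / data_std).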
+ dec = self.decoder(z) + if unnormalize: + dec = self.unnormalize(dec) + return dec + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + return (x - self.data_mean) / self.data_std + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + return x * self.data_std + self.data_mean + + def forward( + self, + x: torch.Tensor, + sample_posterior: bool = True, + rng: Optional[torch.Generator] = None, + normalize: bool = True, + unnormalize: bool = True, + ) -> tuple[torch.Tensor, DiagonalGaussianDistribution]: + + posterior = self.encode(x, normalize=normalize) + if sample_posterior: + z = posterior.sample(rng) + else: + z = posterior.mode() + dec = self.decode(z, unnormalize=unnormalize) + return dec, posterior + + def load_weights(self, src_dict) -> None: + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return next(self.parameters()).device + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def remove_weight_norm(self): + for name, m in self.named_modules(): + if isinstance(m, MPConv1D): + m.remove_weight_norm() + log.debug(f"Removed weight norm from {name}") + return self + + +class Encoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + double_z: bool = True, + kernel_size: int = 3, + clip_act: float = 256.0): + super().__init__() + self.dim = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = down_layers + self.attn_layers = attn_layers + self.conv_in = MPConv1D(in_dim, self.dim, kernel_size=kernel_size) + + in_ch_mult = (1, ) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + # downsampling + self.down = nn.ModuleList() + for i_level in range(self.num_layers): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = dim * in_ch_mult[i_level] + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock1D(in_dim=block_in, + out_dim=block_out, + kernel_size=kernel_size, + use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level in down_layers: + down.downsample = Downsample1D(block_in, resamp_with_conv) + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + + # end + self.conv_out = MPConv1D(block_in, + 2 * embed_dim if double_z else embed_dim, + kernel_size=kernel_size) + + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, x): + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_layers): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + hs.append(h) + if i_level in self.down_layers: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = 
h.clamp(-self.clip_act, self.clip_act) + + # end + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +class Decoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + out_dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + kernel_size: int = 3, + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + clip_act: float = 256.0): + super().__init__() + self.ch = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = [i + 1 for i in down_layers] # each downlayer add one + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = dim * ch_mult[self.num_layers - 1] + + # z to block_in + self.conv_in = MPConv1D(embed_dim, block_in, kernel_size=kernel_size) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_layers)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append(ResnetBlock1D(in_dim=block_in, out_dim=block_out, use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level in self.down_layers: + up.upsample = Upsample1D(block_in, resamp_with_conv) + self.up.insert(0, up) # prepend to get consistent order + + # end + self.conv_out = MPConv1D(block_in, out_dim, kernel_size=kernel_size) + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, z): + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # upsampling + for i_level in reversed(range(self.num_layers)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + if i_level in self.down_layers: + h = self.up[i_level].upsample(h) + + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +def VAE_16k(**kwargs) -> VAE: + return VAE(data_dim=80, embed_dim=20, hidden_dim=384, **kwargs) + + +def VAE_44k(**kwargs) -> VAE: + return VAE(data_dim=128, embed_dim=40, hidden_dim=512, **kwargs) + + +def get_my_vae(name: str, **kwargs) -> VAE: + if name == '16k': + return VAE_16k(**kwargs) + if name == '44k': + return VAE_44k(**kwargs) + raise ValueError(f'Unknown model: {name}') + + +if __name__ == '__main__': + network = get_my_vae('standard') + + # print the number of parameters in terms of millions + num_params = sum(p.numel() for p in network.parameters()) / 1e6 + print(f'Number of parameters: {num_params:.2f}M') diff --git a/meanaudio/ext/autoencoder/vae_modules.py b/meanaudio/ext/autoencoder/vae_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..63066746e07b1c58e5a7445930e8128c1e926164 --- /dev/null +++ b/meanaudio/ext/autoencoder/vae_modules.py @@ -0,0 +1,117 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import 
rearrange + +from meanaudio.ext.autoencoder.edm2_utils import (MPConv1D, mp_silu, mp_sum, normalize) + + +def nonlinearity(x): + # swish + return mp_silu(x) + + +class ResnetBlock1D(nn.Module): + + def __init__(self, *, in_dim, out_dim=None, conv_shortcut=False, kernel_size=3, use_norm=True): + super().__init__() + self.in_dim = in_dim + out_dim = in_dim if out_dim is None else out_dim + self.out_dim = out_dim + self.use_conv_shortcut = conv_shortcut + self.use_norm = use_norm + + self.conv1 = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + self.conv2 = MPConv1D(out_dim, out_dim, kernel_size=kernel_size) + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + self.conv_shortcut = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + else: + self.nin_shortcut = MPConv1D(in_dim, out_dim, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + + # pixel norm + if self.use_norm: + x = normalize(x, dim=1) + + h = x + h = nonlinearity(h) + h = self.conv1(h) + + h = nonlinearity(h) + h = self.conv2(h) + + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return mp_sum(x, h, t=0.3) + + +class AttnBlock1D(nn.Module): + + def __init__(self, in_channels, num_heads=1): + super().__init__() + self.in_channels = in_channels + + self.num_heads = num_heads + self.qkv = MPConv1D(in_channels, in_channels * 3, kernel_size=1) + self.proj_out = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + h = x + y = self.qkv(h) + y = y.reshape(y.shape[0], self.num_heads, -1, 3, y.shape[-1]) + q, k, v = normalize(y, dim=2).unbind(3) + + q = rearrange(q, 'b h c l -> b h l c') + k = rearrange(k, 'b h c l -> b h l c') + v = rearrange(v, 'b h c l -> b h l c') + + h = F.scaled_dot_product_attention(q, k, v) + h = rearrange(h, 'b h l c -> b (h c) l') + + h = self.proj_out(h) + + return mp_sum(x, h, t=0.3) + + +class Upsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = MPConv1D(in_channels, in_channels, kernel_size=3) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode='nearest-exact') # support 3D tensor(B,C,T) + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv1 = MPConv1D(in_channels, in_channels, kernel_size=1) + self.conv2 = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + + if self.with_conv: + x = self.conv1(x) + + x = F.avg_pool1d(x, kernel_size=2, stride=2) + + if self.with_conv: + x = self.conv2(x) + + return x diff --git a/meanaudio/ext/bigvgan/LICENSE b/meanaudio/ext/bigvgan/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e9663595cc28938f88d6299acd3ba791542e4c0c --- /dev/null +++ b/meanaudio/ext/bigvgan/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 NVIDIA CORPORATION. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/__init__.py b/meanaudio/ext/bigvgan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..00f13e9bf9ccb0b4ec37e1c70869f9a9a538871f --- /dev/null +++ b/meanaudio/ext/bigvgan/__init__.py @@ -0,0 +1 @@ +from .bigvgan import BigVGAN diff --git a/meanaudio/ext/bigvgan/activations.py b/meanaudio/ext/bigvgan/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..61f2808a5466b3cf4d041059700993af5527dd29 --- /dev/null +++ b/meanaudio/ext/bigvgan/activations.py @@ -0,0 +1,120 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. 
+ Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + ''' + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + ''' + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/alias_free_torch/__init__.py b/meanaudio/ext/bigvgan/alias_free_torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2318b63198250856809c0cb46210a4147b829bc --- /dev/null +++ b/meanaudio/ext/bigvgan/alias_free_torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/alias_free_torch/act.py b/meanaudio/ext/bigvgan/alias_free_torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..028debd697dd60458aae75010057df038bd3518a --- /dev/null +++ b/meanaudio/ext/bigvgan/alias_free_torch/act.py @@ -0,0 +1,28 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
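+# Activation1d applies a pointwise activation between a band-limited 2x upsample
+# and a band-limited 2x downsample: the nonlinearity is evaluated at the higher
+# sample rate and its output is low-pass filtered before decimation, which
+# suppresses the aliasing the activation would otherwise introduce.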
+ +import torch.nn as nn +from .resample import UpSample1d, DownSample1d + + +class Activation1d(nn.Module): + def __init__(self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/alias_free_torch/filter.py b/meanaudio/ext/bigvgan/alias_free_torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..7ad6ea87c1f10ddd94c544037791d7a4634d5ae1 --- /dev/null +++ b/meanaudio/ext/bigvgan/alias_free_torch/filter.py @@ -0,0 +1,95 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if 'sinc' in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where(x == 0, + torch.tensor(1., device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size] + even = (kernel_size % 2 == 0) + half_size = kernel_size // 2 + + #For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.: + beta = 0.1102 * (A - 8.7) + elif A >= 21.: + beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.) + else: + beta = 0. + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = (torch.arange(-half_size, half_size) + 0.5) + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + # Normalize filter to have sum = 1, otherwise we will have a small leakage + # of the constant component in the input signal. + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__(self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = 'replicate', + kernel_size: int = 12): + # kernel_size should be even number for stylegan3 setup, + # in this implementation, odd number is also possible. 
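+        # The filter taps come from kaiser_sinc_filter1d above: a windowed-sinc
+        # low-pass whose Kaiser beta is chosen from the estimated stop-band
+        # attenuation A = 2.285 * (half_size - 1) * pi * delta_f + 7.95
+        # (standard Kaiser window design).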
+ super().__init__() + if cutoff < -0.: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = (kernel_size % 2 == 0) + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + #input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), + mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), + stride=self.stride, groups=C) + + return out \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/alias_free_torch/resample.py b/meanaudio/ext/bigvgan/alias_free_torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..750e6c3402cc5ac939c4b9d075246562e0e1d1a7 --- /dev/null +++ b/meanaudio/ext/bigvgan/alias_free_torch/resample.py @@ -0,0 +1,49 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn +from torch.nn import functional as F +from .filter import LowPassFilter1d +from .filter import kaiser_sinc_filter1d + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode='replicate') + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size) + + def forward(self, x): + xx = self.lowpass(x) + + return xx \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/bigvgan.py b/meanaudio/ext/bigvgan/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..c4def4c6a6dc909bdf4326d1b1e36f9f93cd31b0 --- /dev/null +++ b/meanaudio/ext/bigvgan/bigvgan.py @@ -0,0 +1,32 @@ +from pathlib import Path + +import torch +import torch.nn as nn +from omegaconf import OmegaConf + +from meanaudio.ext.bigvgan.models import BigVGANVocoder + +_bigvgan_vocoder_path = Path(__file__).parent / 'bigvgan_vocoder.yml' + + +class BigVGAN(nn.Module): + + def __init__(self, ckpt_path, config_path=_bigvgan_vocoder_path): + super().__init__() + vocoder_cfg = OmegaConf.load(config_path) + self.vocoder = BigVGANVocoder(vocoder_cfg).eval() + vocoder_ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)['generator'] + 
self.vocoder.load_state_dict(vocoder_ckpt) + + self.weight_norm_removed = False + self.remove_weight_norm() + + @torch.inference_mode() + def forward(self, x): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + return self.vocoder(x) + + def remove_weight_norm(self): + self.vocoder.remove_weight_norm() + self.weight_norm_removed = True + return self diff --git a/meanaudio/ext/bigvgan/bigvgan_vocoder.yml b/meanaudio/ext/bigvgan/bigvgan_vocoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..d4db31ec45336e757d94d5099ed16cb3c906c24a --- /dev/null +++ b/meanaudio/ext/bigvgan/bigvgan_vocoder.yml @@ -0,0 +1,63 @@ +resblock: '1' +num_gpus: 0 +batch_size: 64 +num_mels: 80 +learning_rate: 0.0001 +adam_b1: 0.8 +adam_b2: 0.99 +lr_decay: 0.999 +seed: 1234 +upsample_rates: +- 4 +- 4 +- 2 +- 2 +- 2 +- 2 +upsample_kernel_sizes: +- 8 +- 8 +- 4 +- 4 +- 4 +- 4 +upsample_initial_channel: 1536 +resblock_kernel_sizes: +- 3 +- 7 +- 11 +resblock_dilation_sizes: +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +activation: snakebeta +snake_logscale: true +resolutions: +- - 1024 + - 120 + - 600 +- - 2048 + - 240 + - 1200 +- - 512 + - 50 + - 240 +mpd_reshapes: +- 2 +- 3 +- 5 +- 7 +- 11 +use_spectral_norm: false +discriminator_channel_mult: 1 +num_workers: 4 +dist_config: + dist_backend: nccl + dist_url: tcp://localhost:54341 + world_size: 1 diff --git a/meanaudio/ext/bigvgan/env.py b/meanaudio/ext/bigvgan/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/meanaudio/ext/bigvgan/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/incl_licenses/LICENSE_1 b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/incl_licenses/LICENSE_2 b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/incl_licenses/LICENSE_3 b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/incl_licenses/LICENSE_4 b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/incl_licenses/LICENSE_5 b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/meanaudio/ext/bigvgan/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan/models.py b/meanaudio/ext/bigvgan/models.py new file mode 100644 index 0000000000000000000000000000000000000000..693642cdb6577e8dcdcbe9d0c583b71dc48613fb --- /dev/null +++ b/meanaudio/ext/bigvgan/models.py @@ -0,0 +1,255 @@ +# Copyright (c) 2022 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
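+# Usage sketch (illustrative only; the dummy mel tensor and the loading code below are
+# assumptions, not part of this file): BigVGANVocoder defined here is built from the
+# hyperparameters in bigvgan_vocoder.yml and maps a mel spectrogram to a waveform.
+#
+#     import torch, yaml
+#     from meanaudio.ext.bigvgan.env import AttrDict
+#
+#     h = AttrDict(yaml.safe_load(open('meanaudio/ext/bigvgan/bigvgan_vocoder.yml')))
+#     vocoder = BigVGANVocoder(h).eval()
+#     vocoder.remove_weight_norm()              # typically done once before inference
+#     mel = torch.randn(1, h.num_mels, 128)     # [B, num_mels, T]
+#     wav = vocoder(mel)                        # [B, 1, T * prod(upsample_rates)]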
+ +import torch +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from meanaudio.ext.bigvgan import activations +from meanaudio.ext.bigvgan.alias_free_torch import * +from meanaudio.ext.bigvgan.utils import get_padding, init_weights + +LRELU_SLOPE = 0.1 + + +class AMPBlock1(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None): + super(AMPBlock1, self).__init__() + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." 
+ ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None): + super(AMPBlock2, self).__init__() + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_parametrizations(l, 'weight') + + +class BigVGANVocoder(torch.nn.Module): + # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks. + def __init__(self, h): + super().__init__() + self.h = h + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # pre conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2 + + # transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d(h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2)) + ])) + + # residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d, activation=h.activation)) + + # post conv + if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing + activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing + activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + + # weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + # pre conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # post conv + x = self.activation_post(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') diff --git a/meanaudio/ext/bigvgan/utils.py b/meanaudio/ext/bigvgan/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aff7e653533d3390756c53a0215801b06cc924b5 --- /dev/null +++ b/meanaudio/ext/bigvgan/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
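+# Worked example (illustrative): get_padding() below computes the "same" padding for a
+# stride-1 dilated convolution, padding = (kernel_size * dilation - dilation) // 2, e.g.
+# get_padding(7, 1) == 3 and get_padding(3, 5) == 5, which is how the AMP residual blocks
+# above keep the time dimension unchanged inside each branch.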
+ +import os + +import torch +from torch.nn.utils.parametrizations import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print("Loading '{}'".format(filepath)) + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/meanaudio/ext/bigvgan_v2/LICENSE b/meanaudio/ext/bigvgan_v2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4c78361c86d4f685117d60d6623e2197fcfed706 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 NVIDIA CORPORATION. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/meanaudio/ext/bigvgan_v2/__init__.py b/meanaudio/ext/bigvgan_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/ext/bigvgan_v2/activations.py b/meanaudio/ext/bigvgan_v2/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..4f08ddab5b55d6dcaf3e968af98889e0770c44f5 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/activations.py @@ -0,0 +1,126 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + """ + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. 
+ """ + super(Snake, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + """ + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. 
+ SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc0fd8f28a37ad949fbdb9832f51b5b933c6ff2 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +from alias_free_activation.torch.resample import UpSample1d, DownSample1d + +# load fused CUDA kernel: this enables importing anti_alias_activation_cuda +from alias_free_activation.cuda import load + +anti_alias_activation_cuda = load.load() + + +class FusedAntiAliasActivation(torch.autograd.Function): + """ + Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs. + The hyperparameters are hard-coded in the kernel to maximize speed. + NOTE: The fused kenrel is incorrect for Activation1d with different hyperparameters. + """ + + @staticmethod + def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta): + activation_results = anti_alias_activation_cuda.forward( + inputs, up_ftr, down_ftr, alpha, beta + ) + + return activation_results + + @staticmethod + def backward(ctx, output_grads): + raise NotImplementedError + return output_grads, None, None + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + fused: bool = True, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + self.fused = fused # Whether to use fused CUDA kernel or not + + def forward(self, x): + if not self.fused: + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + return x + else: + if self.act.__class__.__name__ == "Snake": + beta = self.act.alpha.data # Snake uses same params for alpha and beta + else: + beta = ( + self.act.beta.data + ) # Snakebeta uses different params for alpha and beta + alpha = self.act.alpha.data + if ( + not self.act.alpha_logscale + ): # Exp baked into cuda kernel, cancel it out with a log + alpha = torch.log(alpha) + beta = torch.log(beta) + + x = FusedAntiAliasActivation.apply( + x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta + ) + return x diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5651f77143bd678169eb11564a7cf7a7969a59e --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp @@ -0,0 +1,23 @@ +/* coding=utf-8 + * Copyright (c) 
2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #include + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)"); +} \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c442334869fe72d639ec203fa4fac07f96a0ee1 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu @@ -0,0 +1,246 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "type_shim.h" +#include +#include +#include +#include +#include + +namespace +{ + // Hard-coded hyperparameters + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 
1 : 4; + constexpr int BUFFER_SIZE = 32; + constexpr int FILTER_SIZE = 12; + constexpr int HALF_FILTER_SIZE = 6; + constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl + + template + __global__ void anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + // Up and downsample filters + input_t up_filter[FILTER_SIZE]; + input_t down_filter[FILTER_SIZE]; + + // Load data from global memory including extra indices reserved for replication paddings + input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0}; + input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0}; + + // Output stores downsampled output before writing to dst + output_t output[BUFFER_SIZE]; + + // blockDim/threadIdx = (128, 1, 1) + // gridDim/blockIdx = (seq_blocks, channels, batches) + int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + int local_offset = threadIdx.x * BUFFER_SIZE; + int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset; + + // intermediate have double the seq_len + int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2; + int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset; + + // Get values needed for replication padding before moving pointer + const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + input_t seq_left_most_value = right_most_pntr[0]; + input_t seq_right_most_value = right_most_pntr[seq_len - 1]; + + // Move src and dst pointers + src += block_offset + local_offset; + dst += block_offset + local_offset; + + // Alpha and beta values for snake activatons. Applies exp by default + alpha = alpha + blockIdx.y; + input_t alpha_val = expf(alpha[0]); + beta = beta + blockIdx.y; + input_t beta_val = expf(beta[0]); + + #pragma unroll + for (int it = 0; it < FILTER_SIZE; it += 1) + { + up_filter[it] = up_ftr[it]; + down_filter[it] = down_ftr[it]; + } + + // Apply replication padding for upsampling, matching torch impl + #pragma unroll + for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) + { + int element_index = seq_offset + it; // index for element + if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value; + } + if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value; + } + if ((element_index >= 0) && (element_index < seq_len)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it]; + } + } + + // Apply upsampling strided convolution and write to intermediates. 
It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later + #pragma unroll + for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) + { + input_t acc = 0.0; + int element_index = intermediate_seq_offset + it; // index for intermediate + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + if ((element_index + f_idx) >= 0) + { + acc += up_filter[f_idx] * elements[it + f_idx]; + } + } + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc; + } + + // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later + double no_div_by_zero = 0.000000001; + #pragma unroll + for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1) + { + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val); + } + + // Apply replication padding before downsampling conv from intermediates + #pragma unroll + for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT]; + } + #pragma unroll + for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1]; + } + + // Apply downsample strided convolution (assuming stride=2) from intermediates + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += 1) + { + input_t acc = 0.0; + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation + acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT]; + } + output[it] = acc; + } + + // Write output to dst + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) + { + int element_index = seq_offset + it; + if (element_index < seq_len) + { + dst[it] = output[it]; + } + } + + } + + template + void dispatch_anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + if (seq_len == 0) + { + return; + } + else + { + // Use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + constexpr int seq_len_per_block = 4096; + int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block; + dim3 blocks(blocks_per_seq_len, channels, batch_size); + dim3 threads(threads_per_block, 1, 1); + + anti_alias_activation_forward + <<>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len); + } + } +} + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta) +{ + // Input is a 3d tensor with dimensions [batches, channels, seq_len] + const int batches = input.size(0); + const int channels = input.size(1); + const int seq_len = input.size(2); + + // Output + auto act_options = input.options().requires_grad(false); + + torch::Tensor 
anti_alias_activation_results =
+        torch::empty({batches, channels, seq_len}, act_options);
+
+    void *input_ptr = static_cast(input.data_ptr());
+    void *up_filter_ptr = static_cast(up_filter.data_ptr());
+    void *down_filter_ptr = static_cast(down_filter.data_ptr());
+    void *alpha_ptr = static_cast(alpha.data_ptr());
+    void *beta_ptr = static_cast(beta.data_ptr());
+    void *anti_alias_activation_results_ptr = static_cast(anti_alias_activation_results.data_ptr());
+
+    DISPATCH_FLOAT_HALF_AND_BFLOAT(
+        input.scalar_type(),
+        "dispatch anti alias activation_forward",
+        dispatch_anti_alias_activation_forward(
+            reinterpret_cast(anti_alias_activation_results_ptr),
+            reinterpret_cast(input_ptr),
+            reinterpret_cast(up_filter_ptr),
+            reinterpret_cast(down_filter_ptr),
+            reinterpret_cast(alpha_ptr),
+            reinterpret_cast(beta_ptr),
+            batches,
+            channels,
+            seq_len););
+    return anti_alias_activation_results;
+}
\ No newline at end of file
diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..25818b2edf4cb0dc9130e62c7c4de8d16a01baa5
--- /dev/null
+++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h
@@ -0,0 +1,29 @@
+/* coding=utf-8
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*This code is copied from NVIDIA apex:
+ * https://github.com/NVIDIA/apex
+ * with minor changes. */
+
+#ifndef TORCH_CHECK
+#define TORCH_CHECK AT_CHECK
+#endif
+
+#ifdef VERSION_GE_1_3
+#define DATA_PTR data_ptr
+#else
+#define DATA_PTR data
+#endif
diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca5d01de398249e75e9e2298958764acb436edba
--- /dev/null
+++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2024 NVIDIA CORPORATION.
+# Licensed under the MIT license.
+
+import os
+import pathlib
+import subprocess
+
+from torch.utils import cpp_extension
+
+"""
+Setting this param to a list has a problem of generating different compilation commands (with different order of architectures) and leading to recompilation of fused kernels.
+Set it to empty string to avoid recompilation and assign arch flags explicitly in extra_cuda_cflags below
+"""
+os.environ["TORCH_CUDA_ARCH_LIST"] = ""
+
+
+def load():
+    # Check if cuda 11 is installed for compute capability 8.0
+    cc_flag = []
+    _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
+    if int(bare_metal_major) >= 11:
+        cc_flag.append("-gencode")
+        cc_flag.append("arch=compute_80,code=sm_80")
+
+    # Build path
+    srcpath = pathlib.Path(__file__).parent.absolute()
+    buildpath = srcpath / "build"
+    _create_build_dir(buildpath)
+
+    # Helper function to build the kernels.
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=[ + "-O3", + "-gencode", + "arch=compute_70,code=sm_70", + "--use_fast_math", + ] + + extra_cuda_flags + + cc_flag, + verbose=True, + ) + + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + sources = [ + srcpath / "anti_alias_activation.cpp", + srcpath / "anti_alias_activation_cuda.cu", + ] + anti_alias_activation_cuda = _cpp_extention_load_helper( + "anti_alias_activation_cuda", sources, extra_cuda_flags + ) + + return anti_alias_activation_cuda + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..5db7e8a397e982d4d30d16ab6060814b98b7ab83 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h @@ -0,0 +1,92 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "compat.h" + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch (TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch (TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f756ed83f87f9839e457b240f60469bc187707d --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..bd44e9500020986578dcc4f5fc771d39bb5bf26f --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py @@ -0,0 +1,32 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn + +from meanaudio.ext.bigvgan_v2.alias_free_activation.torch.resample import (DownSample1d, UpSample1d) + + +class Activation1d(nn.Module): + + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa35b0d5ddf8d6cb04cd9d47364ca033cebcd32 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py @@ -0,0 +1,101 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if "sinc" in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. 
sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where( + x == 0, + torch.tensor(1.0, device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x, + ) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d( + cutoff, half_width, kernel_size +): # return filter [1,1,kernel_size] + even = kernel_size % 2 == 0 + half_size = kernel_size // 2 + + # For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.0: + beta = 0.1102 * (A - 8.7) + elif A >= 21.0: + beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0) + else: + beta = 0.0 + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = torch.arange(-half_size, half_size) + 0.5 + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + """ + Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal. + """ + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__( + self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = "replicate", + kernel_size: int = 12, + ): + """ + kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible. + """ + super().__init__() + if cutoff < -0.0: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = kernel_size % 2 == 0 + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + # Input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + + return out diff --git a/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..8ca574d822b6e0987add2804474649a620867c00 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py @@ -0,0 +1,54 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
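+# Shape sketch (illustrative; assumes `import torch`): with the default ratio=2,
+# UpSample1d maps [B, C, T] to [B, C, 2*T] using a kaiser-windowed sinc interpolation
+# filter, and DownSample1d maps [B, C, T] to [B, C, T // 2] (for even T) with the
+# matching low-pass filter, e.g.
+#
+#     up, down = UpSample1d(2), DownSample1d(2)
+#     x = torch.randn(1, 4, 100)
+#     assert up(x).shape == (1, 4, 200) and down(up(x)).shape == (1, 4, 100)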
+ +import torch.nn as nn +from torch.nn import functional as F + +from meanaudio.ext.bigvgan_v2.alias_free_activation.torch.filter import (LowPassFilter1d, + kaiser_sinc_filter1d) + + +class UpSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = (self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2) + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode="replicate") + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.lowpass = LowPassFilter1d( + cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size, + ) + + def forward(self, x): + xx = self.lowpass(x) + + return xx diff --git a/meanaudio/ext/bigvgan_v2/bigvgan.py b/meanaudio/ext/bigvgan_v2/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..8dff969d85d11c4ac72751d87e54ecd09ff69a85 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/bigvgan.py @@ -0,0 +1,439 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import json +import os +from pathlib import Path +from typing import Dict, Optional, Union + +import torch +import torch.nn as nn +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from meanaudio.ext.bigvgan_v2 import activations +from meanaudio.ext.bigvgan_v2.alias_free_activation.torch.act import \ + Activation1d as TorchActivation1d +from meanaudio.ext.bigvgan_v2.env import AttrDict +from meanaudio.ext.bigvgan_v2.utils import get_padding, init_weights + + +def load_hparams_from_json(path) -> AttrDict: + with open(path) as f: + data = f.read() + return AttrDict(json.loads(data)) + + +class AMPBlock1(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
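+
+    Example (illustrative sketch; assumes `h` is an AttrDict providing `snake_logscale` and,
+    optionally, `use_cuda_kernel`):
+        >>> block = AMPBlock1(h, channels=512, kernel_size=3, activation="snakebeta")
+        >>> y = block(torch.randn(1, 512, 256))  # the residual AMP block keeps the [B, C, T] shape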
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=get_padding(kernel_size, 1), + )) for _ in range(len(dilation)) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class BigVGAN( + torch.nn.Module, + PyTorchModelHubMixin, + library_name="bigvgan", + repo_url="https://github.com/NVIDIA/BigVGAN", + docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", + pipeline_tag="audio-to-audio", + license="mit", + tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], +): + """ + BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks). + New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks. + + Args: + h (AttrDict): Hyperparameters. + use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels. + + Note: + - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported. + - Ensure that the activation function is correctly specified in the hyperparameters (h.activation). + """ + + def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): + super().__init__() + self.h = h + self.h["use_cuda_kernel"] = use_cuda_kernel + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # Pre-conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + if h.resblock == "1": + resblock_class = AMPBlock1 + elif h.resblock == "2": + resblock_class = AMPBlock2 + else: + raise ValueError( + f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}") + + # Transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2, + )) + ])) + + # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation)) + + # Post-conv + activation_post = (activations.Snake(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snake" else + (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snakebeta" else None)) + if activation_post is None: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.activation_post = Activation1d(activation=activation_post) + + # Whether to use bias for the final conv_post. Default to True for backward compatibility + self.use_bias_at_final = h.get("use_bias_at_final", True) + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)) + + # Weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + # Final tanh activation. Defaults to True for backward compatibility + self.use_tanh_at_final = h.get("use_tanh_at_final", True) + + def forward(self, x): + # Pre-conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # Upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # Post-conv + x = self.activation_post(x) + x = self.conv_post(x) + # Final tanh activation + if self.use_tanh_at_final: + x = torch.tanh(x) + else: + x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] + + return x + + def remove_weight_norm(self): + try: + print("Removing weight norm...") + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') + except ValueError: + print("[INFO] Model already removed weight norm. 
Skipping!") + pass + + # Additional methods for huggingface_hub support + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights and config.json from a Pytorch model to a local directory.""" + + model_path = save_directory / "bigvgan_generator.pt" + torch.save({"generator": self.state_dict()}, model_path) + + config_path = save_directory / "config.json" + with open(config_path, "w") as config_file: + json.dump(self.h, config_file, indent=4) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: str, + cache_dir: str, + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", # Additional argument + strict: bool = False, # Additional argument + use_cuda_kernel: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + + # Download and load hyperparameters (h) used by BigVGAN + if os.path.isdir(model_id): + print("Loading config.json from local directory") + config_file = os.path.join(model_id, "config.json") + else: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + h = load_hparams_from_json(config_file) + + # instantiate BigVGAN using h + if use_cuda_kernel: + print( + f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" + ) + print( + f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" + ) + print( + f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" + ) + model = cls(h, use_cuda_kernel=use_cuda_kernel) + + # Download and load pretrained generator weight + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, "bigvgan_generator.pt") + else: + print(f"Loading weights from {model_id}") + model_file = hf_hub_download( + repo_id=model_id, + filename="bigvgan_generator.pt", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + checkpoint_dict = torch.load(model_file, map_location=map_location, weights_only=True) + + try: + model.load_state_dict(checkpoint_dict["generator"]) + except RuntimeError: + print( + f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" + ) + model.remove_weight_norm() + model.load_state_dict(checkpoint_dict["generator"]) + + return model diff --git a/meanaudio/ext/bigvgan_v2/env.py b/meanaudio/ext/bigvgan_v2/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 new file mode 100644 index 0000000000000000000000000000000000000000..2569ec0b6c85f94f3cd071ba16e9028ccf156be2 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-present, Descript + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 new file mode 100644 index 0000000000000000000000000000000000000000..c37bdaf99c6921f5849425d546069e972f52d7fa --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Charactr Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 new file mode 100644 index 0000000000000000000000000000000000000000..ab3d7ffe795779f54e339078e4e752ad9019aae8 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Amphion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/meanaudio/ext/bigvgan_v2/utils.py b/meanaudio/ext/bigvgan_v2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3b1d41670fa1ee257b2ed22c61086ba7a32c7cb0 --- /dev/null +++ b/meanaudio/ext/bigvgan_v2/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
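+
+# Note on get_padding below: it returns the "same" padding for a dilated Conv1d,
+# padding = (kernel_size * dilation - dilation) // 2. For example, kernel_size=3 and
+# dilation=5 give padding=5, so a stride-1 convolution preserves the sequence length.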
+ +import os + +import torch +from torch.nn.utils import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/meanaudio/ext/mel_converter.py b/meanaudio/ext/mel_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..15266d22fb95176229643597a5fea8304888007d --- /dev/null +++ b/meanaudio/ext/mel_converter.py @@ -0,0 +1,106 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 +from typing import Literal + +import torch +import torch.nn as nn +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, *, norm_fn): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class MelConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float, + n_fft: int, + num_mels: int, + hop_size: int, + win_size: int, + fmin: float, + fmax: float, + norm_fn, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.mel_basis.device + + def forward(self, waveform: torch.Tensor, center: bool = False) -> torch.Tensor: + waveform = waveform.clamp(min=-1., max=1.).to(self.device) + + waveform = torch.nn.functional.pad( + waveform.unsqueeze(1), + [int((self.n_fft - self.hop_size) / 2), + int((self.n_fft - self.hop_size) / 2)], + mode='reflect') + waveform = waveform.squeeze(1) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(self.mel_basis, spec) + spec = spectral_normalize_torch(spec, self.norm_fn) + + return spec + + +def get_mel_converter(mode: Literal['16k', '44k']) -> MelConverter: + if mode == '16k': + return MelConverter(sampling_rate=16_000, + n_fft=1024, + num_mels=80, + hop_size=256, + win_size=1024, + fmin=0, + fmax=8_000, + norm_fn=torch.log10) + elif mode == '44k': + return MelConverter(sampling_rate=44_100, + n_fft=2048, + num_mels=128, + hop_size=512, + win_size=2048, + fmin=0, + fmax=44100 / 2, + norm_fn=torch.log) + else: + raise ValueError(f'Unknown mode: {mode}') diff --git a/meanaudio/ext/rotary_embeddings.py b/meanaudio/ext/rotary_embeddings.py new file mode 100644 index 
0000000000000000000000000000000000000000..ae2b66c2182d05ac22c5daebab9463dd91772828 --- /dev/null +++ b/meanaudio/ext/rotary_embeddings.py @@ -0,0 +1,60 @@ +from typing import Union + +import torch +from einops import rearrange +from torch import Tensor + +# Ref: https://github.com/black-forest-labs/flux/blob/main/src/flux/math.py +# Ref: https://github.com/lucidrains/rotary-embedding-torch + + +def compute_rope_rotations(length: int, + dim: int, + theta: int, + *, + freq_scaling: float = 1.0, + device: Union[torch.device, str] = 'cpu') -> Tensor: + assert dim % 2 == 0 + + with torch.amp.autocast(device_type='cuda', enabled=False): + pos = torch.arange(length, dtype=torch.float32, device=device) + freqs = 1.0 / (theta**(torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)) + freqs *= freq_scaling + + rot = torch.einsum('..., f -> ... f', pos, freqs) + rot = torch.stack([torch.cos(rot), -torch.sin(rot), torch.sin(rot), torch.cos(rot)], dim=-1) + rot = rearrange(rot, 'n d (i j) -> 1 n d i j', i=2, j=2) + return rot + + +def apply_rope(x: Tensor, rot: Tensor) -> tuple[Tensor, Tensor]: + with torch.amp.autocast(device_type='cuda', enabled=False): + _x = x.float() + _x = _x.view(*_x.shape[:-1], -1, 1, 2) + x_out = rot[..., 0] * _x[..., 0] + rot[..., 1] * _x[..., 1] + return x_out.reshape(*x.shape).to(dtype=x.dtype) + + +if __name__ == '__main__': + latent_seq_len = 312 + hidden_dim = 64 * 7 + num_heads = 7 + base_freq = 1 + latent_rot = compute_rope_rotations(latent_seq_len, + hidden_dim // num_heads, + 10000, + freq_scaling=base_freq) + print(latent_rot.shape) + + + latent_seq_len = 624 + hidden_dim = 64 * 7 + num_heads = 7 + base_freq = 1 + latent_rot_2 = compute_rope_rotations(latent_seq_len, + hidden_dim // num_heads, + 10000, + freq_scaling=base_freq) + print(latent_rot_2.shape) + print(torch.all(latent_rot_2[:, :312, :] == latent_rot)) + diff --git a/meanaudio/ext/stft_converter.py b/meanaudio/ext/stft_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..62922067ef3b1d3b8727ec39e7d664ccb304d9fe --- /dev/null +++ b/meanaudio/ext/stft_converter.py @@ -0,0 +1,183 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> 
torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = spec.pow(2).sum(-1) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean()) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = rearrange(spec, 'b f t c -> (b c) f t') + + # spec = self.mel_transform(spec) + + # spec = torch.matmul(self.mel_basis, spec) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-5)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + spec = torch.stack([power, angle], dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return spec + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + bs = spec.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(spec[..., 1]), + torch.sin(spec[..., 1]), + ], dim=-1) + + spec = torch.sqrt(power) * unit_vector + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + 
torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/meanaudio/ext/stft_converter_mel.py b/meanaudio/ext/stft_converter_mel.py new file mode 100644 index 0000000000000000000000000000000000000000..f6b32d4cb9a23cd74f723e7d8307fd82fa1abba0 --- /dev/null +++ b/meanaudio/ext/stft_converter_mel.py @@ -0,0 +1,234 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) 
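+        # The steps below compute a complex STFT, project its real and imaginary parts onto the
+        # mel basis, and return the log-magnitude and phase of the mel-domain spectrogram.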
+ + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 1', power.shape, power.min(), power.max(), power.mean()) + print('angle 1', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = self.mel_transform(spec) + + # power = torch.matmul(self.mel_basis, power) + + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = self.mel_basis.unsqueeze(0) @ spec + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-8)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + # spec = torch.stack([power, angle], dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + # spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return power, angle + # return spec[..., 0], spec[..., 1] + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + + power, angle = spec + + bs = power.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + # spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + # power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(angle), + torch.sin(angle), + ], dim=-1) + + spec = power.unsqueeze(-1) * unit_vector + + # power = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), power).solution + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 2', power.shape, power.min(), power.max(), power.mean()) + print('angle 2', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = 
torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + power, angle = spec + + # print(power.shape, angle.shape) + # print(power, power.min(), power.max(), power.mean()) + # power = power.clamp(-1, 1) + # angle = angle.clamp(-1, 1) + + import matplotlib.pyplot as plt + + # Visualize power + plt.figure() + plt.imshow(power[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Power') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/power.png') + + # Visualize angle + plt.figure() + plt.imshow(angle[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Angle') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/angle.png') + + # print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/meanaudio/model/__init__.py b/meanaudio/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/model/embeddings.py b/meanaudio/model/embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..a888cc102ec3ee8ba0f0bd5579c2e662692d78c8 --- /dev/null +++ b/meanaudio/model/embeddings.py @@ -0,0 +1,59 @@ +import torch +import torch.nn as nn + +# https://github.com/facebookresearch/DiT + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__(self, dim, frequency_embedding_size, max_period): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, dim), + nn.SiLU(), + nn.Linear(dim, dim), + ) + self.dim = dim + self.max_period = max_period + assert dim % 2 == 0, 'dim must be even.' + + with torch.autocast('cuda', enabled=False): + self.freqs = nn.Buffer( + 1.0 / (10000**(torch.arange(0, frequency_embedding_size, 2, dtype=torch.float32) / + frequency_embedding_size)), + persistent=False) + freq_scale = 10000 / max_period + self.freqs = freq_scale * self.freqs + + def timestep_embedding(self, t): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
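+        Note: `dim` (frequency_embedding_size) and `max_period` are fixed in __init__ through the
+        precomputed `self.freqs` buffer; only `t` is passed at call time.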
+ """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + + args = t[:, None].float() * self.freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t).to(t.dtype) + t_emb = self.mlp(t_freq) + return t_emb + +if __name__ == "__main__": + # simple usage + hidden_dim = 512 + t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) # the embedder already contains MLP layers for projection + t = torch.arange(0, 10, 0.1).float() + t_emb = t_embed(t) + print(t, t.shape) \ No newline at end of file diff --git a/meanaudio/model/flow_matching.py b/meanaudio/model/flow_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..631943748eff15fe2845a47e94571dd509a9153d --- /dev/null +++ b/meanaudio/model/flow_matching.py @@ -0,0 +1,71 @@ +import logging +from typing import Callable, Optional + +import torch +from torchdiffeq import odeint + +log = logging.getLogger() + + +## partially from https://github.com/gle-bellier/flow-matching +class FlowMatching: + + def __init__(self, min_sigma: float = 0.0, inference_mode='euler', num_steps: int = 25): + # inference_mode: 'euler' or 'adaptive' + # num_steps: number of steps in the euler inference mode + super().__init__() + self.min_sigma = min_sigma + self.inference_mode = inference_mode + self.num_steps = num_steps + + # self.fm = ExactOptimalTransportConditionalFlowMatcher(sigma=min_sigma) + + assert self.inference_mode in ['euler', 'adaptive'] + if self.inference_mode == 'adaptive' and num_steps > 0: + log.info('The number of steps is ignored in adaptive inference mode ') + + def get_conditional_flow(self, x0: torch.Tensor, x1: torch.Tensor, + t: torch.Tensor) -> torch.Tensor: + # which is psi_t(x), eq 22 in flow matching for generative models + t = t[:, None, None].expand_as(x0) + return (1 - (1 - self.min_sigma) * t) * x0 + t * x1 # (1-(1-sigma)*t)*x0 + t*x1 + + def loss(self, predicted_v: torch.Tensor, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor: + # return the mean error without reducing the batch dimension + reduce_dim = list(range(1, len(predicted_v.shape))) + target_v = x1 - (1 - self.min_sigma) * x0 + return (predicted_v - target_v).pow(2).mean(dim=reduce_dim) + + def get_x0_xt_c( + self, + x1: torch.Tensor, + t: torch.Tensor, + Cs: list[torch.Tensor], + generator: Optional[torch.Generator] = None + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + x0 = torch.empty_like(x1).normal_(generator=generator) + + xt = self.get_conditional_flow(x0, x1, t) + return x0, x1, xt, Cs + + def to_prior(self, fn: Callable, x1: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x1, 1, 0) + + def to_data(self, fn: Callable, x0: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x0, 0, 1) + + def run_t0_to_t1(self, fn: Callable, x0: torch.Tensor, t0: float, t1: float) -> torch.Tensor: + # fn: a function that takes (t, x) and returns the direction x0->x1 + + if self.inference_mode == 'adaptive': + return odeint(fn, x0, torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype)) + elif self.inference_mode == 'euler': + x = x0 + steps = torch.linspace(t0, t1 - self.min_sigma, self.num_steps + 1) + for ti, t in enumerate(steps[:-1]): + flow = fn(t, x) + next_t = steps[ti + 1] + dt = next_t - t + x = x + dt * flow + + return x diff --git a/meanaudio/model/low_level.py b/meanaudio/model/low_level.py new file mode 100644 index 
0000000000000000000000000000000000000000..85930b252d0796f491c6df20114a94fc50572b1c --- /dev/null +++ b/meanaudio/model/low_level.py @@ -0,0 +1,120 @@ +import torch +from torch import nn +from torch.nn import functional as F + + +class ChannelLastConv1d(nn.Conv1d): + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: B, seq, D + x = x.permute(0, 2, 1) # B, D, seq + x = super().forward(x) + x = x.permute(0, 2, 1) + return x + + +# https://github.com/Stability-AI/sd3-ref +class MLP(nn.Module): # gated FFN + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. + + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = nn.Linear(dim, hidden_dim, bias=False) + self.w2 = nn.Linear(hidden_dim, dim, bias=False) + self.w3 = nn.Linear(dim, hidden_dim, bias=False) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) + + +class ConvMLP(nn.Module): + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + kernel_size: int = 3, + padding: int = 1, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. 
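+
+        Note:
+            In this module w1, w2 and w3 are ChannelLastConv1d layers rather than parallel
+            linear layers, so the gated FFN w2(silu(w1(x)) * w3(x)) also mixes information
+            along the sequence (time) dimension.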
+ + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w2 = ChannelLastConv1d(hidden_dim, + dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w3 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) + +if __name__ == "__main__": + latent_dim = 20 + hidden_dim = 64 * 7 + conv1d = ChannelLastConv1d( + in_channels = latent_dim, + out_channels = hidden_dim, + kernel_size = 7, + padding = 3 + ) + num_params = sum([p.numel() for p in conv1d.parameters()]) + print(conv1d) + print(f"Num params for conv1d: {num_params}") + + B, T, D = 128, 250, 20 + x = torch.randn(B, T, D) + h = conv1d(x) + + conv_mlp = ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3) + num_params = sum([p.numel() for p in conv_mlp.parameters()]) + print(conv_mlp) + print(f"Nim params for convmlp: {num_params}") + y = conv_mlp(h) + print(y.shape) \ No newline at end of file diff --git a/meanaudio/model/mean_flow.py b/meanaudio/model/mean_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..d0723b7017d7e432a05537f3f8a6a6381027f573 --- /dev/null +++ b/meanaudio/model/mean_flow.py @@ -0,0 +1,178 @@ +import logging +from typing import Callable, Optional + +import torch +from torchdiffeq import odeint +import torch.nn as nn +log = logging.getLogger() + +import torch +import torch.nn.functional as F +from einops import rearrange +from functools import partial +import numpy as np +import math + + +def normalize_to_neg1_1(x): + return x * 2 - 1 + + +def unnormalize_to_0_1(x): + return (x + 1) * 0.5 + + +def stopgrad(x): + return x.detach() + + +def adaptive_l2_loss(error, gamma=0, c=1e-3): + """ + Adaptive L2 loss: sg(w) * ||Δ||_2^2, where w = 1 / (||Δ||^2 + c)^p, p = 1 - γ + Args: + error: Tensor of shape (B, C, W, H) + gamma: Power used in original ||Δ||^{2γ} loss + c: Small constant for stability + Returns: + Scalar loss + """ + delta_sq = torch.mean(error ** 2, dim=(1, 2), keepdim=False) + p = 1.0 - gamma + w = 1.0 / (delta_sq + c).pow(p) + loss = delta_sq # ||Δ||^2 + return stopgrad(w) * loss + + +def cosine_annealing(start, end, step, total_steps): + cos_inner = math.pi * step / total_steps + return end + 0.5 * (start - end) * (1 + math.cos(cos_inner)) + + +## partially from https://github.com/haidog-yaqub/MeanFlow +class MeanFlow(): + def __init__( + self, + steps=1, + flow_ratio=0.75, + time_dist=['lognorm', -0.4, 1.0], + w=0.3, + k=0.9, + cfg_uncond='u', + jvp_api='autograd', + ): + super().__init__() + self.flow_ratio = flow_ratio + self.time_dist = time_dist + self.w = w + self.k = k + self.steps = steps + + self.cfg_uncond = cfg_uncond + self.jvp_api = jvp_api + assert jvp_api in ['funtorch', 'autograd'], "jvp_api must be 'funtorch' or 'autograd'" + if jvp_api == 'funtorch': + self.jvp_fn = torch.func.jvp + self.create_graph = False + elif jvp_api == 'autograd': + self.jvp_fn = torch.autograd.functional.jvp + self.create_graph = True + log.info(f'MeanFlow initialized with {steps} steps') + + def sample_t_r(self, batch_size, device): + if self.time_dist[0] == 'uniform': + samples = np.random.rand(batch_size, 2).astype(np.float32) + + elif self.time_dist[0] == 'lognorm': + mu, sigma = self.time_dist[-2], self.time_dist[-1] + normal_samples = 
np.random.randn(batch_size, 2).astype(np.float32) * sigma + mu + samples = 1 / (1 + np.exp(-normal_samples)) + + t_np = np.maximum(samples[:, 0], samples[:, 1]) + r_np = np.minimum(samples[:, 0], samples[:, 1]) + + # we don't use self.flow ratio if we use scheduler + # !TODO: implement flow ratio scheduler + num_selected = int(self.flow_ratio * batch_size) + indices = np.random.permutation(batch_size)[:num_selected] + r_np[indices] = t_np[indices] + + t = torch.tensor(t_np, device=device) + r = torch.tensor(r_np, device=device) + return t, r + + def to_prior(self, fn: Callable, x1: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x1) + + @torch.no_grad() + def to_data(self, fn: Callable, x0: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x0) + + def run_t0_to_t1(self, fn: Callable, x0: torch.Tensor) -> torch.Tensor: + t = torch.ones((x0.shape[0],), device=x0.device,dtype=x0.dtype) + r = torch.zeros((x0.shape[0],), device=x0.device,dtype=x0.dtype) + steps = torch.linspace(1, 0, self.steps + 1).to(device=x0.device,dtype=x0.dtype) + for ti, t in enumerate(steps[:-1]): + t = t.expand(x0.shape[0]) + next_t = steps[ti + 1].expand(x0.shape[0]) + u_flow = fn(t=t, r=next_t, x=x0) + dt = (t - next_t).mean() + x0 = x0 - dt * u_flow + return x0 + + def loss(self, + fn: Callable, + x0: torch.Tensor, + text_f: torch.Tensor, + text_f_c: torch.Tensor, + text_f_undrop: torch.Tensor, + text_f_c_undrop: torch.Tensor, + empty_string_feat: torch.Tensor, + empty_string_feat_c: torch.Tensor): + if isinstance(fn, torch.nn.parallel.DistributedDataParallel): + fn = fn.module + batch_size = x0.shape[0] + device = x0.device + e = torch.randn_like(x0) + t, r = self.sample_t_r(batch_size, device) + t_ = rearrange(t, "b -> b 1 1 ") + r_ = rearrange(r, "b -> b 1 1 ") + z = (1 - t_) * x0 + t_ * e # r < t + v = e - x0 + + if self.w is not None: + u_text_f = empty_string_feat.expand(batch_size, -1, -1) + u_text_f_c = empty_string_feat_c.expand(batch_size, -1) + u_t = fn(latent=z, + text_f=u_text_f, + text_f_c=u_text_f_c, + r=t, + t=t).detach().requires_grad_(False) + u_t_c = fn(latent=z, + text_f=text_f_undrop, + text_f_c=text_f_c_undrop, + r=t, + t=t).detach().requires_grad_(False) + + v_hat = self.w * v + self.k * u_t_c + (1 - self.w - self.k) * u_t + else: + v_hat = v + + device = z.device + model_partial = partial(fn, text_f=text_f,text_f_c=text_f_c) + jvp_args = ( + lambda z_f, r_f, t_f: model_partial(latent=z_f, r=r_f, t=t_f), + (z, r, t), + (v_hat, torch.zeros_like(r), torch.ones_like(t)), + ) + if self.create_graph: + u, dudt = self.jvp_fn(*jvp_args, create_graph=True) + else: + u, dudt = self.jvp_fn(*jvp_args) + u_tgt = v_hat - (t_ - r_) * dudt + error = u - stopgrad(u_tgt) + loss = adaptive_l2_loss(error) + return loss, r, t + + +if __name__ == '__main__': + pass \ No newline at end of file diff --git a/meanaudio/model/networks.py b/meanaudio/model/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..98dcb2487615adf8016548ca730eeaae252fda70 --- /dev/null +++ b/meanaudio/model/networks.py @@ -0,0 +1,630 @@ +import logging +from dataclasses import dataclass +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import sys +from pathlib import Path +sys.path.append(str(Path(__file__).parent.parent.parent)) + +from meanaudio.ext.rotary_embeddings import compute_rope_rotations +from meanaudio.model.embeddings import TimestepEmbedder +from meanaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP +from 
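To make the training objective in MeanFlow.loss above easier to follow, here is a minimal self-contained sketch of the same identity on a toy velocity network (the network, shapes, and constants are illustrative only, not the ones used in this repo): the model predicts an average velocity u(z, r, t), its total time derivative along the trajectory is obtained with a JVP in the tangent direction (v, 0, 1), and the regression target is v - (t - r) * du/dt with gradients stopped. The repo additionally mixes classifier-free guidance into the target velocity (v_hat) and later integrates the learned average velocity with as few as one Euler step in run_t0_to_t1; both are omitted here for brevity.

import torch
import torch.nn as nn

torch.manual_seed(0)
B, D = 4, 8
net = nn.Linear(D + 2, D)  # toy u_theta; the real model also conditions on text features

def u_theta(z, r, t):
    # fold the two scalar times into the input, purely for illustration
    return net(torch.cat([z, r[:, None], t[:, None]], dim=-1))

x0 = torch.randn(B, D)                      # data latent
e = torch.randn(B, D)                       # prior noise
t = torch.rand(B)
r = torch.rand(B) * t                       # keep r <= t
z = (1 - t[:, None]) * x0 + t[:, None] * e  # interpolant, as in MeanFlow.loss
v = e - x0                                  # instantaneous flow-matching velocity

# du/dt along the path: the tangent is (dz/dt, dr/dt, dt/dt) = (v, 0, 1)
u, dudt = torch.autograd.functional.jvp(
    u_theta, (z, r, t), (v, torch.zeros_like(r), torch.ones_like(t)), create_graph=True)

u_tgt = (v - (t - r)[:, None] * dudt).detach()
err_sq = ((u - u_tgt) ** 2).mean(dim=-1)
loss = ((1.0 / (err_sq + 1e-3)).detach() * err_sq).mean()  # adaptive_l2_loss (gamma=0), then mean
loss.backward()
print(float(loss))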
meanaudio.model.transformer_layers import (FinalBlock, JointBlock, MMDitSingleBlock) + +log = logging.getLogger() + + +@dataclass +class PreprocessedConditions: + text_f: torch.Tensor + text_f_c: torch.Tensor + + +class FluxAudio(nn.Module): + # Flux style latent transformer for TTA, single time step embedding + + def __init__(self, + *, + latent_dim: int, + text_dim: int, + text_c_dim: int, + hidden_dim: int, + depth: int, + fused_depth: int, + num_heads: int, + mlp_ratio: float = 4.0, + latent_seq_len: int, + text_seq_len: int = 77, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None, + empty_string_feat: Optional[torch.Tensor] = None, + empty_string_feat_c: Optional[torch.Tensor] = None, + use_rope: bool = False) -> None: + super().__init__() + + self.latent_dim = latent_dim + self._latent_seq_len = latent_seq_len + self._text_seq_len = text_seq_len + self.hidden_dim = hidden_dim + self.num_heads = num_heads + self.use_rope = use_rope + self.mm_depth = depth - fused_depth + + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.text_input_proj = nn.Sequential( + nn.Linear(text_dim, hidden_dim), + MLP(hidden_dim, hidden_dim * 4), + ) + + self.text_cond_proj = nn.Sequential( + nn.Linear(text_c_dim, hidden_dim), + MLP(hidden_dim, hidden_dim*4) + ) + + self.final_layer = FinalBlock(hidden_dim, latent_dim) + + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) + + self.joint_blocks = nn.ModuleList([ + JointBlock(hidden_dim, + num_heads, + mlp_ratio=mlp_ratio, + pre_only=(i == depth - fused_depth - 1)) for i in range(depth - fused_depth) # last layer is pre-only (only appllied to text and vision) + ]) + + self.fused_blocks = nn.ModuleList([ + MMDitSingleBlock(hidden_dim, num_heads, mlp_ratio=mlp_ratio, kernel_size=3, padding=1) + for i in range(fused_depth) + ]) + + if latent_mean is None: + # these values are not meant to be used + # if you don't provide mean/std here, we should load them later from a checkpoint + assert latent_std is None + latent_mean = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + latent_std = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + else: + assert latent_std is not None + assert latent_mean.numel() == latent_dim, f'{latent_mean.numel()=} != {latent_dim=}' + + if empty_string_feat is None: + empty_string_feat = torch.zeros((text_seq_len, text_dim)) + if empty_string_feat_c is None: + empty_string_feat_c = torch.zeros((text_c_dim)) + + assert empty_string_feat.shape[-1] == text_dim, f'{empty_string_feat.shape[-1]} == {text_dim}' + assert empty_string_feat_c.shape[-1] == text_c_dim, f'{empty_string_feat_c.shape[-1]} == {text_c_dim}' + + self.latent_mean = nn.Parameter(latent_mean.view(1, 1, -1), requires_grad=False) # (1, 1, d) + self.latent_std = nn.Parameter(latent_std.view(1, 1, -1), requires_grad=False) # (1, 1, d) + + self.empty_string_feat = nn.Parameter(empty_string_feat, requires_grad=False) + self.empty_string_feat_c = nn.Parameter(empty_string_feat_c, requires_grad=False) + + + self.initialize_weights() + if self.use_rope: + log.info("Network: Enabling RoPE embeddings") + self.initialize_rotations() + else: + log.info("Network: RoPE embedding disabled") + self.latent_rot = None + self.text_rot = None + + def initialize_rotations(self): + base_freq = 1.0 + latent_rot = 
compute_rope_rotations(self._latent_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + text_rot = compute_rope_rotations(self._text_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + + self.latent_rot = nn.Buffer(latent_rot, persistent=False) # will not be saved into state dict + self.text_rot = nn.Buffer(text_rot, persistent=False) + + def update_seq_lengths(self, latent_seq_len: int) -> None: + self._latent_seq_len = latent_seq_len + if self.use_rope: + self.initialize_rotations() # after changing seq_len we need to re-initialize RoPE to match new seq_len + + def initialize_weights(self): + + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embed.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embed.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + for block in self.joint_blocks: + nn.init.constant_(block.latent_block.adaLN_modulation[-1].weight, 0) # the linear layer -> 6 coefficients + nn.init.constant_(block.latent_block.adaLN_modulation[-1].bias, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].bias, 0) + for block in self.fused_blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.conv.weight, 0) + nn.init.constant_(self.final_layer.conv.bias, 0) + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + # return (x - self.latent_mean) / self.latent_std + return x.sub_(self.latent_mean).div_(self.latent_std) + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + # return x * self.latent_std + self.latent_mean + return x.mul_(self.latent_std).add_(self.latent_mean) + + def preprocess_conditions(self, text_f: torch.Tensor, text_f_c: torch.Tensor) -> PreprocessedConditions: + """ + cache computations that do not depend on the latent/time step + i.e., the features are reused over steps during inference + """ + assert text_f.shape[1] == self._text_seq_len, f'{text_f.shape=} {self._text_seq_len=}' + + bs = text_f.shape[0] + + # get global and local text features + # NOTE here the order of projection has been changed so global and local features are projected seperately + text_f_c = self.text_cond_proj(text_f_c) # (B, D) + text_f = self.text_input_proj(text_f) # (B, VN, D) + + return PreprocessedConditions(text_f=text_f, + text_f_c=text_f_c) + + def predict_flow(self, latent: torch.Tensor, t: torch.Tensor, + conditions: PreprocessedConditions) -> torch.Tensor: + """ + for non-cacheable computations + """ + assert latent.shape[1] == self._latent_seq_len, f'{latent.shape=} {self._latent_seq_len=}' + + text_f = conditions.text_f + text_f_c = conditions.text_f_c + + latent = self.audio_input_proj(latent) # (B, N, D) + + global_c = self.t_embed(t).unsqueeze(1) + text_f_c.unsqueeze(1) # (B, 1, D) + + extended_c = global_c # extended_c: Latent_c, global_c: Text_c + + for block in self.joint_blocks: + latent, text_f = block(latent, text_f, global_c, extended_c, self.latent_rot, 
self.text_rot) # (B, N, D) + + for block in self.fused_blocks: + latent = block(latent, extended_c, self.latent_rot) + + flow = self.final_layer(latent, extended_c) # (B, N, out_dim), remove t + return flow + + def forward(self, latent: torch.Tensor, text_f: torch.Tensor, text_f_c: torch.Tensor, t: torch.Tensor) -> torch.Tensor: + """ + latent: (B, N, C) + text_f: (B, T, D) + t: (B,) + """ + conditions = self.preprocess_conditions(text_f, text_f_c) # cachable operations + flow = self.predict_flow(latent, t, conditions) # non-cachable operations + return flow + + def get_empty_string_sequence(self, bs: int) -> tuple[torch.Tensor, torch.Tensor]: + return self.empty_string_feat.unsqueeze(0).expand(bs, -1, -1), \ + self.empty_string_feat_c.unsqueeze(0).expand(bs, -1) # (b, d) + + def get_empty_conditions( + self, + bs: int, + *, + negative_text_features: Optional[torch.Tensor] = None) -> PreprocessedConditions: + if negative_text_features is not None: + empty_string_feat, empty_string_feat_c = negative_text_features + else: + empty_string_feat, empty_string_feat_c = self.get_empty_string_sequence(1) + + conditions = self.preprocess_conditions(empty_string_feat, + empty_string_feat_c) # use encoder's empty features + + if negative_text_features is None: + conditions.text_f = conditions.text_f.expand(bs, -1, -1) + + conditions.text_f_c = conditions.text_f_c.expand(bs, -1) + + return conditions + + def ode_wrapper(self, t: torch.Tensor, latent: torch.Tensor, conditions: PreprocessedConditions, + empty_conditions: PreprocessedConditions, cfg_strength: float) -> torch.Tensor: + t = t * torch.ones(len(latent), device=latent.device, dtype=latent.dtype) + + if cfg_strength < 1.0: + return self.predict_flow(latent, t, conditions) + else: + return (cfg_strength * self.predict_flow(latent, t, conditions) + + (1 - cfg_strength) * self.predict_flow(latent, t, empty_conditions)) + + def load_weights(self, src_dict) -> None: + if 't_embed.freqs' in src_dict: + del src_dict['t_embed.freqs'] + if 'latent_rot' in src_dict: + del src_dict['latent_rot'] + if 'text_rot' in src_dict: + del src_dict['text_rot'] + + if 'empty_string_feat_c' not in src_dict.keys(): # FIXME: issue of version mismatch here + src_dict['empty_string_feat_c'] = src_dict['empty_string_feat'].mean(dim=0) + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return self.latent_mean.device + + @property + def latent_seq_len(self) -> int: + return self._latent_seq_len + + +class MeanAudio(nn.Module): + # Flux style latent transformer for TTA, dual time step embedding + + def __init__(self, + *, + latent_dim: int, + text_dim: int, + text_c_dim: int, + hidden_dim: int, + depth: int, + fused_depth: int, + num_heads: int, + mlp_ratio: float = 4.0, + latent_seq_len: int, + text_seq_len: int = 77, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None, + empty_string_feat: Optional[torch.Tensor] = None, + empty_string_feat_c: Optional[torch.Tensor] = None, + use_rope: bool = False) -> None: + super().__init__() + + self.latent_dim = latent_dim + self._latent_seq_len = latent_seq_len + self._text_seq_len = text_seq_len + self.hidden_dim = hidden_dim + self.num_heads = num_heads + self.use_rope = use_rope + self.mm_depth = depth - fused_depth + + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.text_input_proj = 
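A short numeric note on the classifier-free guidance applied in ode_wrapper above (sketch only; the values are made up): the guided flow is an extrapolation cfg * u_cond + (1 - cfg) * u_uncond, any cfg_strength below 1 falls back to the conditional prediction alone, and cfg_strength = 1 takes the blended branch but still reduces to the conditional flow.

import torch

def guided_flow(u_cond, u_uncond, cfg_strength):
    # mirrors FluxAudio.ode_wrapper / MeanAudio.ode_wrapper above
    if cfg_strength < 1.0:
        return u_cond
    return cfg_strength * u_cond + (1 - cfg_strength) * u_uncond

u_cond, u_uncond = torch.tensor([1.0]), torch.tensor([0.2])
print(guided_flow(u_cond, u_uncond, 4.5))  # 4.5 * 1.0 - 3.5 * 0.2 = 3.8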
nn.Sequential( + nn.Linear(text_dim, hidden_dim), + MLP(hidden_dim, hidden_dim * 4), + ) + + self.text_cond_proj = nn.Sequential( + nn.Linear(text_c_dim, hidden_dim), + MLP(hidden_dim, hidden_dim*4) + ) + + self.final_layer = FinalBlock(hidden_dim, latent_dim) + + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) + #add + self.r_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) + self.joint_blocks = nn.ModuleList([ + JointBlock(hidden_dim, + num_heads, + mlp_ratio=mlp_ratio, + pre_only=(i == depth - fused_depth - 1)) for i in range(depth - fused_depth) # last layer is pre-only (only appllied to text and vision) + ]) + + self.fused_blocks = nn.ModuleList([ + MMDitSingleBlock(hidden_dim, num_heads, mlp_ratio=mlp_ratio, kernel_size=3, padding=1) + for i in range(fused_depth) + ]) + + if latent_mean is None: + # these values are not meant to be used + # if you don't provide mean/std here, we should load them later from a checkpoint + assert latent_std is None + latent_mean = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + latent_std = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + else: + assert latent_std is not None + assert latent_mean.numel() == latent_dim, f'{latent_mean.numel()=} != {latent_dim=}' + + if empty_string_feat is None: + empty_string_feat = torch.zeros((text_seq_len, text_dim)) + if empty_string_feat_c is None: + empty_string_feat_c = torch.zeros((text_c_dim)) + + assert empty_string_feat.shape[-1] == text_dim, f'{empty_string_feat.shape[-1]} == {text_dim}' + assert empty_string_feat_c.shape[-1] == text_c_dim, f'{empty_string_feat_c.shape[-1]} == {text_c_dim}' + + self.latent_mean = nn.Parameter(latent_mean.view(1, 1, -1), requires_grad=False) # (1, 1, d) + self.latent_std = nn.Parameter(latent_std.view(1, 1, -1), requires_grad=False) # (1, 1, d) + + self.empty_string_feat = nn.Parameter(empty_string_feat, requires_grad=False) + self.empty_string_feat_c = nn.Parameter(empty_string_feat_c, requires_grad=False) + + + self.initialize_weights() + if self.use_rope: + log.info("Network: Enabling RoPE embeddings") + self.initialize_rotations() + else: + log.info("Network: RoPE embedding disabled") + self.latent_rot = None + self.text_rot = None + + def initialize_rotations(self): + base_freq = 1.0 + latent_rot = compute_rope_rotations(self._latent_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + text_rot = compute_rope_rotations(self._text_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + + self.latent_rot = nn.Buffer(latent_rot, persistent=False) # will not be saved into state dict + self.text_rot = nn.Buffer(text_rot, persistent=False) + + def update_seq_lengths(self, latent_seq_len: int) -> None: + self._latent_seq_len = latent_seq_len + if self.use_rope: + self.initialize_rotations() # after changing seq_len we need to re-initialize RoPE to match new seq_len + + def initialize_weights(self): + + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embed.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embed.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + for block in self.joint_blocks: + 
nn.init.constant_(block.latent_block.adaLN_modulation[-1].weight, 0) # the linear layer -> 6 coefficients + nn.init.constant_(block.latent_block.adaLN_modulation[-1].bias, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].bias, 0) + for block in self.fused_blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.conv.weight, 0) + nn.init.constant_(self.final_layer.conv.bias, 0) + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + # return (x - self.latent_mean) / self.latent_std + return x.sub_(self.latent_mean).div_(self.latent_std) + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + # return x * self.latent_std + self.latent_mean + return x.mul_(self.latent_std).add_(self.latent_mean) + + def preprocess_conditions(self, text_f: torch.Tensor, text_f_c: torch.Tensor) -> PreprocessedConditions: + """ + cache computations that do not depend on the latent/time step + i.e., the features are reused over steps during inference + """ + assert text_f.shape[1] == self._text_seq_len, f'{text_f.shape=} {self._text_seq_len=}' + + bs = text_f.shape[0] + + # get global and local text features + # NOTE here the order of projection has been changed so global and local features are projected seperately + text_f_c = self.text_cond_proj(text_f_c) # (B, D) + text_f = self.text_input_proj(text_f) # (B, VN, D) + + return PreprocessedConditions(text_f=text_f, + text_f_c=text_f_c) + + def predict_flow(self, latent: torch.Tensor, t: torch.Tensor,r: torch.Tensor,#need r torch.Tensor: + """ + for non-cacheable computations + """ + #assert r<=t,"r should smaller than t" + + assert latent.shape[1] == self._latent_seq_len, f'{latent.shape=} {self._latent_seq_len=}' + + text_f = conditions.text_f + text_f_c = conditions.text_f_c + + latent = self.audio_input_proj(latent) # (B, N, D) + #easy try:same embed + global_c = self.t_embed(t).unsqueeze(1) + self.r_embed(r).unsqueeze(1) + text_f_c.unsqueeze(1) # (B, 1, D) + + extended_c = global_c # !TODO add fine-grained control + + for block in self.joint_blocks: + latent, text_f = block(latent, text_f, global_c, extended_c, self.latent_rot, self.text_rot) # (B, N, D) + + for block in self.fused_blocks: + latent = block(latent, extended_c, self.latent_rot) + + flow = self.final_layer(latent, extended_c) # (B, N, out_dim), remove t + return flow + + def forward(self, latent: torch.Tensor, text_f: torch.Tensor, text_f_c: torch.Tensor, r: torch.Tensor,t: torch.Tensor) -> torch.Tensor: + """ + latent: (B, N, C) + text_f: (B, T, D) + text_f_c + r: (B,) + t:(B,) + """ + #print("2") + + conditions = self.preprocess_conditions(text_f, text_f_c) # cachable operations + #print(conditions) + flow = self.predict_flow(latent, t,r, conditions) # non-cachable operations + return flow + + def get_empty_string_sequence(self, bs: int) -> tuple[torch.Tensor, torch.Tensor]: + return self.empty_string_feat.unsqueeze(0).expand(bs, -1, -1), \ + self.empty_string_feat_c.unsqueeze(0).expand(bs, -1) # (b, d) + + def get_empty_conditions( + self, + bs: int, + *, + negative_text_features: Optional[torch.Tensor] = None) -> PreprocessedConditions: + if negative_text_features is not None: + empty_string_feat, empty_string_feat_c = 
negative_text_features + else: + empty_string_feat, empty_string_feat_c = self.get_empty_string_sequence(1) + + conditions = self.preprocess_conditions(empty_string_feat, + empty_string_feat_c) # use encoder's empty features + if negative_text_features is None: + conditions.text_f = conditions.text_f.expand(bs, -1, -1) + + conditions.text_f_c = conditions.text_f_c.expand(bs, -1) + + return conditions + + def ode_wrapper(self, t: torch.Tensor, r: torch.Tensor, latent: torch.Tensor, conditions: PreprocessedConditions, + empty_conditions: PreprocessedConditions, cfg_strength: float) -> torch.Tensor: + t = t * torch.ones(len(latent), device=latent.device, dtype=latent.dtype) + r = r * torch.ones(len(latent), device=latent.device, dtype=latent.dtype) + #(r) + if cfg_strength < 1.0: + return self.predict_flow(latent, t,r, conditions) + else: + return (cfg_strength * self.predict_flow(latent, t,r, conditions) + + (1 - cfg_strength) * self.predict_flow(latent, t,r, empty_conditions)) + + + def load_weights(self, src_dict) -> None: + def remove_prefix(storage): + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in storage.items(): + name = k.replace("ema_model.", "") + new_state_dict[name] = v + + return new_state_dict + + src_dict=remove_prefix(src_dict) + if 't_embed.freqs' in src_dict: + del src_dict['t_embed.freqs'] + if 'r_embed.freqs' in src_dict: + del src_dict['r_embed.freqs'] + if 'latent_rot' in src_dict: + del src_dict['latent_rot'] + if 'text_rot' in src_dict: + del src_dict['text_rot'] + + if 'empty_string_feat_c' not in src_dict.keys(): # FIXME: issue of version mismatch here + src_dict['empty_string_feat_c'] = src_dict['empty_string_feat'].mean(dim=0) + if '_extra_state' in src_dict: + del src_dict['_extra_state'] + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return self.latent_mean.device + + @property + def latent_seq_len(self) -> int: + return self._latent_seq_len + + +def fluxaudio_fm(**kwargs) -> FluxAudio: + num_heads = 7 + return FluxAudio(latent_dim=20, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=312, # for 10s audio + **kwargs) +def meanaudio_mf(**kwargs) -> MeanAudio: + num_heads = 7 + return MeanAudio(latent_dim=20, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=312, # for 10s audio + **kwargs) + + +def get_mean_audio(name: str, **kwargs) -> MeanAudio: + if name == 'meanaudio_mf': + return meanaudio_mf(**kwargs) + if name == 'fluxaudio_fm': + return fluxaudio_fm(**kwargs) + + raise ValueError(f'Unknown model name: {name}') + + +if __name__ == '__main__': + from meanaudio.model.utils.sample_utils import log_normal_sample + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[ + # logging.FileHandler("main.log"), + logging.StreamHandler() + ] + ) + + network: MeanAudio = get_mean_audio('meanaudio_mf', + use_rope=False, + text_c_dim=512) + + x = torch.randn(256, 312, 20) + print(x.shape) + print('Finish') + diff --git a/meanaudio/model/sequence_config.py b/meanaudio/model/sequence_config.py new file mode 100644 index 0000000000000000000000000000000000000000..257aad55064bd254da596cd7e2e5c25a23b0bfac --- /dev/null +++ b/meanaudio/model/sequence_config.py @@ -0,0 +1,36 @@ +import dataclasses +import math + + +@dataclasses.dataclass +class SequenceConfig: + # general + duration: float + + # 
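For readers skimming the two transformer variants above, a minimal shape-check sketch (it assumes the package added by this diff is importable; the text features are random stand-ins): FluxAudio is conditioned on a single time t, while MeanAudio takes the pair (r, t) required by the MeanFlow objective.

import torch
from meanaudio.model.networks import get_mean_audio

net = get_mean_audio('meanaudio_mf', use_rope=False, text_c_dim=512)  # as in __main__ above
B = 2
latent = torch.randn(B, 312, 20)   # (B, latent_seq_len, latent_dim) for ~10 s of 16 kHz audio
text_f = torch.randn(B, 77, 1024)  # per-token text features (T5-sized)
text_f_c = torch.randn(B, 512)     # pooled text embedding (CLAP-sized)
r = torch.zeros(B)
t = torch.ones(B)
flow = net(latent, text_f, text_f_c, r, t)  # predicted average velocity
print(flow.shape)                           # torch.Size([2, 312, 20])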
audio + sampling_rate: int + spectrogram_frame_rate: int + latent_downsample_rate: int = 2 + + @property + def num_audio_frames(self) -> int: + # we need an integer number of latents + return self.latent_seq_len * self.spectrogram_frame_rate * self.latent_downsample_rate + + @property + def latent_seq_len(self) -> int: + return int( + math.ceil(self.duration * self.sampling_rate / self.spectrogram_frame_rate / + self.latent_downsample_rate)) + +CONFIG_16K = SequenceConfig(duration=9.975, sampling_rate=16000, spectrogram_frame_rate=256) # !TODO fix sequnce config here -> Latent length = 312 +CONFIG_44K = SequenceConfig(duration=9.975, sampling_rate=44100, spectrogram_frame_rate=512) + + +if __name__ == '__main__': + assert CONFIG_16K.latent_seq_len == 312 + assert CONFIG_16K.clip_seq_len == 64 + assert CONFIG_16K.sync_seq_len == 192 + assert CONFIG_16K.num_audio_frames == 128000 # 312 * 256 * 2 + + print('Passed') diff --git a/meanaudio/model/transformer_layers.py b/meanaudio/model/transformer_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0f4ba708da384638367f054a2c286aacc084d3f5 --- /dev/null +++ b/meanaudio/model/transformer_layers.py @@ -0,0 +1,192 @@ +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from einops.layers.torch import Rearrange + +from meanaudio.ext.rotary_embeddings import apply_rope +from meanaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP + + +def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor): + return x * (1 + scale) + shift # scale is actually the add term for x (res connect for modulation) + + +def attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + # flash attention is not compatible with JVP calculation + with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False): + out = F.scaled_dot_product_attention(q, k, v) + out = rearrange(out, 'b h n d -> b n (h d)').contiguous() + return out + + +class SelfAttention(nn.Module): + + def __init__(self, dim: int, nheads: int): + super().__init__() + self.dim = dim + self.nheads = nheads + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.q_norm = nn.RMSNorm(dim // nheads) + self.k_norm = nn.RMSNorm(dim // nheads) + + self.split_into_heads = Rearrange('b n (h d j) -> b h n d j', + h=nheads, + d=dim // nheads, + j=3) + + def pre_attention( # get qkv for input x, apply rotary pos embedding if needed + self, x: torch.Tensor, + rot: Optional[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + # x: batch_size * n_tokens * n_channels + qkv = self.qkv(x) + q, k, v = self.split_into_heads(qkv).chunk(3, dim=-1) # chunk: split the input into 3 components + q = q.squeeze(-1) + k = k.squeeze(-1) + v = v.squeeze(-1) + q = self.q_norm(q) + k = self.k_norm(k) + + if rot is not None: + q = apply_rope(q, rot) + k = apply_rope(k, rot) + + return q, k, v + + def forward( + self, + x: torch.Tensor, # batch_size * n_tokens * n_channels + ) -> torch.Tensor: + q, v, k = self.pre_attention(x) + out = attention(q, k, v) + return out + + +class MMDitSingleBlock(nn.Module): + + def __init__(self, + dim: int, + nhead: int, + mlp_ratio: float = 4.0, + pre_only: bool = False, + kernel_size: int = 7, + padding: int = 3): + super().__init__() + self.norm1 = nn.LayerNorm(dim, elementwise_affine=False) + self.attn = SelfAttention(dim, nhead) + + self.pre_only = pre_only + if 
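Worked numbers for CONFIG_16K above (an arithmetic check only): 9.975 s at 16 kHz is 159 600 samples; dividing by the spectrogram frame rate (256) and the latent downsample rate (2) gives 311.72, which ceil rounds to the 312 latent frames the networks assume, and num_audio_frames maps that back to 312 * 256 * 2 = 159 744 samples.

import math

duration, sr, frame_rate, down = 9.975, 16000, 256, 2
latent_seq_len = math.ceil(duration * sr / frame_rate / down)
print(latent_seq_len)                       # 312
print(latent_seq_len * frame_rate * down)   # 159744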
pre_only: + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 2 * dim, bias=True)) + else: + if kernel_size == 1: + self.linear1 = nn.Linear(dim, dim) + else: + self.linear1 = ChannelLastConv1d(dim, dim, kernel_size=kernel_size, padding=padding) + self.norm2 = nn.LayerNorm(dim, elementwise_affine=False) + + if kernel_size == 1: + self.ffn = MLP(dim, int(dim * mlp_ratio)) + else: + self.ffn = ConvMLP(dim, + int(dim * mlp_ratio), + kernel_size=kernel_size, + padding=padding) + + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True)) + + def pre_attention(self, x: torch.Tensor, c: torch.Tensor, rot: Optional[torch.Tensor]): + """get qkv from x and modulation coefficients from condition""" + # x: BS * N * D + # cond: BS * D + modulation = self.adaLN_modulation(c) # get modulation coefficients + if self.pre_only: + (shift_msa, scale_msa) = modulation.chunk(2, dim=-1) + gate_msa = shift_mlp = scale_mlp = gate_mlp = None + else: + (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, + gate_mlp) = modulation.chunk(6, dim=-1) + + x = modulate(self.norm1(x), shift_msa, scale_msa) # first AdaLN + q, k, v = self.attn.pre_attention(x, rot) # linear for qkv + return (q, k, v), (gate_msa, shift_mlp, scale_mlp, gate_mlp) + + def post_attention(self, x: torch.Tensor, attn_out: torch.Tensor, c: tuple[torch.Tensor]): + if self.pre_only: + return x + + (gate_msa, shift_mlp, scale_mlp, gate_mlp) = c + x = x + self.linear1(attn_out) * gate_msa # first linear/ConvMLP & scaling & residual + r = modulate(self.norm2(x), shift_mlp, scale_mlp) # second AdaLN + x = x + self.ffn(r) * gate_mlp # second linear/ConvMLP & scaling & residual + + return x + + def forward(self, x: torch.Tensor, cond: torch.Tensor, + rot: Optional[torch.Tensor]) -> torch.Tensor: + # x: BS * N * D + # cond: BS * D + x_qkv, x_conditions = self.pre_attention(x, cond, rot) + attn_out = attention(*x_qkv) + x = self.post_attention(x, attn_out, x_conditions) + + return x + + +class JointBlock(nn.Module): + + def __init__(self, dim: int, nhead: int, mlp_ratio: float = 4.0, pre_only: bool = False): + super().__init__() + self.pre_only = pre_only + self.latent_block = MMDitSingleBlock(dim, + nhead, + mlp_ratio, + pre_only=False, + kernel_size=3, + padding=1) + self.text_block = MMDitSingleBlock(dim, nhead, mlp_ratio, pre_only=pre_only, kernel_size=1) + + def forward(self, latent: torch.Tensor, text_f: torch.Tensor, + global_c: torch.Tensor, extended_c: torch.Tensor, + latent_rot: torch.Tensor, text_rot: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + # latent: BS * N1 * D + # c: BS * (1/N) * D + x_qkv, x_mod = self.latent_block.pre_attention(latent, extended_c, rot=latent_rot) # fine-grained features are only used for the audio branch + t_qkv, t_mod = self.text_block.pre_attention(text_f, global_c, rot=text_rot) + + latent_len = latent.shape[1] + text_len = text_f.shape[1] + + joint_qkv = [torch.cat([x_qkv[i], t_qkv[i]], dim=2) for i in range(3)] + + attn_out = attention(*joint_qkv) # core of joint block: joint attention + x_attn_out = attn_out[:, :latent_len] + t_attn_out = attn_out[:, latent_len:] + + latent = self.latent_block.post_attention(latent, x_attn_out, x_mod) + if not self.pre_only: + text_f = self.text_block.post_attention(text_f, t_attn_out, t_mod) # for pre-only layer we don't do post attention for condition features + + return latent, text_f + + +class FinalBlock(nn.Module): + + def __init__(self, dim, out_dim): + super().__init__() + self.adaLN_modulation = nn.Sequential(nn.SiLU(), 
nn.Linear(dim, 2 * dim, bias=True)) + self.norm = nn.LayerNorm(dim, elementwise_affine=False) + self.conv = ChannelLastConv1d(dim, out_dim, kernel_size=7, padding=3) + + def forward(self, latent, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1) + latent = modulate(self.norm(latent), shift, scale) + latent = self.conv(latent) + return latent diff --git a/meanaudio/model/utils/__init__.py b/meanaudio/model/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/model/utils/distributions.py b/meanaudio/model/utils/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..1d526a5b0b3dd2ae556d806a3397e1cf43c07fb9 --- /dev/null +++ b/meanaudio/model/utils/distributions.py @@ -0,0 +1,46 @@ +from typing import Optional + +import numpy as np +import torch + + +class DiagonalGaussianDistribution: + + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self, rng: Optional[torch.Generator] = None): + # x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + + r = torch.empty_like(self.mean).normal_(generator=rng) + x = self.mean + self.std * r + + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + + return 0.5 * torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar + else: + return 0.5 * (torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar) + + def nll(self, sample, dims=[1, 2, 3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean diff --git a/meanaudio/model/utils/features_utils.py b/meanaudio/model/utils/features_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..221cc27a5b9e9769ffd90d4e8d4dd66640c0c4d5 --- /dev/null +++ b/meanaudio/model/utils/features_utils.py @@ -0,0 +1,177 @@ +from typing import Literal, Optional + +import open_clip +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from open_clip import create_model_from_pretrained +from torchvision.transforms import Normalize +from transformers import T5EncoderModel, AutoTokenizer + +from meanaudio.ext.autoencoder import AutoEncoderModule +from meanaudio.ext.mel_converter import get_mel_converter +from meanaudio.model.utils.distributions import DiagonalGaussianDistribution +import laion_clap +import logging + + +def patch_clip(clip_model): + # a hack to make it output last hidden states + # https://github.com/mlfoundations/open_clip/blob/fc5a37b72d705f760ebbc7915b84729816ed471f/src/open_clip/model.py#L269 + def new_encode_text(self, text, normalize: bool = False): + cast_dtype = self.transformer.get_cast_dtype() + + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = self.transformer(x, attn_mask=self.attn_mask) + x = 
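The adaLN-Zero pattern used by the blocks above is easy to lose in the diff, so here is a compact sketch of the mechanic with toy shapes (illustrative only): a SiLU + Linear head maps the pooled condition to 6 * dim coefficients, the first shift/scale pair modulates the pre-attention LayerNorm, and the gates scale the residual branches; zero-initialising that head (as initialize_weights does) makes every block start out as an identity mapping.

import torch
import torch.nn as nn

def modulate(x, shift, scale):
    return x * (1 + scale) + shift

B, N, dim = 2, 10, 16
x = torch.randn(B, N, dim)                 # latent tokens
c = torch.randn(B, 1, dim)                 # pooled condition (timestep + global text)
adaLN = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim))
(shift_msa, scale_msa, gate_msa,
 shift_mlp, scale_mlp, gate_mlp) = adaLN(c).chunk(6, dim=-1)

norm = nn.LayerNorm(dim, elementwise_affine=False)
h = modulate(norm(x), shift_msa, scale_msa)                 # pre-attention modulation
attn_out = h                                                # stand-in for attention(q, k, v)
x = x + attn_out * gate_msa                                 # gated residual, as in post_attention
x = x + modulate(norm(x), shift_mlp, scale_mlp) * gate_mlp  # FFN branch (FFN itself omitted)
print(x.shape)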
self.ln_final(x) # [batch_size, n_ctx, transformer.width] + return F.normalize(x, dim=-1) if normalize else x + + clip_model.encode_text = new_encode_text.__get__(clip_model) + return clip_model + + +class FeaturesUtils(nn.Module): + + def __init__( + self, + *, + tod_vae_ckpt: Optional[str] = None, + bigvgan_vocoder_ckpt: Optional[str] = None, + enable_conditions: bool = True, + encoder_name=Literal['clip', 't5', 't5_clap', 't5_clap_cat'], + mode=Literal['16k', '44k'], + need_vae_encoder: bool = True, + ): + super().__init__() + + if enable_conditions: + self.encoder_name = encoder_name + if encoder_name == 'clip': + self.text_encoder = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14-384', + return_transform=False) + self.clip_preprocess = Normalize(mean=[0.48145466, 0.4578275, 0.40821073], + std=[0.26862954, 0.26130258, 0.27577711]) + self.text_encoder = patch_clip(self.text_encoder) + + self.tokenizer = open_clip.get_tokenizer('ViT-H-14-378-quickgelu') # same as 'ViT-H-14' + elif encoder_name == 't5': + logging.info('FeatureUtils: Loading google/flan-t5-large ... ') # root logger + self.tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-large') + self.text_encoder = T5EncoderModel.from_pretrained('google/flan-t5-large').eval() + + elif encoder_name == 't5_clap' or encoder_name == 't5_clap_cat': + self.tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-large') + self.text_encoder = T5EncoderModel.from_pretrained('google/flan-t5-large').eval() + self.laion_clap_model = laion_clap.CLAP_Module(enable_fusion=False, amodel='HTSAT-base').eval() + self._clap_ckpt_path = "./weights/music_speech_audioset_epoch_15_esc_89.98.pt" + self.laion_clap_model.load_ckpt(self._clap_ckpt_path, verbose=False) + + else: + raise ValueError(f"Encoder {encoder_name} is not allowed, select from ['clip', 't5']") + + else: + self.text_encoder = None + self.tokenizer = None + + if tod_vae_ckpt is not None: + self.mel_converter = get_mel_converter(mode) + self.tod = AutoEncoderModule(vae_ckpt_path=tod_vae_ckpt, + vocoder_ckpt_path=bigvgan_vocoder_ckpt, + mode=mode, + need_vae_encoder=need_vae_encoder) + else: + self.tod = None + + def compile(self): + if self.text_encoder is not None: + self.text_encoder.encode_text = torch.compile(self.text_encoder.encode_text) # ONLY for CLIP text encoder + self.decode = torch.compile(self.decode) + self.vocode = torch.compile(self.vocode) + + def train(self, mode: bool) -> None: + return super().train(False) + + @torch.inference_mode() + def encode_text(self, text: list[str]) -> torch.Tensor: + assert self.text_encoder is not None, 'Text encoder is not loaded' + assert self.tokenizer is not None, 'Tokenizer is not loaded' + # x: (B, L) + if self.encoder_name == 'clip': + tokens = self.tokenizer(text).to(self.device) + text_features = self.text_encoder.encode_text(tokens, normalize=True) + elif self.encoder_name == 't5': + tokens = self.tokenizer( + text, + max_length=77, + padding="max_length", + truncation=True, + return_tensors="pt" + ) + input_ids, attention_mask = tokens.input_ids.cuda(), tokens.attention_mask.cuda() + text_features = self.text_encoder( + input_ids=input_ids, + attention_mask=attention_mask + )[0] + text_features_c = text_features.mean(dim=1) + elif self.encoder_name == 't5_clap' or self.encoder_name == 't5_clap_cat': + tokens = self.tokenizer( + text, + max_length=77, + padding="max_length", + truncation=True, + return_tensors="pt" + ) + input_ids, attention_mask = tokens.input_ids.cuda(), tokens.attention_mask.cuda() + 
text_features = self.text_encoder( + input_ids=input_ids, + attention_mask=attention_mask + )[0] + text_features_c = self.laion_clap_model.get_text_embedding(text, use_tensor=True) + + if self.encoder_name == 't5_clap_cat': + text_features_c = torch.cat([text_features.mean(dim=-2), text_features_c], dim=-1) + return text_features, text_features_c + + @torch.inference_mode() + def encode_audio(self, x) -> DiagonalGaussianDistribution: + assert self.tod is not None, 'VAE is not loaded' + # x: (B * L) + mel = self.mel_converter(x) + dist = self.tod.encode(mel) + + return dist + + @torch.inference_mode() + def vocode(self, mel: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.vocode(mel) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.decode(z.transpose(1, 2)) + + @property + def device(self): + return next(self.parameters()).device + + @property + def dtype(self): + return next(self.parameters()).dtype + + +if __name__ == '__main__': + # features = FeaturesUtilsAT( + # tod_vae_ckpt='./ext_weights/v1-16.pth', + # bigvgan_vocoder_ckpt='./ext_weights/best_netG.pt', + # mode='16k', + # encoder_name='t5' + # ) + # print(features) + + clap_ckpt = "./weights/music_speech_audioset_epoch_15_esc_89.98.pt" + weights = torch.load(clap_ckpt, weights_only=False) + print(weights.keys()) diff --git a/meanaudio/model/utils/parameter_groups.py b/meanaudio/model/utils/parameter_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..89c3993083f470dfc6b18a5c90f908ea37bde12b --- /dev/null +++ b/meanaudio/model/utils/parameter_groups.py @@ -0,0 +1,72 @@ +import logging + +log = logging.getLogger() + + +def get_parameter_groups(model, cfg, print_log=False): + """ + Assign different weight decays and learning rates to different parameters. + Returns a parameter group which can be passed to the optimizer. 
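A usage sketch for the text path above (it assumes the T5/CLAP weights referenced in this file can be downloaded or already sit under ./weights, plus a CUDA device, since encode_text moves the tokens to cuda): with encoder_name='t5_clap' the method returns per-token T5 features and a pooled CLAP embedding, matching the text_dim=1024 / text_c_dim=512 used by the network factories elsewhere in this diff.

import torch
from meanaudio.model.utils.features_utils import FeaturesUtils

features = FeaturesUtils(
    tod_vae_ckpt=None,          # text-only here, so the VAE/vocoder are skipped
    enable_conditions=True,
    encoder_name='t5_clap',
    mode='16k',
).cuda().eval()

with torch.inference_mode():
    text_f, text_f_c = features.encode_text(['a dog barking in the rain'])
print(text_f.shape, text_f_c.shape)  # torch.Size([1, 77, 1024]) torch.Size([1, 512])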
+ """ + weight_decay = cfg.weight_decay + # embed_weight_decay = cfg.embed_weight_decay + # backbone_lr_ratio = cfg.backbone_lr_ratio + base_lr = cfg.learning_rate + + backbone_params = [] + embed_params = [] + other_params = [] + + # embedding_names = ['summary_pos', 'query_init', 'query_emb', 'obj_pe'] + # embedding_names = [e + '.weight' for e in embedding_names] + + # inspired by detectron2 + memo = set() + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + # Avoid duplicating parameters + if param in memo: + continue + memo.add(param) + + if name.startswith('module'): + name = name[7:] + + inserted = False + # if name.startswith('pixel_encoder.'): + # backbone_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as a backbone parameter.') + # else: + # for e in embedding_names: + # if name.endswith(e): + # embed_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as an embedding parameter.') + # break + + # if not inserted: + other_params.append(param) + + parameter_groups = [ + # { + # 'params': backbone_params, + # 'lr': base_lr * backbone_lr_ratio, + # 'weight_decay': weight_decay + # }, + # { + # 'params': embed_params, + # 'lr': base_lr, + # 'weight_decay': embed_weight_decay + # }, + { + 'params': other_params, + 'lr': base_lr, + 'weight_decay': weight_decay + }, + ] + + return parameter_groups diff --git a/meanaudio/model/utils/sample_utils.py b/meanaudio/model/utils/sample_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e05b94645371df250e7ed9c103dff0e20774fbd6 --- /dev/null +++ b/meanaudio/model/utils/sample_utils.py @@ -0,0 +1,49 @@ +from typing import Optional + +import torch + + +def log_normal_sample(x: torch.Tensor, + generator: Optional[torch.Generator] = None, + m: float = 0.0, + s: float = 1.0) -> torch.Tensor: + bs = x.shape[0] + s = torch.randn(bs, device=x.device, generator=generator) * s + m + return torch.sigmoid(s) +import torch +from typing import Optional, Tuple + +def log_normal_sample_r_t( + x: torch.Tensor, + generator: Optional[torch.Generator] = None, + m: float = 0.0, + s: float = 1.0, + epsilon: float = 1.0 # 控制第二个张量的最小增量 +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + 生成两个张量,确保第二个张量的每个元素都大于第一个张量。 + + 参数: + x (torch.Tensor): 输入张量(用于确定 batch_size 和设备) + generator (torch.Generator, optional): 随机数生成器 + m (float): 正态分布的均值(默认为 0) + s (float): 正态分布的标准差(默认为 1) + epsilon (float): 控制第二个张量的最小增量(默认为 1) + + 返回: + Tuple[torch.Tensor, torch.Tensor]: 两个经过 sigmoid 处理的张量,第二个的每个元素均大于第一个 + """ + bs = x.shape[0] + device = x.device + + # 生成第一个张量的原始值 + s1 = torch.randn(bs, device=device, generator=generator) * s + m + + # 生成第二个张量,确保每个元素比第一个大: + # 使用绝对值正态分布作为增量,保证非负性 + increment = torch.abs(torch.randn(bs, device=device, generator=generator)) * epsilon + s2 = s1 + increment + + # 应用 sigmoid 并返回 + #第二个比第一个大 + return torch.sigmoid(s1), torch.sigmoid(s2) \ No newline at end of file diff --git a/meanaudio/runner_flowmatching.py b/meanaudio/runner_flowmatching.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca51b98f640ed3f9f422dc3d061a7b78398fadc --- /dev/null +++ b/meanaudio/runner_flowmatching.py @@ -0,0 +1,608 @@ +""" +trainer.py - wrapper and utility functions for network training +Compute loss, back-prop, update parameters, logging, etc. 
+""" +import os +from pathlib import Path +from typing import Optional, Union + +import torch +import torch.distributed +import torch.optim as optim +from av_bench.evaluate import evaluate +from av_bench.extract import extract +from nitrous_ema import PostHocEMA +from omegaconf import DictConfig +from torch.nn.parallel import DistributedDataParallel as DDP + +from meanaudio.model.flow_matching import FlowMatching +from meanaudio.model.networks import get_mean_audio +from meanaudio.model.sequence_config import CONFIG_16K, CONFIG_44K +from meanaudio.model.utils.features_utils import FeaturesUtils +from meanaudio.model.utils.parameter_groups import get_parameter_groups +from meanaudio.model.utils.sample_utils import log_normal_sample +from meanaudio.utils.dist_utils import (info_if_rank_zero, local_rank, string_if_rank_zero) +from meanaudio.utils.log_integrator import Integrator +from meanaudio.utils.logger import TensorboardLogger +from meanaudio.utils.time_estimator import PartialTimeEstimator, TimeEstimator +import wandb + + +class RunnerFlowMatching: + + def __init__(self, + cfg: DictConfig, + log: TensorboardLogger, + run_path: Union[str, Path], + for_training: bool = True, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None): + self.exp_id = cfg.exp_id + self.use_amp = cfg.amp + self.enable_grad_scaler = cfg.enable_grad_scaler + self.for_training = for_training + self.cfg = cfg + self.use_wandb = cfg.get("use_wandb", False) + + if self.use_wandb and local_rank == 0: + wandb.init( + project = "MeanAudio", + name = cfg.exp_id, + # config = cfg + ) + + # sequence config + self.seq_cfg = CONFIG_16K # for 10s audio + mode = '16k' + + self.sample_rate = self.seq_cfg.sampling_rate + self.duration_sec = self.seq_cfg.duration + + # model: TODO - move these into networks.py + if cfg['text_encoder_name'] == 'clip': + empty_string_feat = torch.load('./weights/empty_string.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string.pth for CLIP ...') + elif cfg['text_encoder_name'] == 't5': + empty_string_feat = torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] + empty_string_feat_c = torch.load('./weights/empty_string_t5_c.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_t5_c.pth for T5') + elif cfg['text_encoder_name'] == 't5_clap': + empty_string_feat = torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] # abandon the first (btz) dim. + empty_string_feat_c = torch.load('./weights/empty_string_clap_c.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_clap_c.pth for T5 and CLAP') + elif cfg['text_encoder_name'] == 't5_clap_cat': + empty_string_feat = torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] # abandon the first (btz) dim. + empty_string_feat_c = torch.load('./weights/empty_string_clap_c.pth', weights_only=True)[0] + empty_string_feat_c = torch.cat([empty_string_feat.mean(dim=-2), empty_string_feat_c], dim=-1) + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_clap_c.pth for T5 and CLAP, concating condition features ... 
') + else: + raise NotImplementedError(f'Encoder {cfg["text_encoder_name"]} not implemented') + + self.network = DDP(get_mean_audio(cfg.model, # get the model based on base_config.yaml + latent_mean=latent_mean, # mean and std calculated from the dataset + latent_std=latent_std, + empty_string_feat=empty_string_feat, + empty_string_feat_c=empty_string_feat_c, + use_rope=cfg.use_rope, + text_c_dim=cfg.data_dim.text_c_dim).cuda(), + device_ids=[local_rank], + broadcast_buffers=False) + + self.fm = FlowMatching(cfg.sampling.min_sigma, + inference_mode=cfg.sampling.method, + num_steps=cfg.sampling.num_steps) + + # ema profile + if for_training and cfg.ema.enable and local_rank == 0: + self.ema = PostHocEMA(self.network.module, + sigma_rels=cfg.ema.sigma_rels, + update_every=cfg.ema.update_every, + checkpoint_every_num_steps=cfg.ema.checkpoint_every, + checkpoint_folder=cfg.ema.checkpoint_folder, + step_size_correction=True).cuda() + self.ema_start = cfg.ema.start + else: + self.ema = None + + self.rng = torch.Generator(device='cuda') + self.rng.manual_seed(cfg['seed'] + local_rank) + + # setting up feature extractors and VAEs + text_encoder_name = cfg['text_encoder_name'] + + if mode == '16k': + self.features = FeaturesUtils( + tod_vae_ckpt=cfg['vae_16k_ckpt'], + bigvgan_vocoder_ckpt=cfg['bigvgan_vocoder_ckpt'], + encoder_name=text_encoder_name, + enable_conditions=True, + mode=mode, + need_vae_encoder=False, + ) + elif mode == '44k': + self.features = FeaturesUtils( + tod_vae_ckpt=cfg['vae_44k_ckpt'], + encoder_name=text_encoder_name, + enable_conditions=True, + mode=mode, + need_vae_encoder=False, + ) + self.features = self.features.cuda().eval() + + if cfg.compile: + self.features.compile() + + # hyperparameters + self.log_normal_sampling_mean = cfg.sampling.mean + self.log_normal_sampling_scale = cfg.sampling.scale + self.null_condition_probability = cfg.null_condition_probability + self.cfg_strength = cfg.cfg_strength + log.info(f'Initializing flow matching with cfg_strength: {cfg.cfg_strength}') + + # setting up logging + self.log = log + self.run_path = Path(run_path) + + string_if_rank_zero(self.log, 'model_size', + f'{sum([param.nelement() for param in self.network.parameters()])}') + string_if_rank_zero( + self.log, 'number_of_parameters_that_require_gradient: ', + str( + sum([ + param.nelement() + for param in filter(lambda p: p.requires_grad, self.network.parameters()) + ]))) + info_if_rank_zero(self.log, 'torch version: ' + torch.__version__) + self.train_integrator = Integrator(self.log, distributed=True) + self.val_integrator = Integrator(self.log, distributed=True) + + # setting up optimizer and loss + if for_training: + self.enter_train() + parameter_groups = get_parameter_groups(self.network, cfg, print_log=(local_rank == 0)) + self.optimizer = optim.AdamW(parameter_groups, + lr=cfg['learning_rate'], + weight_decay=cfg['weight_decay'], + betas=[0.9, 0.95], + eps=1e-6 if self.use_amp else 1e-8, + fused=True) + if self.enable_grad_scaler: + self.scaler = torch.amp.GradScaler(init_scale=2048) + self.clip_grad_norm = cfg['clip_grad_norm'] + + # linearly warmup learning rate + linear_warmup_steps = cfg['linear_warmup_steps'] + + def warmup(currrent_step: int): + return (currrent_step + 1) / (linear_warmup_steps + 1) + + warmup_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=warmup) + + # setting up learning rate scheduler + if cfg['lr_schedule'] == 'constant': + next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda _: 1) + elif 
cfg['lr_schedule'] == 'poly': + total_num_iter = cfg['iterations'] + next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, + lr_lambda=lambda x: + (1 - (x / total_num_iter))**0.9) + elif cfg['lr_schedule'] == 'step': + total_num_iter = cfg['num_iterations'] + lr_schedule_steps = [int(0.8 * total_num_iter), int(0.9 * total_num_iter)] + self.log.info(f'Assigning lr steps: {lr_schedule_steps}') + next_scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, + lr_schedule_steps, + cfg['lr_schedule_gamma']) + else: + raise NotImplementedError + + self.scheduler = optim.lr_scheduler.SequentialLR(self.optimizer, + [warmup_scheduler, next_scheduler], + [linear_warmup_steps]) + + # Logging info + self.log_text_interval = cfg['log_text_interval'] + self.log_extra_interval = cfg['log_extra_interval'] + self.save_weights_interval = cfg['save_weights_interval'] + self.save_checkpoint_interval = cfg['save_checkpoint_interval'] + self.save_copy_iterations = cfg['save_copy_iterations'] + self.num_iterations = cfg['num_iterations'] + + # update() is called when we log metrics, within the logger + self.log.batch_timer = TimeEstimator(self.num_iterations, self.log_text_interval) + # update() is called every iteration, in this script + self.log.data_timer = PartialTimeEstimator(self.num_iterations, 1, ema_alpha=0.9) + else: + self.enter_val() + + def train_fn( + self, + text_f: torch.Tensor, + text_f_c: torch.Tensor, + a_mean: torch.Tensor, + a_std: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # sample + a_randn = torch.empty_like(a_mean).normal_(generator=self.rng) + x1 = a_mean + a_std * a_randn + bs = x1.shape[0] # batch_size * seq_len * num_channels + + # normalize the latents + x1 = self.network.module.normalize(x1) + + t = log_normal_sample(x1, + generator=self.rng, + m=self.log_normal_sampling_mean, + s=self.log_normal_sampling_scale) # t: (btz) + x0, x1, xt, [text_f, text_f_c] = self.fm.get_x0_xt_c(x1, + t, + Cs=[text_f, text_f_c], + generator=self.rng) # do nothing to conditions + + # classifier-free training, seperate guidance for features + samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text = (samples < self.null_condition_probability) + text_f[null_text] = self.network.module.empty_string_feat + + # samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text_c = (samples < self.null_condition_probability) # here we do null condition together + text_f_c[null_text_c] = self.network.module.empty_string_feat_c + + pred_v = self.network(xt, text_f, text_f_c, t) + loss = self.fm.loss(pred_v, x0, x1) + mean_loss = loss.mean() + return x1, loss, mean_loss, t + + def val_fn( + self, + text_f: torch.Tensor, + text_f_c: torch.Tensor, + x1: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + bs = x1.shape[0] # batch_size * seq_len * num_channels + # normalize the latents + x1 = self.network.module.normalize(x1) + t = log_normal_sample(x1, + generator=self.rng, + m=self.log_normal_sampling_mean, + s=self.log_normal_sampling_scale) + x0, x1, xt, [text_f, text_f_c] = self.fm.get_x0_xt_c(x1, + t, + Cs=[text_f, text_f_c], + generator=self.rng) + + # classifier-free training + samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text = (samples < self.null_condition_probability) + text_f[null_text] = self.network.module.empty_string_feat + + # samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text_c = (samples < self.null_condition_probability) + 
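The learning-rate schedule assembled above chains a linear warm-up into one of the decay schedules with SequentialLR; a stripped-down sketch of the same wiring (toy parameter, step counts, and base LR, not the config values):

import torch
from torch import optim

params = [torch.nn.Parameter(torch.zeros(1))]
opt = optim.AdamW(params, lr=1e-4)

linear_warmup_steps = 5
warmup = optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda s: (s + 1) / (linear_warmup_steps + 1))
constant = optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda _: 1.0)
sched = optim.lr_scheduler.SequentialLR(opt, [warmup, constant],
                                        milestones=[linear_warmup_steps])

for step in range(8):
    opt.step()
    sched.step()
    print(step, sched.get_last_lr()[0])  # linear ramp during warm-up, then the base LR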
text_f_c[null_text_c] = self.network.module.empty_string_feat_c + + pred_v = self.network(xt, text_f, text_f_c, t) + + loss = self.fm.loss(pred_v, x0, x1) + mean_loss = loss.mean() + return loss, mean_loss, t + + def train_pass(self, data, it: int = 0): + + if not self.for_training: + raise ValueError('train_pass() should not be called when not training.') + + self.enter_train() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) + a_std = data['a_std'].cuda(non_blocking=True) + + self.log.data_timer.end() + if it % self.log_extra_interval == 0: + unmasked_text_f = text_f.clone() + unmasked_text_f_c = text_f_c.clone() + x1, loss, mean_loss, t = self.train_fn(text_f, text_f_c, a_mean, a_std) + + self.train_integrator.add_dict({'loss': mean_loss}) + + if it % self.log_text_interval == 0 and it != 0: + lr = self.scheduler.get_last_lr()[0] + self.train_integrator.add_scalar('lr', lr) + self.train_integrator.add_binned_tensor('binned_loss', loss, t) + self.train_integrator.finalize('train', it) + self.train_integrator.reset_except_hooks() + + if self.use_wandb and local_rank == 0: + wandb.log( + { + "lr": lr, + "train/loss": mean_loss.detach().float() + }, + step=it # explicitly x-axis it + ) + + # Backward pass + self.optimizer.zero_grad(set_to_none=True) + if self.enable_grad_scaler: + self.scaler.scale(mean_loss).backward() + self.scaler.unscale_(self.optimizer) + grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(), + self.clip_grad_norm) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + mean_loss.backward() + grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(), + self.clip_grad_norm) + self.optimizer.step() + + if self.ema is not None and it >= self.ema_start: + self.ema.update() + self.scheduler.step() + self.integrator.add_scalar('grad_norm', grad_norm) + + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, + dtype=torch.bfloat16), torch.inference_mode(): + try: + if it % self.log_extra_interval == 0: + # save GT audio + # unnormalize the latents + x1 = self.network.module.unnormalize(x1[0:1]) + mel = self.features.decode(x1) + audio = self.features.vocode(mel).cpu()[0] # 1 * num_samples + self.log.log_spectrogram('train', f'spec-gt-r{local_rank}', mel.cpu()[0], it) + self.log.log_audio('train', + f'audio-gt-r{local_rank}', + audio, + it, + sample_rate=self.sample_rate) + + # save audio from sampling + x0 = torch.empty_like(x1[0:1]).normal_(generator=self.rng) + text_f = unmasked_text_f[0:1] + text_f_c = unmasked_text_f_c[0:1] # the first element with same sequence + conditions = self.network.module.preprocess_conditions(text_f, text_f_c) + empty_conditions = self.network.module.get_empty_conditions(x0.shape[0]) + cfg_ode_wrapper = lambda t, x: self.network.module.ode_wrapper( + t, x, conditions, empty_conditions, self.cfg_strength) + x1_hat = self.fm.to_data(cfg_ode_wrapper, x0) + x1_hat = self.network.module.unnormalize(x1_hat) + mel = self.features.decode(x1_hat) + audio = self.features.vocode(mel).cpu()[0] + self.log.log_spectrogram('train', f'spec-r{local_rank}', mel.cpu()[0], it) + self.log.log_audio('train', + f'audio-r{local_rank}', + audio, + it, + sample_rate=self.sample_rate) + except Exception as e: + self.log.warning(f'Error in extra logging: {e}') + if self.cfg.debug: + raise + + # Save network weights and 
checkpoint if needed + save_copy = it in self.save_copy_iterations + + if (it % self.save_weights_interval == 0 and it != 0) or save_copy: + self.save_weights(it) + + if it % self.save_checkpoint_interval == 0 and it != 0: + self.save_checkpoint(it, save_copy=save_copy) + + self.log.data_timer.start() + + @torch.inference_mode() + def validation_pass(self, data, it: int = 0): + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) + a_std = data['a_std'].cuda(non_blocking=True) + + a_randn = torch.empty_like(a_mean).normal_(generator=self.rng) + x1 = a_mean + a_std * a_randn # differs from train_pass in that validation_pass passes x1 into val_fn + + self.log.data_timer.end() + loss, mean_loss, t = self.val_fn(text_f.clone(), text_f_c.clone(), x1) + + self.val_integrator.add_binned_tensor('binned_loss', loss, t) + self.val_integrator.add_dict({'loss': mean_loss}) + + self.log.data_timer.start() + return mean_loss.detach().float() + + @torch.inference_mode() + def inference_pass(self, + data, # batch data + it: int, + data_cfg: DictConfig, + *, + save_eval: bool = True) -> Path: + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) # for the shape only + + # sample + x0 = torch.empty_like(a_mean).normal_(generator=self.rng) + conditions = self.network.module.preprocess_conditions(text_f, text_f_c) + empty_conditions = self.network.module.get_empty_conditions(x0.shape[0]) + cfg_ode_wrapper = lambda t, x: self.network.module.ode_wrapper( + t, x, conditions, empty_conditions, self.cfg_strength) + x1_hat = self.fm.to_data(cfg_ode_wrapper, x0) + x1_hat = self.network.module.unnormalize(x1_hat) + mel = self.features.decode(x1_hat) + audio = self.features.vocode(mel).cpu() # (btz, n_samples) + for i in range(audio.shape[0]): + audio_id = data['id'][i] + + if data_cfg.output_subdir is not None: + # validation + if save_eval: + iter_naming = f'{it:09d}' + else: + iter_naming = 'val-cache' + audio_dir = self.log.log_audio(iter_naming, # write audios + f'{audio_id}', + audio[i], + it=None, + sample_rate=self.sample_rate, + subdir=Path(data_cfg.output_subdir)) + else: + # full test set, usually + audio_dir = self.log.log_audio(f'{data_cfg.tag}-sampled', + f'{audio_id}', + audio[i], + it=None, + sample_rate=self.sample_rate) + + return Path(audio_dir) + + @torch.inference_mode() + def eval(self, audio_dir: Path, it: int, data_cfg: DictConfig) -> dict[str, float]: + with torch.amp.autocast('cuda', enabled=False): + if local_rank == 0: + extract(audio_path=audio_dir, + output_path=audio_dir / 'cache', + device='cuda', + batch_size=16, # btz=16: avoid OOM + num_workers=4, + skip_video_related=True, # avoid extracting video related features + audio_length=10) + output_metrics = evaluate(gt_audio_cache=Path(data_cfg.gt_cache), + skip_video_related=True, + pred_audio_cache=audio_dir / 'cache') + for k, v in output_metrics.items(): + # pad k to 10 characters + # pad v to 10 decimal places + self.log.log_scalar(f'{data_cfg.tag}/{k}', v, it) + self.log.info(f'{data_cfg.tag}/{k:<10}: {v:.10f}') + if k in ["FD-VGG", "FD-PASST", "FD-PANN", "MS-CLAP-Score", + "LAION-CLAP-Score", "ISC-PANNS-mean",
"KL-PANNS-softmax"]: + if self.use_wandb and local_rank == 0: + wandb.log({f'{data_cfg.tag}/{k}': v}, step=it) + + else: + output_metrics = None + + return output_metrics + + def save_weights(self, it, save_copy=False): # only save net's weights + if local_rank != 0: + return + + os.makedirs(self.run_path, exist_ok=True) + if save_copy: + model_path = self.run_path / f'{self.exp_id}_{it}.pth' + torch.save(self.network.module.state_dict(), model_path) + self.log.info(f'Network weights saved to {model_path}.') + + # if last exists, move it to a shadow copy + model_path = self.run_path / f'{self.exp_id}_last.pth' + if model_path.exists(): + shadow_path = model_path.with_name(model_path.name.replace('last', 'shadow')) + model_path.replace(shadow_path) + self.log.info(f'Network weights shadowed to {shadow_path}.') + + torch.save(self.network.module.state_dict(), model_path) + self.log.info(f'Network weights saved to {model_path}.') + + def save_checkpoint(self, it, save_copy=False): # save it, optim, net together + if local_rank != 0: + return + + checkpoint = { + 'it': it, + 'weights': self.network.module.state_dict(), + 'optimizer': self.optimizer.state_dict(), + 'scheduler': self.scheduler.state_dict(), + 'ema': self.ema.state_dict() if self.ema is not None else None, + } + + os.makedirs(self.run_path, exist_ok=True) + if save_copy: + model_path = self.run_path / f'{self.exp_id}_ckpt_{it}.pth' + torch.save(checkpoint, model_path) + self.log.info(f'Checkpoint saved to {model_path}.') + + # if ckpt_last exists, move it to a shadow copy + model_path = self.run_path / f'{self.exp_id}_ckpt_last.pth' + if model_path.exists(): + shadow_path = model_path.with_name(model_path.name.replace('last', 'shadow')) + model_path.replace(shadow_path) # moves the file + self.log.info(f'Checkpoint shadowed to {shadow_path}.') + + torch.save(checkpoint, model_path) + self.log.info(f'Checkpoint saved to {model_path}.') + + def get_latest_checkpoint_path(self): + ckpt_path = self.run_path / f'{self.exp_id}_ckpt_last.pth' + if not ckpt_path.exists(): + info_if_rank_zero(self.log, f'No checkpoint found at {ckpt_path}.') + return None + return ckpt_path + + def get_latest_weight_path(self): + weight_path = self.run_path / f'{self.exp_id}_last.pth' + if not weight_path.exists(): + self.log.info(f'No weight found at {weight_path}.') + return None + return weight_path + + def get_final_ema_weight_path(self): # for sample (final testing) + weight_path = self.run_path / f'{self.exp_id}_ema_final.pth' + if not weight_path.exists(): + self.log.info(f'No weight found at {weight_path}.') + return None + return weight_path + + def load_checkpoint(self, path): + # This method loads everything and should be used to resume training + map_location = 'cuda:%d' % local_rank + checkpoint = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True) + + it = checkpoint['it'] + weights = checkpoint['weights'] + optimizer = checkpoint['optimizer'] + scheduler = checkpoint['scheduler'] + if self.ema is not None: + self.ema.load_state_dict(checkpoint['ema']) + self.log.info(f'EMA states loaded from step {self.ema.step}') + + map_location = 'cuda:%d' % local_rank + self.network.module.load_state_dict(weights) # directly load weights to model + self.optimizer.load_state_dict(optimizer) + self.scheduler.load_state_dict(scheduler) + + self.log.info(f'Global iteration {it} loaded.') + self.log.info('Network weights, optimizer states, and scheduler states loaded.') + + return it + + def load_weights_in_memory(self, src_dict): + 
self.network.module.load_weights(src_dict) + self.log.info('Network weights loaded from memory.') + + def load_weights(self, path): + # This method loads only the network weight and should be used to load a pretrained model + map_location = 'cuda:%d' % local_rank + src_dict = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True) + + self.log.info(f'Importing network weights from {path}...') + self.load_weights_in_memory(src_dict) + + def weights(self): + return self.network.module.state_dict() + + def enter_train(self): + self.integrator = self.train_integrator + self.network.train() + return self + + def enter_val(self): + self.network.eval() + return self \ No newline at end of file diff --git a/meanaudio/runner_meanflow.py b/meanaudio/runner_meanflow.py new file mode 100644 index 0000000000000000000000000000000000000000..916407f0a19f886d45c43da6b4ed10e59f4cc4f0 --- /dev/null +++ b/meanaudio/runner_meanflow.py @@ -0,0 +1,656 @@ +import os + +import torch +import torch.distributed +from pathlib import Path +from typing import Optional, Union + +import torch +import torch.distributed +import torch.optim as optim +from av_bench.evaluate import evaluate +from av_bench.extract import extract +from nitrous_ema import PostHocEMA +from omegaconf import DictConfig +from torch.nn.parallel import DistributedDataParallel as DDP + +from meanaudio.model.flow_matching import FlowMatching +from meanaudio.model.mean_flow import MeanFlow +from meanaudio.model.networks import get_mean_audio +from meanaudio.model.sequence_config import CONFIG_16K, CONFIG_44K +from meanaudio.model.utils.features_utils import FeaturesUtils +from meanaudio.model.utils.parameter_groups import get_parameter_groups +from meanaudio.model.utils.sample_utils import log_normal_sample,log_normal_sample_r_t +from meanaudio.utils.dist_utils import (info_if_rank_zero, local_rank, string_if_rank_zero) +from meanaudio.utils.log_integrator import Integrator +from meanaudio.utils.logger import TensorboardLogger +from meanaudio.utils.time_estimator import PartialTimeEstimator, TimeEstimator +import wandb + +class RunnerMeanFlow: + + def __init__(self, + cfg: DictConfig, + log: TensorboardLogger, + run_path: Union[str, Path], + for_training: bool = True, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None): + self.exp_id = cfg.exp_id + self.use_amp = cfg.amp + self.enable_grad_scaler = cfg.enable_grad_scaler + self.for_training = for_training + self.cfg = cfg + self.use_wandb = cfg.get("use_wandb", False) + + if self.use_wandb and local_rank == 0: + wandb.init( + project = "MeanAudio", + name = cfg.exp_id, + settings=wandb.Settings(init_timeout=120), + # config = cfg + ) + + # sequence config + self.seq_cfg = CONFIG_16K + mode = '16k' + + self.sample_rate = self.seq_cfg.sampling_rate + self.duration_sec = self.seq_cfg.duration + + # model + if cfg['text_encoder_name'] == 'clip': + empty_string_feat = torch.load('./weights/empty_string.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string.pth for CLIP ...') + elif cfg['text_encoder_name'] == 't5': + empty_string_feat = torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] + empty_string_feat_c = torch.load('./weights/empty_string_t5_c.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_t5_c.pth for T5') + elif cfg['text_encoder_name'] == 't5_clap': + empty_string_feat = 
torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] # drop the first (batch) dim. + empty_string_feat_c = torch.load('./weights/empty_string_clap_c.pth', weights_only=True)[0] + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_clap_c.pth for T5 and CLAP') + elif cfg['text_encoder_name'] == 't5_clap_cat': + empty_string_feat = torch.load('./weights/empty_string_t5.pth', weights_only=True)[0] # drop the first (batch) dim. + empty_string_feat_c = torch.load('./weights/empty_string_clap_c.pth', weights_only=True)[0] + empty_string_feat_c = torch.cat([empty_string_feat.mean(dim=-2), empty_string_feat_c], dim=-1) + log.info('Loading empty string feature from ./weights/empty_string_t5.pth and ./weights/empty_string_clap_c.pth for T5 and CLAP, concatenating condition features ... ') + else: + raise NotImplementedError(f'Encoder {cfg["text_encoder_name"]} not implemented') + self.network = DDP(get_mean_audio(cfg.model, # get the model based on base_config.yaml + latent_mean=latent_mean, # mean and std calculated from the dataset + latent_std=latent_std, + empty_string_feat=empty_string_feat, + empty_string_feat_c=empty_string_feat_c, + use_rope=cfg.use_rope, + text_c_dim=cfg.data_dim.text_c_dim).cuda(), + device_ids=[local_rank], + broadcast_buffers=False, + find_unused_parameters=True) + if cfg.compile: + self.train_fn = torch.compile(self.train_fn) + self.val_fn = torch.compile(self.val_fn) + + self.mf = MeanFlow() + + # ema profile + if for_training and cfg.ema.enable and local_rank == 0: + self.ema = PostHocEMA(self.network.module, + sigma_rels=cfg.ema.sigma_rels, + update_every=cfg.ema.update_every, + checkpoint_every_num_steps=cfg.ema.checkpoint_every, + checkpoint_folder=cfg.ema.checkpoint_folder, + step_size_correction=True).cuda() + self.ema_start = cfg.ema.start + else: + self.ema = None + + self.rng = torch.Generator(device='cuda') + self.rng.manual_seed(cfg['seed'] + local_rank) + + # setting up feature extractors and VAEs + text_encoder_name = cfg['text_encoder_name'] + if mode == '16k': + self.features = FeaturesUtils( + tod_vae_ckpt=cfg['vae_16k_ckpt'], + bigvgan_vocoder_ckpt=cfg['bigvgan_vocoder_ckpt'], + encoder_name=text_encoder_name, + enable_conditions=True, + mode=mode, + need_vae_encoder=False, + ) + elif mode == '44k': + self.features = FeaturesUtils( + tod_vae_ckpt=cfg['vae_44k_ckpt'], + encoder_name=text_encoder_name, + enable_conditions=True, + mode=mode, + need_vae_encoder=False, + ) + self.features = self.features.cuda().eval() + + if cfg.compile: + self.features.compile() + + # TODO: make these parameters compatible with MeanFlow + self.log_normal_sampling_mean = cfg.sampling.mean + self.log_normal_sampling_scale = cfg.sampling.scale + self.null_condition_probability = cfg.null_condition_probability + self.cfg_strength = cfg.cfg_strength + + # setting up logging + self.log = log + self.run_path = Path(run_path) + string_if_rank_zero(self.log, 'model_size', + f'{sum([param.nelement() for param in self.network.parameters()])}') + string_if_rank_zero( + self.log, 'number_of_parameters_that_require_gradient: ', + str( + sum([ + param.nelement() + for param in filter(lambda p: p.requires_grad, self.network.parameters()) + ]))) + info_if_rank_zero(self.log, 'torch version: ' + torch.__version__) + self.train_integrator = Integrator(self.log, distributed=True) + self.val_integrator = Integrator(self.log, distributed=True) + + # setting up optimizer and loss + if for_training: + self.enter_train() +
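# optimizer, grad scaler and lr schedule setup below (same recipe as the flow-matching runner)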
parameter_groups = get_parameter_groups(self.network, cfg, print_log=(local_rank == 0)) + self.optimizer = optim.AdamW(parameter_groups, + lr=cfg['learning_rate'], + weight_decay=cfg['weight_decay'], + betas=[0.9, 0.95], + eps=1e-6 if self.use_amp else 1e-8, + fused=True) + if self.enable_grad_scaler: + self.scaler = torch.amp.GradScaler(init_scale=2048) + self.clip_grad_norm = cfg['clip_grad_norm'] + + # linearly warmup learning rate + linear_warmup_steps = cfg['linear_warmup_steps'] + + def warmup(current_step: int): + return (current_step + 1) / (linear_warmup_steps + 1) + + warmup_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=warmup) + + # setting up learning rate scheduler + if cfg['lr_schedule'] == 'constant': + next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda _: 1) + elif cfg['lr_schedule'] == 'poly': + total_num_iter = cfg['iterations'] + next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, + lr_lambda=lambda x: + (1 - (x / total_num_iter))**0.9) + elif cfg['lr_schedule'] == 'step': + total_num_iter = cfg['num_iterations'] + lr_schedule_steps = [int(0.8 * total_num_iter), int(0.9 * total_num_iter)] + log.info(f'Assigning lr steps: {lr_schedule_steps}') + next_scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, + lr_schedule_steps, + cfg['lr_schedule_gamma']) + else: + raise NotImplementedError + + self.scheduler = optim.lr_scheduler.SequentialLR(self.optimizer, + [warmup_scheduler, next_scheduler], + [linear_warmup_steps]) + + # Logging info + self.log_text_interval = cfg['log_text_interval'] + self.log_extra_interval = cfg['log_extra_interval'] + self.save_weights_interval = cfg['save_weights_interval'] + self.save_checkpoint_interval = cfg['save_checkpoint_interval'] + self.save_copy_iterations = cfg['save_copy_iterations'] + self.num_iterations = cfg['num_iterations'] + + # update() is called when we log metrics, within the logger + self.log.batch_timer = TimeEstimator(self.num_iterations, self.log_text_interval) + # update() is called every iteration, in this script + self.log.data_timer = PartialTimeEstimator(self.num_iterations, 1, ema_alpha=0.9) + else: + self.enter_val() + + def train_fn( + self, + text_f: torch.Tensor, + text_f_c: torch.Tensor, + a_mean: torch.Tensor, + a_std: torch.Tensor, + # it: torch.Tensor + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # sample + a_randn = torch.empty_like(a_mean).normal_(generator=self.rng) + x1 = a_mean + a_std * a_randn + bs = x1.shape[0] # batch_size * seq_len * num_channels + + x1 = self.network.module.normalize(x1) + + text_f_undrop = text_f + text_f_c_undrop = text_f_c + samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text = (samples < self.null_condition_probability) + text_f[null_text] = self.network.module.empty_string_feat + + null_text_c = (samples < self.null_condition_probability) # here we null both conditions together (same mask) + text_f_c[null_text_c] = self.network.module.empty_string_feat_c + loss, r, t = self.mf.loss(self.network, + x1, + text_f, + text_f_c, + text_f_undrop, + text_f_c_undrop, + self.network.module.empty_string_feat, + self.network.module.empty_string_feat_c) + mean_loss = loss.mean() + return x1, loss, mean_loss, t, r + + def val_fn( + self, + text_f: torch.Tensor, + text_f_c: torch.Tensor, + x1: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + bs = x1.shape[0] # batch_size * seq_len * num_channels + # normalize the latents + x1 = self.network.module.normalize(x1)
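+ # classifier-free training: keep undropped copies of the text features, since the MeanFlow loss consumes both the dropped and undropped conditions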
+ + text_f_undrop = text_f + text_f_c_undrop = text_f_c + samples = torch.rand(bs, device=x1.device, generator=self.rng) + null_text = (samples < self.null_condition_probability) + text_f[null_text] = self.network.module.empty_string_feat + + null_text_c = (samples < self.null_condition_probability) + text_f_c[null_text_c] = self.network.module.empty_string_feat_c + + loss, r, t = self.mf.loss(self.network, + x1, + text_f, + text_f_c, + text_f_undrop, + text_f_c_undrop, + self.network.module.empty_string_feat, + self.network.module.empty_string_feat_c) + mean_loss = loss.mean() + return loss, mean_loss, t, r + + def train_pass(self, data, it: int = 0): + + if not self.for_training: + raise ValueError('train_pass() should not be called when not training.') + + self.enter_train() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) + a_std = data['a_std'].cuda(non_blocking=True) + + self.log.data_timer.end() + if it % self.log_extra_interval == 0: + unmasked_text_f = text_f.clone() + unmasked_text_f_c = text_f_c.clone() + #with torch.amp.autocast('cuda', enabled=False): + x1, loss, mean_loss, t,r = self.train_fn(text_f, text_f_c, a_mean, a_std) + + self.train_integrator.add_dict({'loss': mean_loss}) + + if it % self.log_text_interval == 0 and it != 0: + lr = self.scheduler.get_last_lr()[0] + self.train_integrator.add_scalar('lr', lr) + self.train_integrator.add_binned_tensor('binned_loss', loss, t) + self.train_integrator.finalize('train', it) + self.train_integrator.reset_except_hooks() + + if self.use_wandb and local_rank == 0: + wandb.log( + { + "lr": lr, + "train/loss": mean_loss.detach().float() + }, + step=it # explicitly x-axis it + ) + + # Backward pass + self.optimizer.zero_grad(set_to_none=True) + if self.enable_grad_scaler: + self.scaler.scale(mean_loss).backward() + self.scaler.unscale_(self.optimizer) + grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(), + self.clip_grad_norm) + self.scaler.step(self.optimizer) + self.scaler.update() + else: + mean_loss.backward() + grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(), + self.clip_grad_norm) + self.optimizer.step() + + if self.ema is not None and it >= self.ema_start: + self.ema.update() + self.scheduler.step() + self.integrator.add_scalar('grad_norm', grad_norm) + + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, + dtype=torch.bfloat16), torch.inference_mode(): + try: + if it % self.log_extra_interval == 0: + # save GT audio + # unnormalize the latents + x1 = self.network.module.unnormalize(x1[0:1]) + mel = self.features.decode(x1) + audio = self.features.vocode(mel).cpu()[0] # 1 * num_samples + self.log.log_spectrogram('train', f'spec-gt-r{local_rank}', mel.cpu()[0], it) + self.log.log_audio('train', + f'audio-gt-r{local_rank}', + audio, + it, + sample_rate=self.sample_rate) + + # save audio from sampling + x0 = torch.empty_like(x1[0:1]).normal_(generator=self.rng) + text_f = unmasked_text_f[0:1] + text_f_c = unmasked_text_f_c[0:1] # the first element with same sequence + conditions = self.network.module.preprocess_conditions(text_f, text_f_c) + empty_conditions = self.network.module.get_empty_conditions(x0.shape[0]) + cfg_ode_wrapper = lambda t,r,x: self.network.module.ode_wrapper( + t,r,x, conditions, empty_conditions, self.cfg_strength) + x1_hat = 
self.mf.to_data(cfg_ode_wrapper, x0) + x1_hat = self.network.module.unnormalize(x1_hat) + mel = self.features.decode(x1_hat) + audio = self.features.vocode(mel).cpu()[0] + self.log.log_spectrogram('train', f'spec-r{local_rank}', mel.cpu()[0], it) + self.log.log_audio('train', + f'audio-r{local_rank}', + audio, + it, + sample_rate=self.sample_rate) + except Exception as e: + self.log.warning(f'Error in extra logging: {e}') + if self.cfg.debug: + raise + + # Save network weights and checkpoint if needed + save_copy = it in self.save_copy_iterations + + if (it % self.save_weights_interval == 0 and it != 0) or save_copy: + self.save_weights(it) + + if it % self.save_checkpoint_interval == 0 and it != 0: + self.save_checkpoint(it, save_copy=save_copy) + + self.log.data_timer.start() + + @torch.inference_mode() + def validation_pass(self, data, it: int = 0): + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) + a_std = data['a_std'].cuda(non_blocking=True) + a_randn = torch.empty_like(a_mean).normal_(generator=self.rng) + x1 = a_mean + a_std * a_randn # differs from train_pass in that validation_pass passes x1 into val_fn + + self.log.data_timer.end() + # with torch.amp.autocast('cuda', enabled=False): + loss, mean_loss, t, r = self.val_fn(text_f.clone(), text_f_c.clone(), x1) + + self.val_integrator.add_binned_tensor('binned_loss', loss, t) + self.val_integrator.add_dict({'loss': mean_loss}) + + self.log.data_timer.start() + return mean_loss.detach().float() + + @torch.inference_mode() + def inference_pass(self, + data, # batch data + it: int, + data_cfg: DictConfig, + *, + save_eval: bool = True) -> Path: + self.enter_val() + with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16): + text_f = data['text_features'].cuda(non_blocking=True) + text_f_c = data['text_features_c'].cuda(non_blocking=True) + a_mean = data['a_mean'].cuda(non_blocking=True) # for the shape only + + # sample + x0 = torch.empty_like(a_mean).normal_(generator=self.rng) + conditions = self.network.module.preprocess_conditions(text_f, text_f_c) + empty_conditions = self.network.module.get_empty_conditions(x0.shape[0]) + cfg_ode_wrapper = lambda t, r, x: self.network.module.ode_wrapper( + t, r, x, conditions, empty_conditions, self.cfg_strength) + x1_hat = self.mf.to_data(cfg_ode_wrapper, x0) + x1_hat = self.network.module.unnormalize(x1_hat) + mel = self.features.decode(x1_hat) + audio = self.features.vocode(mel).cpu() # (btz, n_samples) + for i in range(audio.shape[0]): + audio_id = data['id'][i] + + if data_cfg.output_subdir is not None: + # validation + if save_eval: + iter_naming = f'{it:09d}' + else: + iter_naming = 'val-cache' + audio_dir = self.log.log_audio(iter_naming, # write audios + f'{audio_id}', + audio[i], + it=None, + sample_rate=self.sample_rate, + subdir=Path(data_cfg.output_subdir)) + else: + # full test set, usually + audio_dir = self.log.log_audio(f'{data_cfg.tag}-sampled', + f'{audio_id}', + audio[i], + it=None, + sample_rate=self.sample_rate) + del text_f, text_f_c, a_mean + torch.cuda.empty_cache() + + return Path(audio_dir) + + @torch.inference_mode() + def eval(self, audio_dir: Path, it: int, data_cfg: DictConfig) -> dict[str, float]: + with torch.amp.autocast('cuda', enabled=False): + if local_rank == 0: + extract(audio_path=audio_dir, + output_path=audio_dir / 'cache', +
device='cuda', + batch_size=16, # btz=16: avoid OOM + skip_video_related=True, # avoid extracting video related features + audio_length=10) + output_metrics = evaluate(gt_audio_cache=Path(data_cfg.gt_cache), + skip_video_related=True, + pred_audio_cache=audio_dir / 'cache') + for k, v in output_metrics.items(): + # pad k to 10 characters + # pad v to 10 decimal places + self.log.log_scalar(f'{data_cfg.tag}/{k}', v, it) + self.log.info(f'{data_cfg.tag}/{k:<10}: {v:.10f}') + if k in ["FD-VGG", "FD-PASST", "FD-PANN", "MS-CLAP-Score", + "LAION-CLAP-Score", "ISC-PANNS-mean", "KL-PANNS-softmax"]: + if self.use_wandb and local_rank == 0: + wandb.log({f'{data_cfg.tag}/{k}': v}, step=it) + + else: + output_metrics = None + + return output_metrics + + def save_weights(self, it, save_copy=False): # only save net's weights + if local_rank != 0: + return + + os.makedirs(self.run_path, exist_ok=True) + if save_copy: + model_path = self.run_path / f'{self.exp_id}_{it}.pth' + torch.save(self.network.module.state_dict(), model_path) + self.log.info(f'Network weights saved to {model_path}.') + + # if last exists, move it to a shadow copy + model_path = self.run_path / f'{self.exp_id}_last.pth' + if model_path.exists(): + shadow_path = model_path.with_name(model_path.name.replace('last', 'shadow')) + model_path.replace(shadow_path) + self.log.info(f'Network weights shadowed to {shadow_path}.') + + torch.save(self.network.module.state_dict(), model_path) + self.log.info(f'Network weights saved to {model_path}.') + + def save_checkpoint(self, it, save_copy=False): # save it, optim, net together + if local_rank != 0: + return + + checkpoint = { + 'it': it, + 'weights': self.network.module.state_dict(), + 'optimizer': self.optimizer.state_dict(), + 'scheduler': self.scheduler.state_dict(), + 'ema': self.ema.state_dict() if self.ema is not None else None, + } + + os.makedirs(self.run_path, exist_ok=True) + if save_copy: + model_path = self.run_path / f'{self.exp_id}_ckpt_{it}.pth' + torch.save(checkpoint, model_path) + self.log.info(f'Checkpoint saved to {model_path}.') + + # if ckpt_last exists, move it to a shadow copy + model_path = self.run_path / f'{self.exp_id}_ckpt_last.pth' + if model_path.exists(): + shadow_path = model_path.with_name(model_path.name.replace('last', 'shadow')) + model_path.replace(shadow_path) # moves the file + self.log.info(f'Checkpoint shadowed to {shadow_path}.') + + torch.save(checkpoint, model_path) + self.log.info(f'Checkpoint saved to {model_path}.') + + def get_latest_checkpoint_path(self): + ckpt_path = self.run_path / f'{self.exp_id}_ckpt_last.pth' + if not ckpt_path.exists(): + info_if_rank_zero(self.log, f'No checkpoint found at {ckpt_path}.') + return None + return ckpt_path + + def get_latest_weight_path(self): + weight_path = self.run_path / f'{self.exp_id}_last.pth' + if not weight_path.exists(): + self.log.info(f'No weight found at {weight_path}.') + return None + return weight_path + + def get_final_ema_weight_path(self): # for sample (final testing) + weight_path = self.run_path / f'{self.exp_id}_ema_final.pth' + if not weight_path.exists(): + self.log.info(f'No weight found at {weight_path}.') + return None + return weight_path + + def load_checkpoint(self, path): + # This method loads everything and should be used to resume training + map_location = 'cuda:%d' % local_rank + checkpoint = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True) + + it = checkpoint['it'] + weights = checkpoint['weights'] + optimizer = checkpoint['optimizer'] + 
scheduler = checkpoint['scheduler'] + if self.ema is not None: + self.ema.load_state_dict(checkpoint['ema']) + self.log.info(f'EMA states loaded from step {self.ema.step}') + + map_location = 'cuda:%d' % local_rank + self.network.module.load_state_dict(weights) # directly load weights to model + self.optimizer.load_state_dict(optimizer) + self.scheduler.load_state_dict(scheduler) + + self.log.info(f'Global iteration {it} loaded.') + self.log.info('Network weights, optimizer states, and scheduler states loaded.') + + return it + + # def load_checkpoint(self, path): + # self.log.info(f'Loading checkpoint from {path}') + # # This method loads everything and should be used to resume training + # map_location = 'cuda:%d' % local_rank + # checkpoint = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True) + + # it = 0 + # # it = checkpoint['it'] + # weights = checkpoint['weights'] # this is not ema weights + # #optimizer = checkpoint['optimizer'] + # #scheduler = checkpoint['scheduler'] + # #self.ema=None + # #if self.ema is not None: + # # self.ema.load_state_dict(checkpoint['ema']) + # # self.log.info(f'EMA states loaded from step {self.ema.step}') + + # map_location = 'cuda:%d' % local_rank + # #self.network.module.load_state_dict(weights) # directly load weights to model + # model_weights = weights.copy() + # fallback_mapping = { + # "r_embed.mlp.0.weight":"t_embed.mlp.0.weight", + # "r_embed.mlp.0.bias":"t_embed.mlp.0.bias", + # "r_embed.mlp.2.weight":"t_embed.mlp.2.weight", + # "r_embed.mlp.2.bias": "t_embed.mlp.2.bias" + # } + # for param_name, param in self.network.module.named_parameters(): + # if param_name in weights: + # continue + + # for target_prefix, source_prefix in fallback_mapping.items(): + # if param_name==target_prefix: + + # source_name=source_prefix + # print(f"{param_name} not found. 
Copying from {source_name}") + # model_weights[param_name] = weights[source_name].clone() + + # self.network.module.load_state_dict(model_weights, strict=False) + # self.log.info(f'Global iteration {it} loaded.') + # self.log.info('Network weights, optimizer states, and scheduler states loaded.') + + # return it + + def load_weights_in_memory(self, src_dict): + self.network.module.load_weights(src_dict) + self.log.info('Network weights loaded from memory.') + + def load_weights(self, path): + # This method loads only the network weight and should be used to load a pretrained model + map_location = 'cuda:%d' % local_rank + src_dict = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True) + + fallback_mapping = { + "r_embed.mlp.0.weight": "t_embed.mlp.0.weight", + "r_embed.mlp.0.bias": "t_embed.mlp.0.bias", + "r_embed.mlp.2.weight": "t_embed.mlp.2.weight", + "r_embed.mlp.2.bias": "t_embed.mlp.2.bias" + } + + for target_prefix, source_prefix in fallback_mapping.items(): + if target_prefix not in src_dict.keys(): + self.log.info(f"Copying from {source_prefix} to {target_prefix}") + src_dict[target_prefix] = src_dict[source_prefix].clone() + + self.log.info(f'Importing network weights from {path}...') + self.load_weights_in_memory(src_dict) + + def weights(self): + return self.network.module.state_dict() + + def enter_train(self): + self.integrator = self.train_integrator + self.network.train() + return self + + def enter_val(self): + self.network.eval() + return self + diff --git a/meanaudio/sample.py b/meanaudio/sample.py new file mode 100644 index 0000000000000000000000000000000000000000..bb0d5ec89f93291d5e2039c25c3ef613f69efb64 --- /dev/null +++ b/meanaudio/sample.py @@ -0,0 +1,96 @@ +import json +import logging +import os +import random + +import numpy as np +import torch +from hydra.core.hydra_config import HydraConfig +from omegaconf import DictConfig, open_dict +from tqdm import tqdm + +from meanaudio.data.data_setup import setup_test_datasets +from meanaudio.runner_flowmatching import RunnerFlowMatching +from meanaudio.runner_meanflow import RunnerMeanFlow +from meanaudio.utils.dist_utils import info_if_rank_zero +from meanaudio.utils.logger import TensorboardLogger +import torch.distributed as distributed + +local_rank = int(os.environ['LOCAL_RANK']) +world_size = int(os.environ['WORLD_SIZE']) + + +def sample(cfg: DictConfig): + # initial setup + num_gpus = world_size + run_dir = HydraConfig.get().run.dir # ./output/$exp_name + + # wrap python logger with a tensorboard logger + log = TensorboardLogger(cfg.exp_id, + run_dir, + logging.getLogger(), + is_rank0=(local_rank == 0), + enable_email=cfg.enable_email and not cfg.debug) + + info_if_rank_zero(log, f'All configuration: {cfg}') + info_if_rank_zero(log, f'Number of GPUs detected: {num_gpus}') + + # cuda setup + torch.cuda.set_device(local_rank) + torch.backends.cudnn.benchmark = cfg.cudnn_benchmark + + # number of dataloader workers + info_if_rank_zero(log, f'Number of dataloader workers (per GPU): {cfg.num_workers}') + + # Set seeds to ensure the same initialization + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + random.seed(cfg.seed) + + # setting up configurations + info_if_rank_zero(log, f'Configuration: {cfg}') + info_if_rank_zero(log, f'Batch size (per GPU): {cfg.eval_batch_size}') + + # construct the trainer + if not cfg.use_repa: + if not cfg.use_meanflow: + runner = RunnerFlowMatching(cfg, log=log, run_path=run_dir, for_training=False).enter_val() + else: + runner = RunnerMeanFlow(cfg, 
log=log, run_path=run_dir, for_training=False).enter_val() + else: + raise NotImplementedError('REPA is not supported yet') + runner = RunnerAT_REPA(cfg, log=log, run_path=run_dir, for_training=False).enter_val() + + ## we only load the ema ckpt for final eval + weights = runner.get_final_ema_weight_path() + if weights is not None: + info_if_rank_zero(log, f'Automatically finding weight: {weights}') + runner.load_weights(weights) + + # setup datasets + dataset, sampler, loader = setup_test_datasets(cfg) + data_cfg = cfg.data.AudioCaps_test_npz # base_at data config + with open_dict(data_cfg): + if cfg.output_name is not None: + # append to the tag + data_cfg.tag = f'{data_cfg.tag}-{cfg.output_name}' + + # loop + audio_path = None + for curr_iter, data in enumerate(tqdm(loader)): + new_audio_path = runner.inference_pass(data, curr_iter, data_cfg) # generate audio + if audio_path is None: + audio_path = new_audio_path + else: + assert audio_path == new_audio_path, 'Different audio path detected' + + distributed.barrier() # waiting till all processes finish generation + info_if_rank_zero(log, f'Inference completed. Audio path: {audio_path}') + output_metrics = runner.eval(audio_path, curr_iter, data_cfg) + + if local_rank == 0: + # write the output metrics to run_dir + output_metrics_path = os.path.join(run_dir, f'{data_cfg.tag}-output_metrics.json') + with open(output_metrics_path, 'w') as f: + json.dump(output_metrics, f, indent=4) + print(f"Results saved in {output_metrics_path}") diff --git a/meanaudio/utils/__init__.py b/meanaudio/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/meanaudio/utils/dist_utils.py b/meanaudio/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..826bfc3cea4d3252546a298be9a8485822d966cc --- /dev/null +++ b/meanaudio/utils/dist_utils.py @@ -0,0 +1,15 @@ +import os +from logging import Logger +import torch.distributed as dist +from meanaudio.utils.logger import TensorboardLogger + +local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0 +world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + +def info_if_rank_zero(logger: Logger, msg: str): + if local_rank == 0: + logger.info(msg) + +def string_if_rank_zero(logger: TensorboardLogger, tag: str, msg: str): + if local_rank == 0: + logger.log_string(tag, msg) diff --git a/meanaudio/utils/download_utils.py b/meanaudio/utils/download_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4b0e40f423a5a7e886628d9b3b20c71a7b85436c --- /dev/null +++ b/meanaudio/utils/download_utils.py @@ -0,0 +1,58 @@ +import hashlib +import logging +from pathlib import Path + +import requests +from tqdm import tqdm + +log = logging.getLogger() + +links = [ + { + 'name': 'mmaudio_small_16k.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_small_16k.pth', + 'md5': 'af93cde404179f58e3919ac085b8033b', + }, + { + 'name': 'v1-16.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-16.pth', + 'md5': '69f56803f59a549a1a507c93859fd4d7' + }, + { + 'name': 'best_netG.pt', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/best_netG.pt', + 'md5': 'eeaf372a38a9c31c362120aba2dde292' + }, + { + 'name': 'v1-44.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-44.pth', + 'md5': 'fab020275fa44c6589820ce025191600' + }, +] + + +def 
download_model_if_needed(model_path: Path): + base_name = model_path.name + + for link in links: + if link['name'] == base_name: + target_link = link + break + else: + raise ValueError(f'No link found for {base_name}') + + model_path.parent.mkdir(parents=True, exist_ok=True) + if not model_path.exists() or hashlib.md5(open(model_path, + 'rb').read()).hexdigest() != target_link['md5']: + log.info(f'Downloading {base_name} to {model_path}...') + r = requests.get(target_link['url'], stream=True) + total_size = int(r.headers.get('content-length', 0)) + block_size = 1024 + t = tqdm(total=total_size, unit='iB', unit_scale=True) + with open(model_path, 'wb') as f: + for data in r.iter_content(block_size): + t.update(len(data)) + f.write(data) + t.close() + if total_size != 0 and t.n != total_size: + raise RuntimeError('Error while downloading %s' % base_name) diff --git a/meanaudio/utils/email_utils.py b/meanaudio/utils/email_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3eecab43534f0dfd0d3cf99f245ded60ae578593 --- /dev/null +++ b/meanaudio/utils/email_utils.py @@ -0,0 +1,50 @@ +import logging +import os +from datetime import datetime + +import requests +from dotenv import load_dotenv +from pytz import timezone + +from meanaudio.utils.timezone import my_timezone + +_source = 'USE YOURS' +_target = 'USE YOURS' + +log = logging.getLogger() + +_fmt = "%Y-%m-%d %H:%M:%S %Z%z" + + +class EmailSender: + + def __init__(self, exp_id: str, enable: bool): + self.exp_id = exp_id + self.enable = enable + if enable: + load_dotenv() + self.MAILGUN_API_KEY = os.getenv('MAILGUN_API_KEY') + if self.MAILGUN_API_KEY is None: + log.warning('MAILGUN_API_KEY is not set') + self.enable = False + + def send(self, subject, content): + if self.enable: + subject = str(subject) + content = str(content) + try: + return requests.post(f'https://api.mailgun.net/v3/{_source}/messages', + auth=('api', self.MAILGUN_API_KEY), + data={ + 'from': + f'🤖 ', + 'to': [f'{_target}'], + 'subject': + f'[{self.exp_id}] {subject}', + 'text': + ('\n\n' + content + '\n\n\n' + + datetime.now(timezone(my_timezone)).strftime(_fmt)), + }, + timeout=20) + except Exception as e: + log.error(f'Failed to send email: {e}') diff --git a/meanaudio/utils/log_integrator.py b/meanaudio/utils/log_integrator.py new file mode 100644 index 0000000000000000000000000000000000000000..b4835910051d4c7f8e5c2348595042969c8e379c --- /dev/null +++ b/meanaudio/utils/log_integrator.py @@ -0,0 +1,112 @@ +""" +Integrate numerical values for some iterations +Typically used for loss computation / logging to tensorboard +Call finalize and create a new Integrator when you want to display/log +""" +from typing import Callable, Union + +import torch + +from meanaudio.utils.logger import TensorboardLogger +from meanaudio.utils.tensor_utils import distribute_into_histogram + + +class Integrator: + + def __init__(self, logger: TensorboardLogger, distributed: bool = True): + self.values = {} + self.counts = {} + self.hooks = [] # List is used here to maintain insertion order + + # for binned tensors + self.binned_tensors = {} + self.binned_tensor_indices = {} + + self.logger = logger + + self.distributed = distributed + self.local_rank = torch.distributed.get_rank() + self.world_size = torch.distributed.get_world_size() + + def add_scalar(self, key: str, x: Union[torch.Tensor, int, float]): + if isinstance(x, torch.Tensor): + x = x.detach() + if x.dtype in [torch.long, torch.int, torch.bool]: + x = x.float() + + if key not in self.values: + 
self.counts[key] = 1 + self.values[key] = x + else: + self.counts[key] += 1 + self.values[key] += x + + def add_dict(self, tensor_dict: dict[str, torch.Tensor]): + for k, v in tensor_dict.items(): + self.add_scalar(k, v) + + def add_binned_tensor(self, key: str, x: torch.Tensor, indices: torch.Tensor): + if key not in self.binned_tensors: + self.binned_tensors[key] = [x.detach().flatten()] + self.binned_tensor_indices[key] = [indices.detach().flatten()] + else: + self.binned_tensors[key].append(x.detach().flatten()) + self.binned_tensor_indices[key].append(indices.detach().flatten()) + + def add_hook(self, hook: Callable[[torch.Tensor], tuple[str, torch.Tensor]]): + """ + Adds a custom hook, i.e. compute new metrics using values in the dict + The hook takes the dict as argument, and returns a (k, v) tuple + e.g. for computing IoU + """ + self.hooks.append(hook) + + def reset_except_hooks(self): + self.values = {} + self.counts = {} + + # Average and output the metrics + def finalize(self, prefix: str, it: int, ignore_timer: bool = False) -> None: + + for hook in self.hooks: + k, v = hook(self.values) + self.add_scalar(k, v) + + # for the metrics + outputs = {} + for k, v in self.values.items(): + avg = v / self.counts[k] + if self.distributed: + # Inplace operation + if isinstance(avg, torch.Tensor): + avg = avg.cuda() + else: + avg = torch.tensor(avg).cuda() + torch.distributed.reduce(avg, dst=0) + + if self.local_rank == 0: + avg = (avg / self.world_size).cpu().item() + outputs[k] = avg + else: + # Simple does it + outputs[k] = avg + + if (not self.distributed) or (self.local_rank == 0): + self.logger.log_metrics(prefix, outputs, it, ignore_timer=ignore_timer) + + # for the binned tensors + for k, v in self.binned_tensors.items(): + x = torch.cat(v, dim=0) + indices = torch.cat(self.binned_tensor_indices[k], dim=0) + hist, count = distribute_into_histogram(x, indices) + + if self.distributed: + torch.distributed.reduce(hist, dst=0) + torch.distributed.reduce(count, dst=0) + if self.local_rank == 0: + hist = hist / count + else: + hist = hist / count + + if (not self.distributed) or (self.local_rank == 0): + self.logger.log_histogram(f'{prefix}/{k}', hist, it) diff --git a/meanaudio/utils/logger.py b/meanaudio/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..e99ac7356b3d927caaa86fe2502864b291c443c5 --- /dev/null +++ b/meanaudio/utils/logger.py @@ -0,0 +1,231 @@ +""" +Dumps things to tensorboard and console +""" + +import datetime +import logging +import math +import os +from collections import defaultdict +from pathlib import Path +from typing import Optional, Union + +import matplotlib.pyplot as plt +import numpy as np +import torch +import torchaudio +from PIL import Image +from pytz import timezone +from torch.utils.tensorboard import SummaryWriter + +from meanaudio.utils.email_utils import EmailSender +from meanaudio.utils.time_estimator import PartialTimeEstimator, TimeEstimator +from meanaudio.utils.timezone import my_timezone + + +def tensor_to_numpy(image: torch.Tensor): + image_np = (image.numpy() * 255).astype('uint8') + return image_np + + +def detach_to_cpu(x: torch.Tensor): + return x.detach().cpu() + + +def fix_width_trunc(x: float): + return ('{:.9s}'.format('{:0.9f}'.format(x))) + + +def plot_spectrogram(spectrogram: np.ndarray, title=None, ylabel="freq_bin", ax=None): + if ax is None: + _, ax = plt.subplots(1, 1) + if title is not None: + ax.set_title(title) + ax.set_ylabel(ylabel) + ax.imshow(spectrogram, origin="lower", aspect="auto", 
interpolation="nearest") + + +class TensorboardLogger: + + def __init__(self, + exp_id: str, + run_dir: Union[Path, str], + py_logger: logging.Logger, + *, + is_rank0: bool = False, + enable_email: bool = False): + self.exp_id = exp_id + self.run_dir = Path(run_dir) + self.py_log = py_logger + self.email_sender = EmailSender(exp_id, enable=(is_rank0 and enable_email)) + if is_rank0: + self.tb_log = SummaryWriter(run_dir) # tensorboard logger + else: + self.tb_log = None + + # Get current git info for logging + try: + import git + repo = git.Repo(".") + git_info = str(repo.active_branch) + ' ' + str(repo.head.commit.hexsha) + except (ImportError, RuntimeError, TypeError): + print('Failed to fetch git info. Defaulting to None') + git_info = 'None' + + self.log_string('git', git_info) + + # log the SLURM job id if available + job_id = os.environ.get('SLURM_JOB_ID', None) + if job_id is not None: + self.log_string('slurm_job_id', job_id) + self.email_sender.send(f'Job {job_id} started', f'Job started {run_dir}') + + # used when logging metrics + self.batch_timer: TimeEstimator = None + self.data_timer: PartialTimeEstimator = None + + self.nan_count = defaultdict(int) + + def log_scalar(self, tag: str, x: float, it: int): + if self.tb_log is None: + return + if math.isnan(x) and 'grad_norm' not in tag: + self.nan_count[tag] += 1 + if self.nan_count[tag] == 10: + self.email_sender.send( + f'Nan detected in {tag} @ {self.run_dir}', + f'Nan detected in {tag} at iteration {it}; run_dir: {self.run_dir}') + else: + self.nan_count[tag] = 0 + self.tb_log.add_scalar(tag, x, it) + + def log_metrics(self, + prefix: str, + metrics: dict[str, float], + it: int, + ignore_timer: bool = False): + msg = f'{self.exp_id}-{prefix} - it {it:6d}: ' + metrics_msg = '' + for k, v in sorted(metrics.items()): + self.log_scalar(f'{prefix}/{k}', v, it) + metrics_msg += f'{k: >10}:{v:.7f},\t' + + if self.batch_timer is not None and not ignore_timer: + self.batch_timer.update() + avg_time = self.batch_timer.get_and_reset_avg_time() + data_time = self.data_timer.get_and_reset_avg_time() + + # add time to tensorboard + self.log_scalar(f'{prefix}/avg_time', avg_time, it) + self.log_scalar(f'{prefix}/data_time', data_time, it) + + est = self.batch_timer.get_est_remaining(it) + est = datetime.timedelta(seconds=est) + if est.days > 0: + remaining_str = f'{est.days}d {est.seconds // 3600}h' + else: + remaining_str = f'{est.seconds // 3600}h {(est.seconds%3600) // 60}m' + eta = datetime.datetime.now(timezone(my_timezone)) + est + eta_str = eta.strftime('%Y-%m-%d %H:%M:%S %Z%z') + time_msg = f'avg_time:{avg_time:.3f},data:{data_time:.3f},remaining:{remaining_str},eta:{eta_str},\t' + msg = f'{msg} {time_msg}' + + msg = f'{msg} {metrics_msg}' + self.py_log.info(msg) + + def log_histogram(self, tag: str, hist: torch.Tensor, it: int): + if self.tb_log is None: + return + # hist should be a 1D tensor + hist = hist.cpu().numpy() + fig, ax = plt.subplots() + x_range = np.linspace(0, 1, len(hist)) + ax.bar(x_range, hist, width=1 / (len(hist) - 1)) + ax.set_xticks(x_range) + ax.set_xticklabels(x_range) + plt.tight_layout() + self.tb_log.add_figure(tag, fig, it) + plt.close() + + def log_image(self, prefix: str, tag: str, image: np.ndarray, it: int): + image_dir = self.run_dir / f'{prefix}_images' + image_dir.mkdir(exist_ok=True, parents=True) + + image = Image.fromarray(image) + image.save(image_dir / f'{it:09d}_{tag}.png') + + def log_audio(self, + prefix: str, + tag: str, + waveform: torch.Tensor, + it: Optional[int] = None, + *, + subdir: 
Optional[Path] = None, + sample_rate: int = 16000) -> Path: + if subdir is None: + audio_dir = self.run_dir / prefix + else: + audio_dir = self.run_dir / subdir / prefix + audio_dir.mkdir(exist_ok=True, parents=True) + + if it is None: + name = f'{tag}.flac' + else: + name = f'{it:09d}_{tag}.flac' + + torchaudio.save(audio_dir / name, + waveform.cpu().float(), + sample_rate=sample_rate, + channels_first=True) + return Path(audio_dir) + + def log_spectrogram( + self, + prefix: str, + tag: str, + spec: torch.Tensor, + it: Optional[int], + *, + subdir: Optional[Path] = None, + ): + if subdir is None: + spec_dir = self.run_dir / prefix + else: + spec_dir = self.run_dir / subdir / prefix + spec_dir.mkdir(exist_ok=True, parents=True) + + if it is None: + name = f'{tag}.png' + else: + name = f'{it:09d}_{tag}.png' + + plot_spectrogram(spec.cpu().float()) + plt.tight_layout() + plt.savefig(spec_dir / name) + plt.close() + + def log_string(self, tag: str, x: str): + self.py_log.info(f'{tag} - {x}') + if self.tb_log is None: + return + self.tb_log.add_text(tag, x) + + def debug(self, x): + self.py_log.debug(x) + + def info(self, x): + self.py_log.info(x) + + def warning(self, x): + self.py_log.warning(x) + + def error(self, x): + self.py_log.error(x) + + def critical(self, x): + self.py_log.critical(x) + + self.email_sender.send(f'Error occurred in {self.run_dir}', x) + + def complete(self): + self.email_sender.send(f'Job completed in {self.run_dir}', 'Job completed') diff --git a/meanaudio/utils/synthesize_ema.py b/meanaudio/utils/synthesize_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..827634b408efdaf913a07317a0cb39adc4dd3280 --- /dev/null +++ b/meanaudio/utils/synthesize_ema.py @@ -0,0 +1,28 @@ +from typing import Optional + +from nitrous_ema import PostHocEMA +from omegaconf import DictConfig + +from meanaudio.model.networks import get_mean_audio + + +def synthesize_ema(cfg: DictConfig, sigma: float, step: Optional[int]): + if not cfg.use_repa: + # !NOTE here we need to re-define model so be careful of passed arguments (need to be coherent with before) + vae = get_mean_audio(cfg.model, text_c_dim=cfg.data_dim.text_c_dim) + else: + vae = get_mean_audio(cfg.model, text_c_dim=cfg.data_dim.text_c_dim, + repa_layer=cfg.repa_layer, # repa config + z_dim=cfg.z_dim, + z_len=cfg.z_len, + ufo_objective=cfg.ufo_objective, + proj_version=cfg.repa_version) + emas = PostHocEMA(vae, + sigma_rels=cfg.ema.sigma_rels, + update_every=cfg.ema.update_every, + checkpoint_every_num_steps=cfg.ema.checkpoint_every, + checkpoint_folder=cfg.ema.checkpoint_folder) + + synthesized_ema = emas.synthesize_ema_model(sigma_rel=sigma, step=step, device='cpu') + state_dict = synthesized_ema.ema_model.state_dict() + return state_dict diff --git a/meanaudio/utils/tensor_utils.py b/meanaudio/utils/tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b650955b04ce097d0a03bbafb6424f9528c631c2 --- /dev/null +++ b/meanaudio/utils/tensor_utils.py @@ -0,0 +1,14 @@ +import torch + + +def distribute_into_histogram(loss: torch.Tensor, + t: torch.Tensor, + num_bins: int = 25) -> tuple[torch.Tensor, torch.Tensor]: + loss = loss.detach().flatten() + t = t.detach().flatten() + t = (t * num_bins).long() + hist = torch.zeros(num_bins, device=loss.device) + count = torch.zeros(num_bins, device=loss.device) + hist.scatter_add_(0, t, loss) + count.scatter_add_(0, t, torch.ones_like(loss)) + return hist, count diff --git a/meanaudio/utils/time_estimator.py b/meanaudio/utils/time_estimator.py 
new file mode 100644 index 0000000000000000000000000000000000000000..62ff3ca189cda8f9524c11196fdc292eedb1d354 --- /dev/null +++ b/meanaudio/utils/time_estimator.py @@ -0,0 +1,72 @@ +import time + + +class TimeEstimator: + + def __init__(self, total_iter: int, step_size: int, ema_alpha: float = 0.7): + self.avg_time_window = [] # window-based average + self.exp_avg_time = None # exponential moving average + self.alpha = ema_alpha # for exponential moving average + + self.last_time = time.time() # would not be accurate for the first iteration but well + self.total_iter = total_iter + self.step_size = step_size + + self._buffering_exp = True + + # call this at a fixed interval + # does not have to be every step + def update(self): + curr_time = time.time() + time_per_iter = curr_time - self.last_time + self.last_time = curr_time + + self.avg_time_window.append(time_per_iter) + + if self._buffering_exp: + if self.exp_avg_time is not None: + # discard the first iteration call to not pollute the ema + self._buffering_exp = False + self.exp_avg_time = time_per_iter + else: + self.exp_avg_time = self.alpha * self.exp_avg_time + (1 - self.alpha) * time_per_iter + + def get_est_remaining(self, it: int): + if self.exp_avg_time is None: + return 0 + + remaining_iter = self.total_iter - it + return remaining_iter * self.exp_avg_time / self.step_size + + def get_and_reset_avg_time(self): + avg = sum(self.avg_time_window) / len(self.avg_time_window) / self.step_size + self.avg_time_window = [] + return avg + + +class PartialTimeEstimator(TimeEstimator): + """ + Used where the start_time and the end_time do not align + """ + + def update(self): + raise RuntimeError('Please use start() and end() for PartialTimeEstimator') + + def start(self): + self.last_time = time.time() + + def end(self): + assert self.last_time is not None, 'Please call start() before calling end()' + curr_time = time.time() + time_per_iter = curr_time - self.last_time + self.last_time = None + + self.avg_time_window.append(time_per_iter) + + if self._buffering_exp: + if self.exp_avg_time is not None: + # discard the first iteration call to not pollute the ema + self._buffering_exp = False + self.exp_avg_time = time_per_iter + else: + self.exp_avg_time = self.alpha * self.exp_avg_time + (1 - self.alpha) * time_per_iter diff --git a/meanaudio/utils/timezone.py b/meanaudio/utils/timezone.py new file mode 100644 index 0000000000000000000000000000000000000000..4c7f0e6e753816a421f8e5d829ac131c95192a03 --- /dev/null +++ b/meanaudio/utils/timezone.py @@ -0,0 +1 @@ +my_timezone = 'US/Central' diff --git a/meanaudio/utils/video_joiner.py b/meanaudio/utils/video_joiner.py new file mode 100644 index 0000000000000000000000000000000000000000..1a05ae84a079e03f9af96bb2dc0bf38f004732ca --- /dev/null +++ b/meanaudio/utils/video_joiner.py @@ -0,0 +1,66 @@ +from pathlib import Path +from typing import Union + +import torch +from torio.io import StreamingMediaDecoder, StreamingMediaEncoder + + +class VideoJoiner: + + def __init__(self, src_root: Union[str, Path], output_root: Union[str, Path], sample_rate: int, + duration_seconds: float): + self.src_root = Path(src_root) + self.output_root = Path(output_root) + self.sample_rate = sample_rate + self.duration_seconds = duration_seconds + + self.output_root.mkdir(parents=True, exist_ok=True) + + def join(self, video_id: str, output_name: str, audio: torch.Tensor): + video_path = self.src_root / f'{video_id}.mp4' + output_path = self.output_root / f'{output_name}.mp4' + merge_audio_into_video(video_path, 
output_path, audio, self.sample_rate, + self.duration_seconds) + + +def merge_audio_into_video(video_path: Union[str, Path], output_path: Union[str, Path], + audio: torch.Tensor, sample_rate: int, duration_seconds: float): + # audio: (num_samples, num_channels=1/2) + + frame_rate = 24 + # read the video + reader = StreamingMediaDecoder(video_path) + reader.add_basic_video_stream( + frames_per_chunk=int(frame_rate * duration_seconds), + # buffer_chunk_size=1, # does not work with this -- extracted audio would be too short + format="rgb24", + frame_rate=frame_rate, + ) + + reader.fill_buffer() + video_chunk = reader.pop_chunks()[0] + t, _, h, w = video_chunk.shape + + writer = StreamingMediaEncoder(output_path) + writer.add_audio_stream( + sample_rate=sample_rate, + num_channels=audio.shape[-1], + encoder="libmp3lame", + ) + writer.add_video_stream(frame_rate=frame_rate, + width=w, + height=h, + format="rgb24", + encoder="libx264", + encoder_format="yuv420p") + + with writer.open(): + writer.write_audio_chunk(0, audio.float()) + writer.write_video_chunk(1, video_chunk) + + +if __name__ == '__main__': + # Usage example + import sys + audio = torch.randn(16000 * 4, 1) + merge_audio_into_video(sys.argv[1], sys.argv[2], audio, 16000, 4) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e2c6f3bcfb2f2ef19c33e5a72427b71d7353b6c4 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,58 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.yapf] +based_on_style = "pep8" +indent_width = 4 +column_limit = 100 + +[tool.isort] +line_length = 100 + +[project] +name = "meanaudio" +version = "1.0.0" +authors = [{ name = "Xiquan Li", email = "mtxiaoxi55@sjtu.edu.cn" }] +description = "MeanAudio is a fast and faithful text-to-audio generator" +readme = "README.md" +requires-python = ">=3.9" +classifiers = [ + "Programming Language :: Python :: 3", + "Operating System :: OS Independent", +] +dependencies = [ + 'torch >= 2.5.1', + 'huggingface_hub >= 0.26', + 'cython', + 'gitpython >= 3.1', + 'tensorboard >= 2.11', + 'numpy >= 1.21, <2.1', + 'Pillow >= 9.5', + 'opencv-python >= 4.8', + 'scipy >= 1.7', + 'tqdm >= 4.66.1', + 'gradio >= 3.34', + 'einops >= 0.6', + 'hydra-core >= 1.3.2', + 'requests', + 'torchdiffeq >= 0.2.5', + 'librosa >= 0.8.1', + 'nitrous-ema', + 'hydra_colorlog', + 'tensordict >= 0.6.1', + 'colorlog', + 'open_clip_torch >= 2.29.0', + 'av >= 14.0.1', + 'timm >= 1.0.12', + 'python-dotenv', + 'transformers', + 'debugpy', + 'laion-clap' +] + +[tool.hatch.build.targets.wheel] +packages = ["meanaudio"] diff --git a/scripts/extract_audio_latents.sh b/scripts/extract_audio_latents.sh new file mode 100644 index 0000000000000000000000000000000000000000..43b24c1f1ba421d47da309ebace3de763b83b1f4 --- /dev/null +++ b/scripts/extract_audio_latents.sh @@ -0,0 +1,23 @@ +## split audio clips +PATH_TO_AUDIO_DIR= # dir to audio clips e.g.: /home/to/audiocaps_wav +OUTPUT_PARTITION_FILE= # ouput csv path, e.g.: /home/to/output/audiocaps-test-partition.tsv + +python training/partition_clips.py \ + --data_dir $PATH_TO_AUDIO_DIR \ + --output_dir $OUTPUT_PARTITION_FILE + + +## extract audio latents +export CUDA_VISIBLE_DEVICES=0 + +CAPTIONS_TSV=./sets/audiocaps-test.tsv # captions tsv path, e.g.: /home/to/audiocaps-test.tsv +OUTPUT_LATENT_DIR= # output latent dir, e.g.: /home/to/output/audiocaps-test-latent +OUTPUT_NPZ_DIR= # output npz dir, e.g.: 
/home/to/output/audiocaps-test-npz + +torchrun --standalone --nproc_per_node=1 training/extract_audio_latents.py \ + --captions_tsv $CAPTIONS_TSV \ + --data_dir $PATH_TO_AUDIO_DIR \ + --clips_tsv $OUTPUT_PARTITION_FILE \ + --latent_dir $OUTPUT_LATENT_DIR \ + --output_dir $OUTPUT_NPZ_DIR \ + --text_encoder='t5_clap' # ['clip', 't5', 't5_clap'] \ No newline at end of file diff --git a/scripts/flowmatching/eval_flowmatching.sh b/scripts/flowmatching/eval_flowmatching.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c1a649a33c6fc2b6435fa3826887b656b79e0d9 --- /dev/null +++ b/scripts/flowmatching/eval_flowmatching.sh @@ -0,0 +1,38 @@ +# evaluation on audiocaps + +export CUDA_VISIBLE_DEVICES=2 + +num_steps=25 +ckpt_path=./weights/fluxaudio_fm.pth +output_path=./exps/fluxaudio/test_${num_steps}nfe_fp32 + +python eval.py \ + --variant "fluxaudio_fm" \ + --model_path "$ckpt_path" \ + --output $output_path/audio \ + --cfg_strength 4.5 \ + --num_steps $num_steps \ + --encoder_name t5_clap \ + --duration 10 \ + --use_rope \ + --text_c_dim 512 \ + --full_precision + + +cd ./av-benchmark +gt_audio='gt_audio' # not used if you specify gt_cache +gt_cache='./data/audiocaps/test-features' + +pred_audio=$output_path/audio +output_metrics_dir=$output_path + +python evaluate.py \ + --gt_audio $gt_audio \ + --gt_cache $gt_cache \ + --pred_audio $pred_audio \ + --pred_cache $output_metrics_dir/cache \ + --audio_length=10 \ + --recompute_pred_cache \ + --skip_video_related \ + --output_metrics_dir=$output_metrics_dir \ + # --debug \ No newline at end of file diff --git a/scripts/flowmatching/infer_flowmatching.sh b/scripts/flowmatching/infer_flowmatching.sh new file mode 100644 index 0000000000000000000000000000000000000000..6203e862508abade15f4e9f567e67b69fb830c99 --- /dev/null +++ b/scripts/flowmatching/infer_flowmatching.sh @@ -0,0 +1,19 @@ +export CUDA_VISIBLE_DEVICES=1 + +output_path=./exps/fluxaudio/output_25nfe + +prompt="A basketball bounces rhythmically on a court, shoes squeak against the floor, and a referee’s whistle cuts through the air" +model=fluxaudio_fm +ckpt_path=weights/fluxaudio_fm.pth + +python infer.py \ + --variant "fluxaudio_fm" \ + --prompt "$prompt" \ + --model_path "$ckpt_path" \ + --output $output_path \ + --num_steps 25 \ + --cfg_strength 4.5 \ + --encoder_name t5_clap \ + --duration 10 \ + --use_rope \ + --text_c_dim 512 diff --git a/scripts/flowmatching/train_flowmatching.sh b/scripts/flowmatching/train_flowmatching.sh new file mode 100644 index 0000000000000000000000000000000000000000..646af38b1dfdfc97032491748f66be83839d7056 --- /dev/null +++ b/scripts/flowmatching/train_flowmatching.sh @@ -0,0 +1,34 @@ +export CUDA_VISIBLE_DEVICES=4,5,6,7 + +NUM_GPUS=$(echo ${CUDA_VISIBLE_DEVICES:-""} | tr ',' '\n' | wc -l) +btz=256 +num_iterations=200_000 +exp_id=AC_${btz}_numgpus${NUM_GPUS}_niter${num_iterations}_T5_CLAP_flowmatching + +text_encoder_name=t5_clap + +text_c_dim=512 # 1024 + 512 +model=fluxaudio_fm # meanaudio_mf, fluxaudio_fm + + +OMP_NUM_THREADS=1 \ +CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES \ +torchrun --standalone --nproc_per_node=$NUM_GPUS \ + train.py \ + --config-name train_config.yaml \ + exp_id=$exp_id \ + compile=False \ + model=$model \ + batch_size=${btz} \ + eval_batch_size=32 \ + num_iterations=$num_iterations \ + text_encoder_name=$text_encoder_name \ + data_dim.text_c_dim=$text_c_dim \ + pin_memory=False \ + num_workers=10 \ + ac_oversample_rate=5 \ + use_meanflow=False \ + cfg_strength=4.5 \ + ++use_rope=True \ +
++use_wandb=True \ + ++debug=False \ No newline at end of file diff --git a/scripts/meanflow/eval_meanflow.sh b/scripts/meanflow/eval_meanflow.sh new file mode 100644 index 0000000000000000000000000000000000000000..509eafdb840b80c80be4eb85ecfb81086cb02539 --- /dev/null +++ b/scripts/meanflow/eval_meanflow.sh @@ -0,0 +1,38 @@ +# evaluation on audiocaps + +export CUDA_VISIBLE_DEVICES=0 + +num_steps=1 +ckpt_path=./weights/meanaudio_mf.pth +output_path=./exps/meanaudio/test_${num_steps}nfe_fp32 + +python eval.py \ + --variant "meanaudio_mf" \ + --model_path "$ckpt_path" \ + --output $output_path/audio \ + --cfg_strength 0.9 \ + --encoder_name t5_clap \ + --duration 10 \ + --use_rope \ + --text_c_dim 512 \ + --num_steps $num_steps \ + --use_meanflow \ + --full_precision + + +gt_audio='gt_audio' # not used if you specify gt_cache +gt_cache='./data/audiocaps/test-features' + +pred_audio=$output_path/audio +output_metrics_dir=$output_path + +python av-benchmark/evaluate.py \ + --gt_audio $gt_audio \ + --gt_cache $gt_cache \ + --pred_audio $pred_audio \ + --pred_cache $output_metrics_dir/cache \ + --audio_length=10 \ + --recompute_pred_cache \ + --skip_video_related \ + --output_metrics_dir=$output_metrics_dir \ + # --debug \ No newline at end of file diff --git a/scripts/meanflow/infer_meanflow.sh b/scripts/meanflow/infer_meanflow.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f768acaee0cf9865ac3a14dc90551825317a4dc --- /dev/null +++ b/scripts/meanflow/infer_meanflow.sh @@ -0,0 +1,21 @@ +export CUDA_VISIBLE_DEVICES=0 + +output_path=./exps/meanaudio/output_1nfe + +prompt="A basketball bounces rhythmically on a court, shoes squeak against the floor, and a referee’s whistle cuts through the air" +model=meanaudio_mf +ckpt_path=weights/meanaudio_mf.pth +num_steps=1 + +python infer.py \ + --variant "meanaudio_mf" \ + --prompt "$prompt" \ + --model_path "$ckpt_path" \ + --output $output_path \ + --cfg_strength 0 \ + --encoder_name t5_clap \ + --duration 10 \ + --use_rope \ + --text_c_dim 512 \ + --num_steps $num_steps \ + --use_meanflow diff --git a/scripts/meanflow/train_meanflow.sh b/scripts/meanflow/train_meanflow.sh new file mode 100644 index 0000000000000000000000000000000000000000..beb39b613089af554d2f43db2b7f54f3332bed2c --- /dev/null +++ b/scripts/meanflow/train_meanflow.sh @@ -0,0 +1,34 @@ +export CUDA_VISIBLE_DEVICES=4,5,6,7 + +NUM_GPUS=$(echo ${CUDA_VISIBLE_DEVICES:-""} | tr ',' '\n' | wc -l) +btz=72 +num_iterations=200_000 +exp_id=AC_${btz}_numgpus${NUM_GPUS}_niter${num_iterations}_T5_CLAP_meanflow_improved_changecfg_seed1415926_flowratio0.75 + +text_encoder_name=t5_clap +weights=./weights/fluxaudio_fm.pth # pre-trained weights to be loaded for mix-field finetuning + +text_c_dim=512 # 1024 + 512 +model=meanaudio_mf # meanaudio_mf, fluxaudio_fm + + +OMP_NUM_THREADS=1 \ +CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES \ +torchrun --standalone --nproc_per_node=$NUM_GPUS \ + train.py \ + --config-name train_config.yaml \ + exp_id=$exp_id \ + compile=False \ + model=$model \ + batch_size=${btz} \ + eval_batch_size=32 \ + num_iterations=$num_iterations \ + text_encoder_name=$text_encoder_name \ + data_dim.text_c_dim=$text_c_dim \ + pin_memory=False \ + num_workers=10 \ + ac_oversample_rate=5 \ + weights=$weights \ + ++use_rope=True \ + ++use_wandb=True \ + ++debug=False \ No newline at end of file diff --git a/scripts/train_mini.sh b/scripts/train_mini.sh new file mode 100644 index 0000000000000000000000000000000000000000..499a846cedbe99c219249ab2644290a9c30a4b9a --- /dev/null
+++ b/scripts/train_mini.sh @@ -0,0 +1,47 @@ +### +# Mini training script, to check if everything runs successfully +### + +export CUDA_VISIBLE_DEVICES=4,5,6,7 + +NUM_GPUS=$(echo ${CUDA_VISIBLE_DEVICES:-""} | tr ',' '\n' | wc -l) +btz=12 + +text_encoder_name=t5_clap +text_c_dim=512 # 1024 + 512 + +num_iterations=200 +model=meanaudio_mf # meanaudio_mf, fluxaudio_fm + +exp_id=debug + +# Loading from pre-trained weights +pretrained_weights=./weights/flux_tta_mf.pth + +OMP_NUM_THREADS=1 \ +CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES \ +torchrun --standalone --nproc_per_node=$NUM_GPUS \ + train.py \ + --config-name train_config.yaml \ + exp_id=$exp_id \ + compile=False \ + model=$model \ + batch_size=${btz} \ + eval_batch_size=32 \ + num_iterations=$num_iterations \ + text_encoder_name=$text_encoder_name \ + data_dim.text_c_dim=$text_c_dim \ + pin_memory=False \ + num_workers=10 \ + ac_oversample_rate=5 \ + val_interval=100 \ + eval_interval=100 \ + save_eval_interval=100 \ + save_weights_interval=100 \ + save_checkpoint_interval=100 \ + mini_train=True \ + ema.checkpoint_every=50 \ + weights=$pretrained_weights \ + ++use_rope=True \ + ++use_wandb=False \ + ++debug=False \ No newline at end of file diff --git a/sets/latent_mean.pt b/sets/latent_mean.pt new file mode 100644 index 0000000000000000000000000000000000000000..1eda0044c8c459333ce720b59a5e69c6956e51ba --- /dev/null +++ b/sets/latent_mean.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9abf0f8ea34a96bdc16b4a203ee509412ab00185957f706b939aed51ba39d2e6 +size 1264 diff --git a/sets/latent_std.pt b/sets/latent_std.pt new file mode 100644 index 0000000000000000000000000000000000000000..d213375316c4fd7ab59564dbe21a8988e174ae8a --- /dev/null +++ b/sets/latent_std.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2540661f1b4e095507c61327ceeca99da1f93802c8425900ce27771d82bd905f +size 1259 diff --git a/sets/performance.pdf b/sets/performance.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c9b0d3cf4eafed9fbb2605257f17982fb17b025d --- /dev/null +++ b/sets/performance.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:929eb5fa61f2794a49a58f7376fb929a5f6b1dadf9221dd2739f3e7f3b416cee +size 113182 diff --git a/sets/performance.png b/sets/performance.png new file mode 100644 index 0000000000000000000000000000000000000000..ef220953c640ace0c6c61123ef6dfffeb653fa0c --- /dev/null +++ b/sets/performance.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed8b32b44e6b05f77a2693643fb1cc98e42c1c384886cc41813da27b62a832a6 +size 160685 diff --git a/sets/test-audiocaps.tsv b/sets/test-audiocaps.tsv new file mode 100644 index 0000000000000000000000000000000000000000..d84613ca9b246795534e3373c6ce25c78ac4767d --- /dev/null +++ b/sets/test-audiocaps.tsv @@ -0,0 +1,958 @@ +id caption +Y--0w1YA1Hm4 People talking with the dull roar of a vehicle on the road +Y-AheI8Epim4 Plastic crinkling followed by footsteps on concrete as metal clanging and a group of people talk in the background +Y-BUWGM7qeUM Rain falling and wind blowing hard with rustling leaves +Y-CcGuq0yoKo A female speaking +Y-DmjkgWa-rw Large church bells ring +Y-EQByFLFqig Man speaking, rain, thunder +Y-EaZ7EJJUl0 Dishes clanking followed by metal clanking on glass several times as a man is talking +Y-FW109cbv0g Muffled speech followed by laughter +Y-JP1GqPEKtw A man speaks while operating a power tool +Y-NrFeH-kBSM A gun cocking followed by a gunshot and a man talking +Y-NsC63dA01g 
Woman speaking to meowing cat +Y-R69Fa-mCaY A saw running continuously +Y-SkjbQVgJ0M A man talking as a vehicle drives by as rain patters on a hard surface +Y-Sz4z0QwEuM A person burps loudly +Y-aYumc8KoXg A distant horn ring as a vehicle approaches +Y-mb4Fw4Z0xg Humming of an engine with wind blowing and people speaking +Y-mhFGevxLUg A man is speaking with people talking and whooshing noises in the background +Y-nQHwrRLfc0 A mid-size motor is idling, vibrating and humming +Y-oy0BkpMGAk A vehicle driving and revving several times as tires skid and wind blows into a microphone +Y096oTVzc5Gs A woman talking as a dog growls followed by a fan blowing air +Y0AsXkZkqelg Large engine running with loud exhaust +Y0Dt-pH0pW-Y A motorcycle driving then accelerating as a man talks over an intercom followed by another man cheering +Y0G7rb74R-2A An adult male is speaking and an audience is laughing +Y0NGSrwioYjA A man talking as a sheep baas +Y0On6-JiVwRs A cat meowing as wind blows into a microphone +Y0Rpjl1AO-P0 A vehicle driving then accelerating alongside a series of plastic clicks +Y0_K6OKtoBBU Silence followed by white noise and meowing +Y0_ogYGDGDco A stream of water trickling and splashing while a man talks over a radio alongside wind blowing into a microphone +Y0a9wVat2PWk Train horn sounds as it moves along tracks +Y0fMXnvD38zI Ocean waves crashing and water splashing as gusts of wind blow and seagulls squawking in the distance +Y0jGH7A_hpBM One man speaking, then another man speaking +Y0qbHT34qTZE Loud gunshots and explosions with men speaking, water splashing, wind blowing, and thunder roaring +Y0rSETXszQM0 A motorcycle engine starts and then revs and accelerates +Y0ury8KHQdL4 A man speaking +Y0yETgW44MZU A honking horn from an oncoming train +Y0yxEvdnimGg Barks of distant dogs with men speaking and wind blowing +Y11SEBDuoqSk Humming of an engine with rapid gunshots +Y13CBvjHZhOA Engine noise with other engines passing by +Y14ekd4nkpwc An infant makes babbling noise followed by crying +Y14izd_i3ryE Wind rushes by, a motorcycle sounds, a man speaks +Y1DKLyH3FixM A group of birds call and chirp for a while +Y1FNJbN-eHY4 A crowd of people talking followed by two girls belching with a group of girls laughing +Y1GgEpRZDWN0 Man and woman talking +Y1HCuBnPLMqQ Gurgling and splashing water +Y1IoHRTUp86c A woman speaks briefly over a radio with an aircraft engine running +Y1L_OyngNZMA Echoing male speech, laughter and applause +Y1N_DtRpdAp8 An engine idling followed by the engine revving +Y1Og2TJ3bXW0 An aircraft engine running then slowing down with a plastic clank in the background +Y1OyEgzXCkYE A man speaking into a microphone +Y1PvMtRIlZNI Water gurgling +Y1QNLMF-Kl_s A woman delivering a speech +Y1Uh74_rZ72k Wind blows hard +Y1WTSW96XP6E A man talking followed by a metal clack then a motorcycle engine starting up +Y1_z6NcidGzM Water splashes with people speaking in the distance and a faint whistle +Y1a2XWJ8NA_Q An engine running and wind blowing with people talking +Y1e98HeU9Vrg Close ocean waves +Y1ed87LLY97k Thumping on a wooden surface before and after plastic clanking +Y1j5NMuq1X30 A motor engine humming as liquid sprays and music plays in the background +Y1nUOGZgSzZo Plastic thumping as a man laughs followed by a kid speaking in the distance as water splashes and wind blows into a microphone +Y1slvoNgzBLE A high tone signal followed by train moving down tracks +Y1vCYiVvZ7VE A woman speaking +Y1wW0YJQ-Xa0 A cough, then spray paint bottle shaking and spraying while a muffle of people talking 
in the background +Y22L_3pBa1AI Humming and vibrations of a vehicle speeding past and into the distance +Y27HIamF8pKo Clickety clanking followed by a horn honking loudly +Y2ABngPM3raQ An adult male speaks, a tapping sound occurs, and frogs croak +Y2ErfX6ZT5pM Some humming followed by a toilet flushing +Y2EsxcKe1A4w A dog barking while wind blows into a microphone followed by a man speaking in the distance proceeded by a bicycle pedaling alongside leaves rustling and a camera muffling +Y2ItTq2JShdU A toy train engine operating +Y2JV3emH50XU A vehicle passes by +Y2JutOgAnqWA A power tool with an electric motor is used for drilling +Y2KEfkDO6hlA Car engine accelerates and shifts gears +Y2KR0C5ysO8o A motor revs hard, a man speaks +Y2RjqBRzmxaM Females voice narrating a scene as music is playing and rain drops are falling +Y2UNuMbxz9ds A vehicle engine revving up as whips smack fiberglass followed by a vehicle accelerating at a fast rate and tires skidding +Y2a6GNu6uCDE A woman talking in an auditorium +Y2bq2lc3DLwM A man is speaking +Y2ceUOv8A3FE A train running on railroad tracks as a train horn blows several times alongside a child yelling in the distance followed by a man speaking then a child speaking in the background +Y2msevPMQB4M A machine is drilling +Y2sZhC_mKeic A cat meows as he enters the room and something is dropped with a big thump +Y2t82STv2GR8 Chiming of loud bells +Y2ymiXjImwGs A siren with people talking faintly in the background +Y30D1tqNFHMc An engine running and a beeping sound +Y31WGUPOYS5g A child shouts, and adult male speaks, and an emergency vehicle siren sounds and the horn blows +Y32565FEuksc A woman gives a speech and a crowd claps +Y350OCezayrk An engine is being started up then idles +Y35b9BSmN5JM An engine running +Y3IScngdQA4I A woman whispers and then another woman speaks +Y3IguMJkqpl4 A child cries and a man and woman speaking with a duck quacking +Y3MoF8myFs8Y Wind blowing and waves crashing +Y3RultJjvTWI People are talking and water is splashing +Y3Sml1wHcuxo A train horn blowing as a train runs on railroad tracks while a train signal beeps in the background +Y3VHpLxtd498 Pigeons are cooing along with a machine rumbling noise and a man speaks +Y3XcIVh40pTI A sleeping person snores loudly +Y3XuyGJqaXv8 A man speaking over an intercom as a crowd of people talk followed by a dog barking +Y3ejndVEAcmQ Cats meows and hisses and some thuds +Y3fomsZXG3aM An engine humming and ticking as it idles then revs +Y3ghVB-KaU_E Cloth scrapping then cloth shuffling as a man talks while compressed air hisses in the background +Y3hzy-FL24no A steady stream of water flowing as a small motor hum followed by plastic clicking +Y3iLGu2Omgrw An elderly man speaking +Y3kBlVLkN0zo A goat bleats and background voices +Y3n05BjV7r7M Rattling and banging with faint humming +Y3ndid3jni7M A train horn blows as a train approaches with warning bells ringing +Y3qTL7QRk-tg Gurgling water with men singing in background metal scraping metal +Y3qrVku794u0 Male speech and then a child speaks with repeated taps and a beep +Y3rna9zo5ZOs A man is speaking along with noise from a cheering crowd +Y3ue0gJM0THk A motor is revving up +Y3wV3ST-c4PE Rhythmic ticking is occurring +Y3wrdPAeqjVI A man speaks with some high pitched ringing and some rustling +Y3xDZ-kdGE3o Metal rattling followed by a door slamming shut then plastic clacking proceeded by a toilet flushing +Y404cD3bVXDc An infant talking while a man speaks through a television speaker +Y41D0yXSBqfI Metal scrapping and gravel pouring while 
pigeons are cooing +Y466ucPGoNSQ A cat meowing and whining +Y473wBEwC35M A car honks in the background while a man speaks +Y4CAMv5nlr-0 A man speaks as he gives a speech and then the crowd cheers +Y4IeDBwyQ9ZQ A tin can clacking followed by a girl talking then a woman talking +Y4KObP7cREWw An engine revving +Y4SZ7JXDCNps A vehicle engine accelerating and plastic rattling +Y4UPOUGVMlEs A boar snorting as dogs growl and heavily breathe while footsteps shuffle on foliage and crickets chirp in the background +Y4Ujigme2IxY An engine running and screeching with rustling +Y4YMXgLFcR94 A man speaks, and an audience applauds +Y4YodC6RnplI An adult female laugh while snoring is occurring, and an adult male speaks in the background +Y4_Cak7gvly4 A series of whooshes as music plays and a crowd of people talk in the background +Y4_DjmCg8Ra8 Loud bursts and pops with men speaking +Y4abZbau8tZo A man talking as rain falls on a hard surface as vehicles drive by followed by a child talking in the background +Y4bUL_ttiOdw A baby cries continuously +Y4eyY1w2QyM0 Water rushing and rippling +Y4ftDFi4684Y Light rustling followed by faint ticks of a clock +Y4fz0-Kx2oNs Loud, continuous sizzling +Y4pf-PIymDhU A power tool is in use +Y4pv3w--cRrA A machine makes a constant rumbling noise +Y4s2rRnu2PZo Music and gunfire with male speech +Y4sb9jN0SgTM An engine revving and then male speech +Y4xrL4TSgHwU A vehicle engine stalling then starting up and running idle +Y52IxrdTxGs4 A loud burst followed by musical notes and a heartbeat followed by a man speaking +Y54eRRbCtPn8 A woman speaking +Y59VP93Tzjmg A train horn blows several times with leaves rustling in the wind +Y5G6b_QWL3nY An adult female speaks as a pan sizzles +Y5I8lmN8rwDM A machine motor buzzing and humming +Y5K1mISHwggI Men speak with traffic passing in the distance with a honk of a horn +Y5OM3tJh51pE A woman giving a speech +Y5ORpSk5CIWc A small motor is running and fades, and vibration is present +Y5QZ0NtdoKJ8 A two-tone electronic alarm is ongoing, while an adult female is speaking in the foreground and birds are chirping in the background +Y5YzNSjmZ3Wg A man speaking as an insect is buzzing +Y5eSRL3PRHzo Loud, continuous applause +Y5iTRKJqUIw8 Bells ringing followed by a steam engine whistling blowing as a crowd of people talk in the background +Y5rh5-MCjqq8 Snoring and heavy breathing +Y5t6tSW0yT40 Short spraying that stops near the end +Y5xC4hkAWiao An engine running and then revving +Y5ye0X5saadg A man speaks as helicopter blades spin followed by loud explosions +Y63KW_EQ72yU Very loud explosions with pops and bursts of more explosions +Y67BsqRkh-dU A toilet flushing as music is playing and a man is singing in the distance +Y6BJ455B1aAs A whistling and then an explosion and crackling +Y6CDl4CqOgMg Audio feedback followed by an animal breathing then camera muffling and footsteps shuffling on a hard surface +Y6NBPiArs2-w Repetitive sounds of gunfire +Y6Nvu6EcpdE8 A man is talking then an engine turns on +Y6OlHuvJR_Dk Helicopter flying overhead sounds +Y6Pywt0f_NFY Water trickling as a machine motor hums followed by water spraying from a shower +Y6TO9PEGpZcQ Sirens of an ambulance and fire truck traveling and an engine idling +Y6ZFU4PqXmoI Humming and vibrating followed by rustling and splashes with a man speaking briefly +Y6ZwYgzcN6Is A woman yelling and laughing as ocean waves crash and a group of people shout while wind blows into a microphone +Y6aWnK1GyeJY A baby cries while electronic speech occurs in the background, an adult female speaks, a 
sneeze occurs, and an adult female speaks once more +Y6cS0FsUM-cQ A person speaks and a cat growls +Y6cyKp3EDm-0 Birds are flapping wings and making noises, and a man talks +Y6dLkgq9EKPE An adult male is speaking and a bus hisses +Y6eX6bJOFftA A motorboat drives in the distance as people talk and rap music plays in the background +Y6i5eQOpFk_U Faucet water flowing as plastic clanks and a man speaks followed by another plastic clank and a kid laughing in the background +Y6pssFJ0m-kU Hissing occurs, a bird chirps and sings, and people speak briefly in the far distance +Y6ukYSXzfEgQ Pots bang then a faucet turns on and water flows +Y7-HCqJFwHoI A person types on a keyboard at varying speeds +Y77nElZGi5NU People start cheering and hollering once a gun shot is fired +Y79XDcI6xZm0 A man speaking on a microphone as a crowd of people chant and clap while wind blows into a microphone +Y7JWHbs3gu1w A train running and the horn honking and bells chiming +Y7MLERaOgK_Y A small girl sings with vibrations of a sewing machine which stops +Y7P0N61TVOxE A boat engine running as water splashes then cuts off while a group of people talk in the background alongside music playing proceeded by glass clanking +Y7P6lcyeDKNI A bicycle pedaling on dirt followed by a twig snapping then a man talking and laughing +Y7QN3lwOzfdg A woman speaks then a man speaks +Y7RMpCCkQks0 Repeated ripping occurs, then sharp tapping in the background +Y7WkB6pflr6o A woman speaks continuously +Y7XUt6sQS7nM Several animals are bleating and the wind is blowing, while a group of people talk in the background +Y7XXSOzDQ2z0 An engine throttles and clanks and then suddenly accelerates off into the distance +Y7_smJ8VbfSU A woman speaking +Y7bO0AJI-ihs Horse feet clopping and an engine running +Y7cHRSfbp7tc People talk and rustle then knocking +Y7fmOlUlwoNg A machine makes stitching sounds while people are talking in the background +Y7upINC4seBw A male speaking while engine noise in the background +Y83j4GgHXTLE A man laughing followed by a child screaming then another child laughing before a kid talks proceeded by a man shushing and talking softly +Y86dNVnTwH6U Rustling and then a machine operating +Y8BPTQO_cx7E A man is speaking followed by applause +Y8DLcBdC5UrE Whirring followed by a sudden object clanking +Y8DQfjqPCTI8 Men speaking with insects buzzing +Y8F-ndyrEWJ8 Sirens and speeding cars along with police chatter +Y8GHLfJ6y6zA A slide whistle followed by typing on a computer keyboard and a computer mouse clicking as a man is talking while woodwind music plays in the background +Y8IdCiapDYCU People are laughing and a small dog yips +Y8OTd45_6cvY Footsteps and intermittent spraying +Y8VOibo9Q_Dc Chirping and water splashing lightly +Y8ZH_PoK0clI A series of doors slamming open as footsteps shuffle and plastic clack while a woman speaks +Y8Zo30kV5aiI Vehicles driving by as emergency sirens sound +Y8b9z7N25DmU Bus coming to a stop and door opening +Y8ipe6b1LwHQ Porcelain dishes clank followed by metal rattling +Y8nUqSYC66mI Applause together with speech and hollering with running water in our he background +Y8o-Y4QP8LWs Pots and dishes clanking as a man talks followed by liquid pouring into a container +Y8ycflE3dIHw A train running on railroad tracks followed by a train horn honking +Y9BGLAUSF0sk Machinery running +Y9BukzlPNqC8 'A power tool vibrating with men speaking +Y9E8BmPZ9mWc A truck engine is running with people talking in the background +Y9F3sutgYTvo A man talks loudly followed by a baby crying and female voice laughing and talking 
+Y9HVgYs8OOLc A male voice speaks and a bird coos and flaps its wings +Y9MgGaTbmc6g Car accelerating and skidding on tarmac +Y9PN4gyxpH2M An adult male and an adult female speak, while rustling and metal scissors cutting paper occurs +Y9U8COLzEegs Water pours, a horn honks, and a man speaks +Y9XqkKuTqEOM Wood brushing followed by a door creaking then wood clanking +Y9ZZHvwaH-CU Gunfire sounds with video game sounds +Y9_YfTz8cnFY A train emits the steam whistle sound with a hissing noise +Y9b6RqajfAmw Pigeons cooing and flapping their wings +Y9dLLsZVRSZI Humming and rustling from a large engine followed by two honks of a horn +Y9hxFqltp3xw A woman vocally demonstrating something, followed by spraying +Y9ucb5HYO8ps An inhale then a burp with girls speaking and laughing +Y9vZDsGjyh5M An engine running +Y9xoYx3lTJ9I Wind gusts as a motorboat speeds by with water splashing, then people nearby speak +Y9z2OwpftxUE Continuous thunder +Y9z8XIRyUq9Q Woman speaking with a southern drawl +Y9zstu_IfAm4 A truck is traveling on the road, and the driver is honking the horn +YA0E_UiD-fR4 Loud beep followed by engine noise +YA2mcp0N__7U Mid frequency of multiple people in the distance talking and yelling, followed by coughing +YA61Mry8zBwE A man speaking over an intercom followed by a crowd of people talking then applauding +YAFgGoY8Ihhg An emergency siren going off as wind blows heavily into a microphone +YAI1OweEW8C0 Rain lightly falling while water trickles and splashes followed by wind blowing into a microphone then thunder roaring in the distance +YAJtNitYMa1I Food sizzling and oil popping +YAKHZMg9ba30 A man speaking as a horse is trotting while a man talks then laughs in the background +YAMQei29haCw A power tool drill running before turning off and on as a man is talking and wood is clanking and scrapping in the background +YAR8-MVl_Mf8 A man yells, slams a door and then speaks +YARFFw0e_jig A series of distorted burps followed by non-distorted burps +YAUJPx81qKtY Insects buzzing as a man talks and birds chirp in the background +YAUmY0YRAFQE Sirens blaring passes by and then diminishes in the distance +YAWGnTI0e2Fs A toy helicopter flying then powering down before powering up +YAagLJkfrFMk A toilet flushing followed by a person breathing then metal clacking and a toilet flushing +YAbplcXwXnvE A young woman talking as plastic crinkles and crumples +YAf4a-9rcnP0 Two popping explosions followed by footsteps running on concrete followed by compressed air spraying +YAgaiowyYt88 A door pounding and rattling +YAgh2EKINlSw A man talking followed by two men talking then a vehicle accelerating as rain falls on a surface +YAizmnCDlXos A high pitched bell ringing continuously +YAj_VMUSNjNM A vehicle engine revving and accelerating +YArHiac57pVk Men are communicating followed by a ticktock noise +YAtkD-3GjXMw As music plays in the foreground, faint machine gunfire occurs in the background +YAxd__X2rixk Clip clops from a horse +YB-gTt3_rceQ A kid talking followed by a group of people talking then a man speaking proceeded by a young girl talking +YB3O476LeuXY A motorbike engine running idle +YB4SZwi9Ce3o A man speaks with a revving engine and clicking that stops +YB8rdur4aams Rattling of an engine with wind blowing and a horn honking +YBA-lFjpzad4 A man laughing as a motorcycle is accelerating followed by a person chuckling while music plays in the background +YBDpU2Qh77NE Birds chirping in the distance followed by people speaking and a camera click +YBGEMgl1xjac Insects chirping and clicking +YBL8ksJ0sTXk Ticking 
and humming from an idling engine followed by a man speaking +YBLMWD6fxhpo A walking in the grass and a pig oink sound +YBMayJId0X1s A baby crying and a man speaking +YBOB65Nd0pXo A helicopter engine running idle as wind blows heavily into a microphone +YBQ-r9mEHssU Running through grass and leaves, metal clinking, then a woman laughs +YBUAPM4D3-h8 A child speaks followed by a woman speaking and birds tweeting +YBXxlqaDvdaA A man talking as water splashes and wind blows heavily into a microphone +YBZCEDkx37rI A vehicle engine revving then running idle followed by cloth shuffling +YBa92IrXFvJo An idle motorboat engine running +YBlbGXalLNVU Water moving with male voices soft in the background +YBn4lc01q9vE Water splashing with a woman speaking +YBoe3MeEpn_c A woman speaking click of moving dishes followed by spraying of a can +YBrPFQDr99Gg A crowd applauds and whistles, then an adult male speaks +YBvw2gv1fcZw A plastic clack followed by young girl talking then another a young girl burping proceeded by both girls laughing +YBwnGxJD9xh8 A man speaks, and a goat bleats +YBz9Y5nZK3eo Fast and loud typing on computer keyboard +YBzHTqyX69pI Rubbing against wood with birds cooing and woman speaking briefly +YC4JwGJQ2dUA Male yelling loudly and forcefully +YC5kmOK_l4jc A young girl talking as a baby is laughing +YC8kR19NvynA A male speaking over a microphone +YC9NC7wJ7C3w A female speaking +YCBwXKOpJY_o A woman talking followed by a kid talking while birds chirp in the background +YCM49C3RkzV8 A woman is speaking +YCMNlIW6Lkwc Camera muffling followed by a popping explosion as a group of people talk and silverware clacks in the background +YCMUuelJFJ7Q Music plays with low ticktock sounds and a baby cries +YCO6-i8NLbeo A man talking followed by a goat baaing then a metal gate sliding as ducks quack and wind blows into a microphone +YCYUlLTKoa1Y Humming followed by a woman speaking and slow clicks of a sewing machine +YC_ga5m6nOhI A steam engine running on railroad tracks while a steam engine whistle blows several times +YCbe2B6ohBpw Ducks quack with blowing wind and faint splashes +YCchRf2jq6fc A young woman talking +YCeRoaEcqUgM A speedboat is traveling across water followed by strong wind noise +YCefFMA3klxk Humming from a truck with a blowing horn +YCfxWJ1Qoufg A man speaks before a toilet flushes +YCh0LMmhBUg4 A man is speaking with natural outdoor noises, and a child speaks +YCvNAwby6Xos A man talking as a sewing machine rapidly operates and hums +YCwxgQS3SXic A sewing machine operating +YCxaPpRJRkn0 Footsteps shuffling as a person snores +YD1Sy7kRoaR8 A woman talking over an intercom speaker as a crowd of people talk in the background +YD2Xc_jZllDY A man talking with a dog barking in the distance as wind blows into a microphone +YD4s5aHrsBgs A person whistling as music plays in the background +YD96OO7nYYsg A muffled vehicle engine accelerating then revving as vehicles pass by +YD9GHUPGWsV0 Sheep bleat and a woman speaks +YD9tinq3RMpU Outdoor noise from a water vehicle as people are talking +YDAN1t9ukkg0 Typing on a keyboard +YDNtF_mGzQes A vehicle engine running and children speaking +YDc2WEiRk0rA Continuous spray +YDjKGzOe_COc A young girl speaking +YDlWd7Wmdi1E A man speaking as monkeys scream and dogs bark followed by birds cawing in the distance +YDn3buZWMzwY Someone snores and others speak and laugh +YDrCm-HpX67k Cracking with some rustling as distant birds chirp +YDt53UZgyznE A baby cries, and a man talks +YDzKjogSVOLM Ducks quacking and birds tweeting as a crowd of people talk while a 
rooster crows in the background +YE3D_z0aoUEg Ambient horror music plays as birds chirp and frogs croak +YE3Q1jfTeuWs A baby cries quickly followed by heavy breathing and rattling +YE6FH_xp3I54 A man speaking as birds chirp in the background +YE9zN3-C64KE A woman talking followed by a pig oinking then fabric rustling before camera muffling and footsteps walking on a hard surface +YEBCH7TPgiPc An engine revving +YECw5Yf7QoMo A man talking in the foreground as another man talks in the background followed by a crowd of people applauding +YENTi8Sn4vdM A child and a women speaking with water running in the background +YEQVWhHmT_cE Fabric shuffling followed by a man talking then a series of glass shattering and a crowd of people laughing +YESjMIqrvRj4 Rain and thunder in a storm +YETb9EIQOMAA A woman is speaking with low speech from a crowd +YEUZaxaWqhwg Wind blows and people speak with some rattling and hissing +YEY4p0_NJVQs A woman speaking on a microphone +YEYTz1LPDHsc A vehicle door opening as birds chirp and vehicles pass by in the background +YEbpOXac13yo A man talking followed by another man talking in the background as a vehicle drives and other vehicles pass by +YEcihYbSlyck Vibrations and rattling with distant hums and honks +YEfk5kdn9lR8 A young girl speaking followed by a loud bang and a scream +YEp72tyiL3as Thunder pounds as rain downpours and then fades away +YErxgH5g3Kx0 Clip clops of a horse with buzzing and a neigh +YEvZ3jOMYWxk A woman delivering a speech +YEzWEO2WD_MM A whirring followed by an object hitting a surface +YF-47fRplQEc Wind blowing followed by a distant goat bleating and women speaking +YF7QtqKtllK0 Repeated snoring and then a gasp +YFA11v4SmdBc A man speaking followed by a series of whistling +YFDwK7T1JO_0 Two men talking followed by plastic clacking then a power tool drilling +YFJkvAMLmejY A young man and a girl talking followed by a horn honking then a group of people laughing and a person clapping +YFKaJsvcyHTk A baby crying loudly +YFL8KTgMGrN4 Vacuum cleaner sucks something. 
+YFR7BDRhMATo A train running and the horn blowing +YFXdoNvmrYxo A young girl speaking followed by a man speaking then a young boy speaking as birds chirp in the background +YFc9pG54DDJM Water flowing down a flushed toilet +YFeHndzYAUkg A power tool drilling as rock music plays +YFf8bCCJfVX4 Rapid and repeated gunfire and then male speech +YFfUqv0Vv3ME Male speech and footsteps +YFhimNYClv40 A fire truck sounds the siren as it travels +YFi4-IqJo2xQ An engine hums and rattles as it accelerates +YFlk-X0gwjF4 Birds are chirping, an adult male speaks in the background, and an adult male speaks in the foreground +YG0IsabU5hn4 Heavy, continuous wind +YG3YO2unWz7k Sputtering and humming of an engine +YGE1aZSnPr2Q Brief silence followed by a man laughing +YGGgQR7aIofY Musical horns from an approaching vehicle with a humming engine +YGIOApFAWDOc Birds chirping and rustling +YGMP8m09j5vk Birds chirping in the foreground and background as a dog barks in the distance +YGOD8Bt5LfDE A train chugging, a child speaking, and then a female speaking and laughter +YGPj8h-WcjWs A muffled bus engine running as a woman speaks over an intercom +YGSHcgY6ATkQ A man is talking as a person types on a keyboard +YGkb4f6yodPE Hard planks click together and an electric saw cuts material +YGuizRlAQ8qQ A power tool drilling +YH-vTZh81qAU Faint low vibrations and humming +YH7-orYrKBeo A child cries continually, while a motor vehicle engine runs quietly and several adult females and adult males speak +YH7rd9bZtbgc A bell sounds its horn +YHUwXtfYRFwk City ambiance with multiple cars driving by and a large vehicle engine revving then accelerating +YHVz-FJBf_iM A man talking during camera muffling followed by water gurgling in a drain then a toilet flushing +YHZ9O6sc7cLA A woman is speaking followed by a dog growling and barking +YHdPSebdDxe4 A man is speaking followed by a beep and burping sound +YHdxfbpnd2-8 A man speaking then whistling +YHeEa1GZpUGI Continuous gunfire and shells hitting the ground +YHg6HxylRGDo A two-tone emergency vehicle siren is blaring, and vehicular traffic is present in the background +YHkbCUN4V3TU A child screaming and laughing as woman talks and giggles followed by thumping on a wooden surface +YHqnSyliKTKA Humming of an engine with people speaking and a horse neighing +YHqndxoujCYI Clock chiming with a laughter in background +YHxZADVzNIqs Water trickles as birds chirp then drums begin to play +YI4HpYGMMsz4 Man speaking giving directions followed by tapping on table +YIFRmbxWK8u0 Mechanical click clacking +YIJ6pm5Kns8A Clicking followed by a burp and laughing +YIKnx3hJv1bs Compressed air spraying followed by a power tool buzzing +YIPfaRF76gVU A siren is wailing and an engine is running followed by a male voice. +YITlqMkR5alY A cat meowing followed by a person screaming as a man talks in the background and wind blows into a microphone +YI_8KqxP5xOA Compressed air releasing and scissors snipping +YI_vN_BFUr0Y A railroad car sounds train horn as it approaches +YIdBDl9Wr51A Male speaking with sword clanging followed by a females giggle +YIhvXtS9-IxM An idle helicopter engine running +YIsUG5SKWNZA A woman whispers, then a baby cries in the distance. The woman called, and a man speaks. 
+YItS07xtdi4s Popping along with swooshing occur then a bonging +YIvfaKPDWC00 Sirens ring and a vehicle speeds closer +YIvg_q4t-3w0 Female speech followed by a thumping noise +YJ0yeFeKvIt8 Gusts of wind blowing as leaves rustle and birds chirp in the distance while wind blows into a microphone +YJBWJQCS4SvA Wind blows and birds chirp with ocean waves in the background +YJC2ZrXzCX4Y A group of children talking in the background followed by a woman then a man talking as a person is snoring +YJHhEjsAkZoc A train honks for a long time as it drives by +YJQz40TkjymY Typing on a computer keyboard +YJTHMXLC9YRs A person heavily breathing as wood creaks and ducks quack +YJZloTOdIY_c Horses neighing and stomping on the ground +YJdFmMw0zyKA A child speaking followed by a seal howling followed by water splashing then two women laughing in the background +YJfaj4P3us9M Dialing and clicking +YJhGp7HmRQxg Humming of engines followed by the neigh of a horse and birds chirping in the background +YJmWaRt8-u0s An engine chugging with muffled speech +YJnSwRonB9wI People laugh and scream as water splashes and wind blows +YJon_DEFqsfM Birds are chirping, ducks are quacking and other bird whistling noises +YJp64Whpr3BA Rustling and then a sewing machine sewing rapidly +YJsoBpL86R5U A man baaing followed by a goat baaing as a child and a woman talk while metal pots clank in the background +YK-7Y8yhcUiw Repetitive snoring +YK03ydb1uaoQ Continuous long snoring +YK2kIOBeCfuo A crowd of people laughing and cheering as a man speaks on a microphone +YK8-b0VtNOqA A horse vocalizing and muffled speech +YKJKHDKKW3XU A liquid gurgles as it drains +YKJhGuhNHToA A few seconds of silence then a rasping sound against wood +YKOBkbROPv4c A distant engine hums and gets louder as it approaches with squealing tires +YKSHpYhuTotY Insects buzz and men speak +YKVAIaRPry24 Buzzing and scratching with some light banging +YKVbmN9ZRg5Q A train horn gets louder as train approaches and passes by +YK_Vre_-4KqU An airplane engine revving then running +YKel-hfZ_9h8 A child laughs, a man speaks, and people laugh +YKnXNy5Q6YS4 Men speak as insects buzz +YKnsKf9KoNds Male speech with another male speaking on the phone +YKtTLsveexOg Slow humming and rustling along with quick sewing machine sounds +YKtinboYbmHQ Tires squealing and an engine revving +YKvrcRMfFzOE A helicopter flying as wind blows heavily into a microphone +YL2dyilgQ8iM Wind blows followed by rustling and scraping +YL6rnV0oNIII A beep and clicking noise repeat. 
+YL8dA-2Lu2hY Someone is whistling +YLB6CZ0x-kns A vehicle engine whirring followed by some knocking and a release of brakes +YLBe33dw9ezg High frequency humming +YLCwSUVuTyvg Glass doors rattling and sliding shut followed by a plastic clack +YLF6x7B0Ppvo A race car races, and the engine accelerates +YLKhokVsJhN0 A flock of sheep baaing +YLP_DzNUkAKY Indiscriminate speech and then hissing +YLVvS3s9dFKw A man talking as metal pots clang while water pours from a faucet followed by water trickling +YLWng-4PDzPM A rustling followed by a soulful whistling +YL_CNz9Vrtkw A man talking followed by a crowd of people cheering and applauding +YLbken4JCr94 Rain and thunder +YLs1zyPjs3k8 A mechanical beeping tone followed by static, then the return of the beeping +YLs2vrr9TamU Heavy machinery operating followed by wood crunching and cracking as wind blows into a microphone +YLvMA1Wcgu3w A man speaking as frogs croak and crickets chirp while a motorboat engine runs alongside several plastic clacks and clanging +YLvhvAA11oxE Loud bustle and screech of traffic, with some voices in the background +YLxu-3_h4kc4 A series of belching followed by a group of kids laughing +YMBP4RcnwGZw A woman speaks with others speaking in the distance and wind blowing +YMOxddxW5PXs A man is speaking as food is frying +YMPLZUg89y5U A large truck engine running idle as a man is talking while wind blows heavily into a microphone +YMSziND26UTA Insects buzz faintly with distant chirps of birds and blowing wind +YMTIF_l_8d4Q An infant crying followed by a child speaking in the background then a young woman talking +YMTaLknnq4wc Whistling occurs, followed by an adult female singing +YMVGhC-xB79s A baby talking and a male speaking +YMdlEswBfZMQ A group of children and a woman talking followed by a young girl talking +YMe4npKmtchA Water spraying and gurgling as a man speaks and a crowd of people talk in the background +YMjSegUnQXr4 Rattling is followed by pigeon wing flapping and vocalization +YMj_BO-iK1G4 A sewing machine operating followed by metal clacking and gears cranking then scissors snipping +YMkbP_8zJwXU Water trickling then splashing as a bird is chirping and wind is blowing into a microphone +YMtK8L8gXRrI Footsteps shuffling followed by a cat meowing then a toilet flushing +YMvHpNzDpC6Q A motor engine is working far away and some boys talks then suddenly a loud eructation followed by laughs +YNDaVSIJaXVs A jet engine idles as the wind blows +YNJEPbGVBJIQ Male speech and an engine revving and a honk in the distance +YNX0gR9Eebp0 Water splashing items floating in the water something doping in to the water women voice +YN_s9F4CI_98 Birds are cooing, and wings flap +YNeWW30WZjPc A dog growling and barking repeatedly +YNeZerAPXR-A Laughter and speech and a clap +YNi3dIj90Oa4 Loud gunshots followed by two men speaking +YNlKlRKz8OKI A woman speaking with a chuckle followed by a bird chirping +YNmmbNqmsPaY Humming and rattling of an idling engine with some bells ringing in the background +YNtQiduPRiRg A man laughing followed by a girl laughing as a power tool device operates +YNwoBDrTlbTI An animal makes squeaking noises with buzzing background sounds, and a dog barks +YO90Qy2xG6oA A bird cawing as an infant is crying +YOFVzrakJhbw The wind is blowing, and an adult male laughs and animals bleat +YOMGHnJV0l2U Metal scrapping on wood followed by wood sanding then more metal scrapping against wood +YOTLtzk0W4zg An engine running and then a male speaking while the engine continues running +YOUUckswAaNI Hammering and then a man speaks 
followed by rubbing and then a second man speaks +YOVQMFBeCHq0 An emergency vehicle two-tone siren is blaring and fades, and hissing is present +YObWjGBJF_94 A machine is in use and making whoosh and vibrating sounds +YOmmPaIAXN0s A man speaks with some light cracking +YOpiWMltpj44 Ducks are quaking along with geese honking +YOr7umk40TZA Vibrations from a humming engine +YOt0bN_hz2ec Train horn blows followed by clickety-clack of rails +YOxUVcZmeiyI A clock ticking followed by plastic clacking then cuckoo bird cooing before bells chiming +YP12nvSpKXcs Insects buzzing as a bird is chirping in the distance followed by camera muffling and a person talking then sniffling as footsteps move over foliage +YP4qd8uodw_M Low speech from a man followed by machine making a beeping noise +YPLHXGDnig4M A man speaking then hissing +YPMMdAKZxI_I Burping and laughing from girls with distant voices over a television +YPMMkPq5jJXY A person belching several times as a group of people laugh +YPO8Nu3F8mkA A distant explosion followed by steam hissing and fire igniting with a person talking in the background +YPRUfwpmYwJ8 Male speech and spraying +YPTyFYxXdut4 Water is trickling while a woman is speaking +YPVvi2SDOjVc A vehicle horn honking +YPWjEfOkb6ro Water trickling and dripping as a crowd of people talk in the background +YPYP-r0nvbFk A man talking a sheep speaks while birds chirp in the background and wind blows into a microphone +YPZBUdlKwX04 Water is falling, splashing and gurgling, a crowd of people talk in the background, and an adult male speaks in the foreground +YPb6MqpdX5Jw A light rain with gentle thunder +YPg2cWEnEEvc A series of burping and farting +YPkmpxrsidZM A crowd applauds followed by a woman and a man speaking +YPtW0cZVprJQ Snoring is ongoing, and an adult male speaks +YPuLuZ_TXv-0 Typing on a typewriter +YPvWI4p74UOs A man laughing followed by another man yelling in the distance then wind blowing in a microphone as a motor hums and birds chirp in the background +YQ0anPAIkfBE A woman talking as an infant is crying followed by a woman laughing while a man talks through a television speaker +YQ3vkJMVMbC8 A toilet flushes followed by another toilet flushing +YQ87LBiwJjTE A woman speaks as food sizzles and music plays +YQARuiRtfy-k Drilling with music playing and a man speaks +YQHfyKaOHSz4 'An insect buzzing as hollow wood clanks twice followed by a person gulping and a frog croaking proceeded by another insect buzzing and guitar music begins playing +YQKHpSAAjakY A man talking followed by an idle motorbike engine running +YQOmV7O9mFwg People laugh on a bus while brakes squeal +YQRtuOWWya30 Food and oil sizzling followed by metal rattling as a man is talking while guitar music plays and water trickles in the background +YQTSKjweEWew Wind and rain sounds, then a man begins to talk +YQoEal_hKz4Q Several rapid bursts of gunshots in the distance followed by nearby gunshots and a man speaking over a radio +YQt0_xTadAT0 High pitched croaking of frogs with some rustling +YQv1HXaT-28U A pop and rattle occurs and then a child giggles +YQvATUKXYFBs A train running on railroad tracks while a train horn blows as railroad warning signals ring +YR4fXcbWFhJg Distant clip clops of horses with a man speaking and a woman screaming +YR8bHTHnF8j4 Vibrations and wind with loud humming of an engine +YR91bUbtKrRs A woman and a boy speaking in a foreign language then a baby cries and the boy laughs. 
+YRNBoH2LHQEM A crowd applauds +YROootH-mtEI Water flowing down a river +YR_g4RpU9mO0 An engine running and then accelerating +YRdC8cviN6Bs Light rainfall together with rustling +YRfGapDlAYoQ A person whistling as a crowd of people talk in the background +YRk-ujWKzPuc Continuous and steady rainfall +YRp4Ct_TQvAM Rain falling and male speech +YRrmBGjJqlEo Typing on a computer keyboard followed by a computer mouse clicking as a man sniffles before talking +YRtenf2XSXRc Low humming of an idling and accelerating engine +YS0SQyFXbqF8 An animal gurgling followed by a bell clinking as a kid yells and a man laughs then speaks +YS0YE96w0YRk A man speaks and people laugh and clap +YS8k47ME-YT4 Sounds of a thunderstorm with heavy rain +YSCow4mpBsGY Woosh of air and loud snoring +YSE_3nszEw7o A steam engine running on railroad tracks as steam releases and hissing while a man talks in the background +YSGaIvgwwWSE Rain falling on a hard surface as thunder roars in the distance +YSL3wB5sDcdw Wind is blowing while a vacuum machine is in use +YSNIaYhri76w Oinking pig and barking dog followed by man speaking +YSNy_axSCoyw A clock ticking +YSQHYl2Kp5ww Music then sizzle of frying food while male speaks +YSZ6CcXINiiE A man briefly talks followed by a loud burst then laughter +YS_3aeOvniZc Car going fast with changing gear +YSePTNAN7s-w Speech followed by a toilet flushing and more speech +YSmdj6JFB9MQ Music plays and an adult female speaks, a flush occurs and water splashes and gurgles, and a young female speaks +YSoO1HhaEc9Q Humming and vibrating with a man speaking and small oinks +YT32kii824pA A man talking as metal rattles then clanks followed by several metal objects falling over onto a hard surface +YT9_ep-3BZDY An adult female speaks, thumping occurs, and paper crinkles while in a quiet environment +YTOaQMYc79Mw An engine being started repeatedly +YTQr9v-PQOc4 Humming noise then sneezing and coughing +YTSdAJWJ-tW0 People speak with passing traffic +YTSnq6n8tElo Metal clinks five times, a coin drops, knocking on glass occurs, a coin drops again, and a small child speaks +YTWOgvDaDqlU Wood being sawed by a machine followed by a heavy machine motor running while sawing wood +YTaQKhIRwii4 A quick repeat of applause followed by continued applause +YTd2EEDdFlRY A man is speaking as music plays followed by musical instrument sounds +YTdl9SmBbRnA A large motor vehicle engine accelerates and then slows and idles, while an adult male speaks in the foreground +YTgxst7Ft9js Sanding and scraping followed by a man speaking +YTtRtURWVYBE Bells chime and ring +YTwR8BA6buMI Two piano notes and percussion are repeating rhythmically to imitate a ticktock +YU3CAjsm1sec Cats meowing loudly and then a male voice +YU5ij0M7T-hk Footsteps walking on a hard surface before a door slides open followed by a door creaking then a door slamming shut +YU90e2P9jy30 Multiple basketballs bouncing on a hard surface and shoes squeaking as a man shouts in the distance +YUAmDLPjNyMg An aircraft running as wind blows into a microphone as a person sniffles +YUCy1BEx8jBE A man talks while water burbles +YUE3XnVFodMI Applause with distant speech and cheering +YUQtBt6CQpwg Short rapid vibrations of a sewing machine +YUV1kdjwpy6U An engine running and ticking and then speech +YUXGzbBGbqAA Footsteps shuffling followed by fabric slapping a hard surface as a person heavily breathes +YUhCzD6EBJBU A power drill operating with a man speaking and a click +YUjje3lSabsg A person snoring +YUmNrhFKpWIY A vehicle accelerates and then slows, then speeds up, 
then stops +YV4PLSw_WzVw Tires squealing followed by an engine revving +YV8A0VRGdgwM Goat bleating and making tickling noise +YVE6Ku0-ucUM Men speak followed by a small burst and people laughing +YVMsbrcHPBfk A goat bleating repeatedly +YVOXl8iR-HnI A jet engine hums first distant then louder as it passes by +YVQnmlf2OsUg A high pitched engine running consistently with intermittent scraping +YVZLZ08k3YeA A man talking as a person snores +YVeCSHwtkBZU Police sirens going off +YVjSEIRnLAh8 A woman speaks with food sizzling in a pan with some chopping +YVkbp8VmL3pM An infant crying as fabric shuffles +YW4GEwnXc9tQ A woman speaks an frog croaks followed by wind blowing as others speak +YW7OJevEgq7w Dog yapping and panting +YWCYfCfW9NA0 Blades of a helicopter spinning and getting ready to take off +YWHRnyGXcdy8 A baby crying continuously +YWLzzpzOKtnY Male speaking and an insect buzzing +YWOywdRmySs0 A man talking followed by paper and plastic crinkling +YWU3qB7gf6ao Digital beeping and clicks +YWUpeplQr3A4 A power drill running followed by drilling noises and human screaming +YWUyeFOyKIg0 A racing vehicle driving in the distance then driving by as a crowd of people talk in the background followed by a horn honking while wind blows into a microphone +YWWkhzcmx3VE A duck quacking +YWmDe2xbnSY4 A series of bursting and popping sound effects +YWq4OD3olO2w Children screaming and speaking and a female speaking +YWqXFAY4k79s A person laughing followed by a girl shouting as rain is falling on a plastic surface +YXIooZl1QdM4 Several loud burps +YXJba7pTbpD0 Air spraying followed by fire igniting then loud audio static followed by pressurized air releasing +YXL8JV9qXGLE An infant talking as a woman speaks while a group of people talk in the background +YXPebkNzsnRI A person whistling followed by a young girl laughing as a man talks in the background then another person whistling +YXQxIXaX_7M0 Water flows and people speak in the distance +YXWw7ZM1c_QA Sounds of a mechanical clock +YXZTt1xdK8uQ An engine running and water splashing +YXamQAY_WXRY A man speaks while water slaps on a surface +YXf5LjaE_JQ0 A man talking as plastic rattles while a truck engine runs idle and vehicles drive by in the background +YXi6V0LGvqoo A dog whimpering followed by a dog growling and barking as metal jingles and footsteps squeak on hard surface +YXplKBvZaHXA A motorbike engine driving then accelerating as a man is talking +YXrJcmftCY04 A crowd of people shout and give applause +YXz56Q2Q5j5c A motor humming as wood is being scrapped and sanded +YY3lNEe-ZGF0 Repeated ticking culminates in a sliding click +YYEYeQ0lIkBQ A man shouting and hollering as ducks quack and chirp while wind blows into a microphone +YYH4qi8Ul6v0 A man talking as an infant is crying followed by a man humming +YYIqpIjjee00 A toilet flushing +YYNDKuNINDOY A fire engine with a siren fading then another loud siren +YYQGW5AwDOIo Someone in a crowd whistles while engines rev in cars going slowly by +YYQSuFyFm3Lc A train running on a railroad track followed by a vehicle door closing and a man talking while a train horn is blowing and railroad crossing signals are ringing in the distance +YYflmW68gL4E Burping followed by girls laughing and speaking +YYk274Wr5iIE A large amount of water is splashing and gurgling, and the wind is blowing +YYqYCDis3EUA A swarm of insects buzzing as birds chirp in the background while wind blows into a microphone +YZ-SIyOChVh8 Thunderstorm sounds while raining +YZ0IrCa4MvOA Water trickling rapidly and draining +YZ1Cyj4N05lk Musical 
whistling followed by a man speaking +YZ3wDry8nnJs Splashing water followed by a girl speaking then scraping and spitting +YZ7yDwpdGelM Puncturing sound, followed by water trickling and a man speaking +YZBtgrP4vU_w Meat sizzling with insect sounds in background +YZNEZLlDVgrE Insects buzzing followed by a man talking +YZTYAQBnU4GM Birds chirping as well as some clanking +YZUmZgPL0ges Several large bells ring +YZY4aGEniU_E Food sizzles as a man speaks with music playing +YZYWCwfCkBp4 Wood sawing and metal filing as orchestral music plays in the background +YZ_smJ66Tb3c A man speaking as someone hisses while pigeons are cooing and bird wings are flapping +YZsTZ7jqbd9M Footsteps walking on dirt as a man is talking and birds chirp in the background +YZsf2YvJfCKw Water gurgling followed by a toilet flushing then a footstep shuffling on a hard surface +Y_9mgOkzm-xg A man is talking, and food is frying +Y_AcJVyToQUQ A baby laughs while a man and a woman speaks and laughs as well +Y_BSmz3SEW1w Pigeons are making grunting sounds and snapping beaks +Y_C2HinL8VlM Humming of an engine with sirens ringing +Y_GI7meqlYZk A cat is meowing, and a child is speaking +Y_YS5uKWoB6g A child crying and a car door closing +Y_duNX6Vyd6g A speedboat races across water with room sounds +Y_ezm-TpKj1w A vehicle engine revving as a crowd of people talk in the background +Y_iUX8CibElk Rain falling outside +Y_oKXrY5Ff0g A person speaking, followed by a number of people laughing +Y_w2pA1VeB40 A group of people laughing followed by a person farting +Y_xylo5_IiaM A young girl talking as a woman is talking +Y_z-bidQYVao A man vocalizing a high-pitch sound and then speaking +Y_z6pymOet7g A man talking followed by a toilet flushing +Ya0yXS7PmVR0 Ocean waves crashing as water trickles and splashes +Ya3GzZKxUTy8 A bird squawks followed by a dog whimpering +YaMhu5eMQAsI An aircraft engine operating +YaZAXO2WZn84 A train engine running as bells chime followed by a lawn mower running then a steam engine running while a steam whistle blows and a crowd of people talk in the background +Ya_Rjlu50TfA A person snoring repetitively +YajheseWZmmU A cat meowing as a person is kissing aloud followed by a man giggling during light audio static +YalaxBd_EEUc A man talking followed by a series of belches +YatmDP_fmK_8 Mechanical growling followed by an explosion +YawxrHOpt-sE A goat yelling as a crowd of people talk and vehicles drive by in the background followed by people laughing and someone clicking +Yazh_-OkQ-uI Animals are bleating and people are talking in the background, an adult female and an adult male speak, and an aircraft is present in the distance +Yb1PXsfgQw5w A few people laughing and giggling followed by someone talking and more laughter +YbA5zPFSFZAA Digital static and beeps +YbAqgL5dCQOE Rain pouring on a solid surface followed by a vehicle driving by in the distance +YbIV3bJZpkgA A helicopter flying in the distance +YbIiiIo20PsY A man talking as a swarm of insects are buzzing +YbJMMp6PLKqM A baby cries and a young girl speaks briefly +YbLZFtoWXYTA Water pouring into a plastic container followed by sand grinding +YbQNX7vDalQw Male speech and then sizzling +YbUTOsLXYyxg A man talking then another man talking followed by a group of people laughing then another man talking +YbX2vDaHL26U A race car is racing and skidding +Ybgbnu5YKTDg A man speaking through an intercom while a helicopter is flying before and after a series of gunshots firing +YbhlhcGONisM A man talking as fabric shuffles during a series of computer mouse clicks 
+YbmEF-c-M174 Quacking with brief chirping and speech +Ybpv_LneHmfU A high pitched airplane engine idles +YbygBWUkpaC8 A man talking as another man speaks in the background followed by birds chirping and leaves rustling while wind blows into a microphone +Yc0IggDOisOo Bells ringing with voices in background and wind blowing +Yc0V_HAul7rI A group of people laughing followed by a young man talking +Yc3nlaAkv9bA A man talking softly followed by a goat baaing and a cow mooing in the background +Yc6YJgZ3qzOw High frequency humming and vibrations +YcFHFVGOtp6g Buzzing continues getting louder than softer as people talk +YcFoXRmGgIME A train travels on the railroad and sounds the train horn +YcK2kSVR1d2o Clicking and rustling followed by explosions and several pops +YcN-oYKd-M4E Sheep vocalizing nearby and in the distance. +YcNARVD02-tw A man and a woman talking followed by water pouring and draining down a pipe +YcPiSd5nJLrI A group of people laughing and screaming alongside firecrackers igniting then exploding followed by a muffled explosion +Ycr0GiZr0TNY A spray bottle hisses as two infants laugh +Ycz0FSQDVBMw Hissing with people speaking and some rattling +Yd1tL-9BILy8 Pigeons coo with a distant humming +Yd6gu2w19YQo A baby laughs repetitively +YdJYO3RbBabE A digital beep followed by a man speaking +YdP5DbAzTl5M A motorboat engine running idle as wind blows into a microphone while a man talks +YdYZSKX7vuRI Snoring, a man takes a deep breath and speaks +YdYvL6uEMl6E An aircraft motor is operating with rhythmic whirring, then wind roars +YdZDgJzGtLLU Water flows and splashes +Ydkiwn2FdDVw A female voice and a duck quacking +YdlsiellSFf0 Water splashing and something buzzing by +YdmUOSyPXkUw Steam hissing. With light mechanical sounds +Ydxow2DcTrwk A man talking followed by another man laughing as rain falls and thunder roars in the distance +Ye2rScj9UyMs Light footsteps and doves cooing +Ye4ph6bIC5zc People are talking while a motor vehicle engine is revving +Ye6jSpvTvfJ0 It is raining with strong wind sounds +Ye9MWXS34o48 A woman heavily breathing then sneezing and then sniffling +YeJCaRgf1M20 A bell ringing followed by a camera muffling then plastic scrapping on a wooden surface proceeded by a clock ticking +YeNG6fEiAE8c Laughing and speech with a sheep bleat +YeRU-rABp8nk Men are communicating as a motorcycle engine idles +YeUecAF626A8 A vehicle engine running idle then revving as a crowd of people talk in the background +YeXj9OAik5cc A motorbike engine running idle as wind blows into a microphone and birds chirp in the background +YeYbFtxZmKL4 An animal is moving with clip-clop noise with speech in the background +Yek9Fsmm3xqk Wind blows as water splashes +YelztUCeNQvQ Subway horn blows followed by the sound traveling down the rails +YemGPabOePzA Speech in slow motion +YeqcdsdLz954 An explosion sounds with bursting noises +Yeu5bq0A3XVQ Burping and a man speaking +Yf2fSxfvmkZQ A man talking as a metal thumps followed by a power tool sanding as a metal pot clanks +Yf8WPf5F22xI A rapid ticktock and then a man speaks and sneezes followed by another man hiccuping +YfBYDJWChe5c A person snoring +YfGGYeXR_LS8 A man talking as another person whistles while water trickles on a hard surface in the background +YfK4QBQZ6i7w A hissing sound followed by laughter +YfPqj3nnwQOI Water flowing and splashing +YfWvWhLJ5Fow Footsteps running on dirt followed by compressed air spraying as wind blows in the background +YfYTZVxQ8LJk A woman talking before and after a young girl gasping then talking +YfmEft49sPfE A 
man speaks with wind blowing and leaves rustling +YfrOqlk0Wm5Y A man speaks with some clicks and then loud long scrapes +YfsBR7e_X_0Y A young kid speaking with a series of slapping on a hard surface as a baby is yelling +YfwhkCnOeyC0 A man and a woman talking followed by a bell ringing as a crowd of people applaud +Yfx4r_KuW6No A young boy crying as a woman is talking +Yg5l3Bz6lWnc Insects buzz and distant birds chirp +Yg6CY7qvu81k Music and a male speech +YgQMTOKsCIyk Ducks quacking followed by male speech +YgW7s3YAthpI Glass clanking followed by liquid shaking in a plastic container then liquid pouring and filling a container proceeded by plastic rattling as someone chews a loud +Yg_P29ucKj78 Slight rattling is ongoing while a mid-size motor vehicle engine runs fast and accelerates, and then gears shift downward +Ygbi6MxPf3hA Gusts of wind blowing followed by harp music then more wind blowing and birds chirping in the background +YgbtcDoh0q3c Rustling and scratching and then laughter +Ygf6H_MWCqjw Ducks quack as a stream burbles and low speech in the background +YggN4-K5AgoM A toilet flushes and water drains +YgkWd1HugK2w Pigeons coo and flap their wings +YglAeihz0NAM People talk in the background and the wind is blows gently, and two kids speak in the foreground +Ygr5Zss89yLQ An engine running and a female speaking +YgwQMkQmBITE Wood thumping and rubbing as a man is talking followed by footsteps on a wooden surface +Yh0M4RS8p_mo Wicked laughter followed by a baby crying +Yh3UhoHIMfpw Wind blows with some nearby rustling and distant passing traffic +Yh5_1pnkl_SY Water pouring into a glass container as a man faintly speaks in the background +YhDMHIDJdfDA Loud snoring repeating +YhFCmq9pCBbM A woman is speaking while plastic crinkling several times +YhGWarNR6xmg Continuous hissing at varying levels +YhJtOGmN_KVw Plastic crumpling and crinkling are ongoing, and an adult male speaks +YhV4bDCBDCy0 An engine running and revving lightly +YhVUmQfBIYe8 Some rustling with distant birds chirping and wind blowing +YhiJB_95IWiE A man speaking with some light knocking and high pitched scraping +Yhmd6pa2e_rs A bus is driving, and people are speaking +YhpDltmawxIM Water trickling onto a metal surface and filling a container +YhqPBcvex1VU A person talking softly before an infant cries +Yhrv6fwnmBkY A dog whimpers then a male voice laughs and talks as the dog starts to bark shortly after +YhuMLK0oA3L8 A man speaks followed by whistling and guitar music +YhxbmDeNSO6Q Vibrations from a sewing machine with a man speaking with several clicks and beeps +Yhzn_wGlzGpU A vehicle driving as gusts of wind blows and an emergency sirens sounds in the distance followed by fiberglass clanking +Yi1u_2eZYYlE A engine running briefly +Yi2yhbckq3p0 A car horn honks once shortly and then two long honks while another car honks +Yi6MQCm58zlY Screeching and male speech +YiOCpICiu4LA Birds chirp and a great number of insects buzz, an adult male speaks, and then a sharp thump occurs +Yific_gRalg0 Metal clacking sharply several times as water drains down a pipe followed by metal rattling +Yii3Geza3hAU A man yells in the background, and a sewing machine sews garments +YilspW7JRjAg An engine revs repeatedly +YinQOrxc_oZo Oinking followed by screams +YinSvboaSRwA A man speaking while power tools are whirring and running in the background +Yir1XTdyt4IY An explosion followed by a man shouting. 
+Yj0KvrVE_Oww A door shutting followed by a couple of men talking then a horn honking and wood clanking +Yj1AiqT5oHZc A man talking followed by several electronic beeps +YjOYvIISk--4 A man is speaking as tap water runs +YjXkLS_QzUrI Mechanical humming followed by meowing +YjYPU6aSDo88 Wind blowing with the distant humming of a jet engine +Yj_NSuPnx5LA Someone dials a push button phone melodically +Yjf4iyQPJSvk Water trickles down continuously +Yjid4t-FzUn0 A man is talking and snickering followed by a goat bleating +YjinJkonlrWc A toy helicopter flying as a man speaks in the background while wind blows into a microphone +Yjj2RyNDj7no Insects are buzzing, and birds are chirping +YjjHIINDfE1c Humming of a passing vehicle with a honking engine and wind blowing +YjjfUaMQaG1A A man speaking together with intermittent, brief drill operating +Yjlwe9jtu5Gw A person whistling followed by metal rattling in the background +Yjs4dr5JusdM A woman and a man speaking through a telephone speaker +Yk1QxQ4jJaEQ Motor sounds with male speaking +Yk4XyfaWVLEY A vehicle engine is idling along with low crinkling noises and birds are chirping from a distance +YkEP-BwMarf8 A woman speaks quietly during a louder, sharp crinkling sound +YkF1KWybdRpM An airplane is taking flight +YkHIe4CfaccQ A goat bleating +YkLYCjD6vWI4 A loud hissing and chugging growing louder +YkVYNXZd0MMY Humming of an idling engine +YkXjzsroVTtw A man is speaking as birds are tweeting +YkagkXkAVPNo A truck engine running and revving as several vehicle horns honk while a group of people talk in the background +Ykdflh3akyH8 Several puppies whimpering during cardboard scratching and rattling +YkgjNIDmO8a8 A ringing of a siren with passing traffic +Yktc_tJxw8sc A person makes sobbing noises +Ykx6Rj4MDIAw A cuckoo clock cooing followed by a steam engine running on railroad tracks as steam hisses and a railroad crossing signal bell rings in the distance +Yl5KdHAWwJCw A clock ticking +YlHh0SwUhP8U Metal clicking before a gunshot firing followed by a person sighing and metal clanking while wood clacks proceeded by another gunshot firing as ducks quack in the distance +YlJayhiVzl_E Motorboat motor and high winds +YlTJLvSvjUZk Whistling with music playing +YlTfNLKEy1RU Continuous snoring +YlVr-PxhZo8s Hissing and vibrating from an idling engine +YlX3k5p2I_g0 Men speak followed by vibrations and hissing with passing traffic in the distance +YlYhwCRX2wNc A woman speaks with dishes clanging and water running in the background +YlfAFQ0-wDJU Sounds of waves and strong winds +YlfO471Rn61k Steam hisses while machinery runs +YlgwpIImXCWA A man speaks during rhythmic cutting +Ylh801oHGtD4 An electric motor buzzing as a man is talking followed by a door closing +YljrL7Cb-jr8 A power sprayer sprays surfaces +YlmPMhs-9IYE Distant humming of engines with people speaking and whistling +Ylq9RvAA4mqY Sizzling food with some scraping and rattling as a man speaks +YlrKGCtSsAkA A toy helicopter flying as wind blows into a microphone +Ym8wV38lf2jg A man speaking over an intercom as emergency sirens wail in the distance and grow louder as a vehicle engine runs +YmGa2JgAiKV8 A young male speaking +YmJ6ZO3xEcgw A woman sneezing repeatedly +YmSF_FqBtRPs A telephone ringing +YmUGmCSNETcg Frying food with the click tap of stirring +YmVjub3o_IxE A man speaking followed by a woman talking in the distance as a faucet pours water and birds chirp in the background +YmW1EpJYcy_E A motorbike revving and driving by as wind blows heavily into a microphone +YmWqH2xwjkYA A baby laughs and 
women laugh and speak +YmYQrjcYNrW0 A large motor vehicle engine is running, and a motor vehicle horn blows repeatedly and then stops +Ym_NCf-q4Gn0 A vehicle is making a thudding noise +Ym_U506sf9p4 A woman is speaking as food is sizzling +YmaVYiednkSg A man speaking followed by air spraying while vibration rumbling grows louder +YmlnUJH4BQnk Female speech and rubbing simultaneously +Yn-JyOqYSLQM Metal clattering and rattling is ongoing, along with brief hissing, while many people talk in the background +Yn4VktYihtJU Heavy wind followed by yelling and cheering +Yn74IYuCe_ms A stream burbles while a man speaks +YnD1K1Zo0qrM Humming and rattling with rapid fire gunshots and loud bursts +YnLZeG9LaLgw A vehicle engine accelerating and revving as wind is blowing into a microphone +YnLtNjMimLE0 Water trickling and lightly splashing followed by an animal hooting in the distance +YnU-AI3Cmc3M Pigeons cooing followed by bird wings flapping as wind lightly blows into a microphone +YnaPgJvWTIY4 A motorcycle engine idles and then shifts gears +YndxkSQKxaak Engine sounds with a man speaking +Yne2DpKCIr4Y Waves are crashing as the wind blows heavily +YniwgMbB6tpQ A person snoring +YnlC4UI4hZ60 An engine being turned over +YnmLMLgWPmFM A motorboat engine running as water splashes and a man shouts in the background followed by birds chirping in the distance +Ynq0BF9zGkzg A person screaming followed glass crashing then two men communicating with each other as seagulls squawk in the distance +YnuZEAuAl8hQ Ducks quack and then a dog barks +Yo3mZR8OvPko A car accelerating and squealing on road +Yo7-X8DAToGc A vehicle accelerating and driving by +Yo7jW6Suyfbs Race cars are auto racing +YoN0IcZaHD_8 A man talking followed by a power tool drilling then footsteps shuffling and a man speaking +YoNHCc_izsDE An infant and woman laughing as water splashes and birds chirp in the background +YoOMtaqvQ3_M Helicopter propellers spinning and wind +YoZaEHkfh5Eg A series of electronic dings while a vehicle horn honks twice followed by a plastic click while a dog barks in the distance +Yo_3MDLl_aH0 A series of cannons firing +YoiIi6H83Y38 An engine revving inconsistently +Yoklu5ZJD_2U A bird sings followed by drumming +YonBZOH88OYs Air spraying several times as a machine motor hums in the distance +Yorgwzt45ojE A man talking as pigeons coo and bird wings flap +Yos_2U4xqTqw Wind blowing followed by heavy footsteps on a solid surface then a man groaning proceeded by two explosions +Yp9qRTh4BmSE A man talking as people yell in the distance followed by a man shouting loudly then a series of gunshots firing +YpCQEWAFGEjc A train moving down railroad tracks +YpHNMcX-9FDs Dogs whimpering then a bark and a growl +YpHYkWkZ4guE Steam followed by knocking sounds +YpI_kPedctoo A motorcycle engine is revving with low speech in the background +YpO8kbg9IJnc Metal squeaking then plastic clacking as a man is talking followed by a faucet pouring water +YpPLisQ_QXxw A car horn blows amid constant road noise +YpTJKJxaheI8 White noise and slow tick locking with brief coughing +YpWQeV08kYR0 Sirens ring as an emergency vehicle speeds past +Yp_BB_rJaF7Q A man talking while birds chirp in the background followed by a frog squeaking +YpaetCbEqp2w A child crying followed by a person blowing their nose during a series of computer mouse clicks +Ypaf0nyjg1Js A woman is speaking with sizzling background noise +Ypgq2KPX5_SA Paper is crinkling and crumpling, and thumping occurs +YptIksg9KEac A dog barks quickly +YpuZL08fzpXk A speech and gunfire followed by 
a gun being loaded +Yq1ivQ_2fddk Men are communicating with a hissing noise in the background +Yq3SEOW2m4WY Distant humming gets louder as a train passes after honking a horn +Yq46VXJ6JN9M Birds tweeting followed by spraying +Yq4YFJA5pFXc Wind blowing into a microphone followed by plastic clacking then an electric toy motor starting up +YqF72bT878gw Boat motor buzzes while operating at high throttle +YqPYwp1K4sZE Clicking and crinkling with people speaking +YqWYncqPSy9A A woman talks followed by a buzzing bee right before a man and woman take turns talking +YqZEIs6tS5vk A truck engine revving followed by tires skidding as a crowd of people talk in the background +YqakN0JNbpcU A man talking followed by footsteps walking on dirt as a swarm of insects buzz and birds chirp in the background +YqeSl7YZAfs4 Faucet water pouring followed by a man talking as water gurgles down a drain +Yr2KhpX_QgXA A man speaks with some distant humming and vibrating getting louder +Yr2djvq1vc68 A faucet pouring water as a plastic container fills with liquid and scrubbing against a plastic surface while water trickles +YrBUCIK8JRLg Waves breaking and some wind +YrE6BJ0Bo4w4 Female speaking, water running, and then female speech +YrINmxSXMR-s Water splashing followed by a duck quacking then a person laughing as water splashes +YrJVXE6Axtrg A man speaking as a sewing machine rapidly operates followed by another man speaking +YrN2rpLV3brs A man speaking while beating eggs +YrPkCYq4Zjwk A loud whoosh followed by a woman speaking then digital beeps and men speaking +YrUq4w4EUSWA An electronic device buzzing followed by paper tearing and then crumpling as a bell rings before a toilet flushes +Yram-QPKSQYc Helicopter flying away +YrbO727iF03I Burping and then various speech and laughing +YrgrmLLhxoCQ Continuous crinkling in a quiet environment +YriM7b5bJ9KQ Several people laughing continuously with brief clanking and speech +YrjUrB1WUpcI Water running and then stopping +Yrp3CQsWxVgE Traffic is present nearby, and a car horn plays the theme to the godfather +YrtgVoZCcBw8 Wind noise and farm animal noises followed by a goat screaming +YrvDcg9DoNKA Thunder roaring in the distance as rain lightly pours and a man yells followed by another man humming +YrvtA7c1I4xo Whooshing and humming with a man speaking and distant squeaks +YsI7_ycEYzAY A clock ticking followed by someone sniffling +YsJrFyjfrL-g Metal clicking and clacking as a sewing machine slowly operates +YsTMKled6Q1M Whistling with flapping wings and cooing pigeons with other birds chirping +YsVYTOURVsQ0 A man talking as water streams in the background +Ys_EWjoiVfzo A sewing machine operating and metal clanking +YsbW7XwwUtSU A ticktock noise from a bell +Ysl_Pxpc7beo A vehicle horn beeps +YsqWyxUObwkw Some rustling followed by humming of an engine +YsqsI2UyrcBQ A vehicle engine revving several times +YszkiW0GXEOI A person whistling while birds chirp +Yt1hj7se76wQ Typing on a typewriter followed by a bell chiming +Yt3VFlDiEKgY An insect buzzing and quiet male speech +Yt4prXmPwthg A sewing machine is in use, and a woman talks +YtB8TiiXwKmA Vibrations and humming from a power tool starting and stopping several times +YtIM-H2rdq8U A distorted gasping followed by footsteps walking on a hard surface and people grunting while a beating sounds in the background as a series of gunshots fire +YtJhVH3VIrnE Wood cracking and loud cluttering +YtNxfdAd14qE A machine makes sizzling noise with a television playing in the background +YtTB0BK39JI8 Wood clacking and scrapping as bells 
chime while a muffled clock ticks +YtaYKM1OSTwE Several goats baaing as grass shuffles followed by a person chuckling in the background +YtdpiXW68adA A female voice crying followed by a person speaking +YtfOIhQpYYe8 A helicopter is flying above while a man is talking +YtjCNwdOUiGc A bus engine running followed by a vehicle horn honking +YtmLAXm1WlnE A woman speaks with nearby insects buzzing +Ytpm5IOD5d4o A woman talking before clapping followed by a bird whistling then a woman whistling +YtwFypUcdgRc A loud wind sound as a man is speaking and laughing +YtxeXrpoMST4 Water trickling into a container +Yu84FiZ_omhA Person singing a long note and birds chirping +Yu8bQf0SnCVI Someone moving around followed by sink water running +Yu9px4Lwv9XI The pitter-patter of feet running +YuJzAf4PaExI Humming of an engine with some rustling +YuY4fe5DT1gI Typing on a computer keyboard +YuhSDBwVrEdo A person whistles with wind blowing +Yup2PpjTzyyc Man speaking with electronic sounds in the background +Yv59uHr-B1no Frogs croak near and far with chirping distant bugs +Yv7BaYF0kagM Airplane engine idles continuously +YvEWmHtiznF8 A man is talking while a bus is idling in the background. +YvaujJ7msKfc City ambiance with light music playing and wind blowing on a microphone as police sirens sound in the distance +YvfNKduToki4 A truck engine running while warning beeps sound followed by a man yelling then compressed air releasing +Yvigslb0kClE A goat vocalizing and male speech +YvruDH_YLaPI Semiautomatic gunfire occurs with slight echo +Yvsy1IpYmrSY Engines hum and rev with squealing tires +YwAZrOPvul4Y Man speaking while there is crinkling sound +YwBs02amFGXs Birds are making noises and flapping wings, and a rooster crows +YwFiCblfZ-vg A man speaking with intermittent second man voice and white noise. 
+YwNiYSYJXssA A young girl talking as a crowd of people talk then gasp +YwOFBldBFRNk Large metal bells are clanging in different tones, and each ring pattern is echoed once +YwSHzVxdMiTo A pig squeals and men speak +YwVi5w_NU6CM Humming of an engine with a ringing siren +Yw_Utn3CwAXE A toilet flushing with footsteps and door opening +YwbPmnxCLoRQ Several bells ringing followed by a single bell ringing +YwnqUgK_-fo4 Several clicks and pops with people cheering and yodeling +YwoadpeAGHUQ An emergency siren sounding followed by water spraying and a man shouting as a truck engine runs idle in the distance +YwrQDkX0NbTA A motorboat driving by while water splashes +Yx5AH2gW_8S4 A door creaking followed by metal tapping and a wooden clack as pigeons coo +YxBZnvfniA1c A man talking while a man talks over an intercom followed by a girl talking then a woman and group of men laughing +YxIztYnMIWUA A telephone bell rings +YxQDq3A4Zfbo Water splashing and wind blowing over a microphone +YxUWSHYoslPQ A man is speaking with tapping noises and dishes being moved +YxYwpABpZed4 An adult female speaks in the foreground, while sizzling is ongoing and metal thumps occur +YxbLW9Wt1Jsg Machine motors operating followed by glass boinking and a series of metal clacking and shuffling +YxnVqvc7N7Po Man and woman talking on phone clicking and tone when hanging up +YxpZna_FwDhI Woman speaks and then sewing machine stitches +YxqtrbqDlz28 A woman sneezes and then sniffles several times +Yy-RSojxgkDo Man speaking, then a bird chipping +Yy1a8PntuXYw Mechanical humming followed by a click and a toilet flushing +Yy3-M1sonh3M Slight rustling and thumping occur, followed by a flush of water with splashing and gurgling +Yy93cZqNCtks A man shouting as another man talks in the background while a series of gunshots fire and footsteps running on concrete followed by guns cocking and a dog growling +YyL3gKa6YLoM A man talking as a person is snoring and a vehicle accelerates in the distance +YyLu4b01t53k Humming of an idling engine +YyRoKi7rhSRo A bird tweets far away and someone flushes the toilet +YyVVLq4ao1Ck A bird is chirping while a fly is buzzing and another insect is making ringing sounds +YyVjivgsU2aA Humming and accelerating of a car engine with squealing tires and people speaking faintly +Yy_OyLW9lBXU Babies crying as a woman is laughing and a man is talking +Yyau2WIRkxb8 A helicopter is in motion +YyfYNPWs7mWY Male speech and then a door shutting +YyhDw7PZje3g Man speaks while insects buzz +YynHdcJ9Oqaw A series of synthesized laser effects +Yyrxa6_P2I80 Many small, enclosed birds chirping +Yz1ax0QPpd14 A duck quacking as birds chirp and a pigeon cooing +Yz4MeV9IGVo0 An engine roars as someone is speaking over a machine +YzBXoaQ1GVlc Children speak in the distance and a woman speaks nearby with some rustling and banging +YzEM94PH29VQ A young child cries as people speak +YzEaGx6an4es A saw blade blaring +YzF3xXn6NTyU Water runs quickly, while someone talks, coughs, and then talks again close by +YzFzPOsOKog4 Bees buzz as clanking occurs +YzIgGMlZENTs Several quacks of a duck are followed by a man speaking +YzoctgurhvHE A plastic rattling followed by a plastic hatch opening followed by plastic clanking and a vehicle engine accelerating while a man is talking +YzoxFl3pddMg A frog croaks with speech and thumping noises in the background +Yzq00Oe1ecpE A bus engine slowing down then accelerating +YztSjcZNUY7A A baby crying followed by the voice of a woman +YzwoqJY03yHE A woman speaking followed by another woman talking 
followed by a goat baaing as cloth rustles diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..040b5461ede56e80da29d77ecf6183ae53effb3b --- /dev/null +++ b/train.py @@ -0,0 +1,263 @@ +import warnings +warnings.filterwarnings("ignore", category=FutureWarning) + +import logging +import math +import random +from datetime import timedelta +from pathlib import Path +from tqdm import tqdm +import hydra +import numpy as np +import torch +import torch.distributed as distributed +from hydra import compose +from hydra.core.hydra_config import HydraConfig +from omegaconf import DictConfig, open_dict +from torch.distributed.elastic.multiprocessing.errors import record + +from meanaudio.data.data_setup import setup_training_datasets, setup_val_datasets +from meanaudio.model.sequence_config import CONFIG_16K, CONFIG_44K +from meanaudio.runner_flowmatching import RunnerFlowMatching +from meanaudio.runner_meanflow import RunnerMeanFlow +from meanaudio.sample import sample +from meanaudio.utils.dist_utils import info_if_rank_zero, local_rank, world_size +from meanaudio.utils.logger import TensorboardLogger +from meanaudio.utils.synthesize_ema import synthesize_ema +import os +import wandb + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True + +log = logging.getLogger() + + +def distributed_setup(): + distributed.init_process_group(backend="nccl", timeout=timedelta(hours=2)) + log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}') + return local_rank, world_size + + +@record +@hydra.main(version_base='1.3.2', config_path='config', config_name='train_config.yaml') +def train(cfg: DictConfig): + + # debug setting + if cfg.get("debug", False): + import debugpy + if "RANK" not in os.environ or int(os.environ["RANK"]) == 0: + debugpy.listen(6665) + print(f'Waiting for debugger attach (rank {os.environ["RANK"]})...') + debugpy.wait_for_client() + + # initial setup + torch.cuda.set_device(local_rank) + torch.backends.cudnn.benchmark = cfg.cudnn_benchmark + distributed_setup() + num_gpus = world_size + run_dir = HydraConfig.get().run.dir + + # patch data dim + seq_cfg = CONFIG_16K # we only support 16k for now + with open_dict(cfg): + cfg.data_dim.latent_seq_len = seq_cfg.latent_seq_len # update sequence config here + + # wrap python logger with a tensorboard logger + log = TensorboardLogger(cfg.exp_id, + run_dir, + logging.getLogger(), + is_rank0=(local_rank == 0), + enable_email=cfg.enable_email and not cfg.debug) + + info_if_rank_zero(log, f'All configuration: {cfg}') + info_if_rank_zero(log, f'Number of GPUs detected: {num_gpus}') + + # number of dataloader workers + info_if_rank_zero(log, f'Number of dataloader workers (per GPU): {cfg.num_workers}') + + # Set seeds to ensure the same initialization + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + random.seed(cfg.seed) + + # setting up configurations + info_if_rank_zero(log, f'Training configuration: {cfg}') + cfg.batch_size //= num_gpus + info_if_rank_zero(log, f'Batch size (per GPU): {cfg.batch_size}') + + # determine time to change max skip + total_iterations = cfg['num_iterations'] + + # setup datasets + if cfg['text_encoder_name'] == 't5_clap_cat': + cfg['concat_text_fc'] = True + + dataset, sampler, loader = setup_training_datasets(cfg) + info_if_rank_zero(log, f'Number of training samples: {len(dataset)}') + info_if_rank_zero(log, f'Number of training batches: {len(loader)}') + + val_dataset, val_loader, eval_loader = setup_val_datasets(cfg) 
# same dataset (val_dataset) but with different dataloader + info_if_rank_zero(log, f'Number of val samples: {len(val_dataset)}') + val_cfg = cfg.data.AudioCaps_val_npz # tsv and memmap dir + + # compute and set mean and std + latent_mean, latent_std = torch.load(cfg.data.latent_mean), torch.load(cfg.data.latent_std) + + # construct the trainer + if not cfg.use_repa: + if cfg.use_meanflow: + trainer = RunnerMeanFlow(cfg, + log=log, + run_path=run_dir, + for_training=True, + latent_mean=latent_mean, + latent_std=latent_std).enter_train() + else: + trainer = RunnerFlowMatching(cfg, + log=log, + run_path=run_dir, + for_training=True, + latent_mean=latent_mean, + latent_std=latent_std).enter_train() + + else: + raise NotImplementedError('REPA is not supported yet') + trainer = RunnerAT_REPA(cfg, + log=log, + run_path=run_dir, + for_training=True, + latent_mean=latent_mean, + latent_std=latent_std).enter_train() + + eval_rng_clone = trainer.rng.graphsafe_get_state() + + # load previous checkpoint if needed + if cfg['checkpoint'] is not None: + curr_iter = trainer.load_checkpoint(cfg['checkpoint']) + cfg['checkpoint'] = None + info_if_rank_zero(log, 'Model checkpoint loaded!') + else: + # if run_dir exists, load the latest checkpoint + checkpoint = trainer.get_latest_checkpoint_path() + if checkpoint is not None: + curr_iter = trainer.load_checkpoint(checkpoint) + info_if_rank_zero(log, 'Latest checkpoint loaded!') + else: + # load previous network weights if needed + curr_iter = 0 + if cfg['weights'] is not None: + info_if_rank_zero(log, 'Loading weights from the disk') + trainer.load_weights(cfg['weights']) + cfg['weights'] = None + else: + info_if_rank_zero(log, 'No checkpoint or weights found, starting from scratch') + + # determine max epoch + total_epoch = math.ceil(total_iterations / len(loader)) + current_epoch = curr_iter // len(loader) + info_if_rank_zero(log, f'We will approximately use {total_epoch - current_epoch} epochs.') + + # training loop + try: + # Need this to select random bases in different workers + np.random.seed(np.random.randint(2**30 - 1) + local_rank * 1000) + while curr_iter < total_iterations: + # Crucial for randomness! 
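+                # (DistributedSampler seeds its shuffle with seed + epoch, so without set_epoch every epoch would reuse the same sample order across ranks)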
+ sampler.set_epoch(current_epoch) # guarantee each epoch has different shuffling + current_epoch += 1 + log.debug(f'Current epoch: {current_epoch}') + + trainer.enter_train() + trainer.log.data_timer.start() + for data in loader: + trainer.train_pass(data, curr_iter) + + if (curr_iter + 1) % cfg.val_interval == 0: + # swap into a eval rng state, i.e., use the same seed for every validation pass + train_rng_snapshot = trainer.rng.graphsafe_get_state() + trainer.rng.graphsafe_set_state(eval_rng_clone) + info_if_rank_zero(log, f'Iteration {curr_iter}: validating') + total_loss = 0 + n = 0 + if cfg.use_repa: + total_diff_loss = 0 + total_proj_loss = 0 + for data in tqdm(val_loader): + n += 1 + if not cfg.use_repa: + mean_loss = trainer.validation_pass(data, curr_iter) + total_loss += mean_loss + else: + mean_loss, diff_loss, proj_loss = trainer.validation_pass(data, curr_iter) + total_loss += mean_loss + total_diff_loss += diff_loss + total_proj_loss += proj_loss + + total_loss /= n + if cfg.use_repa: + total_diff_loss /= n + total_proj_loss /= n + if cfg.use_wandb and local_rank == 0: + wandb.log({"val/loss": total_loss}) + if cfg.use_repa: + wandb.log({"val/diff_loss": total_diff_loss}, step=curr_iter) + wandb.log({"val/proj_loss": total_proj_loss}, step=curr_iter) + + distributed.barrier() + trainer.val_integrator.finalize('val', curr_iter, ignore_timer=True) + trainer.rng.graphsafe_set_state(train_rng_snapshot) + + if (curr_iter + 1) % cfg.eval_interval == 0: + save_eval = (curr_iter + 1) % cfg.save_eval_interval == 0 + train_rng_snapshot = trainer.rng.graphsafe_get_state() + trainer.rng.graphsafe_set_state(eval_rng_clone) + info_if_rank_zero(log, f'Iteration {curr_iter}: inference') + for data in tqdm(eval_loader): + audio_path = trainer.inference_pass(data, + curr_iter, + val_cfg, + save_eval=save_eval) # path to audio files generated + distributed.barrier() + trainer.rng.graphsafe_set_state(train_rng_snapshot) + trainer.eval(audio_path, curr_iter, val_cfg) # av-bench eval + + curr_iter += 1 + + if curr_iter >= total_iterations: + break + + except Exception as e: + log.error(f'Error occurred at iteration {curr_iter}!') + log.critical(e.message if hasattr(e, 'message') else str(e)) + raise + finally: + if not cfg.debug: + trainer.save_checkpoint(curr_iter) # finally will always be called + trainer.save_weights(curr_iter) + + # Inference pass + del trainer + torch.cuda.empty_cache() + + # Synthesize EMA + if local_rank == 0: + log.info(f'Synthesizing EMA with sigma={cfg.ema.default_output_sigma}') + ema_sigma = cfg.ema.default_output_sigma + state_dict = synthesize_ema(cfg, ema_sigma, step=None) + save_dir = Path(run_dir) / f'{cfg.exp_id}_ema_final.pth' + torch.save(state_dict, save_dir) + log.info(f'Synthesized EMA saved to {save_dir}!') + distributed.barrier() + + log.info(f'Evaluation: {cfg}') + sample(cfg) + + # clean-up + log.complete() + distributed.barrier() + distributed.destroy_process_group() + + +if __name__ == '__main__': + train() diff --git a/training/extract_audio_latents.py b/training/extract_audio_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..4fff34c50aea635d8b5d4de4760ec08eff1ba7c1 --- /dev/null +++ b/training/extract_audio_latents.py @@ -0,0 +1,247 @@ +import logging +import os +from argparse import ArgumentParser +from datetime import timedelta +from pathlib import Path + +import pandas as pd +import tensordict as td +import torch +import torch.distributed as distributed +import torch.nn.functional as F +from transformers import 
T5EncoderModel, AutoTokenizer +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler +from tqdm import tqdm + +from meanaudio.data.data_setup import error_avoidance_collate +from meanaudio.data.extraction.wav_dataset import WavTextClipsDataset +from meanaudio.ext.autoencoder import AutoEncoderModule +from meanaudio.ext.mel_converter import get_mel_converter +from meanaudio.utils.dist_utils import local_rank, world_size +import laion_clap +import numpy as np + +log = logging.getLogger() + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True + +# 16k +SAMPLE_RATE = 16_000 +NUM_SAMPLES = 16_000 * 10 # use 10 seconds audio for TTA task +tod_vae_ckpt = './weights/v1-16.pth' +bigvgan_vocoder_ckpt = './weights/best_netG.pt' +mode = '16k' + +# 44k +# """ +# NOTE: 352800 (8*44100) is not divisible by (STFT hop size * VAE downsampling ratio) which is 1024. +# 353280 is the next integer divisible by 1024. +# """ + +# SAMPLE_RATE = 44100 +# NUM_SAMPLES = 353280 +# tod_vae_ckpt = './ext_weights/v1-44.pth' +# bigvgan_vocoder_ckpt = None +# mode = '44k' + + +def distributed_setup(): + distributed.init_process_group(backend="nccl", timeout=timedelta(hours=1)) + log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}') + return local_rank, world_size + + +@torch.inference_mode() +def main(): + distributed_setup() + + parser = ArgumentParser() + parser.add_argument('--data_dir', type=Path, default='./training/example_audios/') + parser.add_argument('--captions_tsv', type=Path, default='./training/example_audio.tsv') + parser.add_argument('--clips_tsv', type=Path, default='./training/example_output/clips.tsv') + parser.add_argument('--latent_dir', + type=Path, + default='./training/example_output/audio-latents') + parser.add_argument('--output_dir', + type=Path, + default='./training/example_output/memmap/audio-example') + parser.add_argument('--batch_size', type=int, default=32) + parser.add_argument('--num_workers', type=int, default=8) + parser.add_argument('--text_encoder', type=str, choices=['clip', 't5', 't5_clap'], default='clip') + parser.add_argument('--multi_caption', action='store_true', help='whether the dataset has multiple captions per audio clip') + args = parser.parse_args() + + data_dir = args.data_dir + captions_tsv = args.captions_tsv + clips_tsv = args.clips_tsv + latent_dir = args.latent_dir + output_dir = args.output_dir + batch_size = args.batch_size + num_workers = args.num_workers + + # cuda setup + torch.cuda.set_device(local_rank) + + + if args.text_encoder == 'clip': + from open_clip import create_model_from_pretrained + # a hack to make it output last hidden states + text_encoder = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14-384', + return_transform=False).eval().cuda() + def new_encode_text(self, text, normalize: bool = False): + cast_dtype = self.transformer.get_cast_dtype() + + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = self.transformer(x, attn_mask=self.attn_mask) + x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] + return F.normalize(x, dim=-1) if normalize else x + + text_encoder.encode_text = new_encode_text.__get__(text_encoder) # bind func new_encode_text to clip_model + + elif args.text_encoder == 't5': + t5_tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-large') + t5_model = 
T5EncoderModel.from_pretrained('google/flan-t5-large').eval().cuda() + + elif args.text_encoder == 't5_clap': + t5_tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-large') + t5_model = T5EncoderModel.from_pretrained('google/flan-t5-large').eval().cuda() + laion_clap_model = laion_clap.CLAP_Module(enable_fusion=False, amodel='HTSAT-base').eval() + + _clap_ckpt_path = "./weights/music_speech_audioset_epoch_15_esc_89.98.pt" + laion_clap_model.load_ckpt(_clap_ckpt_path, verbose=False) + + + tod = AutoEncoderModule(vae_ckpt_path=tod_vae_ckpt, + vocoder_ckpt_path=bigvgan_vocoder_ckpt, + mode=mode).eval().cuda() + mel_converter = get_mel_converter(mode).eval().cuda() + + dataset = WavTextClipsDataset(data_dir, + captions_tsv=captions_tsv, # build dataset from partition_csv and caption_csv + clips_tsv=clips_tsv, + sample_rate=SAMPLE_RATE, + num_samples=NUM_SAMPLES, + normalize_audio=True, + reject_silent=True, + multi_caption=args.multi_caption) + sampler = DistributedSampler(dataset, rank=local_rank, shuffle=False) + dataloader = DataLoader(dataset, + batch_size=batch_size, + num_workers=num_workers, + sampler=sampler, + drop_last=False, + collate_fn=error_avoidance_collate) + latent_dir.mkdir(exist_ok=True, parents=True) + + # extraction + for i, batch in tqdm(enumerate(dataloader), total=len(dataloader)): + ids = batch['id'] + waveforms = batch['waveform'].cuda() + tokens = batch['tokens'].cuda() + caption = batch['caption'] + + if args.text_encoder == 'clip': + text_features = text_encoder.encode_text(tokens, normalize=True) + text_features_c = text_features.mean(dim=1) + elif args.text_encoder == 't5': + tokens = t5_tokenizer( + caption, + max_length=77, + padding="max_length", + truncation=True, + return_tensors="pt" + ) + input_ids, attention_mask = tokens.input_ids.cuda(), tokens.attention_mask.cuda() + + with torch.no_grad(): + text_features = t5_model( + input_ids=input_ids, + attention_mask=attention_mask + )[0] + text_features_c = text_features.mean(dim=1) + elif args.text_encoder == 't5_clap': + tokens = t5_tokenizer( + caption, + max_length=77, + padding="max_length", + truncation=True, + return_tensors="pt" + ) + input_ids, attention_mask = tokens.input_ids.cuda(), tokens.attention_mask.cuda() + + with torch.no_grad(): + text_features = t5_model( + input_ids=input_ids, + attention_mask=attention_mask + )[0] + text_features_c = laion_clap_model.get_text_embedding(caption, use_tensor=True) + + mel = mel_converter(waveforms) + dist = tod.encode(mel) + + a_mean = dist.mean.detach().cpu().transpose(1, 2) + a_std = dist.std.detach().cpu().transpose(1, 2) + text_features = text_features.detach().cpu() + text_features_c = text_features_c.detach().cpu() + mel = mel.detach().cpu() + + ids = [id for id in ids] + captions = [caption for caption in batch['caption']] + + data = { + 'id': ids, + 'caption': captions, + 'mean': a_mean, + 'std': a_std, + 'text_features': text_features, + 'text_features_c': text_features_c, + # 'mel': mel + } + + torch.save(data, latent_dir / f'r{local_rank}_{i:05d}.pth') + + distributed.barrier() + # combine the results + if local_rank == 0: + print('Extraction done. 
Combining the results.') + output_dir.mkdir(exist_ok=True, parents=True) + + list_of_ids_and_labels = [] + + latents = sorted(os.listdir(latent_dir)) + latents = [l for l in latents if l.endswith('.pth')] + idx = 0 + for t in tqdm(latents): + data = torch.load(latent_dir / t, weights_only=True) + bs = len(data['id']) + + for bi in range(bs): + this_id = data['id'][bi] + this_caption = data['caption'][bi] + list_of_ids_and_labels.append({'id': this_id, 'caption': this_caption}) + + out = { + 'text_features': data['text_features'][bi], + 'text_features_c': data['text_features_c'][bi], + 'mean': data['mean'][bi], + 'std': data['std'][bi], + # 'mel': data['mel'][bi] + } + out_file = f'{output_dir}/{idx}.npz' + np.savez(out_file, **out) # savez/savez_compressed + idx += 1 + + output_df = pd.DataFrame(list_of_ids_and_labels) + output_name = output_dir.stem + output_df.to_csv(output_dir.parent / f'{output_name}.tsv', sep='\t', index=False) + + print(f'Output: {len(output_df)}') + + +if __name__ == '__main__': + main() + distributed.destroy_process_group() diff --git a/training/partition_clips.py b/training/partition_clips.py new file mode 100644 index 0000000000000000000000000000000000000000..522d7d6e0b925b7f1a085f1d1220ea0d9a6016af --- /dev/null +++ b/training/partition_clips.py @@ -0,0 +1,73 @@ +import argparse +import os +from pathlib import Path + +import pandas as pd +import torchaudio +from tqdm import tqdm + +min_length_sec = 10 +max_segments_per_clip = 5 + +parser = argparse.ArgumentParser(description='Process audio clips.') +parser.add_argument('--data_dir', + type=Path, + help='Path to the directory containing audio files', + default='./training/example_audios') +parser.add_argument('--output_dir', + type=Path, + help='Path to the output tsv file', + default='./training/example_output/clips.tsv') +parser.add_argument('--start', type=int, help='Start index for processing files', default=0) +parser.add_argument('--end', type=int, help='End index for processing files', default=-1) +args = parser.parse_args() + +data_dir = args.data_dir +output_dir = args.output_dir +start = args.start +end = args.end + +output_data = [] + +blacklisted = 0 +if end == -1: + end = len(os.listdir(data_dir)) +audio_files = sorted(os.listdir(data_dir))[start:end] +print(f'Processing {len(audio_files)} files from {start} to {end}') + +jump = 0 +for audio_file in tqdm(audio_files): + audio_file_path = data_dir / audio_file + audio_name = audio_file_path.stem # file name without extension + try: + waveform, sample_rate = torchaudio.load(audio_file_path) + except Exception as e: + jump += 1 + continue + + # waveform: (1/2) * length + if waveform.shape[1] < 1/2 * sample_rate * min_length_sec: + jump += 1 + continue + + # try to partition the audio into segments, each with length of min_length_sec + segment_length = int(sample_rate * min_length_sec) + total_length = waveform.shape[1] + num_segments = min(max_segments_per_clip, max(total_length // segment_length, 1)) # at least select one segment + if num_segments > 1: + segment_interval = (total_length - segment_length) // (num_segments - 1) + else: + segment_interval = 0 + + for i in range(num_segments): + start_sample = i * segment_interval + end_sample = start_sample + segment_length # num of points before resampling + audio_id = f'{audio_name}_{i}' + output_data.append((audio_id, audio_name, start_sample, end_sample)) + +output_dir.parent.mkdir(parents=True, exist_ok=True) +print(len(output_data)) +output_df = pd.DataFrame(output_data, columns=['id', 'name', 
'start_sample', 'end_sample']) +output_df.to_csv(output_dir, index=False, sep='\t') + +print(f" Jumping {jump} audio files .. ") \ No newline at end of file diff --git a/weights b/weights new file mode 160000 index 0000000000000000000000000000000000000000..b2815b92cbd7c3e862f5aa0791c64ffd09d9fbd0 --- /dev/null +++ b/weights @@ -0,0 +1 @@ +Subproject commit b2815b92cbd7c3e862f5aa0791c64ffd09d9fbd0
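As a worked example of the segment-selection arithmetic in training/partition_clips.py: assuming an illustrative 35-second clip at 16 kHz and the script's defaults (min_length_sec=10, max_segments_per_clip=5), three evenly spaced 10-second windows are chosen, the first starting at sample 0 and the last ending at the clip's final sample. The clip length here is an assumed example input, not a value from the repository.

# Sketch of the partition arithmetic in training/partition_clips.py (assumed 35 s clip at 16 kHz).
sample_rate = 16_000
total_length = 35 * sample_rate                                  # 560_000 samples
segment_length = int(sample_rate * 10)                           # min_length_sec = 10 -> 160_000 samples
num_segments = min(5, max(total_length // segment_length, 1))    # max_segments_per_clip = 5 -> 3
segment_interval = (total_length - segment_length) // (num_segments - 1) if num_segments > 1 else 0  # 200_000
segments = [(i * segment_interval, i * segment_interval + segment_length) for i in range(num_segments)]
# -> [(0, 160000), (200000, 360000), (400000, 560000)]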
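The extraction step in training/extract_audio_latents.py writes one .npz record per clip plus a companion .tsv of ids and captions. Below is a minimal sketch of reading them back, assuming the script's default --output_dir; the record index 0 is arbitrary and only for illustration.

# Sketch: inspect one extracted record (paths follow the argparse defaults in extract_audio_latents.py).
import numpy as np
import pandas as pd

meta = pd.read_csv('./training/example_output/memmap/audio-example.tsv', sep='\t')
rec = np.load('./training/example_output/memmap/audio-example/0.npz')
print(meta.iloc[0]['id'], meta.iloc[0]['caption'])
print(rec['mean'].shape, rec['std'].shape)   # per-frame latent mean/std from the VAE encoder
print(rec['text_features'].shape)            # token-level T5 (or CLIP) text features
print(rec['text_features_c'].shape)          # pooled text embedding (CLAP when --text_encoder t5_clap)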