import os
import shutil
import sys

import gradio as gr
import spaces

# Clone the model repo and install its dependencies at startup.
os.system('git lfs install')
os.system('git clone https://huggingface.co/jadechoghari/qa-mdt')
os.system('pip install -r qa_mdt/requirements.txt')
os.system('pip install xformers==0.0.26.post1')
os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')

# Make the cloned repo importable, then import the pipeline from it.
sys.path.append(os.path.abspath("qa_mdt"))
from qa_mdt.pipeline import MOSDiffusionPipeline

pipe = MOSDiffusionPipeline()


# Run the pipeline on the user's description; the model writes its output to './awesome.wav'.
@spaces.GPU()
def generate_waveform(description):
    pipe(description)
    generated_file_path = "./awesome.wav"
    if os.path.exists(generated_file_path):
        return generated_file_path
    return "Error: Failed to generate the waveform."


# Gradio interface (gr.inputs/gr.outputs are deprecated; use the component classes directly).
iface = gr.Interface(
    fn=generate_waveform,
    inputs=gr.Textbox(lines=2, placeholder="Enter a music description here..."),  # text input for the description
    outputs=gr.File(label="Download Generated WAV file"),  # file output for download
    title="QA-MDT Music Diffusion Pipeline",
    description="Enter a music description, and the model will generate a corresponding audio waveform. Download the output as 'awesome.wav'.",
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
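
# Minimal sketch of exercising the pipeline without the UI (the prompt below is a
# hypothetical example); assumes the setup above has already cloned the repo and
# constructed the pipeline.
#
#     path = generate_waveform("an upbeat electronic track with a driving bassline")
#     print(path)  # "./awesome.wav" on success, error string otherwise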