Create app.py
app.py (ADDED)
import gradio as gr
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

def generate_music(description, melody_audio):
    # Load the pretrained song-starter checkpoint (re-loaded on every request here).
    model = MusicGen.get_pretrained('nateraw/musicgen-songstarter-v0.2')
    model.set_generation_params(duration=8)  # generate 8 seconds.

    if description:
        descriptions = [description] * 3
        if melody_audio:
            # Condition on both the text prompt and the uploaded melody.
            melody, sr = torchaudio.load(melody_audio)
            wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr)
        else:
            wav = model.generate(descriptions)  # generates 3 samples.
    else:
        wav = model.generate_unconditional(4)  # generates 4 unconditional audio samples

    output_audios = []
    for idx, one_wav in enumerate(wav):
        # audio_write appends the format suffix itself, so pass a stem and record the resulting .wav path.
        stem = f'output_{idx}'
        audio_write(stem, one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
        output_audios.append(f'{stem}.wav')

    # The interface below declares a single Audio output, so return the first generated file.
    return output_audios[0]

description = gr.Textbox(label="Description", placeholder="acoustic, guitar, melody, trap, d minor, 90 bpm")
melody_audio = gr.Audio(label="Melody Audio (optional)", type="filepath")
output_audio = gr.Audio(label="Generated Music", type="filepath")

gr.Interface(
    fn=generate_music,
    inputs=[description, melody_audio],
    outputs=output_audio,
    title="MusicGen Demo",
    description="Generate music using the MusicGen model.",
    examples=[
        ["acoustic, guitar, melody, trap, d minor, 90 bpm", "./assets/bach.mp3"],
        ["upbeat, electronic, synth, dance, 120 bpm", None]
    ]
).launch()
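One design note on the handler above: the checkpoint is downloaded and loaded again on every request, which makes each generation slow on a Space. Below is a minimal sketch of one way to cache it, assuming the model fits in the Space's memory; get_model is a hypothetical helper that is not in the original file, and generate_music would call it in place of the MusicGen.get_pretrained(...) line.

# Hypothetical caching helper: load the MusicGen checkpoint once and reuse it.
from functools import lru_cache

from audiocraft.models import MusicGen

@lru_cache(maxsize=1)
def get_model():
    # First call loads the weights; later calls return the same cached instance.
    model = MusicGen.get_pretrained('nateraw/musicgen-songstarter-v0.2')
    model.set_generation_params(duration=8)  # same 8-second setting as app.py
    return model

Inside generate_music, `model = get_model()` would then replace the two model-setup lines.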