File size: 1,954 Bytes
f3ecad0
47def4a
 
31eea77
47def4a
 
 
31eea77
47def4a
 
d5e3639
 
47def4a
 
 
 
bc421a1
47def4a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d5e3639
 
 
 
 
 
 
3ca1c59
47def4a
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import tempfile

import gradio as gr
import torch
from diffusers import DiffusionPipeline

from spectro import wav_bytes_from_spectrogram_image

# Inference device; pinned to CPU here (no CUDA check) — presumably the host
# has no GPU, verify before changing.
device = "cpu"
# Hugging Face Hub id of the checkpoint loaded below; by its name this looks
# like a Riffusion model fine-tuned on MusicCaps — confirm on the Hub.
MODEL_ID = "Hyeon2/riffusion-musiccaps"
# Load the diffusion pipeline once at import time and move it to the device.
pipe = DiffusionPipeline.from_pretrained(MODEL_ID)
pipe = pipe.to(device)

def predict(prompt):
    """Generate a spectrogram image and an audio clip from a text prompt.

    Args:
        prompt: Text description of the music to generate.

    Returns:
        A tuple of (grayscale PIL spectrogram image, path to the rendered
        WAV file) — matching the (Image, Audio) Gradio outputs.
    """
    # Take the first generated image; "L" (grayscale) is what the
    # spectrogram-to-audio decoder expects.
    spec = pipe(prompt).images[0].convert("L")
    wav = wav_bytes_from_spectrogram_image(spec)
    # Write to a unique temp file instead of a shared "output.wav": with
    # queue(max_size=250), concurrent requests would otherwise race and
    # overwrite each other's audio. delete=False so Gradio can read the
    # file after this handler returns.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        f.write(wav[0].getbuffer())
        out_path = f.name
    return spec, out_path

# Raw HTML for the page header; rendered into the layout via gr.HTML(title).
title = """
    <div style="text-align: center; max-width: 500px; margin: 0 auto;">
        <div
        style="
            display: inline-flex;
            align-items: center;
            gap: 0.8rem;
            font-size: 1.75rem;
            margin-bottom: 10px;
            line-height: 1em;
        "
        >
        <h1 style="font-weight: 600; margin-bottom: 7px;">
            Riffusion-Musiccaps real-time music generation
        </h1>
        </div>
        <p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">
        Describe a musical prompt, generate music by getting a spectrogram image & sound.
        </p>
    </div>
"""

# Assemble the Gradio UI: prompt textbox + submit button in the left column,
# spectrogram image and audio player in the right. The elem_id values are
# hooks for selectors in the external style.css.
with gr.Blocks(css="style.css") as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)  # page header HTML defined above
        with gr.Row():
            with gr.Column():
                prompt_input = gr.Textbox(placeholder="A LoFi beat", label="Musical prompt", elem_id="prompt-in")
                send_btn = gr.Button(value="Get a new spectrogram!", elem_id="submit-btn")
            with gr.Column(elem_id="col-container-2"):
                spectrogram_output = gr.Image(label="Spectrogram Image Result", elem_id="img-out")
                # type='filepath': the handler returns a path to a WAV file,
                # not raw audio data.
                sound_output = gr.Audio(type='filepath', label="Generated Audio", elem_id="music-out")
    # Wire the button to predict(prompt) -> (spectrogram image, wav path).
    send_btn.click(predict, inputs=[prompt_input], outputs=[spectrogram_output, sound_output])

# Queue up to 250 pending requests, then start the server; debug=True surfaces
# errors in the console, ssr_mode=False disables server-side rendering.
demo.queue(max_size=250).launch(debug=True, ssr_mode=False)