import gradio as gr
import time
from moviepy.editor import AudioFileClip
from share_btn import community_icon_html, loading_icon_html, share_js
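
# Remote Spaces used by this demo: an image-captioning model (GPT-2 based)
# and AudioLDM for text-to-audio generation, both loaded via gr.Blocks.load.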
caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
audio_gen = gr.Blocks.load(name="spaces/haoheliu/audioldm-text-to-audio-generation")
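

# Pipeline: caption the uploaded image, turn the caption into a sound effect
# with AudioLDM, and convert the result to WAV for playback.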
def infer(image_input):
    # Caption the input image via the remote captioning Space.
    cap = caption(image_input, fn_index=0)
    # Generate a sound from the caption via the AudioLDM Space
    # (duration, guidance scale, seed and candidate-count settings follow the prompt).
    sound = audio_gen(cap, 10, 2.5, 45, 3, fn_index=0)
    # Short pause before reading the generated file.
    time.sleep(2)
    # Re-encode the returned audio as WAV for the gr.Audio output.
    file_to_convert = AudioFileClip(sound)
    file_to_convert.write_audiofile("audio.wav")
    file_to_convert.close()
    # Reveal the caption textbox and the share button alongside the audio result.
    return gr.Textbox.update(value=cap, visible=True), "audio.wav", gr.Group.update(visible=True)
title = """
Image to Sound Effect
Convert an image to a corresponding sound effect generated through GPT2 Image Captioning & AudioLDM
"""
article = """
"""
with gr.Blocks(css="style.css") as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        input_img = gr.Image(type="filepath", elem_id="input-img")
        caption_output = gr.Textbox(label="Caption", lines=1, visible=False, elem_id="text-caption")
        sound_output = gr.Audio(label="Result", elem_id="sound-output")
        generate = gr.Button("Generate SFX from Image")
        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
            community_icon = gr.HTML(community_icon_html)
            loading_icon = gr.HTML(loading_icon_html)
            share_button = gr.Button("Share to community", elem_id="share-btn")
        gr.HTML(article)

    generate.click(infer, inputs=[input_img], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
    share_button.click(None, [], [], _js=share_js)
demo.queue(max_size=32).launch(debug=True)
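
# A minimal sketch of calling the endpoint exposed via api_name="i2fx" from a
# separate script, assuming this app is hosted as a Space ("user/image-to-sfx"
# is a placeholder name) and the gradio_client package is available:
#
#   from gradio_client import Client
#   client = Client("user/image-to-sfx")
#   result = client.predict("path/to/image.png", api_name="/i2fx")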