import gradio as gr
import os

# Remote Hugging Face Spaces used as backends:
# CLIP Interrogator turns an image into a text prompt,
# and Mubert text-2-music turns that prompt into audio.
img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator")
text_to_music = gr.Interface.load("spaces/fffiloni/text-2-music")


def get_prompts(uploaded_image):
    print("————— Calling CLIP Interrogator ...")
    # fn_index=1 selects the image-to-prompt endpoint of the CLIP Interrogator Space.
    prompt = img_to_text(uploaded_image, fn_index=1)[0]
    music_result = get_music(prompt)
    return music_result


def get_music(prompt):
    print("————— Calling now MubertAI ... ———————")
    result = text_to_music(prompt, fn_index=0)
    print(f"""—————
NEW RESULTS
prompt : {prompt}
music : {result}
———————
""")
    # Returned twice: once for the audio player, once for the hidden text output.
    return result, result


css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
            <div style="text-align: center;">
                <h1>Image to Music</h1>
                <p>
                    Sends an image to <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator" target="_blank">CLIP Interrogator</a>
                    to generate a text prompt, which is then run through
                    <a href="https://huggingface.co/spaces/fffiloni/text-2-music" target="_blank">Mubert text-to-music</a>
                    to generate music from the input image!
                </p>
            </div>
        """)
        with gr.Column():
            input_img = gr.Image(type="filepath", elem_id="input-img")
            generate = gr.Button("Generate Music from Image")
        with gr.Column():
            music_output = gr.Audio(label="Result", type="filepath")
            output_text = gr.Textbox(label="Output", elem_id="output-txt", visible=False)

    generate.click(get_prompts, inputs=[input_img], outputs=[music_output, output_text])

demo.queue(max_size=32, concurrency_count=20).launch()