import gradio as gr
import os

# Remote Hugging Face Spaces used as backends:
# - CLIP Interrogator: image -> descriptive text prompt.
# - Mubert text-2-music: text prompt -> generated audio file.
img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator")
text_to_music = gr.Interface.load("spaces/fffiloni/text-2-music")


def get_prompts(uploaded_image):
    """Run the full pipeline for one uploaded image.

    Sends the image to CLIP Interrogator to get a text prompt, then feeds
    that prompt to Mubert to generate music.

    Args:
        uploaded_image: filepath to the uploaded image (gr.Image type="filepath").

    Returns:
        (music_filepath, prompt) — one value per UI output component.
    """
    print(f"""————— Calling CLIP Interrogator ... """)
    # fn_index=1 selects the interrogate endpoint on the CLIP Space;
    # the call returns a tuple whose first element is the prompt text.
    prompt = img_to_text(uploaded_image, fn_index=1)[0]
    music_result = get_music(prompt)
    return music_result, prompt


def get_music(prompt):
    """Send *prompt* to the Mubert Space and return the resulting audio filepath."""
    print(f"""————— Calling now MubertAI ... ——————— """)
    result = text_to_music(prompt, fn_index=0)
    print(f"""————— NEW RESULTS prompt : {prompt} music : {result} ——————— """)
    # BUG FIX: previously returned `result, result` — a redundant duplicate
    # used to pad the two click outputs. The prompt now fills the second
    # output (see get_prompts), so a single value is returned here.
    return result


css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(
            """
            <h1>Image to Music</h1>
            <p>
              Sends an image in to CLIP Interrogator to generate a text prompt
              which is then run through Mubert text-to-music to generate music
              from the input image!
            </p>
            """
        )
        input_img = gr.Image(type="filepath", elem_id="input-img")
        generate = gr.Button("Generate Music from Image")
        music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output")
        # BUG FIX: output_text was referenced in the click handler below but
        # never defined, crashing the app at startup with a NameError. It now
        # displays the CLIP-generated prompt alongside the audio result.
        output_text = gr.Textbox(label="Generated prompt", elem_id="output-text")

        # with gr.Group(elem_id="share-btn-container"):
        #     community_icon = gr.HTML(community_icon_html, visible=False)
        #     loading_icon = gr.HTML(loading_icon_html, visible=False)
        #     share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)

        generate.click(
            get_prompts,
            inputs=[input_img],
            outputs=[music_output, output_text],
        )

demo.queue(max_size=32, concurrency_count=20).launch()