import gradio as gr
from .audio_palette import AudioPalette
# Usage notes shown below the interface. Currently disabled (the
# `article=` kwarg is commented out in both interface builders), but kept
# for when the Space is documented again.
article = """
### Usage
- Since this space is running on a CPU, it is not possible to generate music in a reasonable time.
- To address this, we have provided a python notebook (check Files) that handles the music generation part which can be run locally (if you have GPU) or elsewhere.
- This uses fastAPI to accept api requests and ngrok to expose the server. The same ngrok link needs to be pasted in the input box. (Make sure to include the trailing `/`).
"""
def single_image_interface(model: AudioPalette):
    """Build the single-image Gradio interface.

    Wraps ``model.generate_single`` with an image upload, an instrument
    selector, and a textbox for the remote ngrok endpoint that performs
    the actual music generation.
    """
    image_input = gr.Image(
        type="pil",
        label="Upload an image",
        show_label=True,
        container=True,
    )
    instrument_input = gr.Radio(
        choices=["Piano", "Drums", "Guitar", "Violin", "Flute"],
        label="Instrument",
        show_label=True,
        container=True,
    )
    endpoint_input = gr.Textbox(
        lines=1,
        placeholder="ngrok endpoint",
        label="ngrok endpoint",
        show_label=True,
        container=True,
        type="text",
        visible=True,
    )

    # The first three outputs carry intermediate results (prompt, pace,
    # caption) and are hidden; only the audio player is shown to the user.
    prompt_output = gr.Textbox(
        lines=1,
        placeholder="Prompt",
        label="Generated Prompt",
        show_label=True,
        container=True,
        type="text",
        visible=False,
    )
    pace_output = gr.Textbox(
        lines=1,
        placeholder="Pace of the image",
        label="Pace",
        show_label=True,
        container=True,
        type="text",
        visible=False,
    )
    caption_output = gr.Textbox(
        lines=1,
        placeholder="Caption for the image",
        label="Caption",
        show_label=True,
        container=True,
        type="text",
        visible=False,
    )
    audio_output = gr.Audio(
        label="Generated Audio",
        show_label=True,
        container=True,
        visible=True,
        format="wav",
        autoplay=False,
        show_download_button=True,
    )

    return gr.Interface(
        fn=model.generate_single,
        inputs=[image_input, instrument_input, endpoint_input],
        outputs=[prompt_output, pace_output, caption_output, audio_output],
        cache_examples=False,
        live=False,
        description="Provide an image to generate an appropriate background soundtrack",
        # article=article
    )
def multi_image_interface(model: AudioPalette):
    """Build the multi-image Gradio interface.

    Wraps ``model.generate_multiple`` with a multi-file image upload, an
    instrument selector, and a textbox for the remote ngrok endpoint; the
    result is rendered as an mp4 slideshow with generated background music.
    """
    files_input = gr.File(
        file_count="multiple",
        file_types=["image"],
        type="filepath",
        label="Upload images",
        show_label=True,
        container=True,
        visible=True,
    )
    instrument_input = gr.Radio(
        choices=["Piano", "Drums", "Guitar", "Violin", "Flute"],
        label="Instrument",
        show_label=True,
        container=True,
    )
    endpoint_input = gr.Textbox(
        lines=1,
        placeholder="ngrok endpoint",
        label="ngrok endpoint",
        show_label=True,
        container=True,
        type="text",
        visible=True,
    )

    video_output = gr.Video(
        format="mp4",
        label="Generated Video",
        show_label=True,
        container=True,
        visible=True,
        autoplay=False,
    )

    return gr.Interface(
        fn=model.generate_multiple,
        inputs=[files_input, instrument_input, endpoint_input],
        outputs=[video_output],
        cache_examples=False,
        live=False,
        description="Provide images to generate a slideshow of the images with appropriate music as background",
        # article=article
    )