import gradio as gr
import torch

from spectro import wav_bytes_from_spectrogram_image
from diffusers import StableDiffusionPipeline

# Load the Riffusion checkpoint once at startup.
# fp16 weights + CUDA: the pipeline is only usable on a GPU machine as written.
model_id = "riffusion/riffusion-model-v1"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")


def predict(prompt):
    """Generate a Riffusion spectrogram for *prompt* and render it to audio.

    Args:
        prompt: Free-text musical description fed to the diffusion pipeline.

    Returns:
        A ``(spectrogram_image, wav_path)`` pair matching the two Gradio
        outputs: the PIL spectrogram image and the path of the WAV file
        written to the working directory.
    """
    spec = pipe(prompt).images[0]
    print(spec)
    # spectro helper returns a sequence whose first element is an in-memory
    # WAV buffer; persist it so gr.Audio(type='filepath') can serve it.
    wav = wav_bytes_from_spectrogram_image(spec)
    with open("output.wav", "wb") as f:
        f.write(wav[0].getbuffer())
    return spec, 'output.wav'


title = """

Riffusion - Stable diffusion for real-time music generation

Describe a musical prompt, generate music by getting a Riffusion spectrogram and its corresponding sound.

Do you need faster results ? You can skip the queue by duplicating this space: Duplicate Space

"""

article = """

About the model: Riffusion is a latent text-to-image diffusion model capable of generating spectrogram images given any text input. These spectrograms can be converted into audio clips.
The Riffusion model was created by fine-tuning the Stable-Diffusion-v1-5 checkpoint.
The model is intended for research purposes only. Possible research areas and tasks include generation of artworks, audio, and use in creative processes, applications in educational or creative tools, research on generative models.

"""

css = ''' #col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;} a {text-decoration-line: underline; font-weight: 600;} div#record_btn > .mt-6 { margin-top: 0!important; } div#record_btn > .mt-6 button { width: 100%; height: 40px; } .footer { margin-bottom: 45px; margin-top: 10px; text-align: center; border-bottom: 1px solid #e5e5e5; } .footer>p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; } .dark .footer { border-color: #303030; } .dark .footer>p { background: #0b0f19; } '''

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        prompt_input = gr.Textbox(placeholder="a cat diva singing in a New York jazz club")
        send_btn = gr.Button("Get a new spectrogram ! ")
    with gr.Column(elem_id="col-container-2"):
        spectrogram_output = gr.Image(label="spectrogram image result")
        sound_output = gr.Audio(type='filepath', label="spectrogram sound")
        gr.HTML(article)
    # Bug fix: the original wired the button to the undefined name `chat_hf`,
    # which raised NameError on startup. The defined handler is `predict`.
    send_btn.click(predict, inputs=[prompt_input], outputs=[spectrogram_output, sound_output])

demo.queue(max_size=250).launch(debug=True)