import gradio as gr
import torch
from diffusers import AudioLDMControlNetPipeline, ControlNetModel
from pretty_midi import PrettyMIDI

# Use the GPU with half precision when available; otherwise fall back to CPU in full precision.
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.float16
else:
    device = "cpu"
    torch_dtype = torch.float32

# Load the MIDI-conditioned ControlNet and attach it to the AudioLDM pipeline.
controlnet = ControlNetModel.from_pretrained("lauraibnz/midi-audioldm", torch_dtype=torch_dtype)
pipe = AudioLDMControlNetPipeline.from_pretrained(
    "cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype
)
pipe = pipe.to(device)


def predict(prompt, midi_file="test.mid", audio_length_in_s=10, num_inference_steps=20, controlnet_conditioning_scale=1.0):
    # Parse the MIDI file and generate audio conditioned on both the text prompt and the MIDI.
    midi = PrettyMIDI(midi_file)
    audio = pipe(
        prompt,
        midi=midi,
        audio_length_in_s=audio_length_in_s,
        num_inference_steps=num_inference_steps,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
    )
    # Return (sample_rate, waveform) for the Gradio audio output; AudioLDM generates 16 kHz audio.
    return (16000, audio.audios)


# Only the text prompt is exposed in the UI; the remaining parameters use their defaults.
demo = gr.Interface(fn=predict, inputs="text", outputs="audio")
demo.launch()