import gradio as gr
import torch
from diffusers import AudioLDMControlNetPipeline, ControlNetModel
from pretty_midi import PrettyMIDI
from tempfile import _TemporaryFileWrapper

SAMPLE_RATE = 16000  # AudioLDM generates 16 kHz audio

# Use half precision on GPU; fall back to full precision on CPU.
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.float16
else:
    device = "cpu"
    torch_dtype = torch.float32

# Load the MIDI-conditioned ControlNet and attach it to the AudioLDM base model.
controlnet = ControlNetModel.from_pretrained(
    "lauraibnz/midi-audioldm-v2", torch_dtype=torch_dtype)
pipe = AudioLDMControlNetPipeline.from_pretrained(
    "cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
pipe = pipe.to(device)
generator = torch.Generator(device=device)
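
# A minimal sketch of calling the pipeline directly, outside the Gradio UI.
# "example.mid" is a hypothetical file name; the keyword arguments mirror the
# predict() call below:
#
#     midi = PrettyMIDI("example.mid")
#     out = pipe(
#         "piano",
#         midi=midi,
#         audio_length_in_s=10,
#         num_inference_steps=20,
#         guidance_scale=2.5,
#         generator=generator.manual_seed(48),
#     )
#     waveform = out.audios.T  # NumPy array at 16 kHz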

def predict(midi_file=None, midi_synth=None, prompt="", neg_prompt="", duration=None, seed=0, cond=1, inf=20, guidance_scale=2.5, guess=False):
    """Generate audio from a MIDI file and a text prompt. Returns a
    (sample_rate, waveform) tuple for a Gradio Audio component."""
    if isinstance(midi_file, _TemporaryFileWrapper):
        midi_file = midi_file.name
    midi = PrettyMIDI(midi_file)
    if not duration:
        # Default to the length of the synthesized MIDI preview.
        duration = midi_synth[1].shape[0] / SAMPLE_RATE
    if not prompt and not neg_prompt:
        # Without any prompt, fall back to guess mode so the ControlNet
        # conditioning alone drives the generation.
        guess = True
    audio = pipe(
        prompt,
        negative_prompt=neg_prompt,
        midi=midi,
        audio_length_in_s=duration,
        num_inference_steps=inf,
        controlnet_conditioning_scale=float(cond),
        guess_mode=guess,
        generator=generator.manual_seed(int(seed)),
        guidance_scale=float(guidance_scale),
    )
    return (SAMPLE_RATE, audio.audios.T)

def synthesize(midi_file=None):
    """Render the MIDI file to a waveform so the user can preview the input."""
    if isinstance(midi_file, _TemporaryFileWrapper):
        midi_file = midi_file.name
    midi = PrettyMIDI(midi_file)
    midi_synth = midi.synthesize(fs=SAMPLE_RATE)
    # Reshape to (samples, channels), the layout gr.Audio expects.
    midi_synth = midi_synth.reshape(midi_synth.shape[0], 1)
    return (SAMPLE_RATE, midi_synth)
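
# For reference, the preview tuple returned above plugs straight into
# predict(); e.g., with a hypothetical "example.mid":
#
#     midi_synth = synthesize("example.mid")
#     sr, wave = predict("example.mid", midi_synth, prompt="piano")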

def run_example(midi_file=None, prompt="", neg_prompt="", duration=None, seed=0, cond=1, inf=20, guidance_scale=2.5, guess=False):
    """Wrapper used by gr.Examples: synthesize the preview, then generate."""
    midi_synth = synthesize(midi_file)
    gen_audio = predict(midi_file, midi_synth, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess)
    return midi_synth, gen_audio

with gr.Blocks(
    title="🎹 MIDI-AudioLDM",
    theme=gr.themes.Base(text_size=gr.themes.sizes.text_md, font=[gr.themes.GoogleFont("Nunito Sans")]),
) as demo:
    gr.HTML(
        """
        <h1 align="center">🎹 MIDI-AudioLDM</h1>
        """)
    gr.Markdown(
        """
        MIDI-AudioLDM is a MIDI-conditioned text-to-audio model based on the project [AudioLDM](https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation). The model has been conditioned using the ControlNet architecture and developed within Hugging Face’s [🧨 Diffusers](https://huggingface.co/docs/diffusers/) framework. Once trained, MIDI-AudioLDM accepts a MIDI file and a text prompt as input and returns an audio file, which is an interpretation of the MIDI based on the given text description. This enables detailed control over different musical aspects such as notes, mood and timbre.
        """)
    with gr.Column(variant='panel'):
        midi = gr.File(label="midi file", file_types=[".mid"])
        prompt = gr.Textbox(label="prompt", info="Enter a descriptive text prompt to guide the audio generation.")
        with gr.Row():
            with gr.Column():
                midi_synth = gr.Audio(label="synthesized midi")
                midi.upload(synthesize, midi, midi_synth)
            with gr.Column():
                audio = gr.Audio(label="generated audio")
        with gr.Accordion("Advanced Settings", open=False):
            duration = gr.Slider(0, 20, step=2.5, label="duration", info="Modify the duration in seconds of the output audio file. If not set, it is determined by the MIDI file.")
            inf = gr.Slider(0, 100, value=40, step=1, label="inference steps", info="Edit the number of denoising steps. A larger number usually leads to higher quality but slower results.")
            guidance_scale = gr.Slider(0, 4, value=2.5, step=0.5, label="guidance scale", info="Modify the guidance scale. Higher values tie the generated audio more closely to the text prompt, sometimes at the expense of quality.")
            neg_prompt = gr.Textbox(label="negative prompt", info="Optionally enter a negative text prompt describing what the generation should avoid.")
            seed = gr.Number(value=48, label="random seed", info="Change the random seed for a different generation result.")
            cond = gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale", info="Choose a value between 0 and 1. Higher values make the generation follow the MIDI conditioning more closely; lower values are recommended for more creative prompts.")
            guess = gr.Checkbox(label="guess mode", info="Optionally select guess mode. If selected, the model tries to recognize the content of the MIDI without the need for a text prompt.")
    btn = gr.Button("Generate")
    btn.click(predict, inputs=[midi, midi_synth, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess], outputs=[audio])
    gr.Examples(
        examples=[
            ["S00.mid", "piano", "", 10, 48, 1.0, 20, 2.5, False],
            ["S00.mid", "violin", "", 10, 48, 1.0, 20, 2.5, False],
            ["S00.mid", "woman singing, studio recording", "noise", 10, 48, 1.0, 20, 2.5, False],
            ["S00.mid", "jazz band, clean", "noise", 10, 48, 1.0, 20, 2.5, False],
            ["S00.mid", "choir", "noise, percussion", 10, 48, 1.0, 20, 2.5, False],
        ],
        inputs=[midi, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess],
        fn=run_example,
        outputs=[midi_synth, audio],
        cache_examples=True,
    )
    
demo.launch()
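
# Gradio serves the app locally by default; demo.launch(share=True) is the
# standard option for additionally exposing a temporary public link.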