# app.py — Accompaniment Generator (Gradio demo)
import gradio as gr
import numpy as np
import pretty_midi
from accompaniment_generator.generator.base import Generator
import os
import uuid
# Install FluidSynth at startup so midi2audio can render MIDI to WAV.
# `-y` makes apt non-interactive — without it the install prompt would
# hang a headless container; apt-get is the stable scripting interface.
os.system("apt-get install -y fluidsynth")
# Copy the bundled General MIDI soundfont into the working directory under
# the filename ("font.sf2") that inference() passes to FluidSynth.
os.system("cp /usr/share/sounds/sf2/FluidR3_GM.sf2 ./font.sf2")
from midi2audio import FluidSynth
def inference(audio, num_epoch):
    """Generate an accompaniment for an uploaded MIDI file.

    Args:
        audio: Uploaded file object from the Gradio File input;
            ``audio.name`` is the path of the MIDI file on disk.
        num_epoch: Number of epochs for the generator. May arrive as a
            float from the Gradio Number input, hence the ``int()`` cast.

    Returns:
        A two-element list of WAV paths:
        ``[rendering of the input, rendering with generated accompaniment]``.
    """
    generator = Generator()
    # Validate that the upload is parseable MIDI before the (slow)
    # generation step; raises early if the file is malformed.
    # (The parsed object itself is not needed — Generator re-reads the path.)
    pretty_midi.PrettyMIDI(audio.name)
    output_midi_data = generator(audio.name, num_epoch=int(num_epoch))
    # Unique basename so concurrent requests don't clobber each other's files.
    name = uuid.uuid4()
    output_midi_data.write(f'{name}.mid')
    fs = FluidSynth("font.sf2")
    fs.midi_to_audio(f'{name}.mid', f'{name}.wav')
    fs.midi_to_audio(audio.name, f'{name}-init.wav')
    return [f'{name}-init.wav', f'{name}.wav']
title = "Accompaniment Generator"
# NOTE(review): the original description was copy-pasted from the MIDI-DDSP
# demo and described the wrong project; it now describes this app.
description = "Gradio demo for Accompaniment Generator: generate an accompaniment for your MIDI melody. To use it, simply upload your MIDI file, or click the example to load it. Read more at the link below."
article = "<p style='text-align: center'>" \
            "<a href='https://github.com/AlekseyKorshuk/accompaniment-generator' target='_blank'>Github Repo</a>" \
            "</p>"
# Each example is [input MIDI path, number of epochs].
examples = [['barbiegirl_mono.mid', 10]]
# Wire up the web UI: a MIDI file plus an epoch count in, two audio
# renderings ("Before" = input only, "After" = with accompaniment) out.
demo_inputs = [
    gr.inputs.File(type="file", label="Input"),
    gr.inputs.Number(label="Number of epoch", default=10),
]
demo_outputs = [
    gr.outputs.Audio(type="auto", label="Before"),
    gr.outputs.Audio(type="auto", label="After"),
]
demo = gr.Interface(
    inference,
    demo_inputs,
    demo_outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
)
demo.launch(debug=True)