import time
import uuid

import gradio as gr
import pretty_midi
from accompaniment_generator.generator.base import Generator
from midi2audio import FluidSynth
from scipy.io import wavfile


def inference(audio, num_epoch):
    generator = Generator()
    # Parse the uploaded MIDI; currently unused beyond checking that the file can be read.
    input_midi_data = pretty_midi.PrettyMIDI(audio.name)
    # Generate the accompaniment for the uploaded melody.
    output_midi_data = generator(audio.name, num_epoch=int(num_epoch))
    # Unique name so concurrent requests do not overwrite each other's files.
    name = uuid.uuid4()
    output_midi_data.write(f'{name}.mid')
    # Render both the original and the generated MIDI to audio with FluidSynth.
    fs = FluidSynth("font.sf2")
    fs.midi_to_audio(f'{name}.mid', f'{name}.wav')
    fs.midi_to_audio(audio.name, f'{name}-init.wav')
    # Give FluidSynth a moment to finish writing the wav files.
    time.sleep(2)
    # Debug: log the decoded (sample_rate, data) tuples.
    print([
        wavfile.read(f'{name}-init.wav'),
        wavfile.read(f'{name}.wav'),
    ])
    # Return (sample_rate, data) tuples for the "Before" and "After" audio outputs.
    return [
        wavfile.read(f'{name}-init.wav'),
        wavfile.read(f'{name}.wav'),
    ]

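
# Minimal local smoke test (a sketch, not part of the demo): call inference() directly
# with a stand-in for the file object Gradio passes in. It assumes the bundled example
# 'barbiegirl_mono.mid' and the SoundFont 'font.sf2' are present in the working directory.
#
# from types import SimpleNamespace
# before, after = inference(SimpleNamespace(name="barbiegirl_mono.mid"), num_epoch=10)
# print(before[0], after[0])  # sample rates of the rendered "Before"/"After" audio
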
title = "Accompaniment Generator"
description = "Gradio demo for Accompaniment Generator: upload a melody as a MIDI file and get a generated accompaniment back. To use it, simply upload your MIDI file, or click the example to load it. Read more at the link below."
article = "<p style='text-align: center'>" \
          "<a href='https://github.com/AlekseyKorshuk/accompaniment-generator' target='_blank'>Github Repo</a>" \
          "</p>"
examples = [['barbiegirl_mono.mid', 10]]
gr.Interface(
    inference,
    [gr.inputs.File(type="file", label="Input"), gr.inputs.Number(label="Number of epochs", default=10)],
    [gr.outputs.Audio(type="auto", label="Before"), gr.outputs.Audio(type="auto", label="After")],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(debug=True)