import streamlit as st
import numpy as np
import pretty_midi
from accompaniment_generator.generator.base import Generator
import os
import uuid
import time
from midi2audio import FluidSynth
from scipy.io import wavfile
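
# Generate an accompaniment for the input MIDI with the Generator, write the
# result to a uniquely named .mid file, and render both the original and the
# generated MIDI to WAV with FluidSynth so they can be played back in the UI.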
def inference(audio, num_epoch):
    generator = Generator()
    output_midi_data = generator(audio, num_epoch=int(num_epoch))
    name = uuid.uuid4()
    output_midi_data.write(f'{name}.mid')
    fs = FluidSynth("font.sf2")
    fs.midi_to_audio(f'{name}.mid', f'{name}.wav')
    fs.midi_to_audio(audio, f'{name}-init.wav')
    # time.sleep(2)
    print([f'{name}-init.wav', f'{name}.wav'])
    return f'{name}-init.wav', f'{name}.wav'
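
# Streamlit UI: pick an example or upload a MIDI file, choose the number of
# epochs, and listen to the input and the generated accompaniment.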
st.title("Accompaniment Generator")
# st.description is not part of the Streamlit API; show the description with st.markdown instead.
st.markdown(
    "Streamlit demo for the Accompaniment Generator. To use it, simply upload "
    "your MIDI file, or select one of the examples to load it. "
    "Read more at the link below."
)
article = "<p style='text-align: center'>" \
"<a href='https://github.com/AlekseyKorshuk/accompaniment-generator' target='_blank'>Github Repo</a>" \
"</p>"
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir("./examples") if isfile(join("./examples", f))]
model_name = st.selectbox(
    'Select example MIDI file:',
    onlyfiles,
)
uploaded_file = st.file_uploader(
    'Upload MIDI file:'
)
num_epoch = st.number_input(
    "Number of epochs:",
    min_value=1,
    max_value=1000,
    step=1,
    value=10,
)
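# An uploaded file, if any, takes precedence over the selected example.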
generate_button = st.button("Generate")
if generate_button:
    input_file = f"./examples/{model_name}"
    if uploaded_file is not None:
        # Save the uploaded MIDI to disk so downstream tools can read it by
        # path rather than from an in-memory buffer.
        input_file = uploaded_file.name
        with open(input_file, "wb") as f:
            f.write(uploaded_file.getbuffer())
    with st.spinner(text="Generating, this may take some time..."):
        before, after = inference(input_file, num_epoch)
    st.markdown("Before:")
    st.audio(before)
    st.markdown("After:")
    st.audio(after)