import os
import uuid
from os import listdir
from os.path import isfile, join

import streamlit as st
from midi2audio import FluidSynth

from accompaniment_generator.generator.base import Generator

ABOUT_TEXT = "🤗 Accompaniment Generator - generate an accompaniment part with chords using an evolutionary algorithm."
CONTACT_TEXT = """
_Built by Aleksey Korshuk with love_ ❤️

[![Follow](https://img.shields.io/github/followers/AlekseyKorshuk?style=social)](https://github.com/AlekseyKorshuk)

[![Follow](https://img.shields.io/twitter/follow/alekseykorshuk?style=social)](https://twitter.com/intent/follow?screen_name=alekseykorshuk)

Star project repository:

[![GitHub stars](https://img.shields.io/github/stars/AlekseyKorshuk/accompaniment-generator?style=social)](https://github.com/AlekseyKorshuk/accompaniment-generator)
"""
""", unsafe_allow_html=True, ) st.sidebar.markdown(ABOUT_TEXT) st.sidebar.markdown(CONTACT_TEXT) def inference(audio, num_epoch, chord_duration): generator = Generator() if chord_duration == 0.0: chord_duration = None output_midi_data = generator(audio, num_epoch=int(num_epoch), chord_duration=chord_duration)[0] name = uuid.uuid4() output_midi_data.write(f'{name}.mid') fs = FluidSynth("font.sf2") fs.midi_to_audio(f'{name}.mid', f'{name}.wav') fs.midi_to_audio(audio, f'{name}-init.wav') # time.sleep(2) print([f'{name}-init.wav', f'{name}.wav']) return f'{name}-init.wav', f'{name}.wav' st.title("Accompaniment Generator") st.markdown( "App to generate accompaniment for MIDI music file with Evolutionary algorithm. Check out [project repository](https://github.com/AlekseyKorshuk/accompaniment-generator).") article = "

" \ "Github Repo" \ "

" from os import listdir from os.path import isfile, join onlyfiles = [f for f in listdir("./examples") if isfile(join("./examples", f))] model_name = st.selectbox( 'Select example MIDI file (will be used only for empty file field):', onlyfiles ) uploaded_file = st.file_uploader( 'Upload MIDI file:' ) num_epoch = st.number_input("Number of epochs:", min_value=1, max_value=1000, step=1, value=1, ) chord_duration = st.number_input("Custom chord duration is seconds (leave zero for auto-calculation):", min_value=0.0, max_value=1000.0, step=0.0001, value=0.0, format="%.4f" ) generate_image_button = st.button("Generate") if generate_image_button: input_file = f"./examples/{model_name}" if uploaded_file is not None: input_file = uploaded_file.name with open(input_file, 'wb') as f: f.write(uploaded_file.getvalue()) # print(uploaded_file.getvalue()) with st.spinner(text=f"Generating, this may take some time..."): before, after = inference(input_file, num_epoch, chord_duration) st.markdown("Before:") st.audio(before) st.markdown("After:") st.audio(after) if uploaded_file is not None: os.remove(input_file)