import argparse
import glob
import os.path
import hashlib
import time
import datetime
from pytz import timezone

import gradio as gr

import pickle
import tqdm
import json

import TMIDIX
from midi_to_colab_audio import midi_to_colab_audio

import copy
from collections import Counter
import random
import statistics

import matplotlib.pyplot as plt

#==========================================================================================================

in_space = os.getenv("SYSTEM") == "spaces"

#==========================================================================================================

def render_midi(input_midi, *render_options):

    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = time.time()

    print('=' * 70)
    print('Loading MIDI...')

    fn = os.path.basename(input_midi)
    fn1 = fn.split('.')[0]

    fdata = open(input_midi, 'rb').read()

    input_midi_md5hash = hashlib.md5(fdata).hexdigest()

    print('=' * 70)
    print('Input MIDI file name:', fn)
    print('Input MIDI md5 hash', input_midi_md5hash)
    # NOTE: the render options are currently only logged; the score below is rendered as-is
    print('Render options:', render_options)

    print('=' * 70)
    print('Processing MIDI...Please wait...')

    #=======================================================
    # START PROCESSING

    raw_score = TMIDIX.midi2single_track_ms_score(fdata, recalculate_channels=False)

    escore = TMIDIX.advanced_score_processor(raw_score, return_score_analysis=False, return_enhanced_score_notes=True)[0]

    # Index of the first note event in the raw score (everything before it is metadata)
    first_note_index = raw_score[1].index(escore[0][:6])

    # Quantize note start times and durations to 16 ms steps
    for e in escore:
        e[1] = int(e[1] / 16)
        e[2] = int(e[2] / 16)

    # Sorting by patch, pitch, then by start-time
    escore.sort(key=lambda x: x[6])
    escore.sort(key=lambda x: x[4], reverse=True)
    escore.sort(key=lambda x: x[1])

    cscore = TMIDIX.chordify_score([1000, escore])

    meta_data = raw_score[1][:first_note_index] + [escore[0]] + [escore[-1]] + [raw_score[1][-1]]

    print('Done!')
    print('=' * 70)
    print('Input MIDI metadata:', meta_data)
    print('=' * 70)

    new_fn = fn1+'.mid'

    patches = [0] * 16

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(escore,
                                                              output_signature = 'Advanced MIDI Renderer',
                                                              output_file_name = new_fn,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches,
                                                              timings_multiplier=16 # restore ms timing (times were quantized to 16 ms steps above)
                                                              )

    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfonts[0],
                                sample_rate=16000, # 44100
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Sample INTs', escore[:5])
    print('=' * 70)

    # Piano-roll style scatter plot of the processed score (one color per MIDI channel)
    x = []
    y = []
    c = []

    colors = ['red', 'yellow', 'green', 'cyan', 'blue', 'pink', 'orange', 'purple',
              'gray', 'white', 'gold', 'silver', 'lightgreen', 'indigo', 'maroon', 'turquoise']

    for s in escore:
        x.append(s[1])
        y.append(s[4])
        c.append(colors[s[3]])

    plt.close()
    plt.figure(figsize=(14,5))
    ax = plt.axes(title=fn1)
    ax.set_facecolor('black')
    plt.scatter(x, y, s=10, c=c)
    plt.xlabel("Time in ms")
    plt.ylabel("MIDI Pitch")

    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (time.time() - start_time), 'sec')

    # Outputs: md5 hash, title, lyric (karaoke lyric extraction is not implemented, so an empty string
    # is returned), summary, output MIDI file, audio, score plot
    yield input_midi_md5hash, fn1, '', str(meta_data), new_fn, (16000, audio), plt

#==========================================================================================================

if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    parser.add_argument("--port", type=int, default=7860, help="gradio server port")
    opt = parser.parse_args()

    soundfonts = ["SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2",
                  "Nice-Strings-PlusOrchestra-v1.6.sf2",
                  "KBH-Real-Choir-V2.5.sf2"
                  ]

    app = gr.Blocks()

    with app:

        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Advanced MIDI Renderer</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Transform and render any MIDI</h1>")

        gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Advanced-MIDI-Renderer&style=flat)\n\n"
                    "Los Angeles MIDI Dataset Demo\n\n"
                    "Please see [Los Angeles MIDI Dataset](https://github.com/asigalov61/Los-Angeles-MIDI-Dataset) for more information and features\n\n"
                    "[Open In Colab]"
                    "(https://colab.research.google.com/github/asigalov61/Los-Angeles-MIDI-Dataset/blob/main/Los_Angeles_MIDI_Dataset_Search_and_Explore.ipynb)"
                    " for all features\n\n"
                    )

        gr.Markdown("## Upload your MIDI")

        input_midi = gr.File(label="Input MIDI", file_types=[".midi", ".mid", ".kar"], type="filepath")

        gr.Markdown("## Select desired render options")

        render_as_is = gr.Checkbox(label="Render as-is")
        extract_melody = gr.Checkbox(label="Extract melody")
        transform = gr.Checkbox(label="Transform")

        submit = gr.Button()

        gr.Markdown("## Render results")

        output_midi_md5 = gr.Textbox(label="Output MIDI md5 hash")
        output_midi_title = gr.Textbox(label="Output MIDI title")
        output_midi_summary = gr.Textbox(label="Output MIDI summary")
        output_audio = gr.Audio(label="Output MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI score plot")
        output_midi_lyric = gr.Textbox(label="Output Karaoke MIDI lyric")
        output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])

        render_options = [render_as_is, extract_melody, transform]

        # Gradio expects a flat list of input components, so the checkboxes are appended individually
        run_event = submit.click(render_midi, [input_midi] + render_options,
                                 [output_midi_md5, output_midi_title, output_midi_lyric,
                                  output_midi_summary, output_midi, output_audio, output_plot])

    app.queue(1).launch(server_port=opt.port, share=opt.share, inbrowser=True)