import argparse
import glob
import os.path
import torch
import torch.nn.functional as F
import gradio as gr
import onnxruntime as rt
import tqdm
from midi_synthesizer import synthesis
import TMIDIX
from math import ceil

in_space = os.getenv("SYSTEM") == "spaces"

#=================================================================================================

def GenerateMIDI():

    start_tokens = [3087, 3073+1, 3075+1]
    seq_len = 512
    max_seq_len = 2048
    temperature = 1.0
    verbose = False
    return_prime = False

    progress = gr.Progress()

    out = torch.LongTensor([start_tokens])
    st = len(start_tokens)

    if verbose:
        print("Generating sequence of max length:", seq_len)

    progress(0, desc="Starting...")

    for i in progress.tqdm(range(seq_len)):
        try:
            # Run the ONNX model on the (truncated) token history and keep the logits
            # for the last position only
            x = out[:, -max_seq_len:]
            torch_in = x.tolist()[0]
            logits = torch.FloatTensor(session.run(None, {'input': [torch_in]})[0])[:, -1]

            # Top-k filtering: keep the highest ceil((1 - thres) * vocab_size) logits,
            # mask the rest with -inf, then sample from the softmaxed distribution
            thres = 0.9
            k = ceil((1 - thres) * logits.shape[-1])
            val, ind = torch.topk(logits, k)
            probs = torch.full_like(logits, float('-inf'))
            probs.scatter_(1, ind, val)
            probs = F.softmax(probs / temperature, dim=-1)

            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)

        except Exception as e:
            print('Error', e)
            break

    # Drop the priming tokens unless the caller asked for them back
    if return_prime:
        melody_chords_f = out[:, :]
    else:
        melody_chords_f = out[:, st:]

    melody_chords_f = melody_chords_f.tolist()[0]

    print('=' * 70)
    print('Sample INTs', melody_chords_f[:12])
    print('=' * 70)

    # Decode the generated tokens into TMIDIX score events
    song_f = []

    if len(melody_chords_f) != 0:
        song = melody_chords_f
        time = 0
        dur = 0
        vel = 0
        pitch = 0
        channel = 0

        for ss in song:
            if 0 < ss < 256:        # delta start-time token
                time += ss * 8
            if 256 <= ss < 1280:    # combined duration / velocity token
                dur = ((ss - 256) // 8) * 32
                vel = (((ss - 256) % 8) + 1) * 15
            if 1280 <= ss < 2816:   # combined channel / pitch token -> emit a note event
                channel = (ss - 1280) // 128
                pitch = (ss - 1280) % 128
                song_f.append(['note', time, dur, channel, pitch, vel])

    output_signature = 'Allegro Music Transformer'
    output_file_name = 'Allegro-Music-Transformer-Music-Composition'
    track_name = 'Project Los Angeles'
    list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
    number_of_ticks_per_quarter = 500
    text_encoding = 'ISO-8859-1'

    output_header = [number_of_ticks_per_quarter,
                     [['track_name', 0, bytes(output_signature, text_encoding)]]]

    # One program (patch) change per MIDI channel, followed by the track name event
    patch_list = [['patch_change', 0, ch, list_of_MIDI_patches[ch]] for ch in range(16)]
    patch_list.append(['track_name', 0, bytes(track_name, text_encoding)])

    output = output_header + [patch_list + song_f]

    midi_data = TMIDIX.score2midi(output, text_encoding)

    with open("Allegro-Music-Transformer-Music-Composition.mid", 'wb') as f:
        f.write(midi_data)

    # Render the score to audio with the bundled SoundFont
    audio = synthesis(TMIDIX.score2opus(output), 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2')

    yield output, "Allegro-Music-Transformer-Music-Composition.mid", (44100, audio)
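# A worked example of the token layout decoded above (the token values are hypothetical;
# the arithmetic follows the decoding rules in GenerateMIDI):
#
#   token 120  -> start-time delta : time += 120 * 8 = 960
#   token 300  -> duration/velocity: dur = ((300 - 256) // 8) * 32 = 160,
#                                    vel = (((300 - 256) % 8) + 1) * 15 = 75
#   token 1500 -> channel/pitch    : channel = (1500 - 1280) // 128 = 1,
#                                    pitch   = (1500 - 1280) % 128  = 92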
#=================================================================================================

def cancel_run(output_midi_seq):
    if output_midi_seq is None:
        return None, None

    # Write out and synthesize the last score yielded by GenerateMIDI
    with open("Allegro-Music-Transformer-Music-Composition.mid", 'wb') as f:
        f.write(TMIDIX.score2midi(output_midi_seq))

    audio = synthesis(TMIDIX.score2opus(output_midi_seq), 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2')

    return "Allegro-Music-Transformer-Music-Composition.mid", (44100, audio)

#=================================================================================================

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    parser.add_argument("--port", type=int, default=7860, help="gradio server port")
    opt = parser.parse_args()

    print('Loading model...')

    # Load the ONNX model once at startup; falls back to CPU if CUDA is unavailable
    session = rt.InferenceSession('Allegro_Music_Transformer_Small_Trained_Model_56000_steps_0.9399_loss_0.7374_acc.onnx',
                                  providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

    print('Done!')

    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Allegro Music Transformer</h1>")
") gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Allegro-Music-Transformer&style=flat)\n\n" "Full-attention multi-instrumental music transformer featuring asymmetrical encoding with octo-velocity, and chords counters tokens, optimized for speed and performance\n\n" "Check out [Allegro Music Transformer](https://github.com/asigalov61/Allegro-Music-Transformer) on GitHub!\n\n" "[Open In Colab]" "(https://colab.research.google.com/github/asigalov61/Allegro-Music-Transformer/blob/main/Allegro_Music_Transformer_Composer.ipynb)" " for faster execution and endless generation" ) run_btn = gr.Button("generate", variant="primary") stop_btn = gr.Button("stop and output") output_midi_seq = gr.Variable() output_midi_visualizer = gr.HTML(elem_id="midi_visualizer_container") output_audio = gr.Audio(label="output audio", format="mp3", elem_id="midi_audio") output_midi = gr.File(label="output midi", file_types=[".mid"]) run_event = run_btn.click(GenerateMIDI, [], [output_midi_seq, output_midi, output_audio]) stop_btn.click(cancel_run, output_midi_seq, [output_midi, output_audio], cancels=run_event, queue=False) app.queue(2).launch(server_port=opt.port, share=opt.share, inbrowser=True)