import os.path
import time
import datetime
from pytz import timezone

import torch
import torch.nn.functional as F

import gradio as gr
import spaces

from x_transformer import *
import tqdm

import TMIDIX
from midi_to_colab_audio import midi_to_colab_audio

import matplotlib.pyplot as plt

in_space = os.getenv("SYSTEM") == "spaces"

# =================================================================================================

@spaces.GPU
def GenerateMIDI(num_tok, idrums, iinstr, input_top_k_value):
    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = time.time()

    print('-' * 70)
    print('Req num tok:', num_tok)
    print('Req instr:', iinstr)
    print('Drums:', idrums)
    print('-' * 70)

    # Drums on/off control token
    if idrums:
        drums = 3074
    else:
        drums = 3073

    instruments_list = ["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet",
                        "Sax", "Flute", "Drums", "Choir", "Organ"]
    first_note_instrument_number = instruments_list.index(iinstr)

    # Improv priming sequence for the model
    start_tokens = [3087, drums, 3075 + first_note_instrument_number]

    print('Selected Improv sequence:')
    print(start_tokens)
    print('-' * 70)

    output_signature = 'Allegro Music Transformer'
    output_file_name = 'Allegro-Music-Transformer-Music-Composition'
    track_name = 'Project Los Angeles'
    list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
    number_of_ticks_per_quarter = 500
    text_encoding = 'ISO-8859-1'

    output_header = [number_of_ticks_per_quarter,
                     [['track_name', 0, bytes(output_signature, text_encoding)]]]

    patch_list = [['patch_change', 0, 0, list_of_MIDI_patches[0]],
                  ['patch_change', 0, 1, list_of_MIDI_patches[1]],
                  ['patch_change', 0, 2, list_of_MIDI_patches[2]],
                  ['patch_change', 0, 3, list_of_MIDI_patches[3]],
                  ['patch_change', 0, 4, list_of_MIDI_patches[4]],
                  ['patch_change', 0, 5, list_of_MIDI_patches[5]],
                  ['patch_change', 0, 6, list_of_MIDI_patches[6]],
                  ['patch_change', 0, 7, list_of_MIDI_patches[7]],
                  ['patch_change', 0, 8, list_of_MIDI_patches[8]],
                  ['patch_change', 0, 9, list_of_MIDI_patches[9]],
                  ['patch_change', 0, 10, list_of_MIDI_patches[10]],
                  ['patch_change', 0, 11, list_of_MIDI_patches[11]],
                  ['patch_change', 0, 12, list_of_MIDI_patches[12]],
                  ['patch_change', 0, 13, list_of_MIDI_patches[13]],
                  ['patch_change', 0, 14, list_of_MIDI_patches[14]],
                  ['patch_change', 0, 15, list_of_MIDI_patches[15]],
                  ['track_name', 0, bytes(track_name, text_encoding)]]

    output = output_header + [patch_list]

    print('Loading model...')

    SEQ_LEN = 2048

    # instantiate the model
    model = TransformerWrapper(
        num_tokens=3088,
        max_seq_len=SEQ_LEN,
        attn_layers=Decoder(dim=1024, depth=32, heads=8, attn_flash=True)
    )

    model = AutoregressiveWrapper(model)
    model = torch.nn.DataParallel(model)
    model.cuda()
    print('=' * 70)

    print('Loading model checkpoint...')
    model.load_state_dict(
        torch.load('Allegro_Music_Transformer_Small_Trained_Model_56000_steps_0.9399_loss_0.7374_acc.pth',
                   map_location='cuda'))
    print('=' * 70)

    model.eval()
    print('Done!')
    print('=' * 70)

    outy = start_tokens

    ctime = 0
    dur = 0
    vel = 90
    pitch = 0
    channel = 0

    # Autoregressive generation, one token per step
    for i in range(max(1, min(1024, num_tok))):

        inp = torch.LongTensor([outy]).cuda()

        with torch.amp.autocast(device_type='cuda', dtype=torch.bfloat16):
            with torch.inference_mode():
                out = model.module.generate(inp, 1,
                                            filter_logits_fn=top_k,
                                            filter_kwargs={'k': input_top_k_value},
                                            temperature=0.9,
                                            return_prime=False,
                                            verbose=False)

        out0 = out[0].tolist()
        outy.extend(out0)

        ss1 = out0[0]

        # Decode the generated token into MIDI event values
        if 0 < ss1 < 256:
            ctime += ss1 * 8

        if 256 <= ss1 < 1280:
            dur = ((ss1 - 256) // 8) * 32
            vel = (((ss1 - 256) % 8) + 1) * 15

        if 1280 <= ss1 < 2816:
            channel = (ss1 - 1280) // 128
            pitch = (ss1 - 1280) % 128

            if channel != 9:
                pat = list_of_MIDI_patches[channel]
            else:
                pat = 128

            event = ['note', ctime, dur, channel, pitch, vel, pat]
            output[-1].append(event)

    midi_data = TMIDIX.score2midi(output, text_encoding)

    with open("Allegro-Music-Transformer-Composition.mid", 'wb') as f:
        f.write(midi_data)

    output_plot = TMIDIX.plot_ms_SONG(output[2],
                                      plot_title='Allegro-Music-Transformer-Composition',
                                      return_plt=True)

    audio = midi_to_colab_audio('Allegro-Music-Transformer-Composition.mid',
                                soundfont_path="SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2",
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Sample INTs', outy[:16])
    print('-' * 70)
    print('Last generated MIDI event', output[2][-1])
    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (time.time() - start_time), 'sec')

    return output_plot, "Allegro-Music-Transformer-Composition.mid", (16000, audio)

# =================================================================================================

if __name__ == "__main__":
    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    app = gr.Blocks()

    with app:
        gr.Markdown("