# -*- coding: utf-8 -*-
"""Monster_Music_Transformer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_fs1W2cuXxiMKznQIP3wtUxSIbxt71Nk
# Monster Music Transformer (ver. 1.0)
***
Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools
***
WARNING: This complete implementation is a functioning artificial intelligence model. Please exercise great humility, care, and respect. https://www.nscai.gov/
***
#### Project Los Angeles
#### Tegridy Code 2024
***
# (GPU CHECK)
"""
#@title NVIDIA GPU check
!nvidia-smi
"""# (SETUP ENVIRONMENT)"""
#@title Install dependencies
!git clone --depth 1 https://github.com/asigalov61/Monster-MIDI-Dataset
!pip install huggingface_hub
!pip install einops
!pip install torch-summary
!apt install fluidsynth # needed to render MIDI to audio; pip cannot install fluidsynth, only apt works
# Commented out IPython magic to ensure Python compatibility.
#@title Import modules
print('=' * 70)
print('Loading core Monster Music Transformer modules...')
import os
import copy
import pickle
import secrets
import statistics
from time import time
import tqdm
print('=' * 70)
print('Loading main Monster Music Transformer modules...')
import torch
# %cd /content/Monster-MIDI-Dataset
import TMIDIX
from midi_to_colab_audio import midi_to_colab_audio
from x_transformer_1_27_16 import *
import random
# %cd /content/
print('=' * 70)
print('Loading aux Monster Music Transformer modules...')
import matplotlib.pyplot as plt
from torchsummary import summary
from sklearn import metrics
from IPython.display import Audio, display
from huggingface_hub import hf_hub_download
from google.colab import files
print('=' * 70)
print('Done!')
print('Enjoy! :)')
print('=' * 70)
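# A quick optional sanity check (a sketch, not part of the original notebook):
# confirm that PyTorch can see the GPU before the model is loaded.
print('PyTorch:', torch.__version__)
print('CUDA available:', torch.cuda.is_available())
if torch.cuda.is_available():
    print('GPU:', torch.cuda.get_device_name(0))
    print('bfloat16 supported:', torch.cuda.is_bf16_supported())
print('=' * 70)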
"""# (LOAD MODEL)"""
#@title Load Monster Music Transformer Pre-Trained Model
#@markdown Choose model
select_model_to_load = "651M-32L-Fast-Large" # @param ["651M-32L-Fast-Large"]
#@markdown Model precision option
model_precision = "bfloat16" # @param ["bfloat16", "float16"]
#@markdown bfloat16 == half precision / faster speed (if supported; otherwise the model will fall back to float16)
#@markdown float16 == half precision / fast speed
plot_tokens_embeddings = "None" # @param ["None", "Start Times", "Durations Velocities", "Piano Pitches", "Drums Pitches", "Aux"]
print('=' * 70)
print('Loading Monster Music Transformer', select_model_to_load,'Pre-Trained Model...')
print('Please wait...')
print('=' * 70)
full_path_to_models_dir = "/content/Monster-MIDI-Dataset/"
if select_model_to_load == '651M-32L-Fast-Large':
model_checkpoint_file_name = 'Monster_Music_Transformer_Large_Trained_Model_22501_steps_0.3419_loss_0.9121_acc.pth'
model_path = os.path.join(full_path_to_models_dir, model_checkpoint_file_name)
num_layers = 36
if os.path.isfile(model_path):
print('Model already exists...')
else:
hf_hub_download(repo_id='asigalov61/Monster-Music-Transformer',
filename=model_checkpoint_file_name,
local_dir='/content/Monster-MIDI-Dataset',
local_dir_use_symlinks=False)
print('=' * 70)
print('Instantiating model...')
device_type = 'cuda'
if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported():
    dtype = 'bfloat16'
else:
    dtype = 'float16'
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
SEQ_LEN = 8192
# instantiate the model
model = TransformerWrapper(
num_tokens = 19080,
max_seq_len = SEQ_LEN,
attn_layers = Decoder(dim = 1024, depth = num_layers, heads = 32, attn_flash=True)
)
model = AutoregressiveWrapper(model, ignore_index=19079)
model.cuda()
print('=' * 70)
print('Loading model checkpoint...')
model.load_state_dict(torch.load(model_path))
print('=' * 70)
model.eval()
print('Done!')
print('=' * 70)
print('Model will use', dtype, 'precision...')
print('=' * 70)
# Model stats
print('Model summary...')
summary(model)
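# Supplementary check (a sketch using the standard PyTorch API): count the
# trainable parameters directly as a cross-check against the summary above.
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable parameters:', '{:,}'.format(num_params))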
# Plot Token Embeddings
if plot_tokens_embeddings != 'None':
tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist()
if plot_tokens_embeddings == 'Start Times':
tok_range = [0, 256]
elif plot_tokens_embeddings == 'Durations Velocities':
tok_range = [256, 2304]
elif plot_tokens_embeddings == 'Piano Pitches':
tok_range = [2304, 2304+128]
elif plot_tokens_embeddings == 'Drums Pitches':
tok_range = [18945-128, 18945]
elif plot_tokens_embeddings == 'Aux':
tok_range = [18945, 19079]
if plot_tokens_embeddings != 'None':
tok_emb1 = tok_emb[tok_range[0]:tok_range[1]]
# pairwise cosine distances (1 - cosine similarity) between the selected embeddings
cos_sim = metrics.pairwise_distances(
tok_emb1, metric='cosine'
)
plt.figure(figsize=(7, 7))
plt.imshow(cos_sim, cmap="inferno", interpolation="nearest")
im_ratio = cos_sim.shape[0] / cos_sim.shape[1]
plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)
plt.xlabel("Position")
plt.ylabel("Position")
plt.tight_layout()
plt.savefig("/content/Monster-Music-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight")
plt.show()
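# For reference, a map of the token vocabulary as this notebook uses it
# (reconstructed from the encode/decode cells below; a sketch, not an official spec):
TOKEN_RANGES = {
    'delta_start_times':    (0, 255),       # time += token * 16 ms
    'durations_velocities': (256, 2303),    # dur = ((t-256) // 8) * 16 ms; vel = (((t-256) % 8)+1) * 15
    'patches_pitches':      (2304, 18944),  # patch = (t-2304) // 129; pitch = (t-2304) % 129
    'outro':                (18945, 18945),
    'drums_off_on':         (18946, 18947),
    'intro_patch':          (18948, 19076), # 18948 + first note MIDI patch number
    'start':                (19077, 19077),
    'stop':                 (19078, 19078), # EOS; see min_stop_token in the continuation cell
    'pad':                  (19079, 19079), # matches ignore_index above
}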
"""# (GENERATE)
# (IMPROV)
"""
#@title Standard Improv Generator
#@markdown Improv type
improv_type = "Random Freestyle" # @param ["Random Freestyle", "Freestyle without Drums", "Freestyle with Drums", "Custom"]
#@markdown Custom Improv settings
first_note_MIDI_patch_number = 0 # @param {type:"slider", min:0, max:128, step:1}
add_drums = False #@param {type:"boolean"}
#@markdown Generation settings
number_of_tokens_to_generate = 546 # @param {type:"slider", min:30, max:8190, step:3}
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}
#@markdown Other settings
render_MIDI_to_audio = True # @param {type:"boolean"}
print('=' * 70)
print('Monster Music Transformer Standard Improv Model Generator')
print('=' * 70)
if improv_type == 'Random Freestyle':
    outy = [19077]
elif improv_type == 'Freestyle without Drums':
    outy = [19077, 18946]
elif improv_type == 'Freestyle with Drums':
    outy = [19077, 18947]
elif improv_type == 'Custom':
    if add_drums:
        drumsp = 18947 # Yes
    else:
        drumsp = 18946 # No
    outy = [19077, drumsp, 18948+first_note_MIDI_patch_number]
print('Selected Improv sequence:')
print(outy)
print('=' * 70)
torch.cuda.empty_cache()
inp = [outy] * number_of_batches_to_generate
inp = torch.LongTensor(inp).cuda()
with ctx:
out = model.generate(inp,
number_of_tokens_to_generate,
temperature=temperature,
return_prime=True,
verbose=True)
out0 = out.tolist()
print('=' * 70)
print('Done!')
print('=' * 70)
torch.cuda.empty_cache()
#======================================================================
print('Rendering results...')
for i in range(number_of_batches_to_generate):
print('=' * 70)
print('Batch #', i)
print('=' * 70)
out1 = out0[i]
print('Sample INTs', out1[:12])
print('=' * 70)
if len(out1) != 0:
song = out1
song_f = []
time = 0
dur = 0
vel = 90
pitch = 0
channel = 0
patches = [-1] * 16
channels = [0] * 16
channels[9] = 1
for ss in song:
if 0 <= ss < 256:
time += ss * 16
if 256 <= ss < 2304:
dur = ((ss-256) // 8) * 16
vel = (((ss-256) % 8)+1) * 15
if 2304 <= ss < 18945:
patch = (ss-2304) // 129
if patch < 128:
if patch not in patches:
if 0 in channels:
cha = channels.index(0)
channels[cha] = 1
else:
cha = 15
patches[cha] = patch
channel = patches.index(patch)
else:
channel = patches.index(patch)
if patch == 128:
channel = 9
pitch = (ss-2304) % 129
song_f.append(['note', time, dur, channel, pitch, vel, patch ])
patches = [0 if x==-1 else x for x in patches]
data = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
output_signature = 'Monster Music Transformer',
output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
track_name='Project Los Angeles',
list_of_MIDI_patches=patches
)
print('=' * 70)
print('Displaying resulting composition...')
print('=' * 70)
fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)
if render_MIDI_to_audio:
midi_audio = midi_to_colab_audio(fname + '.mid')
display(Audio(midi_audio, rate=16000, normalize=False))
TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
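# A minimal helper (a sketch, not part of the original notebook) that decodes a
# single token into human-readable form, mirroring the decode loop above:
def describe_token(ss):
    if 0 <= ss < 256:
        return 'delta time: ' + str(ss * 16) + ' ms'
    if 256 <= ss < 2304:
        return 'duration: ' + str(((ss-256) // 8) * 16) + ' ms, velocity: ' + str((((ss-256) % 8)+1) * 15)
    if 2304 <= ss < 18945:
        return 'patch: ' + str((ss-2304) // 129) + ', pitch: ' + str((ss-2304) % 129)
    return 'control token: ' + str(ss)

# Example: inspect the first few tokens of the last generated batch
for t in out0[-1][:6]:
    print(t, '->', describe_token(t))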
"""# (CUSTOM MIDI)"""
#@title Load Seed MIDI
#@markdown Press the play button to upload your own seed MIDI or to load one of the provided sample seed MIDIs from the dropdown list below
select_seed_MIDI = "Upload your own custom MIDI" # @param ["Upload your own custom MIDI", "Monster-Music-Transformer-Piano-Seed-1", "Monster-Music-Transformer-Piano-Seed-2", "Monster-Music-Transformer-Piano-Seed-3", "Monster-Music-Transformer-Piano-Seed-4", "Monster-Music-Transformer-Piano-Seed-5", "Monster-Music-Transformer-Piano-Seed-6", "Monster-Music-Transformer-MI-Seed-1", "Monster-Music-Transformer-MI-Seed-2", "Monster-Music-Transformer-MI-Seed-3", "Monster-Music-Transformer-MI-Seed-4", "Monster-Music-Transformer-MI-Seed-5", "Monster-Music-Transformer-MI-Seed-6"]
render_MIDI_to_audio = False # @param {type:"boolean"}
print('=' * 70)
print('Monster Music Transformer Seed MIDI Loader')
print('=' * 70)
f = ''
if select_seed_MIDI != "Upload your own custom MIDI":
print('Loading seed MIDI...')
f = '/content/Monster-MIDI-Dataset/Seeds/'+select_seed_MIDI+'.mid'
else:
print('Upload your own custom MIDI...')
print('=' * 70)
uploaded_MIDI = files.upload()
if list(uploaded_MIDI.keys()):
f = list(uploaded_MIDI.keys())[0]
if f != '':
print('=' * 70)
print('File:', f)
print('=' * 70)
#=======================================================
# START PROCESSING
# Converting MIDI to ms score with the MIDI.py module
score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read(), recalculate_channels=False)
# INSTRUMENTS CONVERSION CYCLE
events_matrix = []
itrack = 1
patches = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
while itrack < len(score):
for event in score[itrack]:
if event[0] == 'note' or event[0] == 'patch_change':
events_matrix.append(event)
itrack += 1
events_matrix.sort(key=lambda x: x[1])
events_matrix1 = []
for event in events_matrix:
if event[0] == 'patch_change':
patches[event[2]] = event[3]
if event[0] == 'note':
event.extend([patches[event[3]]])
if events_matrix1:
if (event[1] == events_matrix1[-1][1]):
if ([event[3], event[4]] != events_matrix1[-1][3:5]):
events_matrix1.append(event)
else:
events_matrix1.append(event)
else:
events_matrix1.append(event)
if len(events_matrix1) > 0:
if min([e[1] for e in events_matrix1]) >= 0 and min([e[2] for e in events_matrix1]) >= 0:
#=======================================================
# PRE-PROCESSING
# checking number of instruments in a composition
instruments_list_without_drums = list(set([y[3] for y in events_matrix1 if y[3] != 9]))
instruments_list = list(set([y[3] for y in events_matrix1]))
if len(events_matrix1) > 0 and len(instruments_list_without_drums) > 0:
#======================================
events_matrix2 = []
# Recalculating timings
for e in events_matrix1:
# Original timings
e[1] = int(e[1] / 16)
e[2] = int(e[2] / 16)
#===================================
# ORIGINAL COMPOSITION
#===================================
# Sorting by patch, pitch, then by start-time
events_matrix1.sort(key=lambda x: x[6])
events_matrix1.sort(key=lambda x: x[4], reverse=True)
events_matrix1.sort(key=lambda x: x[1])
#=======================================================
# FINAL PROCESSING
melody_chords = []
melody_chords2 = []
# Break between compositions / Intro seq
if 9 in instruments_list:
drums_present = 18947 # Yes
else:
drums_present = 18946 # No
if events_matrix1[0][3] != 9:
pat = events_matrix1[0][6]
else:
pat = 128
melody_chords.extend([19077, drums_present, 18948+pat, 0]) # Intro seq
#=======================================================
# MAIN PROCESSING CYCLE
#=======================================================
abs_time = 0
pbar_time = 0
pe = events_matrix1[0]
chords_counter = 1
comp_chords_len = len(list(set([y[1] for y in events_matrix1])))
for e in events_matrix1:
#=======================================================
# Timings...
# Clipping all values...
delta_time = max(0, min(255, e[1]-pe[1]))
# Durations and channels
dur = max(0, min(255, e[2]))
cha = max(0, min(15, e[3]))
# Patches
if cha == 9: # Drums patch will be == 128
pat = 128
else:
pat = e[6]
# Pitches
ptc = max(1, min(127, e[4]))
# Velocities
# Calculating octo-velocity
vel = max(8, min(127, e[5]))
velocity = round(vel / 15)-1
#=======================================================
# Outro seq
# if ((comp_chords_len - chords_counter) == 50) and (delta_time != 0):
# out_t = 18946+delta_time
# out_p = 19202+ptc
# melody_chords.extend([18945, out_t, out_p]) # outro seq
# if delta_time != 0:
# chords_counter += 1
#=======================================================
# FINAL NOTE SEQ
# Writing final note asynchronously
dur_vel = (8 * dur) + velocity
pat_ptc = (129 * pat) + ptc
if delta_time != 0:
melody_chords.extend([delta_time, dur_vel+256, pat_ptc+2304])
else:
melody_chords.extend([dur_vel+256, pat_ptc+2304])
melody_chords2.append([delta_time, dur_vel+256, pat_ptc+2304])
pe = e
#=======================================================
# melody_chords.extend([19078, 19078, 19078]) # EOS (stop token; see min_stop_token below)
#=======================================================
# TOTAL DICTIONARY SIZE 19079+1=19080 (matches num_tokens above)
#=======================================================
#=======================================================
song = melody_chords
song_f = []
time = 0
dur = 0
vel = 90
pitch = 0
channel = 0
patches = [-1] * 16
channels = [0] * 16
channels[9] = 1
for ss in song:
if 0 <= ss < 256:
time += ss * 16
if 256 <= ss < 2304:
dur = ((ss-256) // 8) * 16
vel = (((ss-256) % 8)+1) * 15
if 2304 <= ss < 18945:
patch = (ss-2304) // 129
if patch < 128:
if patch not in patches:
if 0 in channels:
cha = channels.index(0)
channels[cha] = 1
else:
cha = 15
patches[cha] = patch
channel = patches.index(patch)
else:
channel = patches.index(patch)
if patch == 128:
channel = 9
pitch = (ss-2304) % 129
song_f.append(['note', time, dur, channel, pitch, vel, patch ])
patches = [0 if x==-1 else x for x in patches]
detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
output_signature = 'Monster Music Transformer',
output_file_name = '/content/Monster-Music-Transformer-Seed-Composition',
track_name='Project Los Angeles',
list_of_MIDI_patches=patches
)
#=======================================================
print('=' * 70)
print('Composition stats:')
print('Composition has', len(melody_chords2), 'notes')
print('Composition has', len(melody_chords), 'tokens')
print('Composition MIDI patches:', sorted(list(set([((y-2304) // 129) for y in melody_chords if 2304 <= y < 18945]))))
print('=' * 70)
print('Displaying resulting composition...')
print('=' * 70)
fname = '/content/Monster-Music-Transformer-Seed-Composition'
if render_MIDI_to_audio:
midi_audio = midi_to_colab_audio(fname + '.mid')
display(Audio(midi_audio, rate=16000, normalize=False))
TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
else:
print('=' * 70)
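# The encoder above packs each note into at most three tokens. As a worked
# example under the same formulas (a sketch): a note with dur == 32 (i.e. 512 ms / 16),
# velocity bucket 5, patch 0 (piano) and pitch 60 (middle C) becomes
#   dur_vel = (8 * 32) + 5 = 261  -> token 261 + 256 = 517
#   pat_ptc = (129 * 0) + 60 = 60 -> token 60 + 2304 = 2364
# and the decode loop above reverses this exactly:
assert ((517 - 256) // 8) * 16 == 512  # duration back in ms
assert (517 - 256) % 8 == 5            # velocity bucket -> (5+1)*15 == 90
assert (2364 - 2304) // 129 == 0       # patch (piano)
assert (2364 - 2304) % 129 == 60       # pitch (middle C)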
"""# (CONTINUATION)"""
#@title Standard Continuation
#@markdown Generation settings
try_to_generate_outro = False #@param {type:"boolean"}
number_of_prime_tokens = 7191 # @param {type:"slider", min:3, max:8190, step:3}
number_of_tokens_to_generate = 504 # @param {type:"slider", min:30, max:8190, step:3}
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}
#@markdown Other settings
include_prime_tokens_in_generated_output = False #@param {type:"boolean"}
allow_model_to_stop_generation_if_needed = False #@param {type:"boolean"}
render_MIDI_to_audio = True # @param {type:"boolean"}
print('=' * 70)
print('Monster Music Transformer Standard Continuation Model Generator')
print('=' * 70)
if allow_model_to_stop_generation_if_needed:
min_stop_token = 19078
else:
min_stop_token = None
outy = melody_chords[:number_of_prime_tokens]
if try_to_generate_outro:
outy.extend([18945])
torch.cuda.empty_cache()
inp = [outy] * number_of_batches_to_generate
inp = torch.LongTensor(inp).cuda()
with ctx:
out = model.generate(inp,
number_of_tokens_to_generate,
temperature=temperature,
return_prime=include_prime_tokens_in_generated_output,
eos_token=min_stop_token,
verbose=True)
out0 = out.tolist()
torch.cuda.empty_cache()
print('=' * 70)
print('Done!')
print('=' * 70)
#======================================================================
print('Rendering results...')
for i in range(number_of_batches_to_generate):
print('=' * 70)
print('Batch #', i)
print('=' * 70)
out1 = out0[i]
print('Sample INTs', out1[:12])
print('=' * 70)
if len(out1) != 0:
song = out1
song_f = []
time = 0
dur = 0
vel = 90
pitch = 0
channel = 0
patches = [-1] * 16
channels = [0] * 16
channels[9] = 1
for ss in song:
if 0 <= ss < 256:
time += ss * 16
if 256 <= ss < 2304:
dur = ((ss-256) // 8) * 16
vel = (((ss-256) % 8)+1) * 15
if 2304 <= ss < 18945:
patch = (ss-2304) // 129
if patch < 128:
if patch not in patches:
if 0 in channels:
cha = channels.index(0)
channels[cha] = 1
else:
cha = 15
patches[cha] = patch
channel = patches.index(patch)
else:
channel = patches.index(patch)
if patch == 128:
channel = 9
pitch = (ss-2304) % 129
song_f.append(['note', time, dur, channel, pitch, vel, patch ])
patches = [0 if x==-1 else x for x in patches]
detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
output_signature = 'Monster Music Transformer',
output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
track_name='Project Los Angeles',
list_of_MIDI_patches=patches
)
print('=' * 70)
print('Displaying resulting composition...')
print('=' * 70)
fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)
if render_MIDI_to_audio:
midi_audio = midi_to_colab_audio(fname + '.mid')
display(Audio(midi_audio, rate=16000, normalize=False))
TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
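# Optionally (a sketch using the google.colab files API imported above), the
# generated MIDI files can be downloaded to the local machine:
# for i in range(number_of_batches_to_generate):
#     files.download('/content/Monster-Music-Transformer-Music-Composition_' + str(i) + '.mid')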
"""# Congrats! You did it! :)"""