{ "cells": [ { "cell_type": "markdown", "source": [ "# Monster Music Transformer (ver. 1.0)\n", "\n", "***\n", "\n", "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n", "\n", "***\n", "\n", "WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/\n", "\n", "***\n", "\n", "#### Project Los Angeles\n", "\n", "#### Tegridy Code 2024\n", "\n", "***" ], "metadata": { "id": "gpy3qsulqHa5" } }, { "cell_type": "markdown", "source": [ "# (GPU CHECK)" ], "metadata": { "id": "W_So4w8fqPGL" } }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "X3rABEpKCO02", "cellView": "form" }, "outputs": [], "source": [ "#@title NVIDIA GPU check\n", "!nvidia-smi" ] }, { "cell_type": "markdown", "source": [ "# (SETUP ENVIRONMENT)" ], "metadata": { "id": "C0XxnXGFqVyh" } }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "vK40g6V_BTNj", "cellView": "form" }, "outputs": [], "source": [ "#@title Install dependencies\n", "!git clone --depth 1 https://github.com/asigalov61/Monster-MIDI-Dataset\n", "!pip install huggingface_hub\n", "!pip install einops\n", "!pip install torch-summary\n", "!apt install fluidsynth #Pip does not work for some reason. Only apt works" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "DzCOZU_gBiQV", "cellView": "form" }, "outputs": [], "source": [ "#@title Import modules\n", "\n", "print('=' * 70)\n", "print('Loading core Monster Music Transformer modules...')\n", "\n", "import os\n", "import copy\n", "import pickle\n", "import secrets\n", "import statistics\n", "from time import time\n", "import tqdm\n", "\n", "print('=' * 70)\n", "print('Loading main Monster Music Transformer modules...')\n", "import torch\n", "\n", "%cd /content/Monster-MIDI-Dataset\n", "\n", "import TMIDIX\n", "\n", "from midi_to_colab_audio import midi_to_colab_audio\n", "\n", "from x_transformer_1_27_16 import *\n", "\n", "import random\n", "\n", "%cd /content/\n", "print('=' * 70)\n", "print('Loading aux Monster Music Transformer modules...')\n", "\n", "import matplotlib.pyplot as plt\n", "\n", "from torchsummary import summary\n", "from sklearn import metrics\n", "\n", "from IPython.display import Audio, display\n", "\n", "from huggingface_hub import hf_hub_download\n", "\n", "from google.colab import files\n", "\n", "print('=' * 70)\n", "print('Done!')\n", "print('Enjoy! 
:)')\n", "print('=' * 70)" ] }, { "cell_type": "markdown", "metadata": { "id": "eI3aQtHzqSnp" }, "source": [ "# (LOAD MODEL)" ] }, { "cell_type": "code", "source": [ "#@title Load Monster Music Transformer Pre-Trained Model\n", "\n", "#@markdown Choose model\n", "\n", "select_model_to_load = \"651M-32L-Fast-Large\" # @param [\"651M-32L-Fast-Large\"]\n", "\n", "#@markdown Model precision option\n", "\n", "model_precision = \"bfloat16\" # @param [\"bfloat16\", \"float16\"]\n", "\n", "#@markdown bfloat16 == 16-bit brain float/faster speed (if supported, otherwise the model will default to float16)\n", "\n", "#@markdown float16 == 16-bit half precision/fast speed (fallback if bfloat16 is not supported)\n", "\n", "plot_tokens_embeddings = \"None\" # @param [\"None\", \"Start Times\", \"Durations Velocities\", \"Piano Pitches\", \"Drums Pitches\", \"Aux\"]\n", "\n", "print('=' * 70)\n", "print('Loading Monster Music Transformer', select_model_to_load,'Pre-Trained Model...')\n", "print('Please wait...')\n", "print('=' * 70)\n", "\n", "full_path_to_models_dir = \"/content/Monster-MIDI-Dataset/\"\n", "\n", "if select_model_to_load == '651M-32L-Fast-Large':\n", "\n", " model_checkpoint_file_name = 'Monster_Music_Transformer_Large_Trained_Model_22501_steps_0.3419_loss_0.9121_acc.pth'\n", " model_path = full_path_to_models_dir+'/'+model_checkpoint_file_name\n", " num_layers = 36\n", " if os.path.isfile(model_path):\n", " print('Model already exists...')\n", "\n", " else:\n", " hf_hub_download(repo_id='asigalov61/Monster-Music-Transformer',\n", " filename=model_checkpoint_file_name,\n", " local_dir='/content/Monster-MIDI-Dataset',\n", " local_dir_use_symlinks=False)\n", "\n", "print('=' * 70)\n", "print('Instantiating model...')\n", "\n", "device_type = 'cuda'\n", "\n", "if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported():\n", " dtype = 'bfloat16'\n", "else:\n", " dtype = 'float16'\n", "\n", "if model_precision == 'float16':\n", " dtype = 'float16'\n", "\n", "ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]\n", "ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)\n", "\n", "SEQ_LEN = 8192\n", "\n", "# instantiate the model\n", "\n", "model = TransformerWrapper(\n", " num_tokens = 19080,\n", " max_seq_len = SEQ_LEN,\n", " attn_layers = Decoder(dim = 1024, depth = num_layers, heads = 32, attn_flash=True)\n", ")\n", "\n", "model = AutoregressiveWrapper(model, ignore_index=19079)\n", "\n", "model.cuda()\n", "print('=' * 70)\n", "\n", "print('Loading model checkpoint...')\n", "\n", "model.load_state_dict(torch.load(model_path))\n", "print('=' * 70)\n", "\n", "model.eval()\n", "\n", "print('Done!')\n", "print('=' * 70)\n", "\n", "print('Model will use', dtype, 'precision...')\n", "print('=' * 70)\n", "\n", "# Model stats\n", "print('Model summary...')\n", "summary(model)\n", "\n", "# Plot Token Embeddings\n", "if plot_tokens_embeddings != 'None':\n", " tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist()\n", "\n", "if plot_tokens_embeddings == 'Start Times':\n", " tok_range = [0, 256]\n", "\n", "elif plot_tokens_embeddings == 'Durations Velocities':\n", " tok_range = [256, 2304]\n", "\n", "elif plot_tokens_embeddings == 'Piano Pitches':\n", " tok_range = [2304, 2304+128]\n", "\n", "elif plot_tokens_embeddings == 'Drums Pitches':\n", " tok_range = [18945-128, 18945]\n", "\n", "elif plot_tokens_embeddings == 'Aux':\n", " tok_range = [18945, 19079]\n", "\n", "if plot_tokens_embeddings != 'None':\n", "\n", " tok_emb1 = []\n", "\n", " for t in 
tok_emb[tok_range[0]:tok_range[1]]:\n", " tok_emb1.append(t)\n", "\n", " cos_sim = metrics.pairwise_distances(\n", " tok_emb1, metric='cosine'\n", " )\n", " plt.figure(figsize=(7, 7))\n", " plt.imshow(cos_sim, cmap=\"inferno\", interpolation=\"nearest\")\n", " im_ratio = cos_sim.shape[0] / cos_sim.shape[1]\n", " plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)\n", " plt.xlabel(\"Position\")\n", " plt.ylabel(\"Position\")\n", " plt.tight_layout()\n", " plt.plot()\n", " plt.savefig(\"/content/Monster-Music-Transformer-Tokens-Embeddings-Plot.png\", bbox_inches=\"tight\")" ], "metadata": { "id": "V4s_G8yUL0cH", "cellView": "form" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# (GENERATE)" ], "metadata": { "id": "7xNyANjZsCOi" } }, { "cell_type": "markdown", "source": [ "# (IMPROV)" ], "metadata": { "id": "BxepTeHVmmKO" } }, { "cell_type": "code", "source": [ "#@title Standard Improv Generator\n", "\n", "#@markdown Improv type\n", "\n", "improv_type = \"Random Freestyle\" # @param [\"Random Freestyle\", \"Freestyle without Drums\", \"Freestyle with Drums\", \"Custom\"]\n", "\n", "#@markdown Custom Improv settings\n", "\n", "first_note_MIDI_patch_number = 0 # @param {type:\"slider\", min:0, max:128, step:1}\n", "add_drums = False #@param {type:\"boolean\"}\n", "\n", "#@markdown Generation settings\n", "\n", "number_of_tokens_to_generate = 546 # @param {type:\"slider\", min:30, max:8190, step:3}\n", "number_of_batches_to_generate = 4 #@param {type:\"slider\", min:1, max:16, step:1}\n", "temperature = 0.9 # @param {type:\"slider\", min:0.1, max:1, step:0.05}\n", "\n", "#@markdown Other settings\n", "\n", "render_MIDI_to_audio = True # @param {type:\"boolean\"}\n", "\n", "print('=' * 70)\n", "print('Monster Music Transformer Standard Improv Model Generator')\n", "print('=' * 70)\n", "\n", "if improv_type == 'Random Freestyle':\n", "\n", " outy = [19077]\n", "\n", "if improv_type == 'Freestyle without Drums':\n", "\n", " outy = [19077, 18946]\n", "\n", "if improv_type == 'Freestyle with Drums':\n", "\n", " outy = [19077, 18947]\n", "\n", "if improv_type == 'Custom':\n", "\n", " if add_drums:\n", " drumsp = 18947 # Yes\n", " else:\n", " drumsp = 18946 # No\n", "\n", " outy = [19077, drumsp, 18948+first_note_MIDI_patch_number]\n", "\n", "print('Selected Improv sequence:')\n", "print(outy)\n", "print('=' * 70)\n", "\n", "torch.cuda.empty_cache()\n", "\n", "inp = [outy] * number_of_batches_to_generate\n", "\n", "inp = torch.LongTensor(inp).cuda()\n", "\n", "with ctx:\n", " out = model.generate(inp,\n", " number_of_tokens_to_generate,\n", " temperature=temperature,\n", " return_prime=True,\n", " verbose=True)\n", "\n", "out0 = out.tolist()\n", "\n", "print('=' * 70)\n", "print('Done!')\n", "print('=' * 70)\n", "\n", "torch.cuda.empty_cache()\n", "\n", "#======================================================================\n", "\n", "print('Rendering results...')\n", "\n", "for i in range(number_of_batches_to_generate):\n", "\n", " print('=' * 70)\n", " print('Batch #', i)\n", " print('=' * 70)\n", "\n", " out1 = out0[i]\n", "\n", " print('Sample INTs', out1[:12])\n", " print('=' * 70)\n", "\n", " if len(out1) != 0:\n", "\n", " song = out1\n", " song_f = []\n", "\n", " time = 0\n", " dur = 0\n", " vel = 90\n", " pitch = 0\n", " channel = 0\n", "\n", " patches = [-1] * 16\n", "\n", " channels = [0] * 16\n", " channels[9] = 1\n", "\n", " for ss in song:\n", "\n", " if 0 <= ss < 256:\n", "\n", " time += ss * 16\n", "\n", " if 256 <= ss < 2304:\n", "\n", " dur = 
((ss-256) // 8) * 16\n", " vel = (((ss-256) % 8)+1) * 15\n", "\n", " if 2304 <= ss < 18945:\n", "\n", " patch = (ss-2304) // 129\n", "\n", " if patch < 128:\n", "\n", " if patch not in patches:\n", " if 0 in channels:\n", " cha = channels.index(0)\n", " channels[cha] = 1\n", " else:\n", " cha = 15\n", "\n", " patches[cha] = patch\n", " channel = patches.index(patch)\n", " else:\n", " channel = patches.index(patch)\n", "\n", " if patch == 128:\n", " channel = 9\n", "\n", " pitch = (ss-2304) % 129\n", "\n", " song_f.append(['note', time, dur, channel, pitch, vel, patch ])\n", "\n", " patches = [0 if x==-1 else x for x in patches]\n", "\n", " data = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", " output_signature = 'Monster Music Transformer',\n", " output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),\n", " track_name='Project Los Angeles',\n", " list_of_MIDI_patches=patches\n", " )\n", "\n", "\n", " print('=' * 70)\n", " print('Displaying resulting composition...')\n", " print('=' * 70)\n", "\n", " fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)\n", "\n", " if render_MIDI_to_audio:\n", " midi_audio = midi_to_colab_audio(fname + '.mid')\n", " display(Audio(midi_audio, rate=16000, normalize=False))\n", "\n", " TMIDIX.plot_ms_SONG(song_f, plot_title=fname)" ], "metadata": { "cellView": "form", "id": "Jwxz-eaF0K1y" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# (CUSTOM MIDI)" ], "metadata": { "id": "Gt03VtO6uKkb" } }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "4QXbFLsKqSnt", "cellView": "form" }, "outputs": [], "source": [ "#@title Load Seed MIDI\n", "\n", "#@markdown Press the play button to upload your own seed MIDI or to load one of the provided sample seed MIDIs from the dropdown list below\n", "\n", "select_seed_MIDI = \"Upload your own custom MIDI\" # @param [\"Upload your own custom MIDI\", \"Monster-Music-Transformer-Piano-Seed-1\", \"Monster-Music-Transformer-Piano-Seed-2\", \"Monster-Music-Transformer-Piano-Seed-3\", \"Monster-Music-Transformer-Piano-Seed-4\", \"Monster-Music-Transformer-Piano-Seed-5\", \"Monster-Music-Transformer-Piano-Seed-6\", \"Monster-Music-Transformer-MI-Seed-1\", \"Monster-Music-Transformer-MI-Seed-2\", \"Monster-Music-Transformer-MI-Seed-3\", \"Monster-Music-Transformer-MI-Seed-4\", \"Monster-Music-Transformer-MI-Seed-5\", \"Monster-Music-Transformer-MI-Seed-6\"]\n", "render_MIDI_to_audio = False # @param {type:\"boolean\"}\n", "\n", "print('=' * 70)\n", "print('Monster Music Transformer Seed MIDI Loader')\n", "print('=' * 70)\n", "\n", "f = ''\n", "\n", "if select_seed_MIDI != \"Upload your own custom MIDI\":\n", " print('Loading seed MIDI...')\n", " f = '/content/Monster-MIDI-Dataset/Seeds/'+select_seed_MIDI+'.mid'\n", "\n", "else:\n", " print('Upload your own custom MIDI...')\n", " print('=' * 70)\n", " uploaded_MIDI = files.upload()\n", " if list(uploaded_MIDI.keys()):\n", " f = list(uploaded_MIDI.keys())[0]\n", "\n", "if f != '':\n", "\n", " print('=' * 70)\n", " print('File:', f)\n", " print('=' * 70)\n", "\n", " #=======================================================\n", " # START PROCESSING\n", "\n", " # Converting MIDI to ms score with MIDI.py module\n", " score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read(), recalculate_channels=False)\n", "\n", " # INSTRUMENTS CONVERSION CYCLE\n", " events_matrix = []\n", " itrack = 1\n", " patches = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "\n", " while itrack < 
len(score):\n", " for event in score[itrack]:\n", " if event[0] == 'note' or event[0] == 'patch_change':\n", " events_matrix.append(event)\n", " itrack += 1\n", "\n", " events_matrix.sort(key=lambda x: x[1])\n", "\n", " events_matrix1 = []\n", "\n", " for event in events_matrix:\n", " if event[0] == 'patch_change':\n", " patches[event[2]] = event[3]\n", "\n", " if event[0] == 'note':\n", " event.extend([patches[event[3]]])\n", "\n", " if events_matrix1:\n", " if (event[1] == events_matrix1[-1][1]):\n", " if ([event[3], event[4]] != events_matrix1[-1][3:5]):\n", " events_matrix1.append(event)\n", " else:\n", " events_matrix1.append(event)\n", "\n", " else:\n", " events_matrix1.append(event)\n", "\n", " if len(events_matrix1) > 0:\n", " if min([e[1] for e in events_matrix1]) >= 0 and min([e[2] for e in events_matrix1]) >= 0:\n", "\n", " #=======================================================\n", " # PRE-PROCESSING\n", "\n", " # checking number of instruments in a composition\n", " instruments_list_without_drums = list(set([y[3] for y in events_matrix1 if y[3] != 9]))\n", " instruments_list = list(set([y[3] for y in events_matrix1]))\n", "\n", " if len(events_matrix1) > 0 and len(instruments_list_without_drums) > 0:\n", "\n", " #======================================\n", "\n", " events_matrix2 = []\n", "\n", " # Recalculating timings\n", " for e in events_matrix1:\n", "\n", " # Original timings\n", " e[1] = int(e[1] / 16)\n", " e[2] = int(e[2] / 16)\n", "\n", " #===================================\n", " # ORIGINAL COMPOSITION\n", " #===================================\n", "\n", " # Sorting by patch, pitch, then by start-time\n", "\n", " events_matrix1.sort(key=lambda x: x[6])\n", " events_matrix1.sort(key=lambda x: x[4], reverse=True)\n", " events_matrix1.sort(key=lambda x: x[1])\n", "\n", " #=======================================================\n", " # FINAL PROCESSING\n", "\n", " melody_chords = []\n", " melody_chords2 = []\n", "\n", " # Break between compositions / Intro seq\n", "\n", " if 9 in instruments_list:\n", " drums_present = 18947 # Yes\n", " else:\n", " drums_present = 18946 # No\n", "\n", " if events_matrix1[0][3] != 9:\n", " pat = events_matrix1[0][6]\n", " else:\n", " pat = 128\n", "\n", " melody_chords.extend([19077, drums_present, 18948+pat, 0]) # Intro seq\n", "\n", " #=======================================================\n", " # MAIN PROCESSING CYCLE\n", " #=======================================================\n", "\n", " abs_time = 0\n", "\n", " pbar_time = 0\n", "\n", " pe = events_matrix1[0]\n", "\n", " chords_counter = 1\n", "\n", " comp_chords_len = len(list(set([y[1] for y in events_matrix1])))\n", "\n", " for e in events_matrix1:\n", "\n", " #=======================================================\n", " # Timings...\n", "\n", " # Cliping all values...\n", " delta_time = max(0, min(255, e[1]-pe[1]))\n", "\n", " # Durations and channels\n", "\n", " dur = max(0, min(255, e[2]))\n", " cha = max(0, min(15, e[3]))\n", "\n", " # Patches\n", " if cha == 9: # Drums patch will be == 128\n", " pat = 128\n", "\n", " else:\n", " pat = e[6]\n", "\n", " # Pitches\n", "\n", " ptc = max(1, min(127, e[4]))\n", "\n", " # Velocities\n", "\n", " # Calculating octo-velocity\n", " vel = max(8, min(127, e[5]))\n", " velocity = round(vel / 15)-1\n", "\n", " #=======================================================\n", " # Outro seq\n", "\n", " # if ((comp_chords_len - chords_counter) == 50) and (delta_time != 0):\n", " # out_t = 18946+delta_time\n", " # out_p = 19202+ptc\n", " # 
melody_chords.extend([18945, out_t, out_p]) # outro seq\n", "\n", "\n", " # if delta_time != 0:\n", " # chords_counter += 1\n", "\n", " #=======================================================\n", " # FINAL NOTE SEQ\n", "\n", " # Writing final note asynchronously\n", "\n", " dur_vel = (8 * dur) + velocity\n", " pat_ptc = (129 * pat) + ptc\n", "\n", " if delta_time != 0:\n", " melody_chords.extend([delta_time, dur_vel+256, pat_ptc+2304])\n", " else:\n", " melody_chords.extend([dur_vel+256, pat_ptc+2304])\n", " melody_chords2.append([delta_time, dur_vel+256, pat_ptc+2304])\n", "\n", " pe = e\n", "\n", " #=======================================================\n", "\n", " # melody_chords.extend([19462, 19462, 19462]) # EOS\n", "\n", " #=======================================================\n", "\n", " # TOTAL DICTIONARY SIZE 19462+1=19463\n", " #=======================================================\n", "\n", " #=======================================================\n", "\n", " song = melody_chords\n", "\n", " song_f = []\n", "\n", " time = 0\n", " dur = 0\n", " vel = 90\n", " pitch = 0\n", " channel = 0\n", "\n", " patches = [-1] * 16\n", "\n", " channels = [0] * 16\n", " channels[9] = 1\n", "\n", " for ss in song:\n", "\n", " if 0 <= ss < 256:\n", "\n", " time += ss * 16\n", "\n", " if 256 <= ss < 2304:\n", "\n", " dur = ((ss-256) // 8) * 16\n", " vel = (((ss-256) % 8)+1) * 15\n", "\n", " if 2304 <= ss < 18945:\n", "\n", " patch = (ss-2304) // 129\n", "\n", " if patch < 128:\n", "\n", " if patch not in patches:\n", " if 0 in channels:\n", " cha = channels.index(0)\n", " channels[cha] = 1\n", " else:\n", " cha = 15\n", "\n", " patches[cha] = patch\n", " channel = patches.index(patch)\n", " else:\n", " channel = patches.index(patch)\n", "\n", " if patch == 128:\n", " channel = 9\n", "\n", " pitch = (ss-2304) % 129\n", "\n", " song_f.append(['note', time, dur, channel, pitch, vel, patch ])\n", "\n", " patches = [0 if x==-1 else x for x in patches]\n", "\n", " detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", " output_signature = 'Monster Music Transformer',\n", " output_file_name = '/content/Monster-Music-Transformer-Seed-Composition',\n", " track_name='Project Los Angeles',\n", " list_of_MIDI_patches=patches\n", " )\n", "\n", " #=======================================================\n", "\n", " print('=' * 70)\n", " print('Composition stats:')\n", " print('Composition has', len(melody_chords2), 'notes')\n", " print('Composition has', len(melody_chords), 'tokens')\n", " print('Composition MIDI patches:', sorted(list(set([((y-2304) // 129) for y in melody_chords if 2304 <= y < 18945]))))\n", " print('=' * 70)\n", "\n", " print('Displaying resulting composition...')\n", " print('=' * 70)\n", "\n", " fname = '/content/Monster-Music-Transformer-Seed-Composition'\n", "\n", " if render_MIDI_to_audio:\n", " midi_audio = midi_to_colab_audio(fname + '.mid')\n", " display(Audio(midi_audio, rate=16000, normalize=False))\n", "\n", " TMIDIX.plot_ms_SONG(song_f, plot_title=fname)\n", "\n", "else:\n", " print('=' * 70)" ] }, { "cell_type": "markdown", "source": [ "# (CONTINUATION)" ], "metadata": { "id": "fmm3KjOtoVp9" } }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "dkvXYwR_qSnx", "cellView": "form" }, "outputs": [], "source": [ "#@title Standard Continuation\n", "\n", "#@markdown Generation settings\n", "\n", "try_to_generate_outro = False #@param {type:\"boolean\"}\n", "number_of_prime_tokens = 7191 # @param {type:\"slider\", min:3, max:8190, step:3}\n", 
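"#@markdown NOTE: each note is encoded with up to three tokens (delta-time, duration-velocity and patch-pitch), which is why the token-count sliders move in steps of 3\n",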
"number_of_tokens_to_generate = 504 # @param {type:\"slider\", min:30, max:8190, step:3}\n", "number_of_batches_to_generate = 4 #@param {type:\"slider\", min:1, max:16, step:1}\n", "temperature = 0.9 # @param {type:\"slider\", min:0.1, max:1, step:0.05}\n", "\n", "#@markdown Other settings\n", "include_prime_tokens_in_generated_output = False #@param {type:\"boolean\"}\n", "allow_model_to_stop_generation_if_needed = False #@param {type:\"boolean\"}\n", "render_MIDI_to_audio = True # @param {type:\"boolean\"}\n", "\n", "print('=' * 70)\n", "print('Monster Music Transformer Standard Continuation Model Generator')\n", "print('=' * 70)\n", "\n", "if allow_model_to_stop_generation_if_needed:\n", " min_stop_token = 19078\n", "else:\n", " min_stop_token = None\n", "\n", "outy = melody_chords[:number_of_prime_tokens]\n", "\n", "if try_to_generate_outro:\n", " outy.extend([18945])\n", "\n", "torch.cuda.empty_cache()\n", "\n", "inp = [outy] * number_of_batches_to_generate\n", "\n", "inp = torch.LongTensor(inp).cuda()\n", "\n", "with ctx:\n", " out = model.generate(inp,\n", " number_of_tokens_to_generate,\n", " temperature=temperature,\n", " return_prime=include_prime_tokens_in_generated_output,\n", " eos_token=min_stop_token,\n", " verbose=True)\n", "\n", "out0 = out.tolist()\n", "\n", "torch.cuda.empty_cache()\n", "\n", "print('=' * 70)\n", "print('Done!')\n", "print('=' * 70)\n", "\n", "#======================================================================\n", "print('Rendering results...')\n", "\n", "for i in range(number_of_batches_to_generate):\n", "\n", " print('=' * 70)\n", " print('Batch #', i)\n", " print('=' * 70)\n", "\n", " out1 = out0[i]\n", "\n", " print('Sample INTs', out1[:12])\n", " print('=' * 70)\n", "\n", " if len(out) != 0:\n", "\n", " song = out1\n", " song_f = []\n", "\n", " time = 0\n", " dur = 0\n", " vel = 90\n", " pitch = 0\n", " channel = 0\n", "\n", " patches = [-1] * 16\n", "\n", " channels = [0] * 16\n", " channels[9] = 1\n", "\n", " for ss in song:\n", "\n", " if 0 <= ss < 256:\n", "\n", " time += ss * 16\n", "\n", " if 256 <= ss < 2304:\n", "\n", " dur = ((ss-256) // 8) * 16\n", " vel = (((ss-256) % 8)+1) * 15\n", "\n", " if 2304 <= ss < 18945:\n", "\n", " patch = (ss-2304) // 129\n", "\n", " if patch < 128:\n", "\n", " if patch not in patches:\n", " if 0 in channels:\n", " cha = channels.index(0)\n", " channels[cha] = 1\n", " else:\n", " cha = 15\n", "\n", " patches[cha] = patch\n", " channel = patches.index(patch)\n", " else:\n", " channel = patches.index(patch)\n", "\n", " if patch == 128:\n", " channel = 9\n", "\n", " pitch = (ss-2304) % 129\n", "\n", " song_f.append(['note', time, dur, channel, pitch, vel, patch ])\n", "\n", " patches = [0 if x==-1 else x for x in patches]\n", "\n", " detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", " output_signature = 'Monster Music Transformer',\n", " output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),\n", " track_name='Project Los Angeles',\n", " list_of_MIDI_patches=patches\n", " )\n", " print('=' * 70)\n", " print('Displaying resulting composition...')\n", " print('=' * 70)\n", "\n", " fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)\n", "\n", " if render_MIDI_to_audio:\n", " midi_audio = midi_to_colab_audio(fname + '.mid')\n", " display(Audio(midi_audio, rate=16000, normalize=False))\n", "\n", " TMIDIX.plot_ms_SONG(song_f, plot_title=fname)" ] }, { "cell_type": "markdown", "source": [ "# Congrats! You did it! 
:)" ], "metadata": { "id": "eoWDEy6CwDr6" } } ], "metadata": { "accelerator": "GPU", "colab": { "private_outputs": true, "provenance": [], "gpuType": "A100", "gpuClass": "premium", "machine_shape": "hm" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 0 }