{ "cells": [ { "cell_type": "markdown", "metadata": { "gradient": { "editing": false, "id": "ac5a4cf0-d9d2-47b5-9633-b53f8d99a4d2", "kernelId": "" }, "id": "SiTIpPjArIyr" }, "source": [ "# Master MIDI Dataset GPU Search and Filter (ver. 7.0)\n", "\n", "***\n", "\n", "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n", "\n", "***\n", "\n", "#### Project Los Angeles\n", "\n", "#### Tegridy Code 2024\n", "\n", "***" ] }, { "cell_type": "markdown", "source": [ "# (GPU CHECK)" ], "metadata": { "id": "0rMwKVc9FFRw" } }, { "cell_type": "code", "source": [ "# @title NVIDIA GPU Check\n", "!nvidia-smi" ], "metadata": { "cellView": "form", "id": "dVSaUaEZFIip" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# (SETUP ENVIRONMENT)" ], "metadata": { "id": "YRTt3Hx0FQeu" } }, { "cell_type": "code", "execution_count": null, "metadata": { "cellView": "form", "gradient": { "editing": false, "id": "a1a45a91-d909-4fd4-b67a-5e16b971d179", "kernelId": "" }, "id": "fX12Yquyuihc" }, "outputs": [], "source": [ "#@title Install all dependencies (run only once per session)\n", "\n", "!git clone --depth 1 https://github.com/asigalov61/Los-Angeles-MIDI-Dataset\n", "!pip install huggingface_hub" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "gradient": { "editing": false, "id": "b8207b76-9514-4c07-95db-95a4742e52c5", "kernelId": "" }, "id": "z7n9vnKmug1J", "cellView": "form" }, "outputs": [], "source": [ "#@title Import all needed modules\n", "\n", "print('Loading core modules... Please wait...')\n", "\n", "import os\n", "import copy\n", "from collections import Counter\n", "import random\n", "import pickle\n", "from tqdm import tqdm\n", "import pprint\n", "import statistics\n", "import shutil\n", "\n", "import locale\n", "locale.getpreferredencoding = lambda: \"UTF-8\"\n", "\n", "import cupy as cp\n", "\n", "from huggingface_hub import hf_hub_download\n", "\n", "from google.colab import files\n", "\n", "print('Loading TMIDIX module...')\n", "os.chdir('/content/Los-Angeles-MIDI-Dataset')\n", "\n", "import TMIDIX\n", "\n", "os.chdir('/content/')\n", "\n", "print('Creating IO dirs... Please wait...')\n", "\n", "if not os.path.exists('/content/Master-MIDI-Dataset'):\n", " os.makedirs('/content/Master-MIDI-Dataset')\n", "\n", "if not os.path.exists('/content/Master-MIDI-Dataset'):\n", " os.makedirs('/content/Master-MIDI-Dataset')\n", "\n", "if not os.path.exists('/content/Output-MIDI-Dataset'):\n", " os.makedirs('/content/Output-MIDI-Dataset')\n", "\n", "print('Done!')\n", "print('Enjoy! :)')" ] }, { "cell_type": "markdown", "metadata": { "gradient": { "editing": false, "id": "20b8698a-0b4e-4fdb-ae49-24d063782e77", "kernelId": "" }, "id": "ObPxlEutsQBj" }, "source": [ "# (DOWNLOAD AND UNZIP MAIN MIDI DATASET)" ] }, { "cell_type": "code", "source": [ "#@title Download Los Angeles MIDI Dataset\n", "print('=' * 70)\n", "print('Downloading Los Angeles MIDI Dataset...Please wait...')\n", "print('=' * 70)\n", "\n", "hf_hub_download(repo_id='projectlosangeles/Los-Angeles-MIDI-Dataset',\n", " filename='Los-Angeles-MIDI-Dataset-Ver-4-0-CC-BY-NC-SA.zip',\n", " repo_type=\"dataset\",\n", " local_dir='/content/Main-MIDI-Dataset',\n", " local_dir_use_symlinks=False)\n", "print('=' * 70)\n", "print('Done! Enjoy! 
    {
      "cell_type": "code",
      "source": [
        "#@title Unzip Los Angeles MIDI Dataset\n",
        "%cd /content/Main-MIDI-Dataset/\n",
        "\n",
        "print('=' * 70)\n",
        "print('Unzipping Los Angeles MIDI Dataset... Please wait...')\n",
        "!unzip 'Los-Angeles-MIDI-Dataset-Ver-4-0-CC-BY-NC-SA.zip'\n",
        "print('=' * 70)\n",
        "\n",
        "print('Done! Enjoy! :)')\n",
        "print('=' * 70)\n",
        "%cd /content/"
      ],
      "metadata": { "cellView": "form", "id": "zMF4vdMNDYYg" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [ "# (CREATE MIDI DATASET FILES LIST)" ],
      "metadata": { "id": "GE0hPlAEjCrs" }
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Create Los Angeles MIDI Dataset files list\n",
        "print('=' * 70)\n",
        "print('Creating dataset files list...')\n",
        "dataset_addr = \"/content/Main-MIDI-Dataset/MIDIs\"\n",
        "\n",
        "filez = list()\n",
        "for (dirpath, dirnames, filenames) in os.walk(dataset_addr):\n",
        "    filez += [os.path.join(dirpath, file) for file in filenames]\n",
        "\n",
        "if not filez:\n",
        "    print('Could not find any MIDI files. Please check Dataset dir...')\n",
        "    print('=' * 70)\n",
        "\n",
        "print('=' * 70)\n",
        "print('Randomizing file list...')\n",
        "random.shuffle(filez)\n",
        "print('=' * 70)\n",
        "\n",
        "# Each entry is [file name without extension, full path]\n",
        "LAMD_files_list = []\n",
        "\n",
        "for f in tqdm(filez):\n",
        "    LAMD_files_list.append([f.split('/')[-1].split('.mid')[0], f])\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "btrUDk8MDfdw" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [ "# (SIGNATURES SEARCH)" ],
      "metadata": { "id": "iaeqXuIHI0_T" }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Load Los Angeles MIDI Dataset Signatures Data\n",
        "\n",
        "print('=' * 70)\n",
        "print('Loading LAMDa Signatures Data...')\n",
        "sigs_data = pickle.load(open('/content/Main-MIDI-Dataset/SIGNATURES_DATA/LAMDa_SIGNATURES_DATA.pickle', 'rb'))\n",
        "print('=' * 70)\n",
        "\n",
        "print('Prepping signatures...')\n",
        "print('=' * 70)\n",
        "\n",
        "random.shuffle(sigs_data)\n",
        "\n",
        "# One row per dataset file: token counts over single pitches (0-127),\n",
        "# chords (128 and up) and drums (the final 128 columns)\n",
        "signatures_file_names = []\n",
        "sigs_matrixes = [[0] * (len(TMIDIX.ALL_CHORDS) + 256) for i in range(len(sigs_data))]\n",
        "\n",
        "idx = 0\n",
        "for s in tqdm(sigs_data):\n",
        "\n",
        "    signatures_file_names.append(s[0])\n",
        "\n",
        "    for ss in s[1]:\n",
        "        sigs_matrixes[idx][ss[0]] = ss[1]\n",
        "\n",
        "    idx += 1\n",
        "\n",
        "print('=' * 70)\n",
        "print('Loading signatures...')\n",
        "print('=' * 70)\n",
        "\n",
        "signatures_data_full = cp.array(sigs_matrixes)\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": { "id": "Mv-pjxbrIqi2", "cellView": "form" },
      "execution_count": null,
      "outputs": []
    },
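    {
      "cell_type": "markdown",
      "source": [ "Optional: a minimal inspection sketch, not part of the original pipeline. It uses the same token offsets as the search cell below (single pitches 0-127, chord tokens from 128 up, drum tokens in the final 128 columns) to show how one signature row is laid out." ],
      "metadata": { "id": "InspectSigMD01" }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title (Optional) Inspect one signature vector (hedged sketch)\n",
        "# Not part of the original pipeline: a quick look at one signature row,\n",
        "# using the same offsets as the search cell below.\n",
        "\n",
        "drums_offset = len(TMIDIX.ALL_CHORDS) + 128\n",
        "\n",
        "sig_row = signatures_data_full[0]\n",
        "nonzero_idxs = cp.nonzero(sig_row)[0]\n",
        "\n",
        "print('File name:', signatures_file_names[0])\n",
        "print('Vector length:', sig_row.shape[0])\n",
        "print('Non-zero tokens:', int(nonzero_idxs.shape[0]))\n",
        "\n",
        "for token in nonzero_idxs[:10].tolist():\n",
        "    if token < 128:\n",
        "        kind = 'single pitch'\n",
        "    elif token < drums_offset:\n",
        "        kind = 'chord'\n",
        "    else:\n",
        "        kind = 'drum pitch'\n",
        "    print('token', token, '->', kind, '| count:', int(sig_row[token]))"
      ],
      "metadata": { "cellView": "form", "id": "InspectSigCode01" },
      "execution_count": null,
      "outputs": []
    },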
    {
      "cell_type": "code",
      "source": [
        "#@title Master MIDI Dataset Search and Filter\n",
        "\n",
        "#@markdown DO NOT FORGET TO UPLOAD YOUR MASTER DATASET TO \"Master-MIDI-Dataset\" FOLDER\n",
        "\n",
        "#@markdown NOTE: You can stop the search at any time to render partial results\n",
        "\n",
        "number_of_top_matches_MIDIs_to_collect = 30 #@param {type:\"slider\", min:5, max:50, step:1}\n",
        "search_matching_type = \"Ratios\" # @param [\"Ratios\", \"Distances\", \"Correlations\"]\n",
        "maximum_match_ratio_to_search_for = 1 #@param {type:\"slider\", min:0, max:1, step:0.001}\n",
        "match_results_weight = 2 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "match_lengths_weight = 1 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "match_counts_weight = 1 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "distances_norm_order = 3 # @param {type:\"slider\", min:1, max:10, step:1}\n",
        "epsilon = 0.5 # @param {type:\"slider\", min:0.001, max:1, step:0.001}\n",
        "match_drums = False # @param {type:\"boolean\"}\n",
        "\n",
        "print('=' * 70)\n",
        "print('Master MIDI Dataset GPU Search and Filter')\n",
        "print('=' * 70)\n",
        "\n",
        "###########\n",
        "\n",
        "search_settings_string = ''\n",
        "\n",
        "if match_drums:\n",
        "    search_settings_string += 'Chords_Drums'\n",
        "else:\n",
        "    search_settings_string += 'Chords'\n",
        "\n",
        "if search_matching_type == 'Distances':\n",
        "    search_settings_string += '_O_' + str(distances_norm_order)\n",
        "\n",
        "search_settings_string += '_W_'\n",
        "search_settings_string += str(match_results_weight) + '_'\n",
        "search_settings_string += str(match_lengths_weight) + '_'\n",
        "search_settings_string += str(match_counts_weight)\n",
        "\n",
        "search_settings_string += '_E_' + str(epsilon)\n",
        "\n",
        "###########\n",
        "\n",
        "print('Loading MIDI files...')\n",
        "print('This may take a while on a large dataset.')\n",
        "\n",
        "dataset_addr = \"/content/Master-MIDI-Dataset\"\n",
        "\n",
        "filez = list()\n",
        "\n",
        "for (dirpath, dirnames, filenames) in os.walk(dataset_addr):\n",
        "    for file in filenames:\n",
        "        if file.endswith(('.mid', '.midi', '.kar')):\n",
        "            filez.append(os.path.join(dirpath, file))\n",
        "\n",
        "print('=' * 70)\n",
        "\n",
        "if filez:\n",
        "\n",
        "    print('Randomizing file list...')\n",
        "    random.shuffle(filez)\n",
        "    print('=' * 70)\n",
        "\n",
        "    ###################\n",
        "\n",
        "    if not os.path.exists('/content/Output-MIDI-Dataset/'+search_matching_type+'_'+search_settings_string):\n",
        "        os.makedirs('/content/Output-MIDI-Dataset/'+search_matching_type+'_'+search_settings_string)\n",
        "\n",
        "    ###################\n",
        "\n",
        "    input_files_count = 0\n",
        "    files_count = 0\n",
        "\n",
        "    for f in filez:\n",
        "        try:\n",
        "\n",
        "            input_files_count += 1\n",
        "\n",
        "            fn = os.path.basename(f)\n",
        "            fn1 = os.path.splitext(fn)[0]\n",
        "\n",
        "            print('Processing MIDI File #', files_count+1, 'out of', len(filez))\n",
        "            print('MIDI file name', fn)\n",
        "            print('-' * 70)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            raw_score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read())\n",
        "            escore = TMIDIX.advanced_score_processor(raw_score, return_score_analysis=False, return_enhanced_score_notes=True)[0]\n",
        "\n",
        "            # Quantize start times and durations to 16 ms steps\n",
        "            for e in escore:\n",
        "                e[1] = int(e[1] / 16)\n",
        "                e[2] = int(e[2] / 16)\n",
        "\n",
        "            drums_offset = len(TMIDIX.ALL_CHORDS) + 128\n",
        "\n",
        "            src_sigs = []\n",
        "\n",
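        "            # Build one signature per transposition value in the range -6..+5\n",
        "            # semitones so that matching is key-independent; the transpose value\n",
        "            # of each match is reported and encoded into the output file name.\n",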
        "            for i in range(-6, 6):\n",
        "\n",
        "                escore_copy = copy.deepcopy(escore)\n",
        "\n",
        "                for e in escore_copy:\n",
        "                    e[4] += i\n",
        "\n",
        "                cscore = TMIDIX.chordify_score([1000, escore_copy])\n",
        "\n",
        "                sig = []\n",
        "                dsig = []\n",
        "\n",
        "                for c in cscore:\n",
        "\n",
        "                    pitches = sorted(set([p[4] for p in c if p[3] != 9]))\n",
        "                    drums = sorted(set([p[4]+drums_offset for p in c if p[3] == 9]))\n",
        "\n",
        "                    if pitches:\n",
        "                        if len(pitches) > 1:\n",
        "                            tones_chord = sorted(set([p % 12 for p in pitches]))\n",
        "                            checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord)\n",
        "\n",
        "                            sig_token = TMIDIX.ALL_CHORDS.index(checked_tones_chord) + 128\n",
        "\n",
        "                        elif len(pitches) == 1:\n",
        "                            sig_token = pitches[0]\n",
        "\n",
        "                        sig.append(sig_token)\n",
        "\n",
        "                    if drums:\n",
        "                        dsig.extend(drums)\n",
        "\n",
        "                # Count each token's occurrences to form the signature\n",
        "                sig_p = dict.fromkeys(sig+dsig, 0)\n",
        "                for item in sig+dsig:\n",
        "                    sig_p[item] += 1\n",
        "\n",
        "                fsig = [list(v) for v in sig_p.items()]\n",
        "\n",
        "                src_sig_mat = [0] * (len(TMIDIX.ALL_CHORDS)+256)\n",
        "\n",
        "                for s in fsig:\n",
        "                    src_sig_mat[s[0]] = s[1]\n",
        "\n",
        "                src_sigs.append(src_sig_mat)\n",
        "\n",
        "            src_signatures = cp.array(src_sigs)\n",
        "\n",
        "            if not match_drums:\n",
        "                src_signatures = src_signatures[:,:drums_offset]\n",
        "                signatures_data = signatures_data_full[:,:drums_offset]\n",
        "            else:\n",
        "                signatures_data = signatures_data_full\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Searching for matches... Please wait...')\n",
        "            print('-' * 70)\n",
        "\n",
        "            lower_threshold = 0.0\n",
        "            upper_threshold = maximum_match_ratio_to_search_for\n",
        "            filter_size = number_of_top_matches_MIDIs_to_collect\n",
        "\n",
        "            all_filtered_means = []\n",
        "            all_filtered_idxs = []\n",
        "            all_filtered_tvs = []\n",
        "\n",
        "            tv_idx = -6\n",
        "\n",
        "            for target_sig in tqdm(src_signatures):\n",
        "\n",
        "                # Ratio of non-zero token counts (signature lengths)\n",
        "                comps_lengths = cp.vstack((cp.repeat(cp.sum(target_sig != 0), signatures_data.shape[0]), cp.sum(signatures_data != 0, axis=1)))\n",
        "                comps_lengths_ratios = cp.divide(cp.min(comps_lengths, axis=0), cp.max(comps_lengths, axis=0))\n",
        "\n",
        "                # Ratio of total token counts\n",
        "                comps_counts_sums = cp.vstack((cp.repeat(cp.sum(target_sig), signatures_data.shape[0]), cp.sum(signatures_data, axis=1)))\n",
        "                comps_counts_sums_ratios = cp.divide(cp.min(comps_counts_sums, axis=0), cp.max(comps_counts_sums, axis=0))\n",
        "\n",
        "                if search_matching_type == 'Ratios':\n",
        "\n",
        "                    ratios = cp.where(target_sig != 0, cp.divide(cp.minimum(signatures_data, target_sig), cp.maximum(signatures_data, target_sig)), epsilon)\n",
        "                    results = cp.mean(ratios, axis=1)\n",
        "\n",
        "                elif search_matching_type == 'Distances':\n",
        "\n",
        "                    distances = cp.power(cp.sum(cp.power(cp.abs(signatures_data - target_sig), distances_norm_order), axis=1), 1 / distances_norm_order)\n",
        "\n",
        "                    distances_mean = cp.mean(distances)\n",
        "                    distances_std = cp.std(distances)\n",
        "\n",
        "                    results = 1 - cp.divide((distances - distances_mean), distances_std)\n",
        "\n",
        "                elif search_matching_type == 'Correlations':\n",
        "\n",
        "                    main_array_mean = cp.mean(signatures_data, axis=1, keepdims=True)\n",
        "                    main_array_std = cp.std(signatures_data, axis=1, keepdims=True)\n",
        "                    target_array_mean = cp.mean(target_sig)\n",
        "                    target_array_std = cp.std(target_sig)\n",
        "\n",
        "                    signatures_data_normalized = cp.where(main_array_std != 0, (signatures_data - main_array_mean) / main_array_std, epsilon)\n",
        "                    target_sig_normalized = cp.where(target_array_std != 0, (target_sig - target_array_mean) / target_array_std, epsilon)\n",
        "\n",
        "                    correlations = cp.divide(cp.einsum('ij,j->i', signatures_data_normalized, target_sig_normalized), (signatures_data.shape[1] - 1))\n",
        "                    scaled_correlations = cp.divide(correlations, cp.sqrt(cp.sum(correlations**2)))\n",
        "                    exp = cp.exp(scaled_correlations - cp.max(scaled_correlations))\n",
        "                    results = cp.multiply(cp.divide(exp, cp.sum(exp)), 1e5)\n",
        "\n",
        "                results_weight = match_results_weight\n",
        "                comp_lengths_weight = match_lengths_weight\n",
        "                comp_counts_sums_weight = match_counts_weight\n",
        "\n",
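        "                # Combine the match scores with the length and count ratios as a\n",
        "                # weighted harmonic mean: score = (w_r + w_l + w_c) /\n",
        "                # (w_r/results + w_l/length_ratios + w_c/count_ratios),\n",
        "                # with zero terms replaced by epsilon to avoid division by zero\n",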
        "                results = cp.divide(cp.add(cp.add(results_weight, comp_lengths_weight), comp_counts_sums_weight), cp.add(cp.add(cp.divide(results_weight, cp.where(results != 0, results, epsilon)), cp.divide(comp_lengths_weight, cp.where(comps_lengths_ratios != 0, comps_lengths_ratios, epsilon))), cp.divide(comp_counts_sums_weight, cp.where(comps_counts_sums_ratios != 0, comps_counts_sums_ratios, epsilon))))\n",
        "\n",
        "                unique_means = cp.unique(results)\n",
        "                sorted_means = cp.sort(unique_means)[::-1]\n",
        "\n",
        "                filtered_means = sorted_means[(sorted_means >= lower_threshold) & (sorted_means <= upper_threshold)][:filter_size]\n",
        "\n",
        "                filtered_idxs = cp.nonzero(cp.in1d(results, filtered_means))[0]\n",
        "\n",
        "                all_filtered_means.extend(results[filtered_idxs].tolist())\n",
        "\n",
        "                all_filtered_idxs.extend(filtered_idxs.tolist())\n",
        "\n",
        "                filtered_tvs = [tv_idx] * filtered_idxs.shape[0]\n",
        "\n",
        "                all_filtered_tvs.extend(filtered_tvs)\n",
        "\n",
        "                tv_idx += 1\n",
        "\n",
        "            f_results = sorted(zip(all_filtered_means, all_filtered_idxs, all_filtered_tvs), key=lambda x: x[0], reverse=True)\n",
        "\n",
        "            # Deduplicate by score, preferring the zero-transpose match\n",
        "            triplet_dict = {}\n",
        "\n",
        "            for triplet in f_results:\n",
        "\n",
        "                if triplet[0] not in triplet_dict:\n",
        "                    triplet_dict[triplet[0]] = triplet\n",
        "                else:\n",
        "                    if triplet[2] == 0:\n",
        "                        triplet_dict[triplet[0]] = triplet\n",
        "\n",
        "            filtered_results = list(triplet_dict.values())[:filter_size]\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Done!')\n",
        "            print('-' * 70)\n",
        "            print('Max match ratio:', filtered_results[0][0])\n",
        "            print('Max match transpose value:', filtered_results[0][2])\n",
        "            print('Max match signature index:', filtered_results[0][1])\n",
        "            print('Max match file name:', signatures_file_names[filtered_results[0][1]])\n",
        "            print('-' * 70)\n",
        "            print('Copying max ratios MIDIs...')\n",
        "\n",
        "            for fr in filtered_results:\n",
        "\n",
        "                ffn = signatures_file_names[fr[1]]\n",
        "                ffn_idx = [y[0] for y in LAMD_files_list].index(ffn)\n",
        "\n",
        "                ff = LAMD_files_list[ffn_idx][1]\n",
        "\n",
        "                #=======================================================\n",
        "\n",
        "                dir_str = str(fn1)\n",
        "                copy_path = '/content/Output-MIDI-Dataset/'+search_matching_type+'_'+search_settings_string+'/'+dir_str\n",
        "                if not os.path.exists(copy_path):\n",
        "                    os.mkdir(copy_path)\n",
        "\n",
        "                # Output name: match score, transpose value, LAMDa file name\n",
        "                fff = str(fr[0] * 100) + '_' + str(fr[2]) + '_' + ffn + '.mid'\n",
        "\n",
        "                shutil.copy2(ff, copy_path+'/'+fff)\n",
        "\n",
        "            # Also copy the source Master MIDI file into the results folder\n",
        "            shutil.copy2(f, copy_path+'/'+fn)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Done!')\n",
        "            print('=' * 70)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            # Processed files counter\n",
        "            files_count += 1\n",
        "\n",
        "        except KeyboardInterrupt:\n",
        "            print('Quitting...')\n",
        "            print('Total number of processed MIDI files', files_count)\n",
        "            print('=' * 70)\n",
        "            break\n",
        "\n",
        "        except Exception as ex:\n",
        "            print('WARNING !!!')\n",
        "            print('=' * 70)\n",
        "            print('Bad file:', f)\n",
        "            print('Error detected:', ex)\n",
        "            print('=' * 70)\n",
        "            continue\n",
        "\n",
        "    print('Total number of processed MIDI files', files_count)\n",
        "    print('=' * 70)\n",
        "\n",
        "else:\n",
        "    print('Could not find any MIDI files. Please check Dataset dir...')\n",
        "    print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "M0JWCPzBGNvh" },
      "execution_count": null,
      "outputs": []
    },
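    {
      "cell_type": "markdown",
      "source": [
        "For reference, the combined score computed by the search cell above is a weighted harmonic mean of the raw match results $r$ (ratios, scaled distances, or correlations), the signature-length ratios $l$, and the token-count ratios $c$, with zero terms replaced by $\\epsilon$:\n",
        "\n",
        "$$\\text{score} = \\frac{w_r + w_l + w_c}{\\frac{w_r}{r} + \\frac{w_l}{l} + \\frac{w_c}{c}}$$\n",
        "\n",
        "Here $w_r$, $w_l$ and $w_c$ are the `match_results_weight`, `match_lengths_weight` and `match_counts_weight` form sliders. A harmonic mean is dominated by its smallest term, so a candidate must score well on all three components to rank highly."
      ],
      "metadata": { "id": "ScoreFormulaMD01" }
    },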
    {
      "cell_type": "markdown",
      "source": [ "# (KILO-CHORDS SEARCH)" ],
      "metadata": { "id": "ekjgrYRaiFE0" }
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Load Los Angeles MIDI Dataset Kilo-Chords Data\n",
        "search_matching_type = \"Full-Kilo-Chords\" # @param [\"Full-Kilo-Chords\", \"Unique-Kilo-Chords\"]\n",
        "\n",
        "print('=' * 70)\n",
        "print('Loading LAMDa Kilo-Chords Data...')\n",
        "kilo_chords = pickle.load(open('/content/Main-MIDI-Dataset/KILO_CHORDS_DATA/LAMDa_KILO_CHORDS_DATA.pickle', 'rb'))\n",
        "print('=' * 70)\n",
        "\n",
        "print('Prepping Kilo-Chords...')\n",
        "print('=' * 70)\n",
        "\n",
        "random.shuffle(kilo_chords)\n",
        "\n",
        "if search_matching_type == 'Full-Kilo-Chords':\n",
        "\n",
        "    kilo_chords_file_names = []\n",
        "\n",
        "    for kc in tqdm(kilo_chords):\n",
        "\n",
        "        kilo_chords_file_names.append(kc[0])\n",
        "\n",
        "        # Pad each kilo-chord to exactly 1000 tokens (in-place list extension)\n",
        "        kcho = kc[1]\n",
        "        kcho += [0] * (1000 - len(kcho))\n",
        "\n",
        "    print('=' * 70)\n",
        "    print('Loading Kilo-Chords...')\n",
        "    print('=' * 70)\n",
        "\n",
        "    kilo_chords_data = cp.array([kc[1] for kc in kilo_chords])\n",
        "\n",
        "else:\n",
        "\n",
        "    kilo_chords_file_names = []\n",
        "\n",
        "    kilo_chords_matrixes = [[0] * (len(TMIDIX.ALL_CHORDS)+128) for i in range(len(kilo_chords))]\n",
        "\n",
        "    idx = 0\n",
        "    for kc in tqdm(kilo_chords):\n",
        "\n",
        "        kilo_chords_file_names.append(kc[0])\n",
        "\n",
        "        for c in kc[1]:\n",
        "            kilo_chords_matrixes[idx][c] += 1\n",
        "\n",
        "        idx += 1\n",
        "\n",
        "    print('=' * 70)\n",
        "    print('Loading Kilo-Chords...')\n",
        "    print('=' * 70)\n",
        "\n",
        "    kilo_chords_data = cp.array(kilo_chords_matrixes)\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "YVyUHQiNiJcX" },
      "execution_count": null,
      "outputs": []
    },
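    {
      "cell_type": "markdown",
      "source": [ "Optional: a toy illustration of the two representations prepared above, not part of the original pipeline. The token list below is made up for demonstration only." ],
      "metadata": { "id": "KiloChordsToyMD01" }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title (Optional) Full vs. Unique Kilo-Chords on a toy example (hedged sketch)\n",
        "# Not part of the original pipeline: a made-up token list illustrating the two\n",
        "# representations prepared above.\n",
        "\n",
        "toy_kilo_chord = [60, 145, 145, 62]  # hypothetical pitch/chord tokens\n",
        "\n",
        "# Full-Kilo-Chords: the token sequence itself, zero-padded to 1000 positions;\n",
        "# the search below compares sequences position by position.\n",
        "full_repr = toy_kilo_chord + [0] * (1000 - len(toy_kilo_chord))\n",
        "print('Full representation (first 8 positions):', full_repr[:8])\n",
        "\n",
        "# Unique-Kilo-Chords: an order-free histogram of token counts;\n",
        "# the search below compares counts per token instead.\n",
        "unique_repr = [0] * (len(TMIDIX.ALL_CHORDS) + 128)\n",
        "for c in toy_kilo_chord:\n",
        "    unique_repr[c] += 1\n",
        "print('Unique representation non-zero entries:',\n",
        "      {i: v for i, v in enumerate(unique_repr) if v != 0})"
      ],
      "metadata": { "cellView": "form", "id": "KiloChordsToyCode01" },
      "execution_count": null,
      "outputs": []
    },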
    {
      "cell_type": "code",
      "source": [
        "#@title Master MIDI Dataset Search and Filter\n",
        "\n",
        "#@markdown DO NOT FORGET TO UPLOAD YOUR MASTER DATASET TO \"Master-MIDI-Dataset\" FOLDER\n",
        "\n",
        "#@markdown NOTE: You can stop the search at any time to render partial results\n",
        "\n",
        "number_of_top_matches_MIDIs_to_collect = 30 #@param {type:\"slider\", min:5, max:50, step:1}\n",
        "maximum_match_ratio_to_search_for = 1 #@param {type:\"slider\", min:0, max:1, step:0.001}\n",
        "match_results_weight = 2 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "match_lengths_weight = 1 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "match_counts_weight = 1 # @param {type:\"slider\", min:0.1, max:3, step:0.1}\n",
        "epsilon = 0.5 # @param {type:\"slider\", min:0.001, max:1, step:0.001}\n",
        "\n",
        "print('=' * 70)\n",
        "print('Master MIDI Dataset GPU Search and Filter')\n",
        "print('=' * 70)\n",
        "\n",
        "###########\n",
        "\n",
        "search_settings_string = ''\n",
        "\n",
        "search_settings_string += str(search_matching_type).replace('-', '_')\n",
        "\n",
        "search_settings_string += '_W_'\n",
        "search_settings_string += str(match_results_weight) + '_'\n",
        "search_settings_string += str(match_lengths_weight) + '_'\n",
        "search_settings_string += str(match_counts_weight)\n",
        "\n",
        "search_settings_string += '_E_' + str(epsilon)\n",
        "\n",
        "###########\n",
        "\n",
        "print('Loading MIDI files...')\n",
        "print('This may take a while on a large dataset.')\n",
        "\n",
        "dataset_addr = \"/content/Master-MIDI-Dataset\"\n",
        "\n",
        "filez = list()\n",
        "\n",
        "for (dirpath, dirnames, filenames) in os.walk(dataset_addr):\n",
        "    for file in filenames:\n",
        "        if file.endswith(('.mid', '.midi', '.kar')):\n",
        "            filez.append(os.path.join(dirpath, file))\n",
        "\n",
        "print('=' * 70)\n",
        "\n",
        "if filez:\n",
        "\n",
        "    print('Randomizing file list...')\n",
        "    random.shuffle(filez)\n",
        "    print('=' * 70)\n",
        "\n",
        "    ###################\n",
        "\n",
        "    if not os.path.exists('/content/Output-MIDI-Dataset/'+search_settings_string):\n",
        "        os.makedirs('/content/Output-MIDI-Dataset/'+search_settings_string)\n",
        "\n",
        "    ###################\n",
        "\n",
        "    input_files_count = 0\n",
        "    files_count = 0\n",
        "\n",
        "    for f in filez:\n",
        "        try:\n",
        "\n",
        "            input_files_count += 1\n",
        "\n",
        "            fn = os.path.basename(f)\n",
        "            fn1 = os.path.splitext(fn)[0]\n",
        "\n",
        "            print('Processing MIDI File #', files_count+1, 'out of', len(filez))\n",
        "            print('MIDI file name', fn)\n",
        "            print('-' * 70)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            raw_score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read())\n",
        "            escore = TMIDIX.advanced_score_processor(raw_score, return_score_analysis=False, return_enhanced_score_notes=True)[0]\n",
        "\n",
        "            # Quantize start times and durations to 16 ms steps\n",
        "            for e in escore:\n",
        "                e[1] = int(e[1] / 16)\n",
        "                e[2] = int(e[2] / 16)\n",
        "\n",
        "            src_kilo_chords = []\n",
        "\n",
        "            # Build one kilo-chord per transposition value in the range -6..+5 semitones\n",
        "            for i in range(-6, 6):\n",
        "\n",
        "                escore_copy = copy.deepcopy(escore)\n",
        "\n",
        "                for e in escore_copy:\n",
        "                    e[4] += i\n",
        "\n",
        "                cscore = TMIDIX.chordify_score([1000, escore_copy])\n",
        "\n",
        "                kilo_chord = []\n",
        "\n",
        "                for c in cscore:\n",
        "\n",
        "                    pitches = sorted(set([p[4] for p in c if p[3] != 9]))\n",
        "\n",
        "                    if pitches:\n",
        "                        if len(pitches) > 1:\n",
        "                            tones_chord = sorted(set([p % 12 for p in pitches]))\n",
        "                            checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord)\n",
        "\n",
        "                            chord_token = TMIDIX.ALL_CHORDS.index(checked_tones_chord) + 128\n",
        "\n",
        "                        elif len(pitches) == 1:\n",
        "                            chord_token = pitches[0]\n",
        "\n",
        "                        kilo_chord.append(chord_token)\n",
        "\n",
        "                if search_matching_type == 'Full-Kilo-Chords':\n",
        "\n",
        "                    kilo_chord = kilo_chord[:1000]\n",
        "                    kilo_chord_matrix = kilo_chord + [0] * (1000 - len(kilo_chord))\n",
        "\n",
        "                else:\n",
        "\n",
        "                    kilo_chord_matrix = [0] * (len(TMIDIX.ALL_CHORDS)+128)\n",
        "\n",
        "                    for c in kilo_chord:\n",
        "                        kilo_chord_matrix[c] += 1\n",
        "\n",
        "                src_kilo_chords.append(kilo_chord_matrix)\n",
        "\n",
        "            src_kilo_chords = cp.array(src_kilo_chords)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Searching for matches... Please wait...')\n",
        "            print('-' * 70)\n",
        "\n",
        "            lower_threshold = 0.0\n",
        "            upper_threshold = maximum_match_ratio_to_search_for\n",
        "            filter_size = number_of_top_matches_MIDIs_to_collect\n",
        "\n",
        "            all_filtered_means = []\n",
        "            all_filtered_idxs = []\n",
        "            all_filtered_tvs = []\n",
        "\n",
        "            tv_idx = -6\n",
        "\n",
        "            for target_kc in tqdm(src_kilo_chords):\n",
        "\n",
        "                comps_lengths = cp.vstack((cp.repeat(cp.sum(target_kc != 0), kilo_chords_data.shape[0]), cp.sum(kilo_chords_data != 0, axis=1)))\n",
        "                comps_lengths_ratios = cp.divide(cp.min(comps_lengths, axis=0), cp.max(comps_lengths, axis=0))\n",
        "\n",
        "                comps_counts_sums = cp.vstack((cp.repeat(cp.sum(target_kc), kilo_chords_data.shape[0]), cp.sum(kilo_chords_data, axis=1)))\n",
        "                comps_counts_sums_ratios = cp.divide(cp.min(comps_counts_sums, axis=0), cp.max(comps_counts_sums, axis=0))\n",
        "\n",
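        "                # Positional match: an entry counts as a hit only where a dataset\n",
        "                # row holds exactly the same non-zero value at the same position\n",
        "                # (a token for Full-Kilo-Chords, a token count for Unique-Kilo-Chords);\n",
        "                # the raw score is the fraction of such matching positions\n",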
        "                intersections = cp.where((kilo_chords_data == target_kc), kilo_chords_data, 0)\n",
        "                results = cp.mean(intersections != 0, axis=1)\n",
        "\n",
        "                results_weight = match_results_weight\n",
        "                comp_lengths_weight = match_lengths_weight\n",
        "                comp_counts_sums_weight = match_counts_weight\n",
        "\n",
        "                # Weighted harmonic mean of the match results and the two ratios,\n",
        "                # exactly as in the signatures search above\n",
        "                results = cp.divide(cp.add(cp.add(results_weight, comp_lengths_weight), comp_counts_sums_weight), cp.add(cp.add(cp.divide(results_weight, cp.where(results != 0, results, epsilon)), cp.divide(comp_lengths_weight, cp.where(comps_lengths_ratios != 0, comps_lengths_ratios, epsilon))), cp.divide(comp_counts_sums_weight, cp.where(comps_counts_sums_ratios != 0, comps_counts_sums_ratios, epsilon))))\n",
        "\n",
        "                unique_means = cp.unique(results)\n",
        "                sorted_means = cp.sort(unique_means)[::-1]\n",
        "\n",
        "                filtered_means = sorted_means[(sorted_means >= lower_threshold) & (sorted_means <= upper_threshold)][:filter_size]\n",
        "\n",
        "                filtered_idxs = cp.nonzero(cp.in1d(results, filtered_means))[0]\n",
        "\n",
        "                all_filtered_means.extend(results[filtered_idxs].tolist())\n",
        "\n",
        "                all_filtered_idxs.extend(filtered_idxs.tolist())\n",
        "\n",
        "                filtered_tvs = [tv_idx] * filtered_idxs.shape[0]\n",
        "\n",
        "                all_filtered_tvs.extend(filtered_tvs)\n",
        "\n",
        "                tv_idx += 1\n",
        "\n",
        "            f_results = sorted(zip(all_filtered_means, all_filtered_idxs, all_filtered_tvs), key=lambda x: x[0], reverse=True)\n",
        "\n",
        "            # Deduplicate by score, preferring the zero-transpose match\n",
        "            triplet_dict = {}\n",
        "\n",
        "            for triplet in f_results:\n",
        "\n",
        "                if triplet[0] not in triplet_dict:\n",
        "                    triplet_dict[triplet[0]] = triplet\n",
        "                else:\n",
        "                    if triplet[2] == 0:\n",
        "                        triplet_dict[triplet[0]] = triplet\n",
        "\n",
        "            filtered_results = list(triplet_dict.values())[:filter_size]\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Done!')\n",
        "            print('-' * 70)\n",
        "            print('Max match ratio:', filtered_results[0][0])\n",
        "            print('Max match transpose value:', filtered_results[0][2])\n",
        "            print('Max match kilo-chord index:', filtered_results[0][1])\n",
        "            print('Max match file name:', kilo_chords_file_names[filtered_results[0][1]])\n",
        "            print('-' * 70)\n",
        "            print('Copying max ratios MIDIs...')\n",
        "\n",
        "            for fr in filtered_results:\n",
        "\n",
        "                ffn = kilo_chords_file_names[fr[1]]\n",
        "                ffn_idx = [y[0] for y in LAMD_files_list].index(ffn)\n",
        "\n",
        "                ff = LAMD_files_list[ffn_idx][1]\n",
        "\n",
        "                #=======================================================\n",
        "\n",
        "                dir_str = str(fn1)\n",
        "                copy_path = '/content/Output-MIDI-Dataset/'+search_settings_string+'/'+dir_str\n",
        "                if not os.path.exists(copy_path):\n",
        "                    os.mkdir(copy_path)\n",
        "\n",
        "                # Output name: match score, transpose value, LAMDa file name\n",
        "                fff = str(fr[0] * 100) + '_' + str(fr[2]) + '_' + ffn + '.mid'\n",
        "\n",
        "                shutil.copy2(ff, copy_path+'/'+fff)\n",
        "\n",
        "            # Also copy the source Master MIDI file into the results folder\n",
        "            shutil.copy2(f, copy_path+'/'+fn)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            print('Done!')\n",
        "            print('=' * 70)\n",
        "\n",
        "            #=======================================================\n",
        "\n",
        "            # Processed files counter\n",
        "            files_count += 1\n",
        "\n",
        "        except KeyboardInterrupt:\n",
        "            print('Quitting...')\n",
        "            print('Total number of processed MIDI files', files_count)\n",
        "            print('=' * 70)\n",
        "            break\n",
        "\n",
        "        except Exception as ex:\n",
        "            print('WARNING !!!')\n",
        "            print('=' * 70)\n",
        "            print('Bad file:', f)\n",
        "            print('Error detected:', ex)\n",
        "            print('=' * 70)\n",
        "            continue\n",
        "\n",
        "    print('Total number of processed MIDI files', files_count)\n",
        "    print('=' * 70)\n",
        "\n",
        "else:\n",
        "    print('Could not find any MIDI files. Please check Dataset dir...')\n",
        "    print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "fhgpI31piWiX" },
      "execution_count": null,
      "outputs": []
    },
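    {
      "cell_type": "markdown",
      "source": [ "Optional: both search cells above resolve each matched LAMDa file name with a linear `.index()` scan over `LAMD_files_list`, rebuilt for every match. The next cell is a minimal sketch, not part of the original pipeline, that builds the lookup once as a dict; `LAMD_files_dict` is a new, hypothetical name." ],
      "metadata": { "id": "FastLookupMD01" }
    },
    {
      "cell_type": "code",
      "source": [
        "# @title (Optional) Faster file-name lookups (hedged sketch)\n",
        "# Not part of the original pipeline: building this dict once gives O(1)\n",
        "# lookups instead of a linear scan per match. 'LAMD_files_dict' is a new,\n",
        "# hypothetical name introduced here for illustration.\n",
        "\n",
        "LAMD_files_dict = {name: path for name, path in LAMD_files_list}\n",
        "\n",
        "# Example usage with some matched file name ffn:\n",
        "# ff = LAMD_files_dict[ffn]\n",
        "print('Indexed', len(LAMD_files_dict), 'file names')"
      ],
      "metadata": { "cellView": "form", "id": "FastLookupCode01" },
      "execution_count": null,
      "outputs": []
    },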
    {
      "cell_type": "markdown",
      "source": [ "# (DOWNLOAD RESULTS)" ],
      "metadata": { "id": "7Lyy0vjV0dlI" }
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Zip and download all search results\n",
        "\n",
        "print('=' * 70)\n",
        "\n",
        "try:\n",
        "    os.remove('/content/Output-MIDI-Dataset/Master_MIDI_Dataset_Search_Results.zip')\n",
        "except OSError:\n",
        "    pass\n",
        "\n",
        "print('Zipping... Please wait...')\n",
        "print('=' * 70)\n",
        "\n",
        "%cd /content/Output-MIDI-Dataset/\n",
        "!zip -r Master_MIDI_Dataset_Search_Results.zip *\n",
        "%cd /content/\n",
        "\n",
        "print('=' * 70)\n",
        "print('Done!')\n",
        "print('=' * 70)\n",
        "\n",
        "print('Downloading final zip file...')\n",
        "print('=' * 70)\n",
        "\n",
        "files.download('/content/Output-MIDI-Dataset/Master_MIDI_Dataset_Search_Results.zip')\n",
        "\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "1psdj0RJ0aWH" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Delete search results directory and files\n",
        "\n",
        "#@markdown WARNING: This can't be undone, so make sure you have downloaded the search results first\n",
        "\n",
        "print('=' * 70)\n",
        "print('Deleting... Please wait...')\n",
        "print('=' * 70)\n",
        "\n",
        "!rm -rf /content/Output-MIDI-Dataset\n",
        "print('Done!')\n",
        "print('=' * 70)"
      ],
      "metadata": { "cellView": "form", "id": "z3B-YHIz0jDt" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": { "id": "YzCMd94Tu_gz" },
      "source": [ "# Congrats! You did it! :)" ]
    }
  ],
  "metadata": {
    "colab": { "private_outputs": true, "provenance": [], "gpuType": "T4", "machine_shape": "hm" },
    "kernelspec": { "display_name": "Python 3", "name": "python3" },
    "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" },
    "accelerator": "GPU"
  },
  "nbformat": 4,
  "nbformat_minor": 0
}