{ "cells": [ { "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Defaulting to user installation because normal site-packages is not writeable\n", "Requirement already satisfied: pandas in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (2.2.0)\n", "Requirement already satisfied: tzdata>=2022.7 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: numpy<2,>=1.22.4 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (1.26.1)\n", "Requirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.15.0)\n", "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.0 is available.\n", "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" ] } ], "source": [ "!pip3 install pandas" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Get the filelist\n", "\n", "For the full results.json" ] }, { "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 2217 results.json files\n" ] } ], "source": [ "import glob\n", "\n", "# Specify the path to the folder containing the results.json files\n", "folder_path = \"lm-eval-output\"\n", "\n", "# Use glob to find all the results.json files\n", "results_json_files = glob.glob(f\"{folder_path}/**/results.json\", recursive=True)\n", "\n", "# Show total number of results.json files found\n", "print(f\"Found {len(results_json_files)} results.json files\")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Process all the results.json\n", "\n", "One file at a time" ] }, { "cell_type": "code", "execution_count": 49, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processed example: {'name': 'bigscience/bloom-7b1', 'config': {'dtype=bfloat16,trust_remote_code=True': {'confStr': 'dtype=bfloat16,trust_remote_code=True', 'confObj': {'dtype': 'bfloat16', 'trust_remote_code': 'True'}, 'results': {'xcopa': {'acc,none': 0.5709090909090908, 'acc_stderr,none': 0.06135942275478038, 'alias': 'xcopa'}, 'xcopa_et': {'acc,none': 0.482, 'acc_stderr,none': 0.02236856511738799, 'alias': ' - xcopa_et'}, 'xcopa_ht': {'acc,none': 0.516, 'acc_stderr,none': 0.0223716109825804, 'alias': ' - xcopa_ht'}, 'xcopa_id': {'acc,none': 0.702, 'acc_stderr,none': 0.020475118092988978, 'alias': ' - xcopa_id'}, 'xcopa_it': {'acc,none': 0.528, 'acc_stderr,none': 0.022347949832668093, 'alias': ' - xcopa_it'}, 'xcopa_qu': {'acc,none': 0.512, 'acc_stderr,none': 0.02237662679792717, 'alias': ' - xcopa_qu'}, 'xcopa_sw': {'acc,none': 0.518, 'acc_stderr,none': 0.02236856511738799, 'alias': ' - xcopa_sw'}, 'xcopa_ta': {'acc,none': 0.592, 'acc_stderr,none': 0.02200091089387719, 'alias': ' - xcopa_ta'}, 'xcopa_th': {'acc,none': 0.552, 'acc_stderr,none': 0.022261697292270132, 'alias': ' - xcopa_th'}, 'xcopa_tr': {'acc,none': 0.512, 'acc_stderr,none': 0.02237662679792717, 
'alias': ' - xcopa_tr'}, 'xcopa_vi': {'acc,none': 0.716, 'acc_stderr,none': 0.02018670369357085, 'alias': ' - xcopa_vi'}, 'xcopa_zh': {'acc,none': 0.65, 'acc_stderr,none': 0.021352091786223104, 'alias': ' - xcopa_zh'}, 'xnli': {'acc,none': 0.41204819277108434, 'acc_stderr,none': 0.051535476594892576, 'alias': 'xnli'}, 'xnli_ar': {'acc,none': 0.3377510040160643, 'acc_stderr,none': 0.009479742273956477, 'alias': ' - xnli_ar'}, 'xnli_bg': {'acc,none': 0.3779116465863454, 'acc_stderr,none': 0.009718712281227459, 'alias': ' - xnli_bg'}, 'xnli_de': {'acc,none': 0.41365461847389556, 'acc_stderr,none': 0.009871502159099368, 'alias': ' - xnli_de'}, 'xnli_el': {'acc,none': 0.3650602409638554, 'acc_stderr,none': 0.009650194822749637, 'alias': ' - xnli_el'}, 'xnli_en': {'acc,none': 0.5261044176706827, 'acc_stderr,none': 0.01000840465166064, 'alias': ' - xnli_en'}, 'xnli_es': {'acc,none': 0.4879518072289157, 'acc_stderr,none': 0.010019162857624487, 'alias': ' - xnli_es'}, 'xnli_fr': {'acc,none': 0.478714859437751, 'acc_stderr,none': 0.010012987604500423, 'alias': ' - xnli_fr'}, 'xnli_hi': {'acc,none': 0.4666666666666667, 'acc_stderr,none': 0.00999977679318763, 'alias': ' - xnli_hi'}, 'xnli_ru': {'acc,none': 0.43253012048192774, 'acc_stderr,none': 0.009930409027139453, 'alias': ' - xnli_ru'}, 'xnli_sw': {'acc,none': 0.3855421686746988, 'acc_stderr,none': 0.009755949341224318, 'alias': ' - xnli_sw'}, 'xnli_th': {'acc,none': 0.3437751004016064, 'acc_stderr,none': 0.009520310502882936, 'alias': ' - xnli_th'}, 'xnli_tr': {'acc,none': 0.3522088353413655, 'acc_stderr,none': 0.009574259292495757, 'alias': ' - xnli_tr'}, 'xnli_ur': {'acc,none': 0.42289156626506025, 'acc_stderr,none': 0.009902179034797438, 'alias': ' - xnli_ur'}, 'xnli_vi': {'acc,none': 0.44497991967871486, 'acc_stderr,none': 0.009961210239024633, 'alias': ' - xnli_vi'}, 'xnli_zh': {'acc,none': 0.3449799196787149, 'acc_stderr,none': 0.009528219800053311, 'alias': ' - xnli_zh'}, 'pawsx': {'acc,none': 0.5078571428571429, 'acc_stderr,none': 0.03988534011535243, 'alias': 'pawsx'}, 'paws_de': {'acc,none': 0.5175, 'acc_stderr,none': 0.011176284251254179, 'alias': ' - paws_de'}, 'paws_en': {'acc,none': 0.4145, 'acc_stderr,none': 0.011018419931591767, 'alias': ' - paws_en'}, 'paws_es': {'acc,none': 0.437, 'acc_stderr,none': 0.011094009127418984, 'alias': ' - paws_es'}, 'paws_fr': {'acc,none': 0.5435, 'acc_stderr,none': 0.011140733053371404, 'alias': ' - paws_fr'}, 'paws_ja': {'acc,none': 0.5575, 'acc_stderr,none': 0.01110894141174761, 'alias': ' - paws_ja'}, 'paws_ko': {'acc,none': 0.552, 'acc_stderr,none': 0.011122493197456285, 'alias': ' - paws_ko'}, 'paws_zh': {'acc,none': 0.533, 'acc_stderr,none': 0.01115875256825067, 'alias': ' - paws_zh'}, 'lambada_multilingual': {'perplexity,none': 131.45396740665825, 'perplexity_stderr,none': 95.28024178884175, 'acc,none': 0.38490199883562976, 'acc_stderr,none': 0.07608898792977997, 'alias': 'lambada_multilingual'}, 'lambada_openai_mt_de': {'perplexity,none': 370.91952810475857, 'perplexity_stderr,none': 24.98299339282566, 'acc,none': 0.23015718998641568, 'acc_stderr,none': 0.0058644241714399855, 'alias': ' - lambada_openai_mt_de'}, 'lambada_openai_mt_en': {'perplexity,none': 6.583236525584539, 'perplexity_stderr,none': 0.17481189179976453, 'acc,none': 0.5717058024451775, 'acc_stderr,none': 0.0068939712541951454, 'alias': ' - lambada_openai_mt_en'}, 'lambada_openai_mt_es': {'perplexity,none': 51.02874715706533, 'perplexity_stderr,none': 2.6341920857292744, 'acc,none': 0.36638851154667185, 'acc_stderr,none': 
0.0067126579546010565, 'alias': ' - lambada_openai_mt_es'}, 'lambada_openai_mt_fr': {'perplexity,none': 29.56217917543056, 'perplexity_stderr,none': 1.5411073949753211, 'acc,none': 0.4513875412381137, 'acc_stderr,none': 0.0069329758883686235, 'alias': ' - lambada_openai_mt_fr'}, 'lambada_openai_mt_it': {'perplexity,none': 199.1761460704524, 'perplexity_stderr,none': 13.648756866456297, 'acc,none': 0.30487094896176986, 'acc_stderr,none': 0.006413613926848421, 'alias': ' - lambada_openai_mt_it'}, 'xwinograd': {'acc,none': 0.7442121825129242, 'acc_stderr,none': 0.06414679137553342, 'alias': 'xwinograd'}, 'xwinograd_en': {'acc,none': 0.8219354838709677, 'acc_stderr,none': 0.007935777723887321, 'alias': ' - xwinograd_en'}, 'xwinograd_fr': {'acc,none': 0.6987951807228916, 'acc_stderr,none': 0.0506639425494172, 'alias': ' - xwinograd_fr'}, 'xwinograd_jp': {'acc,none': 0.6037539103232534, 'acc_stderr,none': 0.015802642616557255, 'alias': ' - xwinograd_jp'}, 'xwinograd_pt': {'acc,none': 0.7680608365019012, 'acc_stderr,none': 0.026075593860304693, 'alias': ' - xwinograd_pt'}, 'xwinograd_ru': {'acc,none': 0.5714285714285714, 'acc_stderr,none': 0.02792722339076032, 'alias': ' - xwinograd_ru'}, 'xwinograd_zh': {'acc,none': 0.7559523809523809, 'acc_stderr,none': 0.01915139944664688, 'alias': ' - xwinograd_zh'}, 'xstorycloze': {'acc,none': 0.5927441188857469, 'acc_stderr,none': 0.05262352730974911, 'alias': 'xstorycloze'}, 'xstorycloze_ar': {'acc,none': 0.5883520847121112, 'acc_stderr,none': 0.01266464832921408, 'alias': ' - xstorycloze_ar'}, 'xstorycloze_en': {'acc,none': 0.7081403044341495, 'acc_stderr,none': 0.01169925603764938, 'alias': ' - xstorycloze_en'}, 'xstorycloze_es': {'acc,none': 0.6598279285241562, 'acc_stderr,none': 0.012192034998028832, 'alias': ' - xstorycloze_es'}, 'xstorycloze_eu': {'acc,none': 0.57114493712773, 'acc_stderr,none': 0.012736202713147777, 'alias': ' - xstorycloze_eu'}, 'xstorycloze_hi': {'acc,none': 0.6048974189278623, 'acc_stderr,none': 0.012580772976133262, 'alias': ' - xstorycloze_hi'}, 'xstorycloze_id': {'acc,none': 0.6419589675711449, 'acc_stderr,none': 0.012337624883487575, 'alias': ' - xstorycloze_id'}, 'xstorycloze_my': {'acc,none': 0.48378557246856385, 'acc_stderr,none': 0.012860357805055867, 'alias': ' - xstorycloze_my'}, 'xstorycloze_ru': {'acc,none': 0.5268034414295168, 'acc_stderr,none': 0.012848623899505765, 'alias': ' - xstorycloze_ru'}, 'xstorycloze_sw': {'acc,none': 0.5413633355393779, 'acc_stderr,none': 0.012823020340169822, 'alias': ' - xstorycloze_sw'}, 'xstorycloze_te': {'acc,none': 0.5744540039708802, 'acc_stderr,none': 0.012723670419166326, 'alias': ' - xstorycloze_te'}, 'xstorycloze_zh': {'acc,none': 0.6194573130377233, 'acc_stderr,none': 0.012494500786685344, 'alias': ' - xstorycloze_zh'}}, 'groups': {'xcopa': {'acc,none': 0.5709090909090908, 'acc_stderr,none': 0.06135942275478038, 'alias': 'xcopa'}, 'xnli': {'acc,none': 0.41204819277108434, 'acc_stderr,none': 0.051535476594892576, 'alias': 'xnli'}, 'pawsx': {'acc,none': 0.5078571428571429, 'acc_stderr,none': 0.03988534011535243, 'alias': 'pawsx'}, 'lambada_multilingual': {'perplexity,none': 131.45396740665825, 'perplexity_stderr,none': 95.28024178884175, 'acc,none': 0.38490199883562976, 'acc_stderr,none': 0.07608898792977997, 'alias': 'lambada_multilingual'}, 'xwinograd': {'acc,none': 0.7442121825129242, 'acc_stderr,none': 0.06414679137553342, 'alias': 'xwinograd'}, 'xstorycloze': {'acc,none': 0.5927441188857469, 'acc_stderr,none': 0.05262352730974911, 'alias': 'xstorycloze'}}}}}\n" ] } ], 
"source": [ "import json\n", "\n", "# Global result map if it's not set\n", "if 'global_result_map' not in globals():\n", " global_result_map = {}\n", "\n", "#\n", "# Function to process the results.json file\n", "#\n", "def process_results_json(file_path):\n", " with open(file_path) as f:\n", " data = json.load(f)\n", "\n", " # Model args, presplit by ','\n", " model_args = data['config']['model_args'].split(',')\n", "\n", " # Extract the pretrained value from config.model_args\n", " modelname = model_args[0].split('=')[1]\n", "\n", " # Opt array\n", " confArgsArr = model_args[1:]\n", "\n", " # Sort the opt array\n", " confArgsArr.sort()\n", " # Convert it to a string\n", " confStr = ','.join(confArgsArr)\n", "\n", " # Convert the option array of key=value strings to a dictionary\n", " confObj = { }\n", " for o in confArgsArr:\n", " k, v = o.split('=')\n", " confObj[k] = v\n", " \n", " # Create a dictionary to store the results, or use the existing one if it exists\n", " if modelname in global_result_map:\n", " modelObj = global_result_map[modelname]\n", " else:\n", " modelObj = {\n", " 'name': modelname,\n", " 'config': { }\n", " }\n", " \n", " # Get the opt object for the model\n", " if confStr in modelObj['config']:\n", " confSet = modelObj['config'][confStr]\n", " else:\n", " confSet = {\n", " 'confStr': confStr,\n", " 'confObj': confObj,\n", " 'results': {},\n", " 'groups': {}\n", " }\n", "\n", " # Iterate over the results and extract the result object for each test/group\n", " if 'results' in data:\n", " for test, result in data['results'].items():\n", " confSet['results'][test] = result\n", " if 'groups' in data:\n", " for test, result in data['groups'].items():\n", " confSet['groups'][test] = result\n", " \n", " # Update the global result map object\n", " modelObj['config'][confStr] = confSet\n", " global_result_map[modelname] = modelObj\n", " return modelObj\n", "\n", "# Lets test the function with the first results.json file\n", "first_result = process_results_json(results_json_files[0])\n", "print(f\"Processed example: \", first_result)\n" ] }, { "cell_type": "code", "execution_count": 50, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 36 models\n", "Models: \n", "['bigscience/bloom-7b1', 'togethercomputer/RedPajama-INCITE-7B-Base', 'mistralai/Mistral-7B-v0.1', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-7b', 'mosaicml/mpt-7b-chat', 'bigscience/bloomz-7b1-mt', 'bigscience/bloomz-7b1', 'EleutherAI/pythia-2.8b', 'EleutherAI/pythia-1.4b', 'EleutherAI/gpt-j-6b', 'EleutherAI/pythia-6.9b', 'microsoft/phi-1_5', 'microsoft/phi-2', 'microsoft/phi-1', 'allenai/OLMo-7B', 'TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', 'RWKV/rwkv-5-world-1b5', 'RWKV/rwkv-5-world-3b', 'RWKV/rwkv-4-world-3b', 'RWKV/rwkv-4-world-1b5', 'RWKV/rwkv-4-world-7b', 'RWKV/HF_v5-Eagle-7B', 'togethercomputer/RedPajama-INCITE-7B-Instruct', 'togethercomputer/RedPajama-INCITE-7B-Chat', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-1.3b', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-rw-7b', 'tiiuae/falcon-7b', 'huggyllama/llama-7b', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-7b-hf']\n", "Saved to compiled-lm-eval-results.json\n" ] } ], "source": [ "# Lets reset and reprocess all the results.json files\n", "global_result_map = {}\n", "\n", "# Process all the results.json files\n", "for file in results_json_files:\n", " process_results_json(file)\n", "\n", "# Show high level list of models\n", 
"print(f\"Found {len(global_result_map)} models\")\n", "print(f\"Models: \\n{list(global_result_map.keys())}\")\n", "\n", "# Save the result map to a file\n", "with open('summary/compiled-lm-eval-results.json', 'w') as f:\n", " json.dump(global_result_map, f, sort_keys=True, indent='\\t')\n", "\n", "# Echo that its been saved to json\n", "print(f\"Saved to compiled-lm-eval-results.json\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Convert the results into CSV table formats" ] }, { "cell_type": "code", "execution_count": 56, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
modelavg_accavg_acc_stderrxcopa (acc)xcopa (acc_stderr)
0bigscience/bloom-7b10.5709090.0613590.5709090.061359
1togethercomputer/RedPajama-INCITE-7B-Base0.5254550.0364070.5254550.036407
2mistralai/Mistral-7B-v0.10.5587270.0551640.5587270.055164
3mosaicml/mpt-7b-instruct0.5370910.0419190.5370910.041919
4mosaicml/mpt-7b0.5360000.0423390.5360000.042339
5mosaicml/mpt-7b-chat0.5380000.0470590.5380000.047059
6bigscience/bloomz-7b1-mt0.5460000.0383210.5460000.038321
7bigscience/bloomz-7b10.5478180.0389200.5478180.038920
8EleutherAI/pythia-2.8b0.5374550.0269410.5374550.026941
9EleutherAI/pythia-1.4b0.5265450.0274410.5265450.027441
10EleutherAI/gpt-j-6b0.5441820.0344040.5441820.034404
11EleutherAI/pythia-6.9b0.5405450.0296890.5405450.029689
12microsoft/phi-1_50.5216360.0261980.5216360.026198
13microsoft/phi-20.5121820.0297420.5121820.029742
14microsoft/phi-10.5176360.0296120.5176360.029612
15allenai/OLMo-7B0.5378180.0341470.5378180.034147
16TinyLlama/TinyLlama-1.1B-intermediate-step-143...0.5292730.0293160.5292730.029316
17TinyLlama/TinyLlama-1.1B-Chat-v1.00.5289090.0317020.5289090.031702
18RWKV/rwkv-5-world-1b50.5789090.0451030.5789090.045103
19RWKV/rwkv-5-world-3b0.5901820.0562410.5901820.056241
20RWKV/rwkv-4-world-3b0.5754550.0409770.5754550.040977
21RWKV/rwkv-4-world-1b50.5540000.0394060.5540000.039406
22RWKV/rwkv-4-world-7b0.6014550.0531160.6014550.053116
23RWKV/HF_v5-Eagle-7B0.6218180.0689860.6218180.068986
24togethercomputer/RedPajama-INCITE-7B-Instruct0.5285450.0364700.5285450.036470
25togethercomputer/RedPajama-INCITE-7B-Chat0.5354550.0387230.5354550.038723
26facebook/opt-2.7b0.5218180.0298210.5218180.029821
27facebook/opt-6.7b0.5229090.0272160.5229090.027216
28facebook/opt-1.3b0.5218180.0291120.5218180.029112
29tiiuae/falcon-7b-instruct0.5367270.0534300.5367270.053430
30tiiuae/falcon-rw-1b0.5225450.0294460.5225450.029446
31tiiuae/falcon-rw-7b0.5358180.0331850.5358180.033185
32tiiuae/falcon-7b0.5596360.0716500.5596360.071650
33huggyllama/llama-7b0.5418180.0407180.5418180.040718
34meta-llama/Llama-2-7b-chat-hf0.0000000.000000NaNNaN
35meta-llama/Llama-2-7b-hf0.5667270.0525150.5667270.052515
\n", "
" ], "text/plain": [ " model avg_acc \\\n", "0 bigscience/bloom-7b1 0.570909 \n", "1 togethercomputer/RedPajama-INCITE-7B-Base 0.525455 \n", "2 mistralai/Mistral-7B-v0.1 0.558727 \n", "3 mosaicml/mpt-7b-instruct 0.537091 \n", "4 mosaicml/mpt-7b 0.536000 \n", "5 mosaicml/mpt-7b-chat 0.538000 \n", "6 bigscience/bloomz-7b1-mt 0.546000 \n", "7 bigscience/bloomz-7b1 0.547818 \n", "8 EleutherAI/pythia-2.8b 0.537455 \n", "9 EleutherAI/pythia-1.4b 0.526545 \n", "10 EleutherAI/gpt-j-6b 0.544182 \n", "11 EleutherAI/pythia-6.9b 0.540545 \n", "12 microsoft/phi-1_5 0.521636 \n", "13 microsoft/phi-2 0.512182 \n", "14 microsoft/phi-1 0.517636 \n", "15 allenai/OLMo-7B 0.537818 \n", "16 TinyLlama/TinyLlama-1.1B-intermediate-step-143... 0.529273 \n", "17 TinyLlama/TinyLlama-1.1B-Chat-v1.0 0.528909 \n", "18 RWKV/rwkv-5-world-1b5 0.578909 \n", "19 RWKV/rwkv-5-world-3b 0.590182 \n", "20 RWKV/rwkv-4-world-3b 0.575455 \n", "21 RWKV/rwkv-4-world-1b5 0.554000 \n", "22 RWKV/rwkv-4-world-7b 0.601455 \n", "23 RWKV/HF_v5-Eagle-7B 0.621818 \n", "24 togethercomputer/RedPajama-INCITE-7B-Instruct 0.528545 \n", "25 togethercomputer/RedPajama-INCITE-7B-Chat 0.535455 \n", "26 facebook/opt-2.7b 0.521818 \n", "27 facebook/opt-6.7b 0.522909 \n", "28 facebook/opt-1.3b 0.521818 \n", "29 tiiuae/falcon-7b-instruct 0.536727 \n", "30 tiiuae/falcon-rw-1b 0.522545 \n", "31 tiiuae/falcon-rw-7b 0.535818 \n", "32 tiiuae/falcon-7b 0.559636 \n", "33 huggyllama/llama-7b 0.541818 \n", "34 meta-llama/Llama-2-7b-chat-hf 0.000000 \n", "35 meta-llama/Llama-2-7b-hf 0.566727 \n", "\n", " avg_acc_stderr xcopa (acc) xcopa (acc_stderr) \n", "0 0.061359 0.570909 0.061359 \n", "1 0.036407 0.525455 0.036407 \n", "2 0.055164 0.558727 0.055164 \n", "3 0.041919 0.537091 0.041919 \n", "4 0.042339 0.536000 0.042339 \n", "5 0.047059 0.538000 0.047059 \n", "6 0.038321 0.546000 0.038321 \n", "7 0.038920 0.547818 0.038920 \n", "8 0.026941 0.537455 0.026941 \n", "9 0.027441 0.526545 0.027441 \n", "10 0.034404 0.544182 0.034404 \n", "11 0.029689 0.540545 0.029689 \n", "12 0.026198 0.521636 0.026198 \n", "13 0.029742 0.512182 0.029742 \n", "14 0.029612 0.517636 0.029612 \n", "15 0.034147 0.537818 0.034147 \n", "16 0.029316 0.529273 0.029316 \n", "17 0.031702 0.528909 0.031702 \n", "18 0.045103 0.578909 0.045103 \n", "19 0.056241 0.590182 0.056241 \n", "20 0.040977 0.575455 0.040977 \n", "21 0.039406 0.554000 0.039406 \n", "22 0.053116 0.601455 0.053116 \n", "23 0.068986 0.621818 0.068986 \n", "24 0.036470 0.528545 0.036470 \n", "25 0.038723 0.535455 0.038723 \n", "26 0.029821 0.521818 0.029821 \n", "27 0.027216 0.522909 0.027216 \n", "28 0.029112 0.521818 0.029112 \n", "29 0.053430 0.536727 0.053430 \n", "30 0.029446 0.522545 0.029446 \n", "31 0.033185 0.535818 0.033185 \n", "32 0.071650 0.559636 0.071650 \n", "33 0.040718 0.541818 0.040718 \n", "34 0.000000 NaN NaN \n", "35 0.052515 0.566727 0.052515 " ] }, "execution_count": 56, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Lets convert this into a table, which we will display in this notebook, and save as a CSV\n", "import pandas as pd\n", "\n", "##################################################\n", "#\n", "# Utility functions\n", "#\n", "##################################################\n", "\n", "# Check if the given name string, is within the list, including \"*\" wildcard\n", "def is_in_list(name, list):\n", " for n in list:\n", " if n[-1] == '*':\n", " if name.startswith(n[:-1]):\n", " return True\n", " elif n == name:\n", " return True\n", " return False\n", "\n", "# Is in inclusion 
exclusion list pair\n", "def is_in_list_pair(name, inList, exList):\n", " if not is_in_list(name, inList):\n", " return False\n", " if is_in_list(name, exList):\n", " return False\n", " return True\n", "\n", "# Prepare a single test/group result object\n", "# By applying common filtering and formatting changes\n", "def prepare_test_result(result):\n", " # The reutrn object\n", " ret = {}\n", " # Iterate the result key/value\n", " for k, v in result.items():\n", " # Skip if its alias\n", " if k == 'alias':\n", " continue\n", "\n", " # If the key ends with \",none\", drop the \",none\"\n", " if k.endswith(',none'):\n", " k = k[:-5]\n", " \n", " # Save the result\n", " ret[k] = v\n", " \n", " # Return the result\n", " return ret\n", "\n", "##################################################\n", "#\n", "# Generate the result\n", "#\n", "##################################################\n", "\n", "# Create a list of rows for the table\n", "def generate_result_table(\n", " inConfig = { \"dtype\": \"bfloat16\" },\n", "\n", " # Results and groups to include\n", " inResults = [],\n", " inGroups = [\"*\"],\n", "\n", " # Exclude results and groups, applied after inResults and inGroups\n", " exResults = [],\n", " exGroups = [],\n", "\n", " # Sorted\n", " sort = False\n", "):\n", " table_rows = []\n", "\n", " # Iterate over the models\n", " for model, modelObj in global_result_map.items():\n", " # Iterate over the configurations\n", " for confStr, confSet in modelObj['config'].items():\n", " # Get the confObj\n", " confObj = confSet['confObj']\n", "\n", " # Check if the inConfig, matches the confObj\n", " if inConfig:\n", " skip = False\n", " for k, v in inConfig.items():\n", " if k not in confObj or confObj[k] != v:\n", " skip = True\n", " break\n", " if skip:\n", " continue\n", "\n", " # Create a row object\n", " row = {\n", " 'model': model,\n", " # 'config': confStr\n", "\n", " \"avg_acc\": 0.0,\n", " \"avg_acc_stderr\": 0.0,\n", " }\n", "\n", " # Total acc / acc_stderr\n", " acc_total = 0.0\n", " acc_count = 0\n", " acc_stderr_total = 0.0\n", " acc_stderr_count = 0\n", "\n", " # Add the groups\n", " for test, result in confSet['groups'].items():\n", "\n", " # Skip if not in the inGroups or exGroups\n", " if not is_in_list_pair(test, inGroups, exGroups):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " row[f\"{test} ({k})\"] = v\n", "\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", "\n", " # Add the results\n", " for test, result in confSet['results'].items():\n", "\n", " # Skip if not in the inResults or exResults\n", " if not is_in_list_pair(test, inResults, exResults):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " row[f\"{test} ({k})\"] = v\n", "\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", " \n", " # Add the avg acc and acc_stderr\n", " if acc_count > 0:\n", " row[\"avg_acc\"] = acc_total / acc_count\n", " if acc_stderr_count > 0:\n", " row[\"avg_acc_stderr\"] = acc_stderr_total / acc_stderr_count\n", "\n", " # Append the row to the table\n", " 
table_rows.append(row)\n", "\n", " # Create a dataframe from the table rows\n", " df = pd.DataFrame(table_rows)\n", "\n", " # Sort by avg_acc\n", " if sort:\n", " df = df.sort_values(by='avg_acc', ascending=False)\n", "\n", " # Show the dataframe\n", " return df\n", "\n", "# Generate the dataframe\n", "df = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"xcopa\"], inResults=[] )\n", "\n", "# # Save the dataframe to a CSV file\n", "# df.to_csv('summary/compiled-lm-eval-results.csv', index=False)\n", "\n", "# Show results\n", "df\n" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total 13472\n", "-rw-r--r--@ 1 picocreator staff 822K Feb 22 13:33 bf16-all-results-and-groups.csv\n", "-rw-r--r-- 1 picocreator staff 750K Feb 22 13:33 bf16-eng-results.csv\n", "-rw-r--r--@ 1 picocreator staff 63K Feb 22 13:33 bf16-eng-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 83K Feb 22 13:33 bf16-multilang-results.csv\n", "-rw-r--r--@ 1 picocreator staff 12K Feb 22 13:33 bf16-multilang-summary.csv\n", "-rw-r--r-- 1 picocreator staff 750K Feb 22 13:33 bf16-sorted-eng-results.csv\n", "-rw-r--r-- 1 picocreator staff 12K Feb 22 13:33 bf16-sorted-multilang-summary.csv\n", "-rw-r--r-- 1 picocreator staff 3.1M Feb 22 13:29 compiled-lm-eval-results.json\n" ] } ], "source": [ "##################################################\n", "#\n", "# Build the various subsets\n", "#\n", "##################################################\n", "\n", "# Overall results\n", "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"] )\n", "all_results.to_csv('summary/bf16-all-results-and-groups.csv', index=False)\n", "\n", "# Multilang results\n", "multiLang_tTest = [\"xcopa_*\", \"xnli_*\", \"xstorycloze_*\", \"xwinograd_*\", \"lambada_openai_*\", \"pawsx_*\"]\n", "multiLang_tGrps = [\"xcopa\", \"xnli\", \"xstorycloze\", \"xwinograd\", \"lambada_multilingual\", \"pawsx\"]\n", "\n", "multilang_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[] )\n", "multilang_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=multiLang_tTest )\n", "multilang_grp.to_csv('summary/bf16-multilang-summary.csv', index=False)\n", "multilang_test.to_csv('summary/bf16-multilang-results.csv', index=False)\n", "\n", "multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], sort=True )\n", "multilang_grp_sorted.to_csv('summary/bf16-sorted-multilang-summary.csv', index=False)\n", "\n", "# All other results\n", "eng_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_tGrps )\n", "eng_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_tGrps, sort=True )\n", "eng_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exGroups=multiLang_tGrps, exResults=multiLang_tTest )\n", "eng_test_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exGroups=multiLang_tGrps, exResults=multiLang_tTest, sort=True )\n", "\n", "eng_grp.to_csv('summary/bf16-eng-summary.csv', index=False)\n", "eng_test.to_csv('summary/bf16-eng-results.csv', index=False)\n", 
"eng_test_sorted.to_csv('summary/bf16-sorted-eng-results.csv', index=False)\n", "eng_grp_sorted.to_csv('summary/bf16-sorted-eng-summary.csv', index=False)\n", "\n", "# List the files\n", "!ls -lh summary" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }