{ "cells": [ { "cell_type": "code", "execution_count": 67, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Defaulting to user installation because normal site-packages is not writeable\n", "Requirement already satisfied: pandas in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (2.2.0)\n", "Requirement already satisfied: numpy<2,>=1.22.4 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (1.26.1)\n", "Requirement already satisfied: pytz>=2020.1 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2.8.2)\n", "Requirement already satisfied: tzdata>=2022.7 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.15.0)\n", "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.0 is available.\n", "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" ] } ], "source": [ "!pip3 install pandas" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Get the filelist\n", "\n", "For the full results.json" ] }, { "cell_type": "code", "execution_count": 68, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 2489 results.json files\n" ] } ], "source": [ "import glob\n", "\n", "# Specify the path to the folder containing the results.json files\n", "folder_path = \"lm-eval-output\"\n", "\n", "# Use glob to find all the results.json files\n", "results_json_files = glob.glob(f\"{folder_path}/**/results.json\", recursive=True)\n", "\n", "# Show total number of results.json files found\n", "print(f\"Found {len(results_json_files)} results.json files\")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Process all the results.json\n", "\n", "One file at a time" ] }, { "cell_type": "code", "execution_count": 69, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processed example: {'name': 'mistralai/Mistral-7B-v0.1', 'config': {'dtype=bfloat16,trust_remote_code=True': {'confStr': 'dtype=bfloat16,trust_remote_code=True', 'confObj': {'dtype': 'bfloat16', 'trust_remote_code': 'True'}, 'results': {'xcopa': {'acc,none': 0.5587272727272727, 'acc_stderr,none': 0.0551636604460852, 'alias': 'xcopa'}, 'xcopa_et': {'acc,none': 0.466, 'acc_stderr,none': 0.02233126442325838, 'alias': ' - xcopa_et'}, 'xcopa_ht': {'acc,none': 0.512, 'acc_stderr,none': 0.02237662679792717, 'alias': ' - xcopa_ht'}, 'xcopa_id': {'acc,none': 0.582, 'acc_stderr,none': 0.022080014812228137, 'alias': ' - xcopa_id'}, 'xcopa_it': {'acc,none': 0.66, 'acc_stderr,none': 0.021206117013673066, 'alias': ' - xcopa_it'}, 'xcopa_qu': {'acc,none': 0.482, 'acc_stderr,none': 0.02236856511738799, 'alias': ' - xcopa_qu'}, 'xcopa_sw': {'acc,none': 0.518, 'acc_stderr,none': 0.02236856511738799, 'alias': ' - xcopa_sw'}, 'xcopa_ta': {'acc,none': 0.542, 'acc_stderr,none': 0.02230396677426995, 'alias': ' - xcopa_ta'}, 'xcopa_th': {'acc,none': 0.564, 'acc_stderr,none': 0.0221989546414768, 'alias': ' - xcopa_th'}, 'xcopa_tr': {'acc,none': 0.568, 'acc_stderr,none': 
0.02217510926561316, 'alias': ' - xcopa_tr'}, 'xcopa_vi': {'acc,none': 0.59, 'acc_stderr,none': 0.022017482578127672, 'alias': ' - xcopa_vi'}, 'xcopa_zh': {'acc,none': 0.662, 'acc_stderr,none': 0.021175665695209407, 'alias': ' - xcopa_zh'}, 'xnli': {'acc,none': 0.43175368139223563, 'acc_stderr,none': 0.0565098070106032, 'alias': 'xnli'}, 'xnli_ar': {'acc,none': 0.334136546184739, 'acc_stderr,none': 0.009454577602463621, 'alias': ' - xnli_ar'}, 'xnli_bg': {'acc,none': 0.4534136546184739, 'acc_stderr,none': 0.009978476483838962, 'alias': ' - xnli_bg'}, 'xnli_de': {'acc,none': 0.5012048192771085, 'acc_stderr,none': 0.01002204377131557, 'alias': ' - xnli_de'}, 'xnli_el': {'acc,none': 0.41365461847389556, 'acc_stderr,none': 0.009871502159099366, 'alias': ' - xnli_el'}, 'xnli_en': {'acc,none': 0.5690763052208835, 'acc_stderr,none': 0.009925970741520641, 'alias': ' - xnli_en'}, 'xnli_es': {'acc,none': 0.4562248995983936, 'acc_stderr,none': 0.009983589197693925, 'alias': ' - xnli_es'}, 'xnli_fr': {'acc,none': 0.5100401606425703, 'acc_stderr,none': 0.010020052116889137, 'alias': ' - xnli_fr'}, 'xnli_hi': {'acc,none': 0.42650602409638555, 'acc_stderr,none': 0.009913215943570534, 'alias': ' - xnli_hi'}, 'xnli_ru': {'acc,none': 0.4967871485943775, 'acc_stderr,none': 0.010021865961119557, 'alias': ' - xnli_ru'}, 'xnli_sw': {'acc,none': 0.363855421686747, 'acc_stderr,none': 0.009643393577626719, 'alias': ' - xnli_sw'}, 'xnli_th': {'acc,none': 0.38835341365461845, 'acc_stderr,none': 0.009769028875673285, 'alias': ' - xnli_th'}, 'xnli_tr': {'acc,none': 0.43654618473895584, 'acc_stderr,none': 0.009941039791133128, 'alias': ' - xnli_tr'}, 'xnli_ur': {'acc,none': 0.3381526104417671, 'acc_stderr,none': 0.009482500057981031, 'alias': ' - xnli_ur'}, 'xnli_vi': {'acc,none': 0.41244979919678715, 'acc_stderr,none': 0.009867237678555586, 'alias': ' - xnli_vi'}, 'xnli_zh': {'acc,none': 0.3759036144578313, 'acc_stderr,none': 0.00970848885066604, 'alias': ' - xnli_zh'}, 'pawsx': {'acc,none': 0.41585714285714287, 'acc_stderr,none': 0.05538778178867068, 'alias': 'pawsx'}, 'paws_de': {'acc,none': 0.385, 'acc_stderr,none': 0.010883323176386978, 'alias': ' - paws_de'}, 'paws_en': {'acc,none': 0.3125, 'acc_stderr,none': 0.010367044555050548, 'alias': ' - paws_en'}, 'paws_es': {'acc,none': 0.356, 'acc_stderr,none': 0.010709311120344539, 'alias': ' - paws_es'}, 'paws_fr': {'acc,none': 0.4885, 'acc_stderr,none': 0.011180177690296085, 'alias': ' - paws_fr'}, 'paws_ja': {'acc,none': 0.534, 'acc_stderr,none': 0.011157250652425779, 'alias': ' - paws_ja'}, 'paws_ko': {'acc,none': 0.4175, 'acc_stderr,none': 0.011029855114729358, 'alias': ' - paws_ko'}, 'paws_zh': {'acc,none': 0.4175, 'acc_stderr,none': 0.011029855114729354, 'alias': ' - paws_zh'}, 'lambada_multilingual': {'perplexity,none': 27.047409162154935, 'perplexity_stderr,none': 8.199911438395738, 'acc,none': 0.5190374539103435, 'acc_stderr,none': 0.07089117907004505, 'alias': 'lambada_multilingual'}, 'lambada_openai_mt_de': {'perplexity,none': 43.294453054791916, 'perplexity_stderr,none': 2.4066806886162686, 'acc,none': 0.39996118765767513, 'acc_stderr,none': 0.006825125929166165, 'alias': ' - lambada_openai_mt_de'}, 'lambada_openai_mt_en': {'perplexity,none': 3.1814104914677763, 'perplexity_stderr,none': 0.05822157255540461, 'acc,none': 0.7554822433533864, 'acc_stderr,none': 0.005987967089937308, 'alias': ' - lambada_openai_mt_en'}, 'lambada_openai_mt_es': {'perplexity,none': 36.26423960927208, 'perplexity_stderr,none': 1.790606090078102, 'acc,none': 0.42790607413157383, 
'acc_stderr,none': 0.00689318551693077, 'alias': ' - lambada_openai_mt_es'}, 'lambada_openai_mt_fr': {'perplexity,none': 22.218390608610928, 'perplexity_stderr,none': 1.1061897900321798, 'acc,none': 0.5214438191344848, 'acc_stderr,none': 0.006959568274744848, 'alias': ' - lambada_openai_mt_fr'}, 'lambada_openai_mt_it': {'perplexity,none': 30.278552046631987, 'perplexity_stderr,none': 1.6707259318257452, 'acc,none': 0.49039394527459734, 'acc_stderr,none': 0.006964691949428186, 'alias': ' - lambada_openai_mt_it'}, 'xwinograd': {'acc,none': 0.8141155315801304, 'acc_stderr,none': 0.047153752482205775, 'alias': 'xwinograd'}, 'xwinograd_en': {'acc,none': 0.8868817204301075, 'acc_stderr,none': 0.0065702392696682255, 'alias': ' - xwinograd_en'}, 'xwinograd_fr': {'acc,none': 0.7469879518072289, 'acc_stderr,none': 0.048008758304372776, 'alias': ' - xwinograd_fr'}, 'xwinograd_jp': {'acc,none': 0.721584984358707, 'acc_stderr,none': 0.014481292182837467, 'alias': ' - xwinograd_jp'}, 'xwinograd_pt': {'acc,none': 0.7642585551330798, 'acc_stderr,none': 0.026223308206222536, 'alias': ' - xwinograd_pt'}, 'xwinograd_ru': {'acc,none': 0.6888888888888889, 'acc_stderr,none': 0.02612567541895451, 'alias': ' - xwinograd_ru'}, 'xwinograd_zh': {'acc,none': 0.7698412698412699, 'acc_stderr,none': 0.018768533005904867, 'alias': ' - xwinograd_zh'}, 'xstorycloze': {'acc,none': 0.5916611515552614, 'acc_stderr,none': 0.07711658992261772, 'alias': 'xstorycloze'}, 'xstorycloze_ar': {'acc,none': 0.5294506949040371, 'acc_stderr,none': 0.012844785490016997, 'alias': ' - xstorycloze_ar'}, 'xstorycloze_en': {'acc,none': 0.786896095301125, 'acc_stderr,none': 0.010538187590034574, 'alias': ' - xstorycloze_en'}, 'xstorycloze_es': {'acc,none': 0.6909331568497684, 'acc_stderr,none': 0.011892023305070085, 'alias': ' - xstorycloze_es'}, 'xstorycloze_eu': {'acc,none': 0.5109199205823958, 'acc_stderr,none': 0.012864056278255043, 'alias': ' - xstorycloze_eu'}, 'xstorycloze_hi': {'acc,none': 0.5539377895433488, 'acc_stderr,none': 0.012792037953589649, 'alias': ' - xstorycloze_hi'}, 'xstorycloze_id': {'acc,none': 0.5936465916611515, 'acc_stderr,none': 0.012639429420389871, 'alias': ' - xstorycloze_id'}, 'xstorycloze_my': {'acc,none': 0.4884182660489742, 'acc_stderr,none': 0.012863672949335892, 'alias': ' - xstorycloze_my'}, 'xstorycloze_ru': {'acc,none': 0.6651224354731966, 'acc_stderr,none': 0.012145219027833156, 'alias': ' - xstorycloze_ru'}, 'xstorycloze_sw': {'acc,none': 0.5129053606882858, 'acc_stderr,none': 0.012862838605728476, 'alias': ' - xstorycloze_sw'}, 'xstorycloze_te': {'acc,none': 0.5413633355393779, 'acc_stderr,none': 0.012823020340169815, 'alias': ' - xstorycloze_te'}, 'xstorycloze_zh': {'acc,none': 0.6346790205162144, 'acc_stderr,none': 0.012391557728373984, 'alias': ' - xstorycloze_zh'}}, 'groups': {'xcopa': {'acc,none': 0.5587272727272727, 'acc_stderr,none': 0.0551636604460852, 'alias': 'xcopa'}, 'xnli': {'acc,none': 0.43175368139223563, 'acc_stderr,none': 0.0565098070106032, 'alias': 'xnli'}, 'pawsx': {'acc,none': 0.41585714285714287, 'acc_stderr,none': 0.05538778178867068, 'alias': 'pawsx'}, 'lambada_multilingual': {'perplexity,none': 27.047409162154935, 'perplexity_stderr,none': 8.199911438395738, 'acc,none': 0.5190374539103435, 'acc_stderr,none': 0.07089117907004505, 'alias': 'lambada_multilingual'}, 'xwinograd': {'acc,none': 0.8141155315801304, 'acc_stderr,none': 0.047153752482205775, 'alias': 'xwinograd'}, 'xstorycloze': {'acc,none': 0.5916611515552614, 'acc_stderr,none': 0.07711658992261772, 'alias': 
'xstorycloze'}}}}}\n" ] } ], "source": [ "import json\n", "\n", "# Set up the global result map, if it's not already set\n", "if 'global_result_map' not in globals():\n", "    global_result_map = {}\n", "\n", "#\n", "# Function to process a single results.json file\n", "#\n", "def process_results_json(file_path):\n", "    with open(file_path) as f:\n", "        data = json.load(f)\n", "\n", "    # Model args, pre-split by ','\n", "    model_args = data['config']['model_args'].split(',')\n", "\n", "    # Extract the pretrained value from config.model_args\n", "    modelname = model_args[0].split('=')[1]\n", "\n", "    # The remaining args are the config options\n", "    confArgsArr = model_args[1:]\n", "\n", "    # Sort the options array\n", "    confArgsArr.sort()\n", "    # Convert it to a string\n", "    confStr = ','.join(confArgsArr)\n", "\n", "    # Convert the option array of key=value strings to a dictionary\n", "    confObj = { }\n", "    for o in confArgsArr:\n", "        k, v = o.split('=')\n", "        confObj[k] = v\n", "    \n", "    # Create a dictionary to store the results, or use the existing one if it exists\n", "    if modelname in global_result_map:\n", "        modelObj = global_result_map[modelname]\n", "    else:\n", "        modelObj = {\n", "            'name': modelname,\n", "            'config': { }\n", "        }\n", "    \n", "    # Get the existing config set for this option string, if any\n", "    if confStr in modelObj['config']:\n", "        confSet = modelObj['config'][confStr]\n", "    else:\n", "        confSet = {\n", "            'confStr': confStr,\n", "            'confObj': confObj,\n", "            'results': {},\n", "            'groups': {}\n", "        }\n", "\n", "    # Iterate over the results and extract the result object for each test/group\n", "    if 'results' in data:\n", "        for test, result in data['results'].items():\n", "            confSet['results'][test] = result\n", "    if 'groups' in data:\n", "        for test, result in data['groups'].items():\n", "            confSet['groups'][test] = result\n", "    \n", "    # Update the global result map object\n", "    modelObj['config'][confStr] = confSet\n", "    global_result_map[modelname] = modelObj\n", "    return modelObj\n", "\n", "# Let's test the function with the first results.json file\n", "first_result = process_results_json(results_json_files[0])\n", "print(f\"Processed example: \", first_result)\n" ] }, { "cell_type": "code", "execution_count": 70, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 38 models\n", "Models: \n", "['mistralai/Mistral-7B-v0.1', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-7b', 'mosaicml/mpt-7b-chat', 'bigscience/bloom-7b1', 'bigscience/bloomz-7b1-mt', 'bigscience/bloomz-7b1', 'EleutherAI/pythia-2.8b', 'EleutherAI/pythia-1.4b', 'EleutherAI/gpt-j-6b', 'EleutherAI/pythia-6.9b', 'google/gemma-2b', 'google/gemma-7b', 'microsoft/phi-1_5', 'microsoft/phi-2', 'microsoft/phi-1', 'allenai/OLMo-7B', 'TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', 'RWKV/rwkv-5-world-1b5', 'RWKV/rwkv-5-world-3b', 'RWKV/rwkv-4-world-3b', 'RWKV/rwkv-4-world-1b5', 'RWKV/rwkv-4-world-7b', 'RWKV/HF_v5-Eagle-7B', 'togethercomputer/RedPajama-INCITE-7B-Base', 'togethercomputer/RedPajama-INCITE-7B-Instruct', 'togethercomputer/RedPajama-INCITE-7B-Chat', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-1.3b', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-rw-7b', 'tiiuae/falcon-7b', 'huggyllama/llama-7b', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-7b-hf']\n", "Saved to compiled-lm-eval-results.json\n" ] } ], "source": [ "# Let's reset and reprocess all the results.json files\n", "global_result_map = {}\n", "\n", "# Process all the results.json files\n", "for file in results_json_files:\n",
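"    # Accumulate this run's results into the global result map\n",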
"    process_results_json(file)\n", "\n", "# Show a high-level list of the models found\n", "print(f\"Found {len(global_result_map)} models\")\n", "print(f\"Models: \\n{list(global_result_map.keys())}\")\n", "\n", "# Save the result map to a file\n", "with open('summary/compiled-lm-eval-results.json', 'w') as f:\n", "    json.dump(global_result_map, f, sort_keys=True, indent='\\t')\n", "\n", "# Echo that it's been saved to JSON\n", "print(f\"Saved to compiled-lm-eval-results.json\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Convert the results into CSV table formats" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
modelavg_accavg_acc_stderrxcopa (acc)xcopa (acc_stderr)
0mistralai/Mistral-7B-v0.10.5587270.0551640.5587270.055164
1mosaicml/mpt-7b-instruct0.5370910.0419190.5370910.041919
2mosaicml/mpt-7b0.5360000.0423390.5360000.042339
3mosaicml/mpt-7b-chat0.5380000.0470590.5380000.047059
4bigscience/bloom-7b10.5709090.0613590.5709090.061359
5bigscience/bloomz-7b1-mt0.5460000.0383210.5460000.038321
6bigscience/bloomz-7b10.5478180.0389200.5478180.038920
7EleutherAI/pythia-2.8b0.5374550.0269410.5374550.026941
8EleutherAI/pythia-1.4b0.5265450.0274410.5265450.027441
9EleutherAI/gpt-j-6b0.5441820.0344040.5441820.034404
10EleutherAI/pythia-6.9b0.5405450.0296890.5405450.029689
11google/gemma-2b0.5205450.0253320.5205450.025332
12google/gemma-7b0.5176360.0273300.5176360.027330
13microsoft/phi-1_50.5216360.0261980.5216360.026198
14microsoft/phi-20.5121820.0297420.5121820.029742
15microsoft/phi-10.5176360.0296120.5176360.029612
16allenai/OLMo-7B0.5378180.0341470.5378180.034147
17TinyLlama/TinyLlama-1.1B-intermediate-step-143...0.5292730.0293160.5292730.029316
18TinyLlama/TinyLlama-1.1B-Chat-v1.00.5289090.0317020.5289090.031702
19RWKV/rwkv-5-world-1b50.5789090.0451030.5789090.045103
20RWKV/rwkv-5-world-3b0.5901820.0562410.5901820.056241
21RWKV/rwkv-4-world-3b0.5754550.0409770.5754550.040977
22RWKV/rwkv-4-world-1b50.5540000.0394060.5540000.039406
23RWKV/rwkv-4-world-7b0.6014550.0531160.6014550.053116
24RWKV/HF_v5-Eagle-7B0.6218180.0689860.6218180.068986
25togethercomputer/RedPajama-INCITE-7B-Base0.5254550.0364070.5254550.036407
26togethercomputer/RedPajama-INCITE-7B-Instruct0.5285450.0364700.5285450.036470
27togethercomputer/RedPajama-INCITE-7B-Chat0.5354550.0387230.5354550.038723
28facebook/opt-2.7b0.5218180.0298210.5218180.029821
29facebook/opt-6.7b0.5229090.0272160.5229090.027216
30facebook/opt-1.3b0.5218180.0291120.5218180.029112
31tiiuae/falcon-7b-instruct0.5367270.0534300.5367270.053430
32tiiuae/falcon-rw-1b0.5225450.0294460.5225450.029446
33tiiuae/falcon-rw-7b0.5358180.0331850.5358180.033185
34tiiuae/falcon-7b0.5596360.0716500.5596360.071650
35huggyllama/llama-7b0.5418180.0407180.5418180.040718
36meta-llama/Llama-2-7b-chat-hf0.0000000.000000NaNNaN
37meta-llama/Llama-2-7b-hf0.5667270.0525150.5667270.052515
\n", "
" ], "text/plain": [ " model avg_acc \\\n", "0 mistralai/Mistral-7B-v0.1 0.558727 \n", "1 mosaicml/mpt-7b-instruct 0.537091 \n", "2 mosaicml/mpt-7b 0.536000 \n", "3 mosaicml/mpt-7b-chat 0.538000 \n", "4 bigscience/bloom-7b1 0.570909 \n", "5 bigscience/bloomz-7b1-mt 0.546000 \n", "6 bigscience/bloomz-7b1 0.547818 \n", "7 EleutherAI/pythia-2.8b 0.537455 \n", "8 EleutherAI/pythia-1.4b 0.526545 \n", "9 EleutherAI/gpt-j-6b 0.544182 \n", "10 EleutherAI/pythia-6.9b 0.540545 \n", "11 google/gemma-2b 0.520545 \n", "12 google/gemma-7b 0.517636 \n", "13 microsoft/phi-1_5 0.521636 \n", "14 microsoft/phi-2 0.512182 \n", "15 microsoft/phi-1 0.517636 \n", "16 allenai/OLMo-7B 0.537818 \n", "17 TinyLlama/TinyLlama-1.1B-intermediate-step-143... 0.529273 \n", "18 TinyLlama/TinyLlama-1.1B-Chat-v1.0 0.528909 \n", "19 RWKV/rwkv-5-world-1b5 0.578909 \n", "20 RWKV/rwkv-5-world-3b 0.590182 \n", "21 RWKV/rwkv-4-world-3b 0.575455 \n", "22 RWKV/rwkv-4-world-1b5 0.554000 \n", "23 RWKV/rwkv-4-world-7b 0.601455 \n", "24 RWKV/HF_v5-Eagle-7B 0.621818 \n", "25 togethercomputer/RedPajama-INCITE-7B-Base 0.525455 \n", "26 togethercomputer/RedPajama-INCITE-7B-Instruct 0.528545 \n", "27 togethercomputer/RedPajama-INCITE-7B-Chat 0.535455 \n", "28 facebook/opt-2.7b 0.521818 \n", "29 facebook/opt-6.7b 0.522909 \n", "30 facebook/opt-1.3b 0.521818 \n", "31 tiiuae/falcon-7b-instruct 0.536727 \n", "32 tiiuae/falcon-rw-1b 0.522545 \n", "33 tiiuae/falcon-rw-7b 0.535818 \n", "34 tiiuae/falcon-7b 0.559636 \n", "35 huggyllama/llama-7b 0.541818 \n", "36 meta-llama/Llama-2-7b-chat-hf 0.000000 \n", "37 meta-llama/Llama-2-7b-hf 0.566727 \n", "\n", " avg_acc_stderr xcopa (acc) xcopa (acc_stderr) \n", "0 0.055164 0.558727 0.055164 \n", "1 0.041919 0.537091 0.041919 \n", "2 0.042339 0.536000 0.042339 \n", "3 0.047059 0.538000 0.047059 \n", "4 0.061359 0.570909 0.061359 \n", "5 0.038321 0.546000 0.038321 \n", "6 0.038920 0.547818 0.038920 \n", "7 0.026941 0.537455 0.026941 \n", "8 0.027441 0.526545 0.027441 \n", "9 0.034404 0.544182 0.034404 \n", "10 0.029689 0.540545 0.029689 \n", "11 0.025332 0.520545 0.025332 \n", "12 0.027330 0.517636 0.027330 \n", "13 0.026198 0.521636 0.026198 \n", "14 0.029742 0.512182 0.029742 \n", "15 0.029612 0.517636 0.029612 \n", "16 0.034147 0.537818 0.034147 \n", "17 0.029316 0.529273 0.029316 \n", "18 0.031702 0.528909 0.031702 \n", "19 0.045103 0.578909 0.045103 \n", "20 0.056241 0.590182 0.056241 \n", "21 0.040977 0.575455 0.040977 \n", "22 0.039406 0.554000 0.039406 \n", "23 0.053116 0.601455 0.053116 \n", "24 0.068986 0.621818 0.068986 \n", "25 0.036407 0.525455 0.036407 \n", "26 0.036470 0.528545 0.036470 \n", "27 0.038723 0.535455 0.038723 \n", "28 0.029821 0.521818 0.029821 \n", "29 0.027216 0.522909 0.027216 \n", "30 0.029112 0.521818 0.029112 \n", "31 0.053430 0.536727 0.053430 \n", "32 0.029446 0.522545 0.029446 \n", "33 0.033185 0.535818 0.033185 \n", "34 0.071650 0.559636 0.071650 \n", "35 0.040718 0.541818 0.040718 \n", "36 0.000000 NaN NaN \n", "37 0.052515 0.566727 0.052515 " ] }, "execution_count": 71, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Lets convert this into a table, which we will display in this notebook, and save as a CSV\n", "import pandas as pd\n", "\n", "##################################################\n", "#\n", "# Utility functions\n", "#\n", "##################################################\n", "\n", "# Check if the given name string, is within the list, including \"*\" wildcard\n", "def is_in_list(name, list):\n", " for n in list:\n", " if n[-1] == 
'*':\n", " if name.startswith(n[:-1]):\n", " return True\n", " elif n == name:\n", " return True\n", " return False\n", "\n", "# Is in inclusion exclusion list pair\n", "def is_in_list_pair(name, inList, exList):\n", " if not is_in_list(name, inList):\n", " return False\n", " if is_in_list(name, exList):\n", " return False\n", " return True\n", "\n", "# Prepare a single test/group result object\n", "# By applying common filtering and formatting changes\n", "def prepare_test_result(result):\n", " # The reutrn object\n", " ret = {}\n", " # Iterate the result key/value\n", " for k, v in result.items():\n", " # Skip if its alias\n", " if k == 'alias':\n", " continue\n", "\n", " # If the key ends with \",none\", drop the \",none\"\n", " if k.endswith(',none'):\n", " k = k[:-5]\n", " \n", " # Save the result\n", " ret[k] = v\n", " \n", " # Return the result\n", " return ret\n", "\n", "##################################################\n", "#\n", "# Generate the result\n", "#\n", "##################################################\n", "\n", "# Create a list of rows for the table\n", "def generate_result_table(\n", " inConfig = { \"dtype\": \"bfloat16\" },\n", "\n", " # Results and groups to include\n", " inResults = [],\n", " inGroups = [\"*\"],\n", "\n", " # Exclude results and groups, applied after inResults and inGroups\n", " exResults = [],\n", " exGroups = [],\n", "\n", " # Sorted\n", " sort = False\n", "):\n", " table_rows = []\n", "\n", " # Iterate over the models\n", " for model, modelObj in global_result_map.items():\n", " # Iterate over the configurations\n", " for confStr, confSet in modelObj['config'].items():\n", " # Get the confObj\n", " confObj = confSet['confObj']\n", "\n", " # Check if the inConfig, matches the confObj\n", " if inConfig:\n", " skip = False\n", " for k, v in inConfig.items():\n", " if k not in confObj or confObj[k] != v:\n", " skip = True\n", " break\n", " if skip:\n", " continue\n", "\n", " # Create a row object\n", " row = {\n", " 'model': model,\n", " # 'config': confStr\n", "\n", " \"avg_acc\": 0.0,\n", " \"avg_acc_stderr\": 0.0,\n", " }\n", "\n", " # Total acc / acc_stderr\n", " acc_total = 0.0\n", " acc_count = 0\n", " acc_stderr_total = 0.0\n", " acc_stderr_count = 0\n", "\n", " # Add the groups\n", " for test, result in confSet['groups'].items():\n", "\n", " # Skip if not in the inGroups or exGroups\n", " if not is_in_list_pair(test, inGroups, exGroups):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " row[f\"{test} ({k})\"] = v\n", "\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", "\n", " # Add the results\n", " for test, result in confSet['results'].items():\n", "\n", " # Skip if not in the inResults or exResults\n", " if not is_in_list_pair(test, inResults, exResults):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " row[f\"{test} ({k})\"] = v\n", "\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", " \n", " # Add the avg acc and acc_stderr\n", " if acc_count > 0:\n", " row[\"avg_acc\"] = acc_total / acc_count\n", " if 
acc_stderr_count > 0:\n", " row[\"avg_acc_stderr\"] = acc_stderr_total / acc_stderr_count\n", "\n", " # Append the row to the table\n", " table_rows.append(row)\n", "\n", " # Create a dataframe from the table rows\n", " df = pd.DataFrame(table_rows)\n", "\n", " # Sort by avg_acc\n", " if sort:\n", " df = df.sort_values(by='avg_acc', ascending=False)\n", "\n", " # Show the dataframe\n", " return df\n", "\n", "# Generate the dataframe\n", "df = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"xcopa\"], inResults=[] )\n", "\n", "# # Save the dataframe to a CSV file\n", "# df.to_csv('summary/compiled-lm-eval-results.csv', index=False)\n", "\n", "# Show results\n", "df\n" ] }, { "cell_type": "code", "execution_count": 72, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total 14648\n", "-rw-r--r--@ 1 picocreator staff 928K Feb 22 16:11 bf16-all-results-and-groups.csv\n", "-rw-r--r--@ 1 picocreator staff 853K Feb 22 16:11 bf16-eng-results.csv\n", "-rw-r--r--@ 1 picocreator staff 72K Feb 22 16:11 bf16-eng-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 88K Feb 22 16:11 bf16-multilang-results.csv\n", "-rw-r--r--@ 1 picocreator staff 12K Feb 22 16:11 bf16-multilang-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 853K Feb 22 16:11 bf16-sorted-eng-results.csv\n", "-rw-r--r--@ 1 picocreator staff 72K Feb 22 16:11 bf16-sorted-eng-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 12K Feb 22 16:11 bf16-sorted-multilang-summary.csv\n", "-rw-r--r-- 1 picocreator staff 3.6M Feb 22 16:11 compiled-lm-eval-results.json\n" ] } ], "source": [ "##################################################\n", "#\n", "# Build the various subsets\n", "#\n", "##################################################\n", "\n", "# Overall results\n", "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"] )\n", "all_results.to_csv('summary/bf16-all-results-and-groups.csv', index=False)\n", "\n", "# Multilang results\n", "multiLang_tTest = [\"xcopa_*\", \"xnli_*\", \"xstorycloze_*\", \"xwinograd_*\", \"lambada_openai_*\", \"pawsx_*\"]\n", "multiLang_tGrps = [\"xcopa\", \"xnli\", \"xstorycloze\", \"xwinograd\", \"lambada_multilingual\", \"pawsx\"]\n", "\n", "multilang_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[] )\n", "multilang_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=multiLang_tTest )\n", "multilang_grp.to_csv('summary/bf16-multilang-summary.csv', index=False)\n", "multilang_test.to_csv('summary/bf16-multilang-results.csv', index=False)\n", "\n", "multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], sort=True )\n", "multilang_grp_sorted.to_csv('summary/bf16-sorted-multilang-summary.csv', index=False)\n", "\n", "# All other results\n", "eng_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_tGrps )\n", "eng_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_tGrps, sort=True )\n", "eng_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exGroups=multiLang_tGrps, exResults=multiLang_tTest )\n", "eng_test_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], 
exGroups=multiLang_tGrps, exResults=multiLang_tTest, sort=True )\n", "\n", "eng_grp.to_csv('summary/bf16-eng-summary.csv', index=False)\n", "eng_test.to_csv('summary/bf16-eng-results.csv', index=False)\n", "eng_test_sorted.to_csv('summary/bf16-sorted-eng-results.csv', index=False)\n", "eng_grp_sorted.to_csv('summary/bf16-sorted-eng-summary.csv', index=False)\n", "\n", "# List the files\n", "!ls -lh summary" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }