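# Gradio app for a multilingual LLM leaderboard: collects per-language
# evaluation results (ARC, HellaSwag, MMLU, TruthfulQA) from JSON files
# under evals/ and renders them as a searchable table.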
import os
import json
import glob
from collections import defaultdict
import pandas as pd
import gradio as gr
from content import *
from css import *
ARC = "arc"
HELLASWAG = "hellaswag"
MMLU = "mmlu"
TRUTHFULQA = "truthfulqa"
BENCHMARKS = [ARC, HELLASWAG, MMLU, TRUTHFULQA]
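# Metric reported for each benchmark, index-aligned with BENCHMARKS
# (mc2 for TruthfulQA, acc_norm for the rest).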
METRICS = ["acc_norm", "acc_norm", "acc_norm", "mc2"]
LANGS = 'ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh'.split(',')
LANG_NAME = {
'ar': 'Arabic',
'bn': 'Bengali',
'ca': 'Catalan',
'da': 'Danish',
'de': 'German',
'es': 'Spanish',
'eu': 'Basque',
'fr': 'French',
'gu': 'Gujarati',
'hi': 'Hindi',
'hr': 'Croatian',
'hu': 'Hungarian',
'hy': 'Armenian',
'id': 'Indonesian',
'it': 'Italian',
'kn': 'Kannada',
'ml': 'Malayalam',
'mr': 'Marathi',
'ne': 'Nepali',
'nl': 'Dutch',
'pt': 'Portuguese',
'ro': 'Romanian',
'ru': 'Russian',
'sk': 'Slovak',
'sr': 'Serbian',
'sv': 'Swedish',
'ta': 'Tamil',
'te': 'Telugu',
'uk': 'Ukrainian',
'vi': 'Vietnamese',
'zh': 'Chinese'
}
def collect_results():
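    """Parse eval result JSON files under evals/ and return a
    (performance_dict, pretrained_models) pair, where performance_dict
    maps (model, lang) to a {benchmark: score} dict."""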
performance_dict = defaultdict(dict)
pretrained_models = set()
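    # Eval results are JSON files one directory deep under evals/.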
for file in glob.glob('evals/*/*.json'):
with open(file, 'r') as f:
data = json.load(f)
if 'results' not in data:
continue
if 'config' not in data:
continue
results = data['results']
config = data['config']
if 'model_args' not in config:
continue
model_args = config['model_args'].split(',')
pretrained = [x for x in model_args if x.startswith('pretrained=')]
if len(pretrained) != 1:
continue
pretrained = pretrained[0].split('=')[1]
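        # Keep only the final path component as the model name (drops the org prefix).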
pretrained = pretrained.split('/')[-1]
pretrained_models.add(pretrained)
for lang_task, perfs in results.items():
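            # Result keys look like '<benchmark>_<lang>', e.g. 'arc_ar'.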
task, lang = lang_task.split('_')
assert task in BENCHMARKS
if lang and task:
metric = METRICS[BENCHMARKS.index(task)]
p = round(perfs[metric] * 100, 1)
performance_dict[(pretrained, lang)][task] = p
return performance_dict, pretrained_models
def get_leaderboard_df(performance_dict, pretrained_models):
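    """Assemble the leaderboard DataFrame, one row per (model, language),
    dropping entries that are missing any of the four benchmark scores."""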
df = list()
for (pretrained, lang), perfs in performance_dict.items():
lang_name = LANG_NAME[lang]
arc_perf = perfs.get(ARC, 0.0)
hellaswag_perf = perfs.get(HELLASWAG, 0.0)
mmlu_perf = perfs.get(MMLU, 0.0)
truthfulqa_perf = perfs.get(TRUTHFULQA, 0.0)
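        # A 0.0 marks a missing benchmark (the .get default), so skip
        # models that lack any of the four scores.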
if arc_perf * hellaswag_perf * mmlu_perf * truthfulqa_perf == 0:
continue
avg = round((arc_perf + hellaswag_perf + mmlu_perf + truthfulqa_perf) / 4, 1)
notes = ' '.join([pretrained, lang_name])
row = [pretrained, lang_name, lang, avg, arc_perf, hellaswag_perf, mmlu_perf, truthfulqa_perf, notes]
df.append(row)
df = pd.DataFrame.from_records(df, columns=COLS)
df = df.sort_values(by=[LANG_COL, AVERAGE_COL], ascending=False)
df = df[COLS]
return df
def search_table(df, query):
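    """Case-insensitively filter rows whose Notes column (model name plus
    language name) contains the query string."""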
    filtered_df = df[df[NOTES_COL].str.contains(query, case=False, regex=False)]
return filtered_df
MODEL_COL = "Model"
LANG_COL = "Language"
CODE_COL = "Code"
AVERAGE_COL = "Average"
ARC_COL = "ARC (25-shot)"
HELLASWAG_COL = "HellaSwag (10-shot)️"
MMLU_COL = "MMLU (5-shot)"
TRUTHFULQA_COL = "TruthfulQA (0-shot)"
NOTES_COL = "Notes" # For search only
COLS = [MODEL_COL, LANG_COL, CODE_COL, AVERAGE_COL, ARC_COL, HELLASWAG_COL, MMLU_COL, TRUTHFULQA_COL, NOTES_COL]
TYPES = ["str", "str", "str", "number", "number", "number", "number", "number", "str"]
args = collect_results()
original_df = get_leaderboard_df(*args)
demo = gr.Blocks(css=CUSTOM_CSS)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRO_TEXT, elem_classes="markdown-text")
gr.Markdown(HOW_TO, elem_classes="markdown-text")
with gr.Box():
search_bar = gr.Textbox(
placeholder="Search models and languages...", show_label=False, elem_id="search-bar"
)
leaderboard_table = gr.components.Dataframe(
value=original_df,
headers=COLS,
datatype=TYPES,
max_rows=5,
elem_id="leaderboard-table",
)
    # Hidden full copy of the leaderboard; searching filters this copy, so
    # deleting query text (e.g. via backspace) restores the removed rows.
hidden_leaderboard_table_for_search = gr.components.Dataframe(
value=original_df, headers=COLS, datatype=TYPES, max_rows=5, visible=False
)
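    # On every keystroke, filter the hidden full table and display the result.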
search_bar.change(
search_table,
[hidden_leaderboard_table_for_search, search_bar],
leaderboard_table,
)
gr.Markdown(CREDIT, elem_classes="markdown-text")
gr.Markdown(CITATION, elem_classes="markdown-text")
demo.launch()