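"""Gradio app to compare evaluation results from the 🤗 Open LLM Leaderboard.

The app lists the latest result file per model from the results dataset,
lets the user load two of them, and displays their results and configs
side by side, optionally filtered by evaluation task.
"""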
import io
import json
from collections import defaultdict

import gradio as gr
import pandas as pd
from huggingface_hub import HfFileSystem

RESULTS_DATASET_ID = "datasets/open-llm-leaderboard/results"
EXCLUDED_KEYS = {
    "pretty_env_info",
    "chat_template",
    "group_subtasks",
}
EXCLUDED_RESULTS_KEYS = {
    "leaderboard",
}
EXCLUDED_RESULTS_LEADERBOARDS_KEYS = {
    "alias",
}
DEFAULT_HTML_TABLE = """
<table>
<thead>
<tr>
<th>Parameters</th>
<th>Model-1</th>
<th>Model-2</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
"""
TASKS = {
    # Task key: (label shown in the UI, prefix of the corresponding result/config keys).
    "leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
    "leaderboard_bbh": ("BBH", "leaderboard_bbh"),
    "leaderboard_gpqa": ("GPQA", "leaderboard_gpqa"),
    "leaderboard_ifeval": ("IFEval", "leaderboard_ifeval"),
    "leaderboard_math_hard": ("MATH", "leaderboard_math"),
    "leaderboard_mmlu": ("MMLU", "leaderboard_mmlu"),
    "leaderboard_mmlu_pro": ("MMLU-Pro", "leaderboard_mmlu_pro"),
    "leaderboard_musr": ("MuSR", "leaderboard_musr"),
}
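# Filesystem-like interface to the Hugging Face Hub, used to list and read the result JSON files.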
fs = HfFileSystem()


def fetch_result_paths():
    paths = fs.glob(f"{RESULTS_DATASET_ID}/**/**/*.json")
    return paths


def filter_latest_result_path_per_model(paths):
    d = defaultdict(list)
    for path in paths:
        model_id, _ = path[len(RESULTS_DATASET_ID) + 1:].rsplit("/", 1)
        d[model_id].append(path)
    # Result file names are assumed to sort lexicographically by date, so max() picks the latest one.
    return {model_id: max(paths) for model_id, paths in d.items()}


def get_result_path_from_model(model_id, result_path_per_model):
    return result_path_per_model[model_id]


def load_data(result_path) -> dict:
    with fs.open(result_path, "r") as f:
        data = json.load(f)
    return data


def load_result(model_id):
    result_path = get_result_path_from_model(model_id, latest_result_path_per_model)
    data = load_data(result_path)
    df = to_dataframe(data)
    result = [
        # to_vertical(df),
        to_vertical(filter_results(df)),
        to_vertical(filter_configs(df)),
    ]
    return result


def to_vertical(df):
    # Transpose so rows are parameters and the column is the model,
    # then join the tuple labels back into dotted parameter names.
    df = df.T.rename_axis("Parameters")
    df.index = df.index.str.join(".")
    return df


def to_dataframe(data):
    df = pd.json_normalize([{key: value for key, value in data.items() if key not in EXCLUDED_KEYS}])
    # Split the dotted column names into tuples;
    # df.columns.str.split(".") is not used because it returns lists instead of tuples.
    df.columns = list(map(lambda x: tuple(x.split(".")), df.columns))
    df.index = [data.get("model_name", "Model")]
    return df


def filter_results(df):
    # Keep only the "results" columns and drop the excluded keys.
    df = df.loc[:, df.columns.str[0] == "results"]
    df = df.loc[:, ~df.columns.str[1].isin(EXCLUDED_RESULTS_KEYS)]
    df = df.loc[:, ~df.columns.str[2].isin(EXCLUDED_RESULTS_LEADERBOARDS_KEYS)]
    # Drop the leading "results" level and the "leaderboard_" prefix from the task names.
    df.columns = df.columns.str[1:]
    df.columns = map(lambda x: (x[0].removeprefix("leaderboard_"), *x[1:]), df.columns)
    return df


def filter_configs(df):
    # Keep only the "configs" columns.
    df = df.loc[:, df.columns.str[0] == "configs"]
    # Drop the leading "configs" level and the "leaderboard_" prefix from the task names.
    df.columns = df.columns.str[1:]
    df.columns = map(lambda x: (x[0].removeprefix("leaderboard_"), *x[1:]), df.columns)
    return df


def concat_result_1(result_1, results):
    # Parse the currently displayed HTML table, keep its Model-2 column, and place the newly loaded result first.
    results = pd.read_html(io.StringIO(results))[0]
    df = (
        pd.concat([result_1, results.iloc[:, [0, 2]].set_index("Parameters")], axis=1)
        .reset_index()
    )
    return df


def display_dataframe(df):
    # Render as HTML without the row index and with missing values shown as empty cells.
    return (
        df.style
        .format(na_rep="")
        .hide(axis="index")
        .to_html()
    )


def concat_result_2(result_2, results):
    # Parse the currently displayed HTML table, keep its Model-1 column, and place the newly loaded result second.
    results = pd.read_html(io.StringIO(results))[0]
    df = (
        pd.concat([results.iloc[:, [0, 1]].set_index("Parameters"), result_2], axis=1)
        .reset_index()
    )
    return df


def render_result_1(model_id, task, *results):
    result = load_result(model_id)
    concat_results = [concat_result_1(*result_args) for result_args in zip(result, results)]
    if task and task != "All":
        # Keep only the rows whose parameter names start with the selected task prefix.
        concat_results = [df[df["Parameters"].str.startswith(task[len("leaderboard_"):])] for df in concat_results]
    return [display_dataframe(df) for df in concat_results]


def render_result_2(model_id, task, *results):
    result = load_result(model_id)
    concat_results = [concat_result_2(*result_args) for result_args in zip(result, results)]
    if task and task != "All":
        concat_results = [df[df["Parameters"].str.startswith(task[len("leaderboard_"):])] for df in concat_results]
    return [display_dataframe(df) for df in concat_results]


def render_results(model_id_1, model_id_2, task, *results):
    # Re-render both models: the tables produced for the first model feed the render of the second one.
    results = render_result_1(model_id_1, task, *results)
    return render_result_2(model_id_2, task, *results)


# Map each model to the path of its latest result file.
latest_result_path_per_model = filter_latest_result_path_per_model(fetch_result_paths())

with gr.Blocks(fill_height=True) as demo:
    gr.HTML("<h1 style='text-align: center;'>Compare Results of the 🤗 Open LLM Leaderboard</h1>")
    gr.HTML("<h3 style='text-align: center;'>Select 2 results to load and compare</h3>")
    with gr.Row():
        with gr.Column():
            model_id_1 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Results")
            load_btn_1 = gr.Button("Load")
        with gr.Column():
            model_id_2 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Results")
            load_btn_2 = gr.Button("Load")
    with gr.Row():
        task = gr.Radio(
            ["All"] + list(TASKS.values()),
            label="Tasks",
            info="Evaluation tasks to be displayed",
            value="All",
        )
    results = []
    with gr.Row():
        # An "All" tab combining both views could be added here in the same way as the tabs below.
        with gr.Tab("Results"):
            results.append(gr.HTML(value=DEFAULT_HTML_TABLE))
        with gr.Tab("Configs"):
            results.append(gr.HTML(value=DEFAULT_HTML_TABLE))
    load_btn_1.click(
        fn=render_result_1,
        inputs=[model_id_1, task, *results],
        outputs=[*results],
    )
    load_btn_2.click(
        fn=render_result_2,
        inputs=[model_id_2, task, *results],
        outputs=[*results],
    )
    task.change(
        fn=render_results,
        inputs=[model_id_1, model_id_2, task, *results],
        outputs=[*results],
    )

demo.launch()