"""A gradio app that renders a static leaderboard. This is used for Hugging Face Space."""
import ast
import argparse
import glob
import pickle
import gradio as gr
import numpy as np
import pandas as pd
leader_component_values = [None]
space = "   "
def make_default_md(arena_df, elo_results):
    leaderboard_md = f"""
# NeurIPS LLM Merging Competition Leaderboard
[Website](https://llm-merging.github.io/index) | [Starter Kit (GitHub)](https://github.com/llm-merging/LLM-Merging) | [Discord](https://discord.com/invite/dPBHEVnV)
"""
    return leaderboard_md
def make_arena_leaderboard_md(arena_df):
    total_models = len(arena_df)
    leaderboard_md = f"""
Three benchmarks are displayed: **Test Task 1**, **Test Task 2**, **Test Task 3**.
Higher values are better for all benchmarks.
Total #models: **{total_models}**.{space} Last updated: June 1, 2024.
"""
    return leaderboard_md
def make_category_arena_leaderboard_md(arena_df, arena_subset_df, name="Overall"):
    total_models = len(arena_df)
    total_subset_models = len(arena_subset_df)
    leaderboard_md = f"""### {cat_name_to_explanation[name]}
#### [Coverage] {space} #models: **{total_subset_models} ({round(total_subset_models / total_models * 100)}%)**{space}
"""
    return leaderboard_md
def make_leaderboard_md_live(elo_results):
    leaderboard_md = f"""
# Leaderboard
Last updated: {elo_results["last_updated_datetime"]}
{elo_results["leaderboard_table"]}
"""
    return leaderboard_md
def load_leaderboard_table_csv(filename, add_hyperlink=False):
    # Parse the CSV into a list of dicts, converting the known numeric columns.
    with open(filename) as fin:
        lines = fin.readlines()
    heads = [v.strip() for v in lines[0].split(",")]
    rows = []
    for i in range(1, len(lines)):
        row = [v.strip() for v in lines[i].split(",")]
        item = {}
        for h, v in zip(heads, row):
            if h == "Arena Elo rating":
                if v != "-":
                    v = int(ast.literal_eval(v))
                else:
                    v = np.nan
            elif h == "MMLU":
                if v != "-":
                    v = round(ast.literal_eval(v) * 100, 1)
                else:
                    v = np.nan
            elif h == "MT-bench (win rate %)":
                if v != "-":
                    # Strip the trailing "%" before parsing.
                    v = round(ast.literal_eval(v[:-1]), 1)
                else:
                    v = np.nan
            elif h == "MT-bench (score)":
                if v != "-":
                    v = round(ast.literal_eval(v), 2)
                else:
                    v = np.nan
            item[h] = v
        if add_hyperlink:
            item["Model"] = f'<a target="_blank" href="{item["Link"]}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{item["Model"]}</a>'
        rows.append(item)
    return rows
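# A minimal sketch of the CSV layout this parser expects (hypothetical values;
# only the special-cased columns above are converted, and get_full_table below
# additionally reads "key", "Model", and "Organization"):
#
#   key,Model,Link,Organization,Arena Elo rating,MMLU,MT-bench (win rate %),MT-bench (score)
#   my-merge,My Merge,https://example.com/my-merge,Team A,-,0.652,54.3%,7.81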
def get_full_table(arena_df, model_table_df):
    values = []
    for i in range(len(model_table_df)):
        row = []
        ranking = i + 1
        row.append(ranking)
        model_key = model_table_df.iloc[i]["key"]
        model_name = model_table_df.iloc[i]["Model"]
        # Model display name
        row.append(model_name)
        row.append(np.nan)
        row.append(np.nan)
        row.append(np.nan)
        # Team
        row.append(model_table_df.iloc[i]["Organization"])
        values.append(row)
    # values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
    return values
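# Each row built by get_full_table lines up positionally with the Dataframe
# headers used below: [Rank, 🤖 Model, ⭐ Task 1, 📈 Task 2, 📚 Task 3, Team].
# The three task columns are NaN placeholders until real per-task scores are
# wired in.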
key_to_category_name = {
    "full": "Overall",
}
cat_name_to_explanation = {
    "Overall": "Overall Questions",
}
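# A sketch of how another category could be surfaced in the dropdown
# (hypothetical keys; the elo_results pickle would then need a matching
# "task1" entry containing a "leaderboard_table_df"):
#
#   key_to_category_name["task1"] = "Task 1"
#   cat_name_to_explanation["Task 1"] = "Task 1 Questions"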
def build_leaderboard_tab(results_file, leaderboard_table_file, show_plot=False):
    arena_dfs = {}
    category_elo_results = {}
    if results_file is None:  # Do live update
        default_md = "Loading ..."
    else:
        with open(results_file, "rb") as fin:
            elo_results = pickle.load(fin)
        if "full" in elo_results:
            print("KEYS ", elo_results.keys())
            for k in elo_results.keys():
                if k not in key_to_category_name:
                    continue
                arena_dfs[key_to_category_name[k]] = elo_results[k]["leaderboard_table_df"]
                category_elo_results[key_to_category_name[k]] = elo_results[k]
        arena_df = arena_dfs["Overall"]
        default_md = make_default_md(arena_df, category_elo_results["Overall"])

    md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")
    if leaderboard_table_file:
        data = load_leaderboard_table_csv(leaderboard_table_file)
        model_table_df = pd.DataFrame(data)
        with gr.Tabs() as tabs:
            arena_table_vals = get_full_table(arena_df, model_table_df)
            with gr.Tab("Full leaderboard", id=0):
                md = make_arena_leaderboard_md(arena_df)
                leaderboard_markdown = gr.Markdown(md, elem_id="leaderboard_markdown")
                with gr.Row():
                    with gr.Column(scale=2):
                        category_dropdown = gr.Dropdown(
                            choices=list(arena_dfs.keys()), label="Category", value="Overall"
                        )
                        default_category_details = make_category_arena_leaderboard_md(
                            arena_df, arena_df, name="Overall"
                        )
                    with gr.Column(scale=4, variant="panel"):
                        category_deets = gr.Markdown(
                            default_category_details, elem_id="category_deets"
                        )
                display_df = gr.Dataframe(
                    headers=[
                        "Rank",
                        "🤖 Model",
                        "⭐ Task 1",
                        "📈 Task 2",
                        "📚 Task 3",
                        "Team",
                    ],
                    datatype=[
                        "number",
                        "markdown",
                        "number",
                        "number",
                        "number",
                        "str",
                    ],
                    value=arena_table_vals,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[70, 190, 110, 110, 110, 150],
                    wrap=True,
                )
                gr.Markdown(
                    """Note: .""",
                    elem_id="leaderboard_markdown",
                )
                leader_component_values[:] = [default_md]
                if not show_plot:
                    gr.Markdown(
                        """## Submit your model [here]().""",
                        elem_id="leaderboard_markdown",
                    )
                def update_leaderboard_df(arena_table_vals):
                    elo_dataframe = pd.DataFrame(
                        arena_table_vals,
                        columns=["Rank", "🤖 Model", "⭐ Task 1", "📈 Task 2", "📚 Task 3", "Team"],
                    )

                    # Goal: color the rows based on rank movement with a styler.
                    def highlight_max(s):
                        # Items containing an up arrow turn green, a down arrow red, otherwise default.
                        # str(v) guards against numeric ranks, which don't support "in".
                        return ["color: green; font-weight: bold" if "\u2191" in str(v) else "color: red; font-weight: bold" if "\u2193" in str(v) else "" for v in s]

                    def highlight_rank_max(s):
                        return ["color: green; font-weight: bold" if v > 0 else "color: red; font-weight: bold" if v < 0 else "" for v in s]

                    return elo_dataframe.style.apply(highlight_max, subset=["Rank"])
                def update_leaderboard_and_plots(category):
                    arena_subset_df = arena_dfs[category]
                    arena_subset_df = arena_subset_df[arena_subset_df["num_battles"] > 500]
                    elo_subset_results = category_elo_results[category]
                    arena_df = arena_dfs["Overall"]
                    # get_full_table takes only (arena_df, model_table_df); per-category
                    # subset filtering is not wired in yet, and "Overall" is currently
                    # the only category defined above.
                    arena_values = get_full_table(arena_df, model_table_df)
                    if category != "Overall":
                        arena_values = update_leaderboard_df(arena_values)
                    arena_values = gr.Dataframe(
                        headers=[
                            "Rank",
                            "🤖 Model",
                            "⭐ Task 1",
                            "📈 Task 2",
                            "📚 Task 3",
                            "Team",
                        ],
                        datatype=[
                            "number",
                            "markdown",
                            "number",
                            "number",
                            "number",
                            "str",
                        ],
                        value=arena_values,
                        elem_id="arena_leaderboard_dataframe",
                        height=700,
                        column_widths=[70, 190, 110, 110, 110, 150],
                        wrap=True,
                    )
                    leaderboard_md = make_category_arena_leaderboard_md(
                        arena_df, arena_subset_df, name=category
                    )
                    return arena_values, leaderboard_md

                category_dropdown.change(
                    update_leaderboard_and_plots,
                    inputs=[category_dropdown],
                    outputs=[display_df, category_deets],
                )
    with gr.Accordion(
        "📝 Citation",
        open=True,
    ):
        citation_md = """
### Citation
Please cite the following paper.
"""
        gr.Markdown(citation_md, elem_id="leaderboard_markdown")
    gr.Markdown(acknowledgment_md)

    return [md_1]
block_css = """
#notice_markdown {
font-size: 104%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#category_deets {
text-align: center;
padding: 0px;
padding-left: 5px;
}
#leaderboard_markdown {
font-size: 104%
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_header_markdown {
font-size: 104%;
text-align: center;
display:block;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
#plot-title {
text-align: center;
display:block;
}
#non-interactive-button {
display: inline-block;
padding: 10px 10px;
background-color: #f7f7f7; /* Super light grey background */
text-align: center;
font-size: 26px; /* Larger text */
border-radius: 0; /* Straight edges, no border radius */
border: 0px solid #dcdcdc; /* A light grey border to match the background */
user-select: none; /* The text inside the button is not selectable */
pointer-events: none; /* The button is non-interactive */
}
footer {
display:none !important
}
.sponsor-image-about img {
margin: 0 20px;
margin-top: 20px;
height: 40px;
max-height: 100%;
width: auto;
float: left;
}
"""
acknowledgment_md = """
### Acknowledgment
We thank []() for their generous [sponsorship]().
<div class="sponsor-image-about">
</div>
"""
def build_demo(elo_results_file, leaderboard_table_file):
    text_size = gr.themes.sizes.text_lg
    theme = gr.themes.Base(text_size=text_size)
    theme.set(
        button_secondary_background_fill_hover="*primary_300",
        button_secondary_background_fill_hover_dark="*primary_700",
    )
    with gr.Blocks(
        title="LLM Merging Leaderboard",
        theme=theme,
        # theme=gr.themes.Base.load("theme.json"),  # uncomment to use new cool theme
        css=block_css,
    ) as demo:
        leader_components = build_leaderboard_tab(
            elo_results_file, leaderboard_table_file, show_plot=True
        )
    return demo
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=7860)
    args = parser.parse_args()

    # Pick the newest results files by the number embedded in the filename
    # (e.g. elo_results_20240601.pkl -> 20240601).
    elo_result_files = glob.glob("elo_results_*.pkl")
    elo_result_files.sort(key=lambda x: int(x[12:-4]))
    elo_result_file = elo_result_files[-1]

    leaderboard_table_files = glob.glob("leaderboard_table_*.csv")
    leaderboard_table_files.sort(key=lambda x: int(x[18:-4]))
    leaderboard_table_file = leaderboard_table_files[-1]

    demo = build_demo(elo_result_file, leaderboard_table_file)
    demo.launch(share=args.share, server_name=args.host, server_port=args.port)
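# Example invocations (a sketch using only the flags defined above):
#   python app.py                 # serve on 0.0.0.0:7860 (the defaults)
#   python app.py --port 8080     # serve on a different port
#   python app.py --share         # additionally create a public Gradio link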