llm-perf-leaderboard / src /leaderboard.py
IlyasMoutawwakil's picture
update
ab5f5f1
raw
history blame
1.77 kB
import gradio as gr
from src.utils import model_hyperlink, process_score
# Ordered mapping from leaderboard column header to the gradio Dataframe
# datatype used to render it ("markdown" cells are clickable, "number"
# cells sort numerically, "str" cells are plain text).
LEADERBOARD_COLUMN_TO_DATATYPE = {
    # Open LLM leaderboard identity columns
    "Model πŸ€—": "markdown",
    "Arch πŸ›οΈ": "markdown",
    "Params (B)": "number",
    "Open LLM Score (%)": "number",
    # deployment settings
    "DType πŸ“₯": "str",
    "Backend 🏭": "str",
    "Optimization πŸ› οΈ": "str",
    "Quantization πŸ—œοΈ": "str",
    # primary measurements
    "Prefill Latency (s)": "number",
    "Decode Throughput (tokens/s)": "number",
    "Allocated Memory (MB)": "number",
    "Energy (tokens/kWh)": "number",
    # additional measurements
    "E2E Latency (s)": "number",
    "E2E Throughput (tokens/s)": "number",
    "Reserved Memory (MB)": "number",
    "Used Memory (MB)": "number",
}
def process_model(model_name):
    """Turn a raw model id into a markdown link to its Hugging Face page."""
    hub_url = f"https://huggingface.co/{model_name}"
    return model_hyperlink(hub_url, model_name)
def get_leaderboard_df(llm_perf_df):
    """Return a display-ready copy of the raw benchmark dataframe.

    Model names are turned into markdown hyperlinks and the Open LLM score
    is post-processed together with each row's quantization setting.
    The input dataframe is not modified.
    """
    leaderboard = llm_perf_df.copy()
    # linkify model names for the markdown column
    leaderboard["Model πŸ€—"] = leaderboard["Model πŸ€—"].map(process_model)
    # combine the score with the row's quantization setting
    leaderboard["Open LLM Score (%)"] = leaderboard.apply(
        lambda row: process_score(row["Open LLM Score (%)"], row["Quantization πŸ—œοΈ"]),
        axis=1,
    )
    return leaderboard
def create_leaderboard_table(llm_perf_df):
    """Render the leaderboard as a gradio Dataframe component and return it."""
    # hint that more columns are available by horizontal scrolling
    gr.HTML("πŸ‘‰ Scroll to the right πŸ‘‰ for additional columns.", elem_id="text")
    # build the component from the processed dataframe, with one header
    # and datatype per leaderboard column
    return gr.components.Dataframe(
        value=get_leaderboard_df(llm_perf_df),
        datatype=[*LEADERBOARD_COLUMN_TO_DATATYPE.values()],
        headers=[*LEADERBOARD_COLUMN_TO_DATATYPE.keys()],
        elem_id="table",
    )