import os
import math
import gradio as gr
import pandas as pd
import plotly.express as px
from apscheduler.schedulers.background import BackgroundScheduler
from src.assets.text_content import (
TITLE,
INTRODUCTION_TEXT,
SINGLE_A100_TEXT,
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
)
from src.utils import (
change_tab,
restart_space,
load_dataset_repo,
make_clickable_model,
# make_clickable_score,
# num_to_str,
)
from src.assets.css_html_js import custom_css, custom_js
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
COLUMNS_MAPPING = {
"model": "Model 🤗",
"backend.name": "Backend 🏭",
"backend.torch_dtype": "Load Dtype 📥",
"optimizations": "Optimizations 🛠️",
#
"perf": "Open LLM-Perf Score ⬆️",
#
"generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
"score": "Open LLM Score ⬆️",
"forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
"num_params": "#️⃣ Parameters (M) 📏",
}
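# one datatype per leaderboard column, in the same order as COLUMNS_MAPPING:
# markdown for the clickable model link, str for categorical columns, number for metrics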
COLUMNS_DATATYPES = [
"markdown",
"str",
"str",
"str",
#
"number",
"number",
#
"number",
"number",
"number",
]
SORTING_COLUMN = ["Open LLM-Perf Score ⬆️"]
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
def get_benchmark_df(benchmark="1xA100-80GB"):
if llm_perf_dataset_repo:
llm_perf_dataset_repo.git_pull()
# load and merge
bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/open-llm-leaderboard.csv")
bench_df = bench_df.merge(scores_df, on="model", how="left")
# filter out models with no score
bench_df = bench_df[bench_df["score"].notna()]
    # create composite score: inverse of the euclidean distance to the ideal point
    # (Open LLM score of 100 and latency of 0 seconds)
    score_distance = 100 - bench_df["score"]
    latency_distance = bench_df["generate.latency(s)"]
    # element-wise square root; math.sqrt cannot operate on a pandas Series
    bench_df["perf"] = 1 / (score_distance**2 + latency_distance**2) ** 0.5
# normalize between 0 and 100
bench_df["perf"] = (
(bench_df["perf"] - bench_df["perf"].min())
/ (bench_df["perf"].max() - bench_df["perf"].min())
* 100
)
# round to 2 decimals
bench_df["perf"] = bench_df["perf"].round(2)
    # add optimizations, looked up by column name rather than by position
    bench_df["optimizations"] = bench_df[
        ["backend.bettertransformer", "backend.load_in_8bit", "backend.load_in_4bit"]
    ].apply(
        lambda row: ", ".join(
            filter(
                None,
                [
                    "BetterTransformer" if row["backend.bettertransformer"] == True else "",
                    "LLM.int8" if row["backend.load_in_8bit"] == True else "",
                    "LLM.fp4" if row["backend.load_in_4bit"] == True else "",
                ],
            )
        )
        or "None",
        axis=1,
    )
return bench_df
def get_benchmark_table(bench_df):
    # select the leaderboard columns
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]
    # rename them to their display names (on a copy, avoiding SettingWithCopyWarning)
    bench_df = bench_df.rename(columns=COLUMNS_MAPPING)
    # sort by composite score, best models first
    bench_df = bench_df.sort_values(by=SORTING_COLUMN, ascending=False)
# transform
bench_df["Model 🤗"] = bench_df["Model 🤗"].apply(make_clickable_model)
bench_df["#️⃣ Parameters (M) 📏"] = bench_df["#️⃣ Parameters 📏"].apply(
lambda x: int(x / (1024 * 1024))
)
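    # illustrative example (assuming num_params is a raw parameter count): a 7B model
    # with num_params = 7_000_000_000 is displayed as int(7_000_000_000 / (1024 * 1024)) == 6675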
return bench_df
def get_benchmark_plot(bench_df):
    # until falcon gets fixed / natively supported, filter out very slow outliers
bench_df = bench_df[bench_df["generate.latency(s)"] < 150]
fig = px.scatter(
bench_df,
x="generate.latency(s)",
y="score",
color="model_type",
symbol="backend.name",
size="forward.peak_memory(MB)",
custom_data=[
"model",
"backend.name",
"backend.torch_dtype",
"optimizations",
"forward.peak_memory(MB)",
"generate.throughput(tokens/s)",
],
symbol_sequence=["triangle-up", "circle"],
color_discrete_sequence=px.colors.qualitative.Light24,
)
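    # marker position encodes latency vs. score, color the model type,
    # symbol the backend, and marker size the peak forward-pass memory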
fig.update_layout(
title={
"text": "Model Score vs. Latency vs. Memory",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
xaxis_title="Per 1000 Tokens Latency (s)",
yaxis_title="Open LLM Score",
legend_title="Model Type and Backend",
width=1200,
height=600,
)
    fig.update_traces(
        hovertemplate="<br>".join(
[
"Model: %{customdata[0]}",
"Backend: %{customdata[1]}",
"Datatype: %{customdata[2]}",
"Optimizations: %{customdata[3]}",
"Peak Memory (MB): %{customdata[4]}",
"Throughput (tokens/s): %{customdata[5]}",
"Per 1000 Tokens Latency (s): %{x}",
"Open LLM Score: %{y}",
]
)
)
return fig
def filter_query(
text,
backends,
datatypes,
optimizations,
score,
memory,
benchmark="1xA100-80GB",
):
raw_df = get_benchmark_df(benchmark=benchmark)
filtered_df = raw_df[
raw_df["model"].str.lower().str.contains(text.lower())
& raw_df["backend.name"].isin(backends)
& raw_df["backend.torch_dtype"].isin(datatypes)
& (
pd.concat(
[
raw_df["optimizations"].str.contains(optimization)
for optimization in optimizations
],
axis=1,
).any(axis="columns")
if len(optimizations) > 0
else True
)
& (raw_df["score"] >= score)
& (raw_df["forward.peak_memory(MB)"] <= memory)
]
filtered_table = get_benchmark_table(filtered_df)
filtered_plot = get_benchmark_plot(filtered_df)
return filtered_table, filtered_plot
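# illustrative call (argument values are assumptions, not necessarily present in the dataset):
# filter_query("llama", backends=["pytorch"], datatypes=["float16"], optimizations=[],
# score=50, memory=80000) keeps float16 PyTorch models whose name contains "llama",
# with an Open LLM score of at least 50 and peak memory of at most 80000 MB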
# Dataframes
single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
single_A100_table = get_benchmark_table(single_A100_df)
single_A100_plot = get_benchmark_plot(single_A100_df)
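# table and plot are computed once at startup to populate the demo before any filtering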
# Demo interface
demo = gr.Blocks(css=custom_css)
with demo:
# leaderboard title
gr.HTML(TITLE)
# introduction text
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
# control panel title
gr.HTML("