import os
import gradio as gr
import pandas as pd
import plotly.express as px
from apscheduler.schedulers.background import BackgroundScheduler
from src.assets.text_content import TITLE, INTRODUCTION_TEXT, SINGLE_A100_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.utils import restart_space, load_dataset_repo, make_clickable_model, make_clickable_score, num_to_str
from src.assets.css_html_js import custom_css
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
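# token passed to load_dataset_repo to pull the llm-perf dataset (None when not set)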
COLUMNS_MAPPING = {
"model": "Model 🤗",
"backend.name": "Backend 🏭",
"backend.torch_dtype": "Load Dtype 📥",
"num_parameters": "#Parameters 📏",
"forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
"generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
"average": "Average Open LLM Score ⬆️",
}
COLUMNS_DATATYPES = ["markdown", "str", "str",
                     "number", "number", "number", "markdown"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
def get_benchmark_df(benchmark="1xA100-80GB"):
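    """Load the benchmark report for the given hardware setup and merge it with the Open LLM scores."""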
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the benchmark report and the additional Open LLM scores, then merge them on model name
    bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/additional_data.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")

    return bench_df
def get_benchmark_table(bench_df):
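    """Format a benchmark dataframe for display: select, rename, sort and linkify its columns."""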
    # keep only the columns we display
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]
    # rename to the human-readable column names
    bench_df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort by throughput, fastest first
    bench_df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)
    # transform raw values into clickable links / formatted strings
    bench_df["Model 🤗"] = bench_df["Model 🤗"].apply(make_clickable_model)
    bench_df["Average Open LLM Score ⬆️"] = bench_df["Average Open LLM Score ⬆️"].apply(make_clickable_score)
    bench_df["#Parameters 📏"] = bench_df["#Parameters 📏"].apply(num_to_str)

    return bench_df
def get_benchmark_plot(bench_df):
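    """Build a scatter plot of average Open LLM score vs. generation latency, sized by peak memory."""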
    # until falcon gets fixed / natively supported
    bench_df = bench_df[bench_df["generate.latency(s)"] < 100]

    fig = px.scatter(
        bench_df, x="generate.latency(s)", y="average",
        color='model_type', symbol='backend.name', size='forward.peak_memory(MB)',
        custom_data=['model', 'backend.name', 'backend.torch_dtype',
                     'forward.peak_memory(MB)', 'generate.throughput(tokens/s)'],
        symbol_sequence=['triangle-up', 'circle'],
        # as many distinct colors as there are (model_type, backend.name) couples
        color_discrete_sequence=px.colors.qualitative.Light24,
    )
    fig.update_layout(
        title={
            'text': "Model Score vs. Latency vs. Memory",
            'y': 0.95, 'x': 0.5,
            'xanchor': 'center',
            'yanchor': 'top'
        },
        xaxis_title="Per 1000 Tokens Latency (s)",
        yaxis_title="Average Open LLM Score",
        legend_title="Model Type and Backend",
        width=1200,
        height=600,
        # legend=dict(
        #     orientation="h",
        #     yanchor="bottom",
        #     y=-0.35,
        #     xanchor="center",
        #     x=0.5
        # )
    )
    fig.update_traces(
        # plotly hovertemplates use <br> as the line separator
        hovertemplate="<br>".join([
            "Model: %{customdata[0]}",
            "Backend: %{customdata[1]}",
            "Datatype: %{customdata[2]}",
            "Peak Memory (MB): %{customdata[3]}",
            "Throughput (tokens/s): %{customdata[4]}",
            # x is latency and y is the average score (matching the axis titles above)
            "Per 1000 Tokens Latency (s): %{x}",
            "Average Open LLM Score: %{y}",
        ])
    )

    return fig
def filter_query(text, backends, datatypes, threshold, benchmark="1xA100-80GB"):
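    """Re-load the benchmark and filter it by model name, backends, dtypes and minimum average score."""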
    raw_df = get_benchmark_df(benchmark=benchmark)
    filtered_df = raw_df[
        raw_df["model"].str.lower().str.contains(text.lower()) &
        raw_df["backend.name"].isin(backends) &
        raw_df["backend.torch_dtype"].isin(datatypes) &
        (raw_df["average"] >= threshold)
    ]
    filtered_table = get_benchmark_table(filtered_df)
    filtered_plot = get_benchmark_plot(filtered_df)

    return filtered_table, filtered_plot
# Dataframes
single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
single_A100_table = get_benchmark_table(single_A100_df)
single_A100_plot = get_benchmark_plot(single_A100_df)
# Demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    # leaderboard title
    gr.HTML(TITLE)
    # introduction text
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    # control panel title
    gr.HTML("