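"""Gradio app for the LLM-Perf leaderboard.

Pulls inference benchmark reports from the llm-perf dataset repo, merges them
with average H4 scores, and serves the result as a filterable leaderboard.
"""
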
import os
import json
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from src.assets.text_content import TITLE, INTRODUCTION_TEXT, SINGLE_A100_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.utils import restart_space, load_dataset_repo, make_clickable_model, make_clickable_score, extract_score_from_clickable
from src.assets.css_html_js import custom_css
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
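# Hub token read from the environment (None when unset); it is forwarded to the
# dataset loader below and to the scheduled Space restart at the bottom of the file.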
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
COLUMNS_MAPPING = {
    "model": "Model 🤗",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Datatype 📥",
    "average": "Average H4 Score ⬆️",
    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
}
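# Gradio datatypes for the displayed columns, in the same order as the values of
# COLUMNS_MAPPING ("markdown" columns render as clickable links); the table is
# sorted by throughput (descending) by default.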
COLUMNS_DATATYPES = ["markdown", "str", "str", "markdown", "number", "number"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)

def get_benchmark_df(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load
    bench_df = pd.read_csv(
        f"./llm-perf-dataset/reports/{benchmark}/inference_report.csv")
    scores_df = pd.read_csv(
        "./llm-perf-dataset/reports/average_scores.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")
    bench_df["average"] = bench_df["average"].apply(
        make_clickable_score)
    # preprocess
    bench_df["model"] = bench_df["model"].apply(make_clickable_model)
    # filter
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]
    # rename
    bench_df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort
    bench_df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return bench_df
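
# NOTE: make_clickable_model, make_clickable_score and extract_score_from_clickable
# are imported from src.utils. As a rough, purely illustrative sketch (the real
# implementations live in src/utils.py), they presumably wrap values in markdown
# links and recover the float back out of them, e.g.:
#
#     def make_clickable_model(name):
#         return f"[{name}](https://huggingface.co/{name})"
#
#     def make_clickable_score(score):
#         return f"[{score}](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)"
#
#     def extract_score_from_clickable(clickable):
#         return float(clickable[1:].split("]")[0])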

# def change_tab(query_param):
#     query_param = query_param.replace("'", '"')
#     query_param = json.loads(query_param)
#
#     if (
#         isinstance(query_param, dict)
#         and "tab" in query_param
#         and query_param["tab"] == "evaluation"
#     ):
#         return gr.Tabs.update(selected=1)
#     else:
#         return gr.Tabs.update(selected=0)
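# NOTE: the commented-out change_tab helper above (and the matching demo.load
# hook near the end of the file) would select a tab based on the page's URL
# query parameters; it is currently disabled, and the `json` import at the top
# of the file is only needed by this block.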


def submit_query(text, backends, datatypes, threshold, *raw_dfs):
    # the hidden, unfiltered table(s) are passed as the trailing inputs of the
    # click callback below, hence the variadic signature
    filtered_dfs = []
    for raw_df in raw_dfs:
        # extract the average score (float) from the clickable score (clickable markdown)
        raw_df["Average H4 Score ⬆️"] = raw_df["Average H4 Score ⬆️"].apply(
            extract_score_from_clickable)
        filtered_df = raw_df[
            raw_df["Model 🤗"].str.contains(text) &
            raw_df["Backend 🏭"].isin(backends) &
            raw_df["Datatype 📥"].isin(datatypes) &
            (raw_df["Average H4 Score ⬆️"] >= threshold)
        ].copy()
        filtered_df["Average H4 Score ⬆️"] = filtered_df["Average H4 Score ⬆️"].apply(
            make_clickable_score)
        filtered_dfs.append(filtered_df)

    # with a single output component, Gradio expects a single value back
    return filtered_dfs[0] if len(filtered_dfs) == 1 else filtered_dfs

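# submit_query is wired to the Submit button below: the hidden, unfiltered copy
# of the leaderboard table is fed in as its last input so that each query
# filters the full dataset rather than an already-filtered view.
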
# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Row():
        search_bar = gr.Textbox(
            label="Model 🤗",
            info="Search for a model name",
            elem_id="search-bar",
        )
        backend_checkboxes = gr.CheckboxGroup(
            label="Backends 🏭",
            choices=["pytorch", "onnxruntime"],
            value=["pytorch", "onnxruntime"],
            info="Select the backends",
            elem_id="backend-checkboxes",
        )
        datatype_checkboxes = gr.CheckboxGroup(
            label="Datatypes 📥",
            choices=["float32", "float16"],
            value=["float32", "float16"],
            info="Select the load datatypes",
            elem_id="datatype-checkboxes",
        )
    with gr.Row():
        threshold_slider = gr.Slider(
            label="Average H4 Score 📈",
            info="Filter by minimum average H4 score",
            value=0.0,
            elem_id="threshold-slider",
        )
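        # NOTE: no explicit minimum/maximum is set, so Gradio's Slider defaults
        # (0 to 100) apply, which presumably matches the scale of the average
        # H4 scores being filtered.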
    with gr.Row():
        submit_button = gr.Button(
            value="Submit 🚀",
            info="Submit the filters",
            elem_id="submit-button",
        )
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
            gr.HTML(SINGLE_A100_TEXT)

            single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
            # Original leaderboard table
            single_A100_leaderboard = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )
            # Dummy Leaderboard table for handling the case when the user uses the backspace key
            single_A100_for_search = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )

            # Callbacks
            submit_button.click(
                submit_query,
                [
                    search_bar, backend_checkboxes, datatype_checkboxes, threshold_slider,
                    single_A100_for_search,
                ],
                [single_A100_leaderboard],
            )
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)

    # dummy = gr.Textbox(visible=False)
    # demo.load(
    #     change_tab,
    #     dummy,
    #     tabs,
    #     _js=get_window_url_params,
    # )
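
# restart_space is imported from src.utils; as a rough, purely illustrative
# sketch (the real implementation lives in src/utils.py), it presumably wraps
# the huggingface_hub API along these lines:
#
#     from huggingface_hub import HfApi
#
#     def restart_space(repo_id, token):
#         HfApi().restart_space(repo_id=repo_id, token=token)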
# Restart space every hour
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()
# Launch demo
demo.queue(concurrency_count=40).launch()
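
# To run locally: `python app.py`. If pulling the dataset repo requires
# authentication, set OPTIMUM_TOKEN in the environment; the same token is used
# by the hourly restart_space job above.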