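"""Gradio app for the LLM-Perf Leaderboard.

Displays inference benchmark reports (latency, throughput) from the
llm-perf-dataset repo for single-GPU (1xA100) and multi-GPU (4xA100)
machines, with model search and (planned) backend filtering.
"""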
import os
import json

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)

# raw report columns -> display names
COLUMNS_MAPPING = {
    "model": "Model πŸ€—",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Load Datatype πŸ“₯",
    "generate.latency(s)": "Latency (s) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
}
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]

# clone (or reuse) the dataset repo that holds the benchmark reports
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
def get_benchmark_df(benchmark):
    # pull the latest reports before reading them
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the inference report for the requested machine
    df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}/inference_report.csv")
    # turn model names into clickable Hub links
    df["model"] = df["model"].apply(make_clickable_model)
    # keep only the displayed columns and give them human-readable names
    df = df[list(COLUMNS_MAPPING.keys())]
    df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort by throughput, fastest first
    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return df
def change_tab(query_param):
    # the window URL query params arrive as a single-quoted JSON-ish string
    query_param = query_param.replace("'", '"')
    query_param = json.loads(query_param)

    # ?tab=evaluation selects the second tab; anything else keeps the first
    if (
        isinstance(query_param, dict)
        and "tab" in query_param
        and query_param["tab"] == "evaluation"
    ):
        return gr.Tabs.update(selected=1)
    else:
        return gr.Tabs.update(selected=0)
def search_tables(single_df, multi_df, query):
    # substring match on the model column of both tables
    filtered_single = single_df[single_df["Model πŸ€—"].str.contains(query)]
    filtered_multi = multi_df[multi_df["Model πŸ€—"].str.contains(query)]
    return filtered_single, filtered_multi


def filter_table(df, backends):
    # backend names are stored lowercase in the reports
    filtered_df = df[df["Backend 🏭"].isin([backend.lower() for backend in backends])]
    return filtered_df
# Define the demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Row():
        with gr.Column():
            with gr.Box(elem_id="search-bar-table-box"):
                search_bar = gr.Textbox(
                    label="Search πŸ”Ž",
                    placeholder="Search for your model and press ENTER...",
                    show_label=False,
                    elem_id="search-bar",
                )
        with gr.Column():
            with gr.Box(elem_id="backend-checkboxes-box"):
                backend_checkboxes = gr.CheckboxGroup(
                    ["Pytorch", "OnnxRuntime"],
                    label="Backends 🏭",
                    info="Select the backends you want to compare",
                    elem_id="backend-checkboxes",
                )
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ–₯️ A100-80GB Benchmark πŸ‹οΈ", elem_id="A100-benchmark", id=0):
            SINGLE_A100_TEXT = """<h3>Single-GPU (1xA100):</h3>
            <ul>
                <li>Batch size: 1</li>
                <li>Generated tokens: 1000</li>
            </ul>
            """
            gr.HTML(SINGLE_A100_TEXT)

            single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
            # visible leaderboard table
            single_A100_leaderboard = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )
            # hidden, unfiltered copy used as the search source, so that
            # clearing the query (e.g. with backspace) restores all rows
            single_A100_for_search = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )

            MULTI_A100_TEXT = """<h3>Multi-GPU (4xA100):</h3>
            <ul>
                <li>Batch size: 1</li>
                <li>Generated tokens: 1000</li>
            </ul>"""
            gr.HTML(MULTI_A100_TEXT)

            multi_A100_df = get_benchmark_df(benchmark="4xA100-80GB")
            # visible leaderboard table
            multi_A100_leaderboard = gr.components.Dataframe(
                value=multi_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="4xA100-table",
            )
            # hidden, unfiltered copy used as the search source (see above)
            multi_A100_for_search = gr.components.Dataframe(
                value=multi_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )
    # search both tables (single- and multi-GPU) from the hidden full copies
    search_bar.submit(
        search_tables,
        [single_A100_for_search, multi_A100_for_search, search_bar],
        [single_A100_leaderboard, multi_A100_leaderboard],
    )
    # backend filtering is not wired up yet
    # backend_checkboxes.select(
    #     filter_table,
    #     [single_A100_for_search, backend_checkboxes],
    #     multi_A100_leaderboard,
    # )
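    # a possible wiring for backend filtering (untested sketch, assuming
    # CheckboxGroup.change is available in this Gradio version): filter
    # both hidden full copies and update both visible tables
    # backend_checkboxes.change(
    #     lambda s_df, m_df, backends: (
    #         filter_table(s_df, backends),
    #         filter_table(m_df, backends),
    #     ),
    #     [single_A100_for_search, multi_A100_for_search, backend_checkboxes],
    #     [single_A100_leaderboard, multi_A100_leaderboard],
    # )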
    with gr.Row():
        with gr.Accordion("πŸ“™ Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)
    # hidden textbox that receives the window URL params on page load,
    # so the tab requested in the URL can be selected
    dummy = gr.Textbox(visible=False)
    demo.load(
        change_tab,
        dummy,
        tabs,
        _js=get_window_url_params,
    )
# Restart space every hour
scheduler = BackgroundScheduler()
scheduler.add_job(
    restart_space,
    "interval",
    seconds=3600,
    args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN],
)
scheduler.start()

# Launch demo
demo.queue(concurrency_count=40).launch()