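"""Gradio app for the Optimum LLM-Perf leaderboard.

Loads benchmark reports from the llm-perf-dataset repo and displays them as a
leaderboard table, restarting hourly so the data stays fresh.
"""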
import os

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model

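# Hub repo ids for this Space and its benchmark dataset; the token is used to
# authenticate access to the dataset repo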
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")

llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)


def get_vanilla_benchmark_df():
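    """Build the leaderboard DataFrame from the latest vanilla benchmark report."""
    # Pull the newest benchmark results if the dataset repo was cloned successfully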
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    df = pd.read_csv(
        "./llm-perf-dataset/reports/cuda_1_100/inference_report.csv")
    df = df[["model", "backend.name", "backend.torch_dtype", "backend.quantization",
             "generate.latency(s)", "generate.throughput(tokens/s)"]]
df["model"] = df["model"].apply(make_clickable_model)
    df.rename(columns={
        "model": "Model",
        "backend.name": "Backend 🏭",
        "backend.torch_dtype": "Load dtype",
        "backend.quantization": "Quantization 🗜️",
        "generate.latency(s)": "Latency (s) ⬇️",
        "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    }, inplace=True)
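    # Rank by throughput, best first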
    df.sort_values(by=["Throughput (tokens/s) ⬆️"],
                   ascending=False, inplace=True)
    return df


# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("Vanilla Benchmark", elem_id="vanilla-benchmark", id=0):
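            # Data is loaded once at startup; the hourly restart below refreshes it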
            vanilla_benchmark_df = get_vanilla_benchmark_df()
            leaderboard_table_lite = gr.components.Dataframe(
                value=vanilla_benchmark_df,
                headers=vanilla_benchmark_df.columns.tolist(),
                elem_id="vanilla-benchmark",
            )

# Restart the Space every hour so the leaderboard picks up fresh benchmark data
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
# Launch demo
demo.queue(concurrency_count=40).launch()