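"""Gradio app for the LLM-Perf leaderboard Space: displays inference latency and
throughput benchmarks loaded from the llm-perf-dataset repo."""
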
import os
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from src.assets.text_content import TITLE, INTRODUCTION_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model
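
# Hub repos for the leaderboard Space and the benchmark dataset; the access token is read from the environment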
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")
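
# Raw report columns and the display names/types they are shown under in the leaderboard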
OLD_COLUMNS = ["model", "backend.name", "backend.torch_dtype",
               "generate.latency(s)", "generate.throughput(tokens/s)"]
NEW_COLUMNS = ["Model", "Backend 🏭", "Load Datatype",
               "Latency (s) ⬇️", "Throughput (tokens/s) ⬆️"]
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
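
# Local clone of the benchmark dataset repo (only pulled below when successfully loaded)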
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)


def get_benchmark_df():
    # pull the latest benchmark reports if the dataset repo was loaded
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the inference report (batch size 1, 100 generated tokens, CUDA)
    df = pd.read_csv(
        "./llm-perf-dataset/reports/cuda_1_100/inference_report.csv")
    # remove quantized models
    df = df[df["backend.quantization"].isnull()]
    # turn model names into clickable links
    df["model"] = df["model"].apply(make_clickable_model)
    # keep only the reported columns
    df = df[OLD_COLUMNS]
    # rename them to their display names
    df.rename(columns={
        df_col: rename_col for df_col, rename_col in zip(OLD_COLUMNS, NEW_COLUMNS)
    }, inplace=True)
    # sort by descending throughput
    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return df


# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ 4xA100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
            dataframe_text = "<h4>Batch Size: 1 ; Generated Tokens: 100</h4>"
            gr.HTML(dataframe_text)

            benchmark_df = get_benchmark_df()
            leaderboard_table_lite = gr.components.Dataframe(
                value=benchmark_df,
                datatype=COLUMNS_DATATYPES,
                headers=NEW_COLUMNS,
                elem_id="pytorch-a100-benchmark",
            )

# Restart space every hour
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()

# Launch demo
demo.queue(concurrency_count=40).launch()