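"""Gradio app for the LLM-Perf leaderboard: displays inference benchmark reports pulled from the llm-perf-dataset repo."""
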
import os
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model


LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
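# Hugging Face access token, used to pull the dataset repo and restart the Space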
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")

# raw column names in the benchmark report
OLD_COLUMNS = ["model", "backend.name", "backend.torch_dtype",
               "generate.latency(s)", "generate.throughput(tokens/s)"]

# display names shown in the leaderboard
NEW_COLUMNS = ["Model", "Backend 🏭", "Load Datatype",
               "Latency (s) ⬇️", "Throughput (tokens/s) ⬆️"]

# Gradio datatypes per column; "markdown" renders the clickable model links
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number"]

# column used to sort the leaderboard (higher throughput is better)
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]


# local copy of the dataset repo containing the benchmark reports
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)


def get_benchmark_df():
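    """Pull the latest benchmark reports and build the leaderboard dataframe."""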
    # pull the latest reports before reading them
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the inference report (batch size 1, 100 generated tokens)
    df = pd.read_csv(
        "./llm-perf-dataset/reports/cuda_1_100/inference_report.csv")

    # remove quantized models
    df = df[df["backend.quantization"].isnull()]

    # render model names as clickable links
    df["model"] = df["model"].apply(make_clickable_model)

    # keep only the columns displayed in the leaderboard
    df = df[OLD_COLUMNS]

    # rename to the display column names
    df.rename(columns=dict(zip(OLD_COLUMNS, NEW_COLUMNS)), inplace=True)

    # sort by throughput, best first
    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return df


# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ 4xA100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
            dataframe_text = "<h4>Batch Size: 1; Generated Tokens: 100</h4>"

            gr.HTML(dataframe_text)
            benchmark_df = get_benchmark_df()
            leaderboard_table_lite = gr.components.Dataframe(
                value=benchmark_df,
                datatype=COLUMNS_DATATYPES,
                headers=NEW_COLUMNS,
                elem_id="pytorch-a100-benchmark",
            )


# Restart space every hour
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()

# Launch demo
demo.queue(concurrency_count=40).launch()