import plotly.express as px
import os
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT, SINGLE_A100_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.utils import restart_space, load_dataset_repo, make_clickable_model, make_clickable_score, extract_score_from_clickable
from src.assets.css_html_js import custom_css
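
# make_clickable_model / make_clickable_score presumably wrap a value in a
# markdown link (rendered by the "markdown" dataframe columns below), and
# extract_score_from_clickable recovers the raw numeric score from that
# link so it can be compared against the threshold slider.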


LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
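# token used to pull the dataset repo and to restart the space,
# expected to be provided via the environment (e.g. a space secret)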

COLUMNS_MAPPING = {
    "model": "Model πŸ€—",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Datatype πŸ“₯",
    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    "h4_score": "H4 Score ⬆️",
}
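# one datatype per column, in the same order as COLUMNS_MAPPING:
# "markdown" columns render the clickable model and score links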
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number", "markdown"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]


llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
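# local clone of the dataset repo, assumed to be checked out under
# ./llm-perf-dataset (None if cloning failed, hence the guards below)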


def get_benchmark_df(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load benchmark results and the additional H4 scores, then join them
    bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/additional_data.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")

    # preprocess
    bench_df["model"] = bench_df["model"].apply(make_clickable_model)
    bench_df["h4_score"] = bench_df["h4_score"].apply(make_clickable_score)
    # filter
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]
    # rename
    bench_df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort
    bench_df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return bench_df


# Dataframes
single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")


def get_benchmark_plot(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load benchmark results and the additional H4 scores, then join them
    bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/additional_data.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")

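    # the custom_data ordering below must line up with the %{customdata[i]}
    # indices used in the hover template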
    fig = px.scatter(
        bench_df, x="h4_score", y="generate.latency(s)",
        color='model_type', symbol='backend.name', size='forward.peak_memory(MB)',
        custom_data=['model', 'backend.name', 'backend.torch_dtype',
                     'forward.peak_memory(MB)', 'generate.throughput(tokens/s)'],
    )

    # title, axes and legend are layout-level settings, while the hover
    # template is a per-trace property
    fig.update_layout(
        title={
            'text': "Model Score vs. Latency vs. Memory",
            'y': 0.95, 'x': 0.5,
            'xanchor': 'center',
            'yanchor': 'top'
        },
        xaxis_title="Average H4 Score",
        yaxis_title="Latency per 1000 Tokens (s)",
        legend_title="Model Type",
        legend=dict(
            orientation="h",
            yanchor="middle",
            xanchor="center",
            y=-0.15,
            x=0.5
        ),
    )
    fig.update_traces(
        hovertemplate="<br>".join([
            "Model: %{customdata[0]}",
            "Backend: %{customdata[1]}",
            "Datatype: %{customdata[2]}",
            "Peak Memory (MB): %{customdata[3]}",
            "Throughput (tokens/s): %{customdata[4]}",
            "Latency per 1000 Tokens (s): %{y}",
            "Average H4 Score: %{x}"
        ])
    )

    return fig


# Plots
single_A100_plot = get_benchmark_plot(benchmark="1xA100-80GB")

# Demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    # leaderboard title
    gr.HTML(TITLE)

    # introduction text
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    # control panel title
    gr.HTML("<h2>Control Panel πŸŽ›οΈ</h2>")

    # control panel interface
    with gr.Row():
        search_bar = gr.Textbox(
            label="Model πŸ€—",
            info="πŸ” Search for a model name",
            elem_id="search-bar",
        )
        backend_checkboxes = gr.CheckboxGroup(
            label="Backends 🏭",
            choices=["pytorch", "onnxruntime"],
            value=["pytorch", "onnxruntime"],
            info="β˜‘οΈ Select the backends",
            elem_id="backend-checkboxes",
        )
        datatype_checkboxes = gr.CheckboxGroup(
            label="Datatypes πŸ“₯",
            choices=["float32", "float16"],
            value=["float32", "float16"],
            info="β˜‘οΈ Select the load datatypes",
            elem_id="datatype-checkboxes",
        )
        threshold_slider = gr.Slider(
            label="Average H4 Score πŸ“ˆ",
            info="lter by minimum average H4 score",
            value=0.0,
            elem_id="threshold-slider",
        )

    with gr.Row():
        submit_button = gr.Button(
            value="Submit πŸš€",
            elem_id="submit-button",
        )

    # leaderboard tabs
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ–₯️ A100-80GB Leaderboard πŸ†", id=0):
            gr.HTML(SINGLE_A100_TEXT)

            # Original leaderboard table
            single_A100_leaderboard = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )
            # Hidden, unfiltered copy of the leaderboard, used as the source
            # dataframe for every query (e.g. when backspace clears the search box)
            single_A100_for_search = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )

        with gr.TabItem("πŸ–₯️ A100-80GB Plot πŸ“ˆ", id=1):
            # Original leaderboard plot
            gr.HTML(SINGLE_A100_TEXT)

            single_A100_plotly = gr.components.Plot(
                value=single_A100_plot,
                elem_id="1xA100-plot",
                show_label=False,
            )

    with gr.Row():
        with gr.Accordion("πŸ“™ Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)


def submit_query(text, backends, datatypes, threshold, raw_df):
    # work on a copy so the hidden source dataframe is never mutated
    raw_df = raw_df.copy()
    raw_df["H4 Score ⬆️"] = raw_df["H4 Score ⬆️"].apply(
        extract_score_from_clickable)

    filtered_df = raw_df[
        raw_df["Model πŸ€—"].str.lower().str.contains(text.lower()) &
        raw_df["Backend 🏭"].isin(backends) &
        raw_df["Datatype πŸ“₯"].isin(datatypes) &
        (raw_df["H4 Score ⬆️"] >= threshold)
    ].copy()  # copy to avoid pandas' SettingWithCopyWarning below

    # restore the clickable markdown format before displaying
    filtered_df["H4 Score ⬆️"] = filtered_df["H4 Score ⬆️"].apply(
        make_clickable_score)
    return filtered_df


# Callbacks
submit_button.click(
    submit_query,
    [
        search_bar, backend_checkboxes, datatype_checkboxes, threshold_slider,
        single_A100_for_search
    ],
    [single_A100_leaderboard]
)

# Restart the space every hour so the leaderboard is rebuilt from the latest reports
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()

# Launch demo
demo.queue(concurrency_count=40).launch()