BenchmarkBot committed
Commit 708b21b • 1 Parent(s): e2c5bda
Files changed (1)
  1. app.py +28 -30
app.py CHANGED
@@ -10,7 +10,7 @@ from src.utils import restart_space, load_dataset_repo, make_clickable_model
 
 LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
 LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
-OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")
+OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
 
 COLUMNS_MAPPING = {
     "model": "Model 🤗",
@@ -36,7 +36,7 @@ def get_benchmark_df(benchmark):
     # preprocess
     df["model"] = df["model"].apply(make_clickable_model)
     # filter
-    df = df[COLUMNS_MAPPING.keys()]
+    df = df[list(COLUMNS_MAPPING.keys())]
     # rename
     df.rename(columns=COLUMNS_MAPPING, inplace=True)
     # sort
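The dict views returned by COLUMNS_MAPPING.keys() and COLUMNS_MAPPING.values() are now wrapped in list() before being handed to pandas (and, further down, to the Gradio Dataframe headers), since a plain list of labels is accepted unambiguously across library versions, while support for dict views varies. A minimal sketch of the filter-and-rename step; the toy frame and its columns are illustrative, not the real benchmark data:

import pandas as pd

# trimmed mapping; the real COLUMNS_MAPPING has one entry per displayed column
COLUMNS_MAPPING = {"model": "Model 🤗"}

# toy frame standing in for the loaded benchmark dataframe
df = pd.DataFrame({"model": ["gpt2"], "latency_ms": [12.3]})

# keep only the mapped columns (list() turns the dict view into a plain list of labels),
# then rename them to their display names
df = df[list(COLUMNS_MAPPING.keys())]
df.rename(columns=COLUMNS_MAPPING, inplace=True)
print(df.columns.tolist())  # ['Model 🤗']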
@@ -52,41 +52,39 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.Row():
-            with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
-
-                SINGLE_A100_TEXT = """<h3>Single-GPU (1xA100):</h3>
-                <ul>
-                <li>Singleton Batch (1)</li>
-                <li>Thousand Tokens (1000)</li>
-                </ul>
-                """
-                gr.HTML(SINGLE_A100_TEXT)
-
-                single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
-                leaderboard_table_lite = gr.components.Dataframe(
-                    value=single_A100_df,
-                    datatype=COLUMNS_DATATYPES,
-                    headers=COLUMNS_MAPPING.values(),
-                    elem_id="1xA100-table",
-                )
-
-        with gr.Row():
-            MULTI_A100_TEXT = """<h3>Multi-GPU (4xA100):</h3>
+        with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
+
+            SINGLE_A100_TEXT = """<h3>Single-GPU (1xA100):</h3>
             <ul>
             <li>Singleton Batch (1)</li>
             <li>Thousand Tokens (1000)</li>
-            </ul>"""
-            gr.HTML(MULTI_A100_TEXT)
+            </ul>
+            """
+            gr.HTML(SINGLE_A100_TEXT)
 
-            multi_A100_df = get_benchmark_df(benchmark="4xA100-80GB")
-            leaderboard_table_full = gr.components.Dataframe(
-                value=multi_A100_df,
+            single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
+            leaderboard_table_lite = gr.components.Dataframe(
+                value=single_A100_df,
                 datatype=COLUMNS_DATATYPES,
-                headers=COLUMNS_MAPPING.values(),
-                elem_id="4xA100-table",
+                headers=list(COLUMNS_MAPPING.values()),
+                elem_id="1xA100-table",
             )
 
+            MULTI_A100_TEXT = """<h3>Multi-GPU (4xA100):</h3>
+            <ul>
+            <li>Singleton Batch (1)</li>
+            <li>Thousand Tokens (1000)</li>
+            </ul>"""
+            gr.HTML(MULTI_A100_TEXT)
+
+            multi_A100_df = get_benchmark_df(benchmark="4xA100-80GB")
+            leaderboard_table_full = gr.components.Dataframe(
+                value=multi_A100_df,
+                datatype=COLUMNS_DATATYPES,
+                headers=list(COLUMNS_MAPPING.values()),
+                elem_id="4xA100-table",
+            )
+
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
             citation_button = gr.Textbox(
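The layout change in this last hunk removes the gr.Row() wrappers and places both the single-GPU and multi-GPU tables directly under the one A100 tab item, with the Dataframe headers passed as a plain list. A self-contained sketch of the resulting structure, with a placeholder frame standing in for get_benchmark_df() and a trimmed one-column mapping (the placeholder data, the gr.Blocks() construction, and the launch call are illustrative, not part of the commit):

import gradio as gr
import pandas as pd

COLUMNS_MAPPING = {"model": "Model 🤗"}   # trimmed; one entry per displayed column in the real app
COLUMNS_DATATYPES = ["markdown"]          # illustrative; one datatype per column
placeholder_df = pd.DataFrame({"Model 🤗": ["gpt2"]})  # stands in for get_benchmark_df(...)

with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
            gr.HTML("<h3>Single-GPU (1xA100):</h3>")
            gr.components.Dataframe(
                value=placeholder_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )
            gr.HTML("<h3>Multi-GPU (4xA100):</h3>")
            gr.components.Dataframe(
                value=placeholder_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="4xA100-table",
            )

demo.launch()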