BenchmarkBot committed
Commit c382b2a
1 Parent(s): e0ef314

put everything in about tab

Files changed (2)
  1. app.py +5 -5
  2. src/assets/text_content.py +2 -1
app.py CHANGED
@@ -8,7 +8,7 @@ from src.assets.css_html_js import custom_css, custom_js
 from src.assets.text_content import (
     TITLE,
     INTRODUCTION_TEXT,
-    A100_TEXT,
+    ABOUT_TEXT,
     CITATION_BUTTON_LABEL,
     CITATION_BUTTON_TEXT,
 )
@@ -224,8 +224,6 @@ with demo:
     # leaderboard tabs
     with gr.Tabs(elem_classes="A100-tabs") as A100_tabs:
         with gr.TabItem("🖥️ A100-80GB Leaderboar Table 🏆", id=0):
-            gr.HTML(A100_TEXT)
-
             # Original leaderboard table
             A100_leaderboard = gr.components.Dataframe(
                 value=A100_table,
@@ -235,8 +233,6 @@ with demo:
             )
 
         with gr.TabItem("🖥️ A100-80GB Interactive Plot 📊", id=2):
-            gr.HTML(A100_TEXT)
-
             # Original leaderboard plot
             A100_plotly = gr.components.Plot(
                 value=A100_plot,
@@ -304,6 +300,9 @@ with demo:
                 elem_id="filter-button",
             )
 
+        with gr.TabItem("📖 About ❔", id=4):
+            gr.HTML(ABOUT_TEXT)
+
     demo.load(
         change_tab,
         A100_tabs,
@@ -344,3 +343,4 @@ scheduler.start()
 
 # Launch demo
 demo.queue(concurrency_count=40).launch()
+demo.queue(concurrency_count=40).launch()
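For orientation, here is a minimal, self-contained sketch of the tab layout this commit produces, written against the Gradio 3.x API the app already uses (queue(concurrency_count=...)). The tab labels, ids, and component names are taken from the diff; TITLE, ABOUT_TEXT, and the dummy table data are placeholders standing in for values that app.py builds elsewhere.

# Illustrative sketch only: the per-tab intro HTML is gone and a single
# "About" tab now renders the explanatory text.
import gradio as gr
import pandas as pd

TITLE = "<h1>LLM-Perf Leaderboard</h1>"        # placeholder, not the real TITLE
ABOUT_TEXT = "<h3>About the leaderboard</h3>"  # placeholder, not the real ABOUT_TEXT

# Dummy leaderboard data standing in for the benchmark results loaded in app.py.
A100_table = pd.DataFrame({"Model": ["example-7b"], "Throughput (tokens/s)": [30.0]})

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Tabs(elem_classes="A100-tabs") as A100_tabs:
        with gr.TabItem("🖥️ A100-80GB Leaderboard Table 🏆", id=0):
            # Table tab: only the dataframe, no gr.HTML(A100_TEXT) anymore.
            A100_leaderboard = gr.components.Dataframe(value=A100_table)
        with gr.TabItem("🖥️ A100-80GB Interactive Plot 📊", id=2):
            # Plot tab: likewise stripped of the intro HTML.
            A100_plotly = gr.components.Plot()
        with gr.TabItem("📖 About ❔", id=4):
            # New tab: all explanatory text now lives here.
            gr.HTML(ABOUT_TEXT)

if __name__ == "__main__":
    demo.queue(concurrency_count=40).launch()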
src/assets/text_content.py CHANGED
@@ -8,8 +8,9 @@ Anyone from the community can request a model or a hardware/backend/optimization
 - Hardware/Backend/Optimization performance requests should be made in the [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions) to assess their relevance and feasibility.
 """
 
-A100_TEXT = """<h3>Single-GPU Benchmark (1xA100):</h3>
+ABOUT_TEXT = """<h3>About the 🤗 Open LLM-Perf Leaderboard 🏋️</h3>
 <ul>
+    <li>To avoid communication-dependent results, only one GPU is used.</li>
     <li>LLMs are evaluated on a singleton batch and genrating a thousand tokens.</li>
     <li>Peak memory is measured in MB during the first forward pass of the LLM (no warmup).</li>
     <li>Each pair of (Model Type, Weight Class) is represented by the best scored model. This LLM is the one used for all the hardware/backend/optimization experiments.</li>
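Read as a whole, the hunk above renames the old A100_TEXT constant to ABOUT_TEXT and prepends the single-GPU note. Reconstructed from the visible context, the constant after this commit reads roughly as follows; the closing </ul> and string terminator fall outside the hunk, so that tail is an assumption.

# src/assets/text_content.py after the commit, reconstructed from the hunk above.
# The closing </ul> and triple-quote are outside the diff context and therefore assumed;
# the wording (including the "genrating" typo) is kept exactly as shown in the diff.
ABOUT_TEXT = """<h3>About the 🤗 Open LLM-Perf Leaderboard 🏋️</h3>
<ul>
    <li>To avoid communication-dependent results, only one GPU is used.</li>
    <li>LLMs are evaluated on a singleton batch and genrating a thousand tokens.</li>
    <li>Peak memory is measured in MB during the first forward pass of the LLM (no warmup).</li>
    <li>Each pair of (Model Type, Weight Class) is represented by the best scored model. This LLM is the one used for all the hardware/backend/optimization experiments.</li>
</ul>
"""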