Commit 9dc4521 • 1 Parent(s): 172f670
BenchmarkBot committed

added citation

Files changed (2):
  1. app.py +12 -4
  2. src/assets/text_content.py +17 -3
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 
-from src.assets.text_content import TITLE, INTRODUCTION_TEXT
+from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
 from src.assets.css_html_js import custom_css, get_window_url_params
 from src.utils import restart_space, load_dataset_repo, make_clickable_model
 
@@ -61,8 +61,8 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🖥️ 4xA100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
-            dataframe_text = "<h4>Batch Size: 1 ; Generated Tokens: 100</h4>"
+        with gr.TabItem("📊 A100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
+            dataframe_text = "<h4>Specification:\nSingle and Multi-GPU Setup\nBatch Size: 1\nGenerated Tokens: 100</h4>"
 
             gr.HTML(dataframe_text)
             benchmark_df = get_benchmark_df()
@@ -70,9 +70,17 @@ with demo:
                 value=benchmark_df,
                 datatype=COLUMNS_DATATYPES,
                 headers=NEW_COLUMNS,
-                elem_id="pytorch-a100-benchmark",
+                elem_id="pytorch-A100-benchmark",
             )
 
+    with gr.Row():
+        with gr.Column():
+            with gr.Accordion("📙 Citation", open=False):
+                citation_button = gr.Textbox(
+                    value=CITATION_BUTTON_TEXT,
+                    label=CITATION_BUTTON_LABEL,
+                    elem_id="citation-button",
+                ).style(show_copy_button=True)
 
 # Restart space every hour
 scheduler = BackgroundScheduler()
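
The app.py hunk above ends where the hourly restart is wired up. For context, a minimal sketch of that APScheduler pattern; the body of restart_space is not shown in this commit, so the huggingface_hub call and the HF_TOKEN handling below are assumptions:

import os

from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi


def restart_space():
    # Hypothetical stand-in for src.utils.restart_space: restarts the Space
    # so the leaderboard reloads fresh benchmark results.
    HfApi(token=os.environ.get("HF_TOKEN")).restart_space(
        repo_id="optimum/llm-perf-leaderboard"
    )


scheduler = BackgroundScheduler()
# Matches the "Restart space every hour" comment: run once every 3600 seconds.
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
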
src/assets/text_content.py CHANGED
@@ -1,8 +1,22 @@
 TITLE = """<h1 align="center" id="space-title">🤗 Open LLM-Perf Leaderboard 🏋️</h1>"""
 
 INTRODUCTION_TEXT = f"""
-The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardware and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark).
+The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardware and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark) and [Optimum](https://github.com/huggingface/optimum) flavors.
 Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
-- Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ if they're accepted.
-- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions).
+- Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ once they're publicly available.
+- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions); an automated process will be set up soon to allow for direct submissions.
 """
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""@misc{open-llm-perf-leaderboard,
+  author = {Ilyas Moutawwakil},
+  title = {Open LLM-Perf Leaderboard},
+  year = {2023},
+  publisher = {Hugging Face},
+  howpublished = {\url{https://huggingface.co/spaces/optimum/open-llm-perf-leaderboard}},
+}
+@software{optimum-benchmark,
+  author = {Ilyas Moutawwakil},
+  title = {A framework for benchmarking the performance of Transformers models},
+}
+"""