BenchmarkBot committed on
Commit
8dc4b22
β€’
1 Parent(s): 5919d6a
Files changed (2) hide show
  1. app.py +1 -1
  2. src/assets/text_content.py +5 -6
app.py CHANGED
@@ -61,7 +61,7 @@ with demo:
61
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
62
 
63
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
64
- with gr.TabItem("4xA100-80GB Benchmark", elem_id="a100-benchmark", id=0):
65
  dataframe_text = "<h4>Batch Size: 1 ; Generated Tokens: 100</h4>"
66
 
67
  gr.HTML(dataframe_text)
 
61
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
62
 
63
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
64
+ with gr.TabItem("πŸ“Š 4xA100-80GB Benchmark πŸ‹οΈ", elem_id="a100-benchmark", id=0):
65
  dataframe_text = "<h4>Batch Size: 1 ; Generated Tokens: 100</h4>"
66
 
67
  gr.HTML(dataframe_text)
src/assets/text_content.py CHANGED
@@ -1,9 +1,8 @@
1
- TITLE = """<h1 align="center" id="space-title">πŸ€— Open LLM-Perf Leaderboard</h1>"""
2
 
3
  INTRODUCTION_TEXT = f"""
4
- The πŸ€— Open LLM-Perf Leaderboard aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different backends and hardwares using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark)
5
-
6
- πŸ€— Anyone from the community can submit a model or hardware+backend configuration for automated benchmarking on the πŸ€— GPU cluster.
7
- Model submissions should be made in the [πŸ€— Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
8
- Hardware+Backend submissions should be made in the [πŸ€— Open LLM-Perf Leaderboard's community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions).
9
  """
 
1
+ TITLE = """<h1 align="center" id="space-title">πŸ€— Open LLM-Perf Leaderboard πŸ‹οΈ</h1>"""
2
 
3
  INTRODUCTION_TEXT = f"""
4
+ The πŸ€— Open LLM-Perf Leaderboard πŸ‹οΈ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardwares and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark).
5
+ Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
6
+ - Model submissions should be made in the [πŸ€— Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the πŸ€— Open LLM-Perf Leaderboard πŸ‹οΈ if they're accepted.
7
+ - Hardware+Backend submissions should be made in the πŸ€— Open LLM-Perf Leaderboard πŸ‹οΈ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions).
 
8
  """