BenchmarkBot committed on
Commit
534ff40
β€’
1 Parent(s): d4acfca

added submissions protocol

Browse files
Files changed (2) hide show
  1. app.py +2 -8
  2. src/assets/text_content.py +3 -1
app.py CHANGED
@@ -58,14 +58,8 @@ with demo:
58
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
59
 
60
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
61
- with gr.TabItem("A100 Benchmark", elem_id="a100-benchmark", id=0):
62
- A100_text = "<h1>Machine: 4x A100 80GB<h1>"
63
- gr.HTML(A100_text)
64
-
65
- dataframe_text = """
66
- <h3>Batch Size: 1</h3>
67
- <h3>Generated Tokens: 100</h3>
68
- """
69
 
70
  gr.HTML(dataframe_text)
71
  benchmark_df = get_benchmark_df()
 
58
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
59
 
60
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
61
+ with gr.TabItem("4xA100-80GB Benchmark", elem_id="a100-benchmark", id=0):
62
+ dataframe_text = "<h4>Batch Size: 1 ; Generated Tokens: 100</h4>"
 
 
 
 
 
 
63
 
64
  gr.HTML(dataframe_text)
65
  benchmark_df = get_benchmark_df()
src/assets/text_content.py CHANGED
@@ -3,5 +3,7 @@ TITLE = """<h1 align="center" id="space-title">πŸ€— Open LLM-Perf Leaderboard</h
3
  INTRODUCTION_TEXT = f"""
4
 The πŸ€— Open LLM-Perf Leaderboard aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different backends and hardware using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark)
5
 
6
- πŸ€— Anyone from the community can submit a model for automated benchmarking on the πŸ€— GPU cluster, as long as it is a πŸ€— Transformers model with weights on the Hub. We also support benchmarks of models with delta-weights for non-commercial licensed models, such as LLaMa.
 
 
7
  """
 
3
  INTRODUCTION_TEXT = f"""
4
 The πŸ€— Open LLM-Perf Leaderboard aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different backends and hardware using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark)
5
 
6
+ πŸ€— Anyone from the community can submit a model or hardware+backend configuration for automated benchmarking on the πŸ€— GPU cluster.
7
+ Model submissions should be made in the [πŸ€— Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
8
+ Hardware+Backend submissions should be made in the [πŸ€— Open LLM-Perf Leaderboard's community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions).
9
  """