BenchmarkBot committed
Commit
d4acfca
1 Parent(s): 6640b32

added dataframe text

Files changed (1)
  1. app.py +12 -7
app.py CHANGED
@@ -26,7 +26,7 @@ SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
 llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
 
 
-def get_vanilla_benchmark_df():
+def get_benchmark_df():
     if llm_perf_dataset_repo:
         llm_perf_dataset_repo.git_pull()
 
@@ -58,17 +58,22 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("A100 Benchmark", elem_id="vanilla-benchmark", id=0):
-            vanilla_benchmark_df = get_vanilla_benchmark_df()
-
-            A100_text = "Machine: 4x A100 80GB<br>Batches: 1<br>Number of tokens: 100<br>"
+        with gr.TabItem("A100 Benchmark", elem_id="a100-benchmark", id=0):
+            A100_text = "<h1>Machine: 4x A100 80GB<h1>"
             gr.HTML(A100_text)
 
+            dataframe_text = """
+            <h3>Batch Size: 1</h3>
+            <h3>Generated Tokens: 100</h3>
+            """
+
+            gr.HTML(dataframe_text)
+            benchmark_df = get_benchmark_df()
             leaderboard_table_lite = gr.components.Dataframe(
-                value=vanilla_benchmark_df,
+                value=benchmark_df,
                 datatype=COLUMNS_DATATYPES,
                 headers=NEW_COLUMNS,
-                elem_id="vanilla-benchmark",
+                elem_id="pytorch-a100-benchmark",
            )
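
For orientation, a minimal sketch of how the renamed get_benchmark_df plausibly reads in full. Only the git_pull guard is visible in the hunk above; the stand-in repo object, the CSV file name, and the pandas read are illustrative assumptions, not code from this commit.

import os

import pandas as pd

# Stand-in: in app.py this comes from load_dataset_repo(LLM_PERF_DATASET_REPO,
# OPTIMUM_TOKEN) and behaves like a huggingface_hub Repository clone.
llm_perf_dataset_repo = None

def get_benchmark_df():
    # Refresh the local clone before reading; this guard is the only part
    # of the function actually shown in the diff.
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()
    # Hypothetical file name: the diff does not show what is read after the pull.
    return pd.read_csv(os.path.join("llm-perf-dataset", "benchmark.csv"))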
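
To see the new tab end to end, here is a self-contained sketch of the layout the second hunk builds. The constants and the placeholder DataFrame are assumptions standing in for the real app.py values; the Gradio calls mirror the diff. One caveat: the committed heading string closes with <h1> rather than </h1>, which browsers tolerate but looks like a typo, so the sketch uses the proper closing tag.

import gradio as gr
import pandas as pd

# Assumed stand-ins for the constants defined elsewhere in app.py.
NEW_COLUMNS = ["Model", "Throughput (tokens/s) ⬆️"]
COLUMNS_DATATYPES = ["str", "number"]

def get_benchmark_df():
    # Placeholder data; the real function pulls results from the dataset repo.
    return pd.DataFrame({"Model": ["gpt2"], "Throughput (tokens/s) ⬆️": [123.4]})

with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("A100 Benchmark", elem_id="a100-benchmark", id=0):
            gr.HTML("<h1>Machine: 4x A100 80GB</h1>")
            gr.HTML("<h3>Batch Size: 1</h3><h3>Generated Tokens: 100</h3>")
            benchmark_df = get_benchmark_df()
            leaderboard_table_lite = gr.components.Dataframe(
                value=benchmark_df,
                datatype=COLUMNS_DATATYPES,
                headers=NEW_COLUMNS,
                elem_id="pytorch-a100-benchmark",
            )

demo.launch()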