BenchmarkBot committed
Commit efc3d5b
1 Parent(s): 5aacd58

added column types

Files changed (1)
app.py +22 -12
app.py CHANGED
@@ -12,6 +12,17 @@ LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
 LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
 OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN")
 
+OLD_COLUMNS = ["model", "backend.name", "backend.torch_dtype", "backend.quantization",
+               "generate.latency(s)", "generate.throughput(tokens/s)"]
+
+NEW_COLUMNS = ["Model", "Backend 🏭", "Load dtype", "Quantization 🗜️",
+               "Latency (s) ⬇️", "Throughput (tokens/s) ⬆️"]
+
+COLUMNS_TYPES = ["markdown", "text", "text", "text", "number", "number"]
+
+SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
+
+
 llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
 
 
@@ -19,25 +30,23 @@ def get_vanilla_benchmark_df():
     if llm_perf_dataset_repo:
         llm_perf_dataset_repo.git_pull()
 
+    # load
     df = pd.read_csv(
         "./llm-perf-dataset/reports/cuda_1_100/inference_report.csv")
 
-    df = df[["model", "backend.name", "backend.torch_dtype", "backend.quantization",
-             "generate.latency(s)", "generate.throughput(tokens/s)"]]
+    # preprocess
+    df["Model"] = df["Model"].apply(make_clickable_model)
 
-    df["model"] = df["model"].apply(make_clickable_model)
+    # filter
+    df = df[OLD_COLUMNS]
 
+    # rename
     df.rename(columns={
-        "model": "Model",
-        "backend.name": "Backend 🏭",
-        "backend.torch_dtype": "Load dtype",
-        "backend.quantization": "Quantization 🗜️",
-        "generate.latency(s)": "Latency (s) ⬇️",
-        "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
+        df_col: rename_col for df_col, rename_col in zip(OLD_COLUMNS, NEW_COLUMNS)
     }, inplace=True)
 
-    df.sort_values(by=["Throughput (tokens/s) ⬆️"],
-                   ascending=False, inplace=True)
+    # sort
+    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)
 
     return df
 
@@ -54,7 +63,8 @@ with demo:
     vanilla_benchmark_df = get_vanilla_benchmark_df()
     leaderboard_table_lite = gr.components.Dataframe(
         value=vanilla_benchmark_df,
-        headers=vanilla_benchmark_df.columns.tolist(),
+        type=COLUMNS_TYPES,
+        headers=NEW_COLUMNS,
        elem_id="vanilla-benchmark",
     )
 
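For readers skimming the diff: the get_vanilla_benchmark_df hunk now drives the filter, rename, and sort steps from the module-level OLD_COLUMNS / NEW_COLUMNS / SORTING_COLUMN constants, with the rename mapping built by zipping the two lists into a dict. Below is a minimal standalone sketch of those steps on a made-up two-row frame rather than the real inference_report.csv; it is not the author's function as committed: make_clickable_model is stood in for by an inline link helper, and the link step is applied after the rename so it can address the renamed "Model" column.

    import pandas as pd

    OLD_COLUMNS = ["model", "backend.name", "backend.torch_dtype", "backend.quantization",
                   "generate.latency(s)", "generate.throughput(tokens/s)"]
    NEW_COLUMNS = ["Model", "Backend 🏭", "Load dtype", "Quantization 🗜️",
                   "Latency (s) ⬇️", "Throughput (tokens/s) ⬆️"]
    SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]

    # made-up rows standing in for ./llm-perf-dataset/reports/cuda_1_100/inference_report.csv
    df = pd.DataFrame({
        "model": ["gpt2", "gpt2-medium"],
        "backend.name": ["pytorch", "pytorch"],
        "backend.torch_dtype": ["float16", "float32"],
        "backend.quantization": ["None", "None"],
        "generate.latency(s)": [0.8, 1.6],
        "generate.throughput(tokens/s)": [125.0, 62.0],
        "experiment_name": ["a", "b"],  # dropped by the filter step below
    })

    # filter: keep only the reported columns, in a fixed order
    df = df[OLD_COLUMNS]

    # rename: zip the old and new names into an {old: new} mapping
    df.rename(columns={old: new for old, new in zip(OLD_COLUMNS, NEW_COLUMNS)}, inplace=True)

    # preprocess: turn the model id into a markdown link (inline stand-in for make_clickable_model)
    df["Model"] = df["Model"].apply(lambda m: f"[{m}](https://huggingface.co/{m})")

    # sort: highest throughput first
    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    print(df.to_string(index=False))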
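The last hunk feeds the new constants to the Gradio table. The exact keyword for per-column display types depends on the Gradio release: recent versions expose it as datatype on gr.Dataframe, with values such as "markdown", "str", and "number" ("markdown" is what makes the Model links render), while this commit passes type=COLUMNS_TYPES. A small self-contained sketch assuming the datatype spelling:

    import gradio as gr
    import pandas as pd

    NEW_COLUMNS = ["Model", "Backend 🏭", "Load dtype", "Quantization 🗜️",
                   "Latency (s) ⬇️", "Throughput (tokens/s) ⬆️"]
    # "markdown" lets the Model cell render as a clickable link; the rest are plain text / numbers
    COLUMNS_TYPES = ["markdown", "str", "str", "str", "number", "number"]

    # made-up single row standing in for the benchmark report
    row = ["[gpt2](https://huggingface.co/gpt2)", "pytorch", "float16", "None", 0.8, 125.0]
    df = pd.DataFrame([row], columns=NEW_COLUMNS)

    with gr.Blocks() as demo:
        gr.components.Dataframe(
            value=df,
            headers=NEW_COLUMNS,
            datatype=COLUMNS_TYPES,  # assumed keyword; the commit itself passes type=COLUMNS_TYPES
            elem_id="vanilla-benchmark",
        )

    if __name__ == "__main__":
        demo.launch()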