BenchmarkBot committed
Commit 18b69eb • 1 Parent(s): 8985298

added number of params

Files changed (2)
  1. app.py +5 -3
  2. src/utils.py +12 -0
app.py CHANGED
@@ -5,7 +5,7 @@ import plotly.express as px
 from apscheduler.schedulers.background import BackgroundScheduler
 
 from src.assets.text_content import TITLE, INTRODUCTION_TEXT, SINGLE_A100_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
-from src.utils import restart_space, load_dataset_repo, make_clickable_model, make_clickable_score
+from src.utils import restart_space, load_dataset_repo, make_clickable_model, make_clickable_score, num_to_str
 from src.assets.css_html_js import custom_css
 
 
@@ -17,12 +17,13 @@ COLUMNS_MAPPING = {
     "model": "Model 🤗",
     "backend.name": "Backend 🏭",
     "backend.torch_dtype": "Load Dtype 📥",
+    "num_parameters": "#Parameters 📏",
     "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
     "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
     "h4_score": "Average Open LLM Score ⬆️",
-
 }
-COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number", "markdown"]
+COLUMNS_DATATYPES = ["markdown", "str", "str",
+                     "number", "number", "number", "markdown"]
 SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
 
 
@@ -55,6 +56,7 @@ def get_benchmark_table(bench_df):
     bench_df["Model 🤗"] = bench_df["Model 🤗"].apply(make_clickable_model)
     bench_df["Average Open LLM Score ⬆️"] = bench_df["Average Open LLM Score ⬆️"].apply(
         make_clickable_score)
+    bench_df["#Parameters 📏"] = bench_df["#Parameters 📏"].apply(num_to_str)
 
     return bench_df
 
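For context (not part of the commit): the datatype list appears to be positional, so the new "number" entry has to sit at the same index as the new "#Parameters 📏" column introduced in COLUMNS_MAPPING. A minimal sketch of how the two line up after this change, using only the values shown in the diff above:

# Sketch only, not part of the commit: pair each renamed column with its
# declared display datatype to check that the two lists stay aligned.
COLUMNS_MAPPING = {
    "model": "Model 🤗",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Load Dtype 📥",
    "num_parameters": "#Parameters 📏",
    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    "h4_score": "Average Open LLM Score ⬆️",
}
COLUMNS_DATATYPES = ["markdown", "str", "str",
                     "number", "number", "number", "markdown"]

for pretty_name, dtype in zip(COLUMNS_MAPPING.values(), COLUMNS_DATATYPES):
    print(f"{pretty_name} -> {dtype}")  # e.g. "#Parameters 📏 -> number"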
src/utils.py CHANGED
@@ -66,3 +66,15 @@ def make_clickable_model(model_name):
 def make_clickable_score(score):
     link = f"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard"
     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{score}</a>'
+
+
+def num_to_str(num):
+    if num < 1000:
+        return str(int(num))
+    elif num < 1000000:
+        return str(int(num / 1000)) + "K"
+    elif num < 1000000000:
+        return str(int(num / 1000000)) + "M"
+    elif num < 1000000000000:
+        return str(int(num / 1000000000)) + "B"
+    return None
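
A quick usage sketch (not part of the commit), assuming it is run from the Space root so that src.utils is importable: num_to_str truncates toward zero before appending a K/M/B suffix, and anything of one trillion or more falls through the elif chain and returns None.

# Sketch only, not part of the commit.
from src.utils import num_to_str  # assumes the Space's repo layout

print(num_to_str(900))                # "900"
print(num_to_str(125_000_000))        # "125M"
print(num_to_str(6_738_415_616))      # "6B"   (~6.7B parameters, truncated)
print(num_to_str(2_000_000_000_000))  # None   (no branch for >= 1e12)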