gsaivinay committed
Commit 51678bf
1 Parent(s): ee366c0
app.py CHANGED
@@ -102,11 +102,6 @@ models = original_df["model_name_for_query"].tolist()  # needed for model backlinks
 
 to_be_dumped = f"models = {repr(models)}\n"
 
-# with open("models_backlinks.py", "w") as f:
-#     f.write(to_be_dumped)
-
-# print(to_be_dumped)
-
 leaderboard_df = original_df.copy()
 (
     finished_eval_queue_df,
@@ -216,8 +211,8 @@ def change_tab(query_param: str):
 
 
 # Searching and filtering
-def update_table(hidden_df: pd.DataFrame, current_columns_df: pd.DataFrame, columns: list, type_query: list, size_query: list, show_deleted: bool, query: str):
-    filtered_df = filter_models(hidden_df, type_query, size_query, show_deleted)
+def update_table(hidden_df: pd.DataFrame, current_columns_df: pd.DataFrame, columns: list, type_query: list, precision_query: str, size_query: list, show_deleted: bool, query: str):
+    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
     if query != "":
         filtered_df = search_table(filtered_df, query)
     df = select_columns(filtered_df, columns)
@@ -249,7 +244,7 @@ NUMERIC_INTERVALS = {
 }
 
 def filter_models(
-    df: pd.DataFrame, type_query: list, size_query: list, show_deleted: bool
+    df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
 ) -> pd.DataFrame:
     # Show all models
     if show_deleted:
@@ -259,6 +254,7 @@ def filter_models(
 
     type_emoji = [t[0] for t in type_query]
     filtered_df = filtered_df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
+    filtered_df = filtered_df[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
 
     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
     params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
@@ -277,6 +273,12 @@ with demo:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
                 with gr.Column():
+                    with gr.Row():
+                        search_bar = gr.Textbox(
+                            placeholder=" 🔍 Search for your model and press ENTER...",
+                            show_label=False,
+                            elem_id="search-bar",
+                        )
                     with gr.Row():
                         shown_columns = gr.CheckboxGroup(
                             choices=[
@@ -310,11 +312,6 @@ with demo:
                            value=True, label="Show gated/private/deleted models", interactive=True
                        )
                with gr.Column(min_width=320):
-                    search_bar = gr.Textbox(
-                        placeholder="🔍 Search for your model and press ENTER...",
-                        show_label=False,
-                        elem_id="search-bar",
-                    )
                    with gr.Box(elem_id="box-filter"):
                        filter_columns_type = gr.CheckboxGroup(
                            label="Model types",
@@ -323,16 +320,25 @@ with demo:
                                ModelType.FT.to_str(),
                                ModelType.IFT.to_str(),
                                ModelType.RL.to_str(),
+                                ModelType.Unknown.to_str(),
                            ],
                            value=[
                                ModelType.PT.to_str(),
                                ModelType.FT.to_str(),
                                ModelType.IFT.to_str(),
                                ModelType.RL.to_str(),
+                                ModelType.Unknown.to_str(),
                            ],
                            interactive=True,
                            elem_id="filter-columns-type",
                        )
+                        filter_columns_precision = gr.CheckboxGroup(
+                            label="Precision",
+                            choices=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                            value=["torch.float16", "torch.bfloat16", "torch.float32", "8bit", "4bit", "GPTQ"],
+                            interactive=True,
+                            elem_id="filter-columns-precision",
+                        )
                        filter_columns_size = gr.CheckboxGroup(
                            label="Model sizes",
                            choices=list(NUMERIC_INTERVALS.keys()),
@@ -375,6 +381,7 @@ with demo:
            leaderboard_table,
            shown_columns,
            filter_columns_type,
+            filter_columns_precision,
            filter_columns_size,
            deleted_models_visibility,
            search_bar,
@@ -388,6 +395,7 @@ with demo:
            leaderboard_table,
            shown_columns,
            filter_columns_type,
+            filter_columns_precision,
            filter_columns_size,
            deleted_models_visibility,
            search_bar,
@@ -402,6 +410,22 @@ with demo:
            leaderboard_table,
            shown_columns,
            filter_columns_type,
+            filter_columns_precision,
+            filter_columns_size,
+            deleted_models_visibility,
+            search_bar,
+        ],
+        leaderboard_table,
+        queue=True,
+    )
+    filter_columns_precision.change(
+        update_table,
+        [
+            hidden_leaderboard_table_for_search,
+            leaderboard_table,
+            shown_columns,
+            filter_columns_type,
+            filter_columns_precision,
            filter_columns_size,
            deleted_models_visibility,
            search_bar,
@@ -416,6 +440,7 @@ with demo:
            leaderboard_table,
            shown_columns,
            filter_columns_type,
+            filter_columns_precision,
            filter_columns_size,
            deleted_models_visibility,
            search_bar,
@@ -430,6 +455,7 @@ with demo:
            leaderboard_table,
            shown_columns,
            filter_columns_type,
+            filter_columns_precision,
            filter_columns_size,
            deleted_models_visibility,
            search_bar,
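Note: the functional core of this commit is the new precision filter in filter_models. A minimal sketch of the pandas logic behind the added line; the toy dataframe and the "Precision" label are illustrative stand-ins for the hidden leaderboard frame and AutoEvalColumn.precision.name:

import pandas as pd

# Toy stand-in for the hidden leaderboard dataframe.
df = pd.DataFrame({
    "Precision": ["torch.float16", "8bit", "GPTQ", "None"],
    "model": ["a", "b", "c", "d"],
})

# Mirrors the added line: a row survives when its precision is one of the
# checked values *or* the literal string "None", so models with unreported
# precision are never hidden by this filter.
precision_query = ["torch.float16", "4bit"]
print(df[df["Precision"].isin(precision_query + ["None"])]["model"].tolist())  # ['a', 'd']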
model_info_cache.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c307938f15bda18b6c38af3d02cc0407d9d8d5345bc31f475af2cbbb33a4f8b5
-size 2895750
+oid sha256:5c80b745050df96eb1bc908e15b2406533b076c9160486a48b88c8a29f1ed312
+size 2985167
model_size_cache.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5b09d9f81d22f7849f92081950b675c2d68e3bfd320e5dfd1892d14602a29a2
+size 58166
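(Both .pkl entries above are Git LFS pointer files rather than the pickles themselves: the repository tracks only the spec version, the sha256 oid of the blob, and its size in bytes, while the actual cache contents live in LFS storage.)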
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+accelerate==0.23.0
 aiofiles==23.1.0
 aiohttp==3.8.4
 aiosignal==1.3.1
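The new accelerate==0.23.0 pin backs the `from accelerate import init_empty_weights` import added to src/display_models/get_model_metadata.py below.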
src/display_models/get_model_metadata.py CHANGED
@@ -8,6 +8,8 @@ from typing import List
 import huggingface_hub
 from huggingface_hub import HfApi
 from tqdm import tqdm
+from transformers import AutoModel, AutoConfig
+from accelerate import init_empty_weights
 
 from src.display_models.model_metadata_flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
 from src.display_models.model_metadata_type import MODEL_TYPE_METADATA, ModelType, model_type_from_str
@@ -21,8 +23,13 @@ def get_model_infos_from_hub(leaderboard_data: List[dict]):
     try:
         with open("model_info_cache.pkl", "rb") as f:
             model_info_cache = pickle.load(f)
-    except EOFError:
+    except (EOFError, FileNotFoundError):
         model_info_cache = {}
+    try:
+        with open("model_size_cache.pkl", "rb") as f:
+            model_size_cache = pickle.load(f)
+    except (EOFError, FileNotFoundError):
+        model_size_cache = {}
 
     for model_data in tqdm(leaderboard_data):
         model_name = model_data["model_name_for_query"]
@@ -37,16 +44,21 @@ def get_model_infos_from_hub(leaderboard_data: List[dict]):
             print("Repo not found!", model_name)
             model_data[AutoEvalColumn.license.name] = None
             model_data[AutoEvalColumn.likes.name] = None
-            model_data[AutoEvalColumn.params.name] = get_model_size(model_name, None)
-            continue
+            if model_name not in model_size_cache:
+                model_size_cache[model_name] = get_model_size(model_name, None)
+            model_data[AutoEvalColumn.params.name] = model_size_cache[model_name]
 
         model_data[AutoEvalColumn.license.name] = get_model_license(model_info)
         model_data[AutoEvalColumn.likes.name] = get_model_likes(model_info)
-        model_data[AutoEvalColumn.params.name] = get_model_size(model_name, model_info)
+        if model_name not in model_size_cache:
+            model_size_cache[model_name] = get_model_size(model_name, model_info)
+        model_data[AutoEvalColumn.params.name] = model_size_cache[model_name]
 
     # save cache to disk in pickle format
     with open("model_info_cache.pkl", "wb") as f:
         pickle.dump(model_info_cache, f)
+    with open("model_size_cache.pkl", "wb") as f:
+        pickle.dump(model_size_cache, f)
 
 
 def get_model_license(model_info):
@@ -69,11 +81,17 @@ def get_model_size(model_name, model_info):
         return round(model_info.safetensors["total"] / 1e9, 3)
     except AttributeError:
         try:
-            size_match = re.search(size_pattern, model_name.lower())
-            size = size_match.group(0)
-            return round(float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3, 3)
-        except AttributeError:
-            return 0
+            config = AutoConfig.from_pretrained(model_name, trust_remote_code=False)
+            with init_empty_weights():
+                model = AutoModel.from_config(config, trust_remote_code=False)
+            return round(sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e9, 3)
+        except (EnvironmentError, ValueError, KeyError):  # model config not found, likely private
+            try:
+                size_match = re.search(size_pattern, model_name.lower())
+                size = size_match.group(0)
+                return round(float(size[:-1]) if size[-1] == "b" else float(size[:-1]) / 1e3, 3)
+            except AttributeError:
+                return 0
 
 
 def get_model_type(leaderboard_data: List[dict]):
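The interesting addition here is the meta-device fallback in get_model_size: when a repo ships no safetensors metadata, the code fetches only the model config and instantiates the architecture under init_empty_weights(), which places every tensor on PyTorch's meta device, so parameters can be counted without downloading or allocating any weights. A standalone sketch of the same trick, assuming Hub access; the checkpoint name is just an example:

from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModel

def params_in_billions(model_name: str) -> float:
    # Only the config JSON is fetched; no weight files are downloaded.
    config = AutoConfig.from_pretrained(model_name, trust_remote_code=False)
    # Tensors are created on the meta device: shapes exist, memory is never allocated.
    with init_empty_weights():
        model = AutoModel.from_config(config)
    return round(sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e9, 3)

print(params_in_billions("gpt2"))  # ≈ 0.124 for the base GPT-2 architecture

If that fails (config private or gated), the commit falls back to the old behavior: parsing a size token such as "7b" or "350m" out of the repo name, and finally returning 0.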
src/display_models/model_metadata_flags.py CHANGED
@@ -7,6 +7,9 @@ FLAGGED_MODELS = {
     "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
     "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
     "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
+    "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
+    "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
+    "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
 }
 
 # Models which have been requested by orgs to not be submitted on the leaderboard
src/display_models/model_metadata_type.py CHANGED
@@ -14,7 +14,7 @@ class ModelType(Enum):
     FT = ModelInfo(name="fine-tuned", symbol="🔶")
     IFT = ModelInfo(name="instruction-tuned", symbol="⭕")
     RL = ModelInfo(name="RL-tuned", symbol="🟦")
-    Unknown = ModelInfo(name="Unknown, add type to request file!", symbol="?")
+    Unknown = ModelInfo(name="Unknown", symbol="?")
 
     def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"
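One subtlety worth noting: app.py filters by slicing the first character off each checked label (type_emoji = [t[0] for t in type_query]), so only the leading symbol of to_str() matters for filtering; renaming Unknown just shortens the visible label. A minimal sketch of that round trip, using a stripped-down copy of the enum limited to members shown in this diff:

from dataclasses import dataclass
from enum import Enum

@dataclass
class ModelInfo:
    name: str
    symbol: str

class ModelType(Enum):
    FT = ModelInfo(name="fine-tuned", symbol="🔶")
    Unknown = ModelInfo(name="Unknown", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

# The CheckboxGroup holds strings like "🔶 fine-tuned"; filter_models
# recovers the model-type-symbol column value by taking the first character.
type_query = [ModelType.FT.to_str(), ModelType.Unknown.to_str()]
print([t[0] for t in type_query])  # ['🔶', '?']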