eduagarcia committed on
Commit
9839977
1 Parent(s): 359d8a9

Merge Origin - Rename model types (#1)

Browse files

- testing hiding the CI for the updater (47aab9dee9b5199b9c0ac9afc01c6159c1541b60)
- testing the dynamic updater at 30, without the CI (0b5382088e122d5c0f9380a3b74103659027e348)
- flag models (4b67a330d44b119fdf5ef65bc435bc291092901e)
- better checkboxes, better filtering (f04f90eb834bf75d912827652781831866453e8c)
- relaxed filters on merged models (bcf0226a9a21925c68838637656f555a79e4de82)
- mini fix (c2cc6bf98a7a9ad3ef801aac4496de52b424881d)
- change model types available at submission time (05bda40b490bc20ff5fccd6c5e36445fcc06c652)
- merge and moerge update (193f184fa5f98a440e9ab6d8750f10fa2b17e887)
- update for adapters on the hub (5c07fb7121ea75f208e42fb69bc850b72d95a6ba)
- Merge branch 'main' of https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard into merge_origin (af9a67984183f7f91cc055b27ac86c6fb31906fb)

app.py CHANGED
@@ -42,7 +42,7 @@ from src.tools.plots import (
42
  )
43
 
44
  # Start ephemeral Spaces on PRs (see config in README.md)
45
- enable_space_ci()
46
 
47
  def restart_space():
48
  API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
@@ -106,13 +106,10 @@ def update_table(
106
  type_query: list,
107
  precision_query: str,
108
  size_query: list,
109
- show_deleted: bool,
110
- show_merges: bool,
111
- show_moe: bool,
112
- show_flagged: bool,
113
  query: str,
114
  ):
115
- filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted, show_merges, show_moe, show_flagged)
116
  filtered_df = filter_queries(query, filtered_df)
117
  df = select_columns(filtered_df, columns)
118
  return df
@@ -160,21 +157,21 @@ def filter_queries(query: str, filtered_df: pd.DataFrame):
160
 
161
 
162
  def filter_models(
163
- df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool, show_merges: bool, show_moe:bool, show_flagged: bool
164
  ) -> pd.DataFrame:
165
  # Show all models
166
- if show_deleted:
167
- filtered_df = df
168
- else: # Show only still on the hub models
169
  filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
 
 
170
 
171
- if not show_merges:
172
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.merged.name] == False]
173
 
174
- if not show_moe:
175
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.moe.name] == False]
176
 
177
- if not show_flagged:
178
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.flagged.name] == False]
179
 
180
  type_emoji = [t[0] for t in type_query]
@@ -193,10 +190,7 @@ leaderboard_df = filter_models(
193
  type_query=[t.to_str(" : ") for t in ModelType],
194
  size_query=list(NUMERIC_INTERVALS.keys()),
195
  precision_query=[i.value.name for i in Precision],
196
- show_deleted=True,
197
- show_merges=False,
198
- show_moe=True,
199
- show_flagged=False
200
  )
201
 
202
  demo = gr.Blocks(css=custom_css)
@@ -231,17 +225,11 @@ with demo:
231
  interactive=True,
232
  )
233
  with gr.Row():
234
- deleted_models_visibility = gr.Checkbox(
235
- value=True, label="Show private/deleted models", interactive=True
236
- )
237
- merged_models_visibility = gr.Checkbox(
238
- value=False, label="Show merges", interactive=True
239
- )
240
- moe_models_visibility = gr.Checkbox(
241
- value=True, label="Show MoE", interactive=True
242
- )
243
- flagged_models_visibility = gr.Checkbox(
244
- value=False, label="Show flagged models", interactive=True
245
  )
246
  with gr.Column(min_width=320):
247
  #with gr.Box(elem_id="box-filter"):
@@ -296,10 +284,7 @@ with demo:
296
  filter_columns_type,
297
  filter_columns_precision,
298
  filter_columns_size,
299
- deleted_models_visibility,
300
- merged_models_visibility,
301
- moe_models_visibility,
302
- flagged_models_visibility,
303
  search_bar,
304
  ],
305
  leaderboard_table,
@@ -315,10 +300,7 @@ with demo:
315
  filter_columns_type,
316
  filter_columns_precision,
317
  filter_columns_size,
318
- deleted_models_visibility,
319
- merged_models_visibility,
320
- moe_models_visibility,
321
- flagged_models_visibility,
322
  search_bar,
323
  ],
324
  leaderboard_table,
@@ -326,7 +308,7 @@ with demo:
326
  # Check query parameter once at startup and update search bar + hidden component
327
  demo.load(load_query, inputs=[], outputs=[search_bar, hidden_search_bar])
328
 
329
- for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility, merged_models_visibility, moe_models_visibility, flagged_models_visibility]:
330
  selector.change(
331
  update_table,
332
  [
@@ -335,10 +317,7 @@ with demo:
335
  filter_columns_type,
336
  filter_columns_precision,
337
  filter_columns_size,
338
- deleted_models_visibility,
339
- merged_models_visibility,
340
- moe_models_visibility,
341
- flagged_models_visibility,
342
  search_bar,
343
  ],
344
  leaderboard_table,
@@ -487,7 +466,7 @@ with demo:
487
 
488
  scheduler = BackgroundScheduler()
489
  scheduler.add_job(restart_space, "interval", seconds=10800) # restarted every 3h
490
- scheduler.add_job(update_dynamic_files, "cron", minute=00) # launched every hour on the hour
491
  scheduler.start()
492
 
493
  demo.queue(default_concurrency_limit=40).launch()
 
42
  )
43
 
44
  # Start ephemeral Spaces on PRs (see config in README.md)
45
+ #enable_space_ci()
46
 
47
  def restart_space():
48
  API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
 
106
  type_query: list,
107
  precision_query: str,
108
  size_query: list,
109
+ hide_models: list,
 
 
 
110
  query: str,
111
  ):
112
+ filtered_df = filter_models(df=hidden_df, type_query=type_query, size_query=size_query, precision_query=precision_query, hide_models=hide_models)
113
  filtered_df = filter_queries(query, filtered_df)
114
  df = select_columns(filtered_df, columns)
115
  return df
 
157
 
158
 
159
  def filter_models(
160
+ df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, hide_models: list
161
  ) -> pd.DataFrame:
162
  # Show all models
163
+ if "Private or deleted" in hide_models:
 
 
164
  filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
165
+ else:
166
+ filtered_df = df
167
 
168
+ if "Contains a merge/moerge" in hide_models:
169
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.merged.name] == False]
170
 
171
+ if "MoE" in hide_models:
172
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.moe.name] == False]
173
 
174
+ if "Flagged" in hide_models:
175
  filtered_df = filtered_df[filtered_df[AutoEvalColumn.flagged.name] == False]
176
 
177
  type_emoji = [t[0] for t in type_query]
 
190
  type_query=[t.to_str(" : ") for t in ModelType],
191
  size_query=list(NUMERIC_INTERVALS.keys()),
192
  precision_query=[i.value.name for i in Precision],
193
+ hide_models=["Contains a merge/moerge", "Flagged"], # "Private or deleted", "Contains a merge/moerge", "Flagged"
 
 
 
194
  )
195
 
196
  demo = gr.Blocks(css=custom_css)
 
225
  interactive=True,
226
  )
227
  with gr.Row():
228
+ hide_models = gr.CheckboxGroup(
229
+ label="Hide models",
230
+ choices = ["Private or deleted", "Contains a merge/moerge", "Flagged", "MoE"],
231
+ value=["Private or deleted", "Contains a merge/moerge", "Flagged"],
232
+ interactive=True
 
 
 
 
 
 
233
  )
234
  with gr.Column(min_width=320):
235
  #with gr.Box(elem_id="box-filter"):
 
284
  filter_columns_type,
285
  filter_columns_precision,
286
  filter_columns_size,
287
+ hide_models,
 
 
 
288
  search_bar,
289
  ],
290
  leaderboard_table,
 
300
  filter_columns_type,
301
  filter_columns_precision,
302
  filter_columns_size,
303
+ hide_models,
 
 
 
304
  search_bar,
305
  ],
306
  leaderboard_table,
 
308
  # Check query parameter once at startup and update search bar + hidden component
309
  demo.load(load_query, inputs=[], outputs=[search_bar, hidden_search_bar])
310
 
311
+ for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, hide_models]:
312
  selector.change(
313
  update_table,
314
  [
 
317
  filter_columns_type,
318
  filter_columns_precision,
319
  filter_columns_size,
320
+ hide_models,
 
 
 
321
  search_bar,
322
  ],
323
  leaderboard_table,
 
466
 
467
  scheduler = BackgroundScheduler()
468
  scheduler.add_job(restart_space, "interval", seconds=10800) # restarted every 3h
469
+ scheduler.add_job(update_dynamic_files, "cron", minute=30) # launched every hour on the half hour
470
  scheduler.start()
471
 
472
  demo.queue(default_concurrency_limit=40).launch()
src/display/about.py CHANGED
@@ -64,9 +64,8 @@ Side note on the baseline scores:
64
  ## Icons
65
  - {ModelType.PT.to_str(" : ")} model: new, base models, trained on a given corpora
66
  - {ModelType.FT.to_str(" : ")} model: pretrained models finetuned on more data
67
- Specific fine-tune subcategories (more adapted to chat):
68
- - {ModelType.IFT.to_str(" : ")} model: instruction fine-tunes, which are model fine-tuned specifically on datasets of task instruction
69
- - {ModelType.RL.to_str(" : ")} model: reinforcement fine-tunes, which usually change the model loss a bit with an added policy.
70
  If there is no icon, we have not uploaded the information on the model yet, feel free to open an issue with the model information!
71
 
72
  "Flagged" indicates that this model has been flagged by the community, and should probably be ignored! Clicking the link will redirect you to the discussion about the model.
 
64
  ## Icons
65
  - {ModelType.PT.to_str(" : ")} model: new, base models, trained on a given corpora
66
  - {ModelType.FT.to_str(" : ")} model: pretrained models finetuned on more data
67
+ - {ModelType.chat.to_str(" : ")} model: chat like fine-tunes, either using IFT (datasets of task instruction), RLHF or DPO (changing the model loss a bit with an added policy), etc
68
+ - {ModelType.merges.to_str(" : ")} model: merges or MoErges, models which have been merged or fused without additional fine-tuning.
 
69
  If there is no icon, we have not uploaded the information on the model yet, feel free to open an issue with the model information!
70
 
71
  "Flagged" indicates that this model has been flagged by the community, and should probably be ignored! Clicking the link will redirect you to the discussion about the model.
src/display/utils.py CHANGED
@@ -210,9 +210,9 @@ class ModelDetails:
210
 
211
  class ModelType(Enum):
212
  PT = ModelDetails(name="pretrained", symbol="🟢")
213
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
214
- IFT = ModelDetails(name="instruction-tuned", symbol="")
215
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
216
  Unknown = ModelDetails(name="", symbol="?")
217
 
218
  def to_str(self, separator=" "):
@@ -224,10 +224,10 @@ class ModelType(Enum):
224
  return ModelType.FT
225
  if "pretrained" in type or "🟢" in type:
226
  return ModelType.PT
227
- if "RL-tuned" in type or "🟦" in type:
228
- return ModelType.RL
229
- if "instruction-tuned" in type or "" in type:
230
- return ModelType.IFT
231
  return ModelType.Unknown
232
 
233
  class WeightType(Enum):
 
210
 
211
  class ModelType(Enum):
212
  PT = ModelDetails(name="pretrained", symbol="🟢")
213
+ FT = ModelDetails(name="fine-tuned on domain-specific datasets", symbol="🔶")
214
+ chat = ModelDetails(name="chat models (RLHF, DPO, IFT, ...)", symbol="💬")
215
+ merges = ModelDetails(name="base merges and moerges", symbol="🤝")
216
  Unknown = ModelDetails(name="", symbol="?")
217
 
218
  def to_str(self, separator=" "):
 
224
  return ModelType.FT
225
  if "pretrained" in type or "🟢" in type:
226
  return ModelType.PT
227
+ if any([k in type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "⭕", "💬"]]):
228
+ return ModelType.chat
229
+ if "merge" in type or "🤝" in type:
230
+ return ModelType.merges
231
  return ModelType.Unknown
232
 
233
  class WeightType(Enum):
src/leaderboard/filter_models.py CHANGED
@@ -43,6 +43,13 @@ FLAGGED_MODELS = {
43
  "dillfrescott/trinity-medium": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
44
  "udkai/Garrulus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/526",
45
  "dfurman/GarrulusMarcoro-7B-v0.1": "https://huggingface.co/dfurman/GarrulusMarcoro-7B-v0.1/discussions/1",
 
 
 
 
 
 
 
46
  # Merges not indicated
47
  "gagan3012/MetaModelv2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
48
  "gagan3012/MetaModelv3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
@@ -126,6 +133,6 @@ def remove_forbidden_models(leaderboard_data: list[dict]):
126
  return leaderboard_data
127
 
128
 
129
- def filter_models(leaderboard_data: list[dict]):
130
  leaderboard_data = remove_forbidden_models(leaderboard_data)
131
  flag_models(leaderboard_data)
 
43
  "dillfrescott/trinity-medium": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
44
  "udkai/Garrulus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/526",
45
  "dfurman/GarrulusMarcoro-7B-v0.1": "https://huggingface.co/dfurman/GarrulusMarcoro-7B-v0.1/discussions/1",
46
+ "udkai/Turdus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
47
+ "eren23/slerp-test-turdus-beagle": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
48
+ "abideen/NexoNimbus-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
49
+ "alnrg2arg/test2_3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
50
+ "nfaheem/Marcoroni-7b-DPO-Merge": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
51
+ "CultriX/MergeTrix-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
52
+ "liminerity/Blur-7b-v1.21": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548",
53
  # Merges not indicated
54
  "gagan3012/MetaModelv2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
55
  "gagan3012/MetaModelv3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
 
133
  return leaderboard_data
134
 
135
 
136
+ def filter_models_flags(leaderboard_data: list[dict]):
137
  leaderboard_data = remove_forbidden_models(leaderboard_data)
138
  flag_models(leaderboard_data)
src/populate.py CHANGED
@@ -5,7 +5,7 @@ import pandas as pd
5
 
6
  from src.display.formatting import has_no_nan_values, make_requests_clickable_model
7
  from src.display.utils import AutoEvalColumn, EvalQueueColumn, baseline_row
8
- from src.leaderboard.filter_models import filter_models
9
  from src.leaderboard.read_evals import get_raw_eval_results
10
 
11
 
@@ -13,7 +13,7 @@ def get_leaderboard_df(results_path: str, requests_path: str, dynamic_path: str,
13
  raw_data = get_raw_eval_results(results_path=results_path, requests_path=requests_path, dynamic_path=dynamic_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
15
  all_data_json.append(baseline_row)
16
- filter_models(all_data_json)
17
 
18
  df = pd.DataFrame.from_records(all_data_json)
19
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
 
5
 
6
  from src.display.formatting import has_no_nan_values, make_requests_clickable_model
7
  from src.display.utils import AutoEvalColumn, EvalQueueColumn, baseline_row
8
+ from src.leaderboard.filter_models import filter_models_flags
9
  from src.leaderboard.read_evals import get_raw_eval_results
10
 
11
 
 
13
  raw_data = get_raw_eval_results(results_path=results_path, requests_path=requests_path, dynamic_path=dynamic_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
15
  all_data_json.append(baseline_row)
16
+ filter_models_flags(all_data_json)
17
 
18
  df = pd.DataFrame.from_records(all_data_json)
19
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
src/scripts/update_all_request_files.py CHANGED
@@ -3,7 +3,8 @@ from huggingface_hub import ModelCard
3
 
4
  import json
5
  import time
6
- from src.submission.check_validity import is_model_on_hub, check_model_card
 
7
  from src.envs import DYNAMIC_INFO_REPO, DYNAMIC_INFO_PATH, DYNAMIC_INFO_FILE_PATH, API, H4_TOKEN
8
 
9
  def update_models(file_path, models):
@@ -28,45 +29,20 @@ def update_models(file_path, models):
28
  #data['params'] = get_model_size(model_cfg, data['precision'])
29
  data['license'] = model_cfg.card_data.license if model_cfg.card_data is not None else ""
30
 
31
- # Is the model still on the hub
32
- still_on_hub, error, model_config = is_model_on_hub(
33
- model_name=model_id, revision=data.get("revision"), trust_remote_code=True, test_tokenizer=False, token=H4_TOKEN
 
 
 
34
  )
35
  data['still_on_hub'] = still_on_hub
36
 
37
  tags = []
38
 
39
  if still_on_hub:
40
- model = model_id
41
- modelcard_OK, error_msg = check_model_card(model)
42
- model_card = None
43
- if modelcard_OK:
44
- model_card = ModelCard.load(model)
45
-
46
- is_merge_from_metadata = False
47
- is_moe_from_metadata = False
48
- is_merge_from_model_card = False
49
- is_moe_from_model_card = False
50
-
51
- # Storing the model tags
52
- moe_keywords = ["moe", "mixture of experts", "mixtral"]
53
- if modelcard_OK:
54
- if model_card.data.tags:
55
- is_merge_from_metadata = "merge" in model_card.data.tags
56
- is_moe_from_metadata = "moe" in model_card.data.tags
57
- merge_keywords = ["mergekit", "merged model", "merge model", "merging"]
58
- # If the model is a merge but not saying it in the metadata, we flag it
59
- is_merge_from_model_card = any(keyword in model_card.text.lower() for keyword in merge_keywords)
60
- if is_merge_from_model_card or is_merge_from_metadata:
61
- tags.append("merge")
62
- if not is_merge_from_metadata:
63
- tags.append("flagged:undisclosed_merge")
64
- is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in moe_keywords)
65
- is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
66
- if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
67
- tags.append("moe")
68
- if not is_moe_from_metadata:
69
- tags.append("flagged:undisclosed_moe")
70
 
71
  data["tags"] = tags
72
 
@@ -108,3 +84,4 @@ def update_dynamic_files():
108
  commit_message=f"Daily request file update.",
109
  )
110
  print(f"UPDATE_DYNAMIC: pushed to hub")
 
 
3
 
4
  import json
5
  import time
6
+
7
+ from src.submission.check_validity import is_model_on_hub, check_model_card, get_model_tags
8
  from src.envs import DYNAMIC_INFO_REPO, DYNAMIC_INFO_PATH, DYNAMIC_INFO_FILE_PATH, API, H4_TOKEN
9
 
10
  def update_models(file_path, models):
 
29
  #data['params'] = get_model_size(model_cfg, data['precision'])
30
  data['license'] = model_cfg.card_data.license if model_cfg.card_data is not None else ""
31
 
32
+ # Is the model still on the hub?
33
+ model_name = model_id
34
+ if model_cfg.card_data is not None and model_cfg.card_data.base_model is not None:
35
+ model_name = model_cfg.card_data.base_model # for adapters, we look at the parent model
36
+ still_on_hub, _, _ = is_model_on_hub(
37
+ model_name=model_name, revision=data.get("revision"), trust_remote_code=True, test_tokenizer=False, token=H4_TOKEN
38
  )
39
  data['still_on_hub'] = still_on_hub
40
 
41
  tags = []
42
 
43
  if still_on_hub:
44
+ status, _, model_card = check_model_card(model_id)
45
+ tags = get_model_tags(model_card, model_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  data["tags"] = tags
48
 
 
84
  commit_message=f"Daily request file update.",
85
  )
86
  print(f"UPDATE_DYNAMIC: pushed to hub")
87
+
src/submission/check_validity.py CHANGED
@@ -19,7 +19,7 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
19
  try:
20
  card = ModelCard.load(repo_id)
21
  except huggingface_hub.utils.EntryNotFoundError:
22
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
23
 
24
  # Enforce license metadata
25
  if card.data.license is None:
@@ -27,13 +27,13 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
27
  return False, (
28
  "License not found. Please add a license to your model card using the `license` metadata or a"
29
  " `license_name`/`license_link` pair."
30
- )
31
 
32
  # Enforce card content
33
  if len(card.text) < 200:
34
- return False, "Please add a description to your model card, it is too short."
35
 
36
- return True, ""
37
 
38
 
39
  def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=TRUST_REMOTE_CODE, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
@@ -133,3 +133,34 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
133
  users_to_submission_dates[organisation].append(info["submitted_time"])
134
 
135
  return set(file_names), users_to_submission_dates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  try:
20
  card = ModelCard.load(repo_id)
21
  except huggingface_hub.utils.EntryNotFoundError:
22
+ return False, "Please add a model card to your model to explain how you trained/fine-tuned it.", None
23
 
24
  # Enforce license metadata
25
  if card.data.license is None:
 
27
  return False, (
28
  "License not found. Please add a license to your model card using the `license` metadata or a"
29
  " `license_name`/`license_link` pair."
30
+ ), None
31
 
32
  # Enforce card content
33
  if len(card.text) < 200:
34
+ return False, "Please add a description to your model card, it is too short.", None
35
 
36
+ return True, "", card
37
 
38
 
39
  def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=TRUST_REMOTE_CODE, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
 
133
  users_to_submission_dates[organisation].append(info["submitted_time"])
134
 
135
  return set(file_names), users_to_submission_dates
136
+
137
+ def get_model_tags(model_card, model: str):
138
+ is_merge_from_metadata = False
139
+ is_moe_from_metadata = False
140
+ is_merge_from_model_card = False
141
+ is_moe_from_model_card = False
142
+
143
+ # Storing the model tags
144
+ tags = []
145
+ moe_keywords = ["moe", "mixture of experts", "mixtral"]
146
+ if model_card is not None:
147
+ if model_card.data.tags:
148
+ is_merge_from_metadata = "merge" in model_card.data.tags
149
+ is_moe_from_metadata = "moe" in model_card.data.tags
150
+ merge_keywords = ["merged model", "merge model"]
151
+ # If the model is a merge but not saying it in the metadata, we flag it
152
+ is_merge_from_model_card = any(keyword in model_card.text.lower() for keyword in merge_keywords)
153
+ if is_merge_from_model_card or is_merge_from_metadata:
154
+ tags.append("merge")
155
+ if not is_merge_from_metadata:
156
+ tags.append("flagged:undisclosed_merge")
157
+ moe_keywords = ["moe", "mixtral"]
158
+ is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in moe_keywords)
159
+ is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
160
+ if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
161
+ tags.append("moe")
162
+ # We no longer tag undisclosed MoEs
163
+ #if not is_moe_from_metadata:
164
+ # tags.append("flagged:undisclosed_moe")
165
+
166
+ return tags
src/submission/submit.py CHANGED
@@ -13,6 +13,7 @@ from src.submission.check_validity import (
13
  get_model_size,
14
  is_model_on_hub,
15
  user_submission_permission,
 
16
  )
17
 
18
  REQUESTED_MODELS = None
@@ -97,37 +98,9 @@ def add_new_eval(
97
  license = None
98
  #return styled_error("Please select a license for your model")
99
 
100
- modelcard_OK, error_msg = check_model_card(model)
101
- model_card = None
102
- if modelcard_OK:
103
- model_card = ModelCard.load(model)
104
 
105
- is_merge_from_metadata = False
106
- is_moe_from_metadata = False
107
- is_merge_from_model_card = False
108
- is_moe_from_model_card = False
109
-
110
- # Storing the model tags
111
- tags = []
112
- moe_keywords = ["moe", "mixture of experts", "mixtral"]
113
- if modelcard_OK:
114
- if model_card.data.tags:
115
- is_merge_from_metadata = "merge" in model_card.data.tags
116
- is_moe_from_metadata = "moe" in model_card.data.tags
117
- merge_keywords = ["mergekit", "merged model", "merge model", "merging"]
118
- # If the model is a merge but not saying it in the metadata, we flag it
119
- is_merge_from_model_card = any(keyword in model_card.text.lower() for keyword in merge_keywords)
120
- if is_merge_from_model_card or is_merge_from_metadata:
121
- tags.append("merge")
122
- if not is_merge_from_metadata:
123
- tags.append("flagged:undisclosed_merge")
124
- is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in moe_keywords)
125
- is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
126
- if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
127
- tags.append("moe")
128
- if not is_moe_from_metadata:
129
- tags.append("flagged:undisclosed_moe")
130
-
131
 
132
  # Seems good, creating the eval
133
  print("Adding new eval")
 
13
  get_model_size,
14
  is_model_on_hub,
15
  user_submission_permission,
16
+ get_model_tags
17
  )
18
 
19
  REQUESTED_MODELS = None
 
98
  license = None
99
  #return styled_error("Please select a license for your model")
100
 
101
+ modelcard_OK, error_msg, model_card = check_model_card(model)
 
 
 
102
 
103
+ tags = get_model_tags(model_card, model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
  # Seems good, creating the eval
106
  print("Adding new eval")
update_dynamic.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from src.scripts.update_all_request_files import update_dynamic_files
2
+
3
+ if __name__ == "__main__":
4
+ update_dynamic_files()