Clémentine committed on
Commit a40c960
1 Parent(s): 699e8ff

removed need for tokens in the leaderboard + removed skull in flagged models

src/auto_leaderboard/get_model_metadata.py CHANGED
@@ -100,13 +100,10 @@ def get_model_type(leaderboard_data: List[dict]):
         model_data[AutoEvalColumn.model_type_symbol.name] = ModelType.Unknown.value.symbol
 
 def flag_models(leaderboard_data:List[dict]):
-    flag_symbol = "💀"
     for model_data in leaderboard_data:
         if model_data["model_name_for_query"] in FLAGGED_MODELS:
             issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
             issue_link = model_hyperlink(FLAGGED_MODELS[model_data["model_name_for_query"]], f"See discussion #{issue_num}")
-
-            model_data[AutoEvalColumn.model_type_symbol.name] = flag_symbol
             model_data[AutoEvalColumn.model.name] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"
 
 def apply_metadata(leaderboard_data: List[dict]):
 
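Note: after this change, flag_models only appends a warning with a link to the flagging discussion to the model name; the model-type symbol column is left untouched. A minimal sketch of that behavior, using hypothetical stand-ins for FLAGGED_MODELS, model_hyperlink, and the column keys (the real ones come from AutoEvalColumn):

# Minimal sketch with hypothetical stand-ins; not the leaderboard's actual definitions.
FLAGGED_MODELS = {"some-org/some-model": "https://huggingface.co/spaces/some-org/some-space/discussions/123"}

def model_hyperlink(link, text):
    # Assumed helper: renders a clickable link for the leaderboard table.
    return f'<a target="_blank" href="{link}">{text}</a>'

def flag_models(leaderboard_data):
    for model_data in leaderboard_data:
        if model_data["model_name_for_query"] in FLAGGED_MODELS:
            issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
            issue_link = model_hyperlink(FLAGGED_MODELS[model_data["model_name_for_query"]], f"See discussion #{issue_num}")
            # Only the model name is annotated now; no skull symbol is written anywhere.
            model_data["model"] = f"{model_data['model']} has been flagged! {issue_link}"

rows = [{"model_name_for_query": "some-org/some-model", "model": "some-org/some-model", "type_symbol": "?"}]
flag_models(rows)
print(rows[0]["model"])        # "... has been flagged! See discussion #123"
print(rows[0]["type_symbol"])  # unchanged: "?"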
src/init.py CHANGED
@@ -1,8 +1,6 @@
 import os
 from huggingface_hub import Repository
 
-H4_TOKEN = os.environ.get("H4_TOKEN", None)
-
 
 def get_all_requested_models(requested_models_dir):
     depth = 1
@@ -20,28 +18,23 @@ def load_all_info_from_hub(QUEUE_REPO, RESULTS_REPO, QUEUE_PATH, RESULTS_PATH):
     eval_results_repo = None
     requested_models = None
 
-    if H4_TOKEN:
-        print("Pulling evaluation requests and results.")
-
-        eval_queue_repo = Repository(
-            local_dir=QUEUE_PATH,
-            clone_from=QUEUE_REPO,
-            use_auth_token=H4_TOKEN,
-            repo_type="dataset",
-        )
-        eval_queue_repo.git_pull()
-
-        eval_results_repo = Repository(
-            local_dir=RESULTS_PATH,
-            clone_from=RESULTS_REPO,
-            use_auth_token=H4_TOKEN,
-            repo_type="dataset",
-        )
-        eval_results_repo.git_pull()
-
-        requested_models = get_all_requested_models("eval-queue")
-    else:
-        print("No HuggingFace token provided. Skipping evaluation requests and results.")
+    print("Pulling evaluation requests and results.")
+
+    eval_queue_repo = Repository(
+        local_dir=QUEUE_PATH,
+        clone_from=QUEUE_REPO,
+        repo_type="dataset",
+    )
+    eval_queue_repo.git_pull()
+
+    eval_results_repo = Repository(
+        local_dir=RESULTS_PATH,
+        clone_from=RESULTS_REPO,
+        repo_type="dataset",
+    )
+    eval_results_repo.git_pull()
+
+    requested_models = get_all_requested_models("eval-queue")
 
     return eval_queue_repo, requested_models, eval_results_repo
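Note: dropping H4_TOKEN means the queue and results repositories are now cloned anonymously, which only works if both datasets are public. A minimal sketch of the resulting call, assuming a public dataset id for QUEUE_REPO (the actual repo ids are defined elsewhere in the Space):

# Minimal sketch, assuming the eval queue dataset is public; the repo id below is an example, not the Space's config.
from huggingface_hub import Repository

QUEUE_REPO = "some-org/eval-requests"  # assumed example dataset id
QUEUE_PATH = "eval-queue"

eval_queue_repo = Repository(
    local_dir=QUEUE_PATH,
    clone_from=QUEUE_REPO,
    repo_type="dataset",  # no use_auth_token: anonymous clone works for public datasets
)
eval_queue_repo.git_pull()  # refresh the local checkout on each app start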