Sean Cho committed
Commit 6cdd0ad
1 Parent(s): adf26ec

Apply snapshot download

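In practice, this commit drops the `Repository`-based clones and `git_pull()` calls and instead downloads a snapshot of the requests and results dataset repos into local directories when the Space starts; the downstream loaders then read plain files from those paths. A minimal sketch of the new pattern, with placeholder repo ids and paths (the real values come from the Space's configuration constants QUEUE_REPO, RESULTS_REPO, EVAL_REQUESTS_PATH and EVAL_RESULTS_PATH):

from huggingface_hub import snapshot_download

# Placeholder ids/paths for illustration only.
queue_repo = "my-org/eval-requests"
results_repo = "my-org/eval-results"
eval_requests_path = "./eval-queue"
eval_results_path = "./eval-results"

# One-shot snapshot download of each dataset repo; tqdm_class=None silences
# the per-file progress bars in the Space logs.
snapshot_download(repo_id=queue_repo, local_dir=eval_requests_path, repo_type="dataset", tqdm_class=None)
snapshot_download(repo_id=results_repo, local_dir=eval_results_path, repo_type="dataset", tqdm_class=None)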
app.py CHANGED
@@ -7,7 +7,7 @@ from distutils.util import strtobool
 import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
-from huggingface_hub import HfApi
+from huggingface_hub import HfApi, snapshot_download
 
 from src.assets.css_html_js import custom_css, get_window_url_params
 from src.assets.text_content import (
@@ -28,7 +28,7 @@ from src.display_models.utils import (
     styled_message,
     styled_warning,
 )
-from src.load_from_hub import get_evaluation_queue_df, get_leaderboard_df, is_model_on_hub, load_all_info_from_hub
+from src.load_from_hub import get_all_requested_models, get_evaluation_queue_df, get_leaderboard_df, is_model_on_hub
 from src.rate_limiting import user_submission_permission
 
 pd.set_option("display.precision", 1)
@@ -86,22 +86,12 @@ BENCHMARK_COLS = [
     ]
 ]
 
-## LOAD INFO FROM HUB
-eval_queue, requested_models, eval_results, users_to_submission_dates = load_all_info_from_hub(
-    QUEUE_REPO, RESULTS_REPO, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
-)
-
-if not IS_PUBLIC:
-    (eval_queue_private, requested_models_private, eval_results_private, _) = load_all_info_from_hub(
-        PRIVATE_QUEUE_REPO,
-        PRIVATE_RESULTS_REPO,
-        EVAL_REQUESTS_PATH_PRIVATE,
-        EVAL_RESULTS_PATH_PRIVATE,
-    )
-else:
-    eval_queue_private, eval_results_private = None, None
+snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None)
+snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None)
+requested_models, users_to_submission_dates = get_all_requested_models(EVAL_REQUESTS_PATH)
 
-original_df = get_leaderboard_df(eval_results, eval_results_private, COLS, BENCHMARK_COLS)
+original_df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
+leaderboard_df = original_df.copy()
 models = original_df["model_name_for_query"].tolist()  # needed for model backlinks in their to the leaderboard
 
 # Commented out because it causes infinite restart loops in local
@@ -112,13 +102,12 @@ models = original_df["model_name_for_query"].tolist()  # needed for model backlinks in their to the leaderboard
 
 # print(to_be_dumped)
 
-leaderboard_df = original_df.copy()
 (
     finished_eval_queue_df,
     running_eval_queue_df,
     pending_eval_queue_df,
     failed_eval_queue_df,
-) = get_evaluation_queue_df(eval_queue, eval_queue_private, EVAL_REQUESTS_PATH, EVAL_COLS)
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
 
 ## INTERACTION FUNCTIONS
 def add_new_eval(
@@ -157,6 +146,27 @@ def add_new_eval(
     model_on_hub, error = is_model_on_hub(model, revision)
     if not model_on_hub:
         return styled_error(f'Model "{model}" {error}')
+
+    model_info = api.model_info(repo_id=model, revision=revision)
+
+    size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")
+    try:
+        model_size = round(model_info.safetensors["total"] / 1e9, 3)
+    except AttributeError:
+        try:
+            size_match = re.search(size_pattern, model.lower())
+            model_size = size_match.group(0)
+            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+        except AttributeError:
+            return 65
+
+    size_factor = 8 if (precision == "GPTQ" or "GPTQ" in model) else 1
+    model_size = size_factor * model_size
+
+    try:
+        license = model_info.cardData["license"]
+    except Exception:
+        license = "?"
 
     print("adding new eval")
 
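The new submission path in `add_new_eval` first asks the Hub for safetensors metadata to get the parameter count, and only falls back to parsing a size hint such as "7b" or "350m" out of the model id when that metadata is unavailable. A standalone sketch of that fallback (the helper name `guess_model_size` is hypothetical and not part of the commit; the real handler additionally multiplies the result by 8 for GPTQ models):

import re

def guess_model_size(model_id: str) -> float | None:
    """Hypothetical helper mirroring the fallback in add_new_eval: extract a
    size hint like '7b' or '350m' from the model id and convert it to billions
    of parameters. Returns None when no hint is found."""
    size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")
    match = re.search(size_pattern, model_id.lower())
    if match is None:
        return None
    hint = match.group(0)
    value = float(hint[:-1])
    # 'b' already means billions; 'm' (millions) is scaled down by 1e3.
    return round(value if hint[-1] == "b" else value / 1e3, 3)

print(guess_model_size("my-org/awesome-7b-chat"))  # 7.0
print(guess_model_size("my-org/tiny-350m"))        # 0.35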
model_info_cache.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df89e55d44e9203e4902e734bdf72fd21315a9e2e9dbee43fffb04c199019f59
-size 595504
+oid sha256:337f1fb80e92327e7c7b130c03617439f7923e3f7c5383f5abb07e017ef9cae3
+size 715983
model_size_cache.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df80f59fb639788c197e0b7ca13de55c24cc2ae54ca9c67f54ef730515f7032b
-size 16828
+oid sha256:64d63b51e6f5d6dd985b44ef6ddf513d9a7a138e734d77ae7382fd7a49a137ea
+size 20652
src/display_models/read_results.py CHANGED
@@ -113,10 +113,10 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, list[dict]]:
     return result_key, eval_results
 
 
-def get_eval_results() -> List[EvalResult]:
+def get_eval_results(results_path: str) -> List[EvalResult]:
     json_filepaths = []
 
-    for root, dir, files in os.walk("eval-results" + ("-private" if not IS_PUBLIC else "")):
+    for root, dir, files in os.walk(results_path + ("-private" if not IS_PUBLIC else "")):
         # We should only have json files in model results
         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
             continue
@@ -146,7 +146,7 @@ def get_eval_results() -> List[EvalResult]:
     return eval_results
 
 
-def get_eval_results_dicts() -> List[Dict]:
-    eval_results = get_eval_results()
+def get_eval_results_dicts(results_path: str) -> List[Dict]:
+    eval_results = get_eval_results(results_path)
 
     return [e.to_dict() for e in eval_results]
src/load_from_hub.py CHANGED
@@ -1,10 +1,9 @@
 import json
 import os
+from collections import defaultdict
 
 import pandas as pd
-from huggingface_hub import Repository
 from transformers import AutoConfig
-from collections import defaultdict
 
 from src.assets.hardcoded_evals import baseline
 from src.display_models.get_model_metadata import apply_metadata
@@ -35,43 +34,8 @@ def get_all_requested_models(requested_models_dir: str) -> set[str]:
     return set(file_names), users_to_submission_dates
 
 
-def load_all_info_from_hub(QUEUE_REPO: str, RESULTS_REPO: str, QUEUE_PATH: str, RESULTS_PATH: str) -> list[Repository]:
-    eval_queue_repo = None
-    eval_results_repo = None
-    requested_models = None
-
-    print("Pulling evaluation requests and results.")
-
-    eval_queue_repo = Repository(
-        local_dir=QUEUE_PATH,
-        clone_from=QUEUE_REPO,
-        repo_type="dataset",
-    )
-    eval_queue_repo.git_pull()
-
-    eval_results_repo = Repository(
-        local_dir=RESULTS_PATH,
-        clone_from=RESULTS_REPO,
-        repo_type="dataset",
-    )
-    eval_results_repo.git_pull()
-
-    requested_models, users_to_submission_dates = get_all_requested_models("eval-queue")
-
-    return eval_queue_repo, requested_models, eval_results_repo, users_to_submission_dates
-
-
-def get_leaderboard_df(
-    eval_results: Repository, eval_results_private: Repository, cols: list, benchmark_cols: list
-) -> pd.DataFrame:
-    if eval_results:
-        print("Pulling evaluation results for the leaderboard.")
-        eval_results.git_pull()
-    if eval_results_private:
-        print("Pulling evaluation results for the leaderboard.")
-        eval_results_private.git_pull()
-
-    all_data = get_eval_results_dicts()
+def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+    all_data = get_eval_results_dicts(results_path)
 
     # all_data.append(baseline)
     apply_metadata(all_data)  # Populate model type based on known hardcoded values in `metadata.py`
@@ -85,15 +49,7 @@ def get_leaderboard_df(
     return df
 
 
-def get_evaluation_queue_df(
-    eval_queue: Repository, eval_queue_private: Repository, save_path: str, cols: list
-) -> list[pd.DataFrame]:
-    if eval_queue:
-        print("Pulling changes for the evaluation queue.")
-        eval_queue.git_pull()
-    if eval_queue_private:
-        print("Pulling changes for the evaluation queue.")
-        eval_queue_private.git_pull()
+def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
 
     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
     all_evals = []
@@ -143,6 +99,5 @@ def is_model_on_hub(model_name: str, revision: str) -> bool:
             "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
         )
 
-    except Exception as e:
-        print(f"Could not get the model config from the hub.: {e}")
+    except Exception:
         return False, "was not found on hub!"
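With the `Repository` objects gone, the loaders in `src/load_from_hub.py` are plain local-filesystem readers: they assume the snapshot directories already exist and simply walk them for JSON files. A rough, simplified sketch of that reading pattern (an illustrative stand-in, not the real `get_evaluation_queue_df`, which also splits entries by status and maps them onto the display columns):

import json
import os

import pandas as pd

def load_eval_requests(save_path: str) -> pd.DataFrame:
    """Simplified stand-in: read every request JSON under the locally
    downloaded queue snapshot into a single DataFrame."""
    rows = []
    for root, _dirs, files in os.walk(save_path):
        for name in files:
            if not name.endswith(".json"):
                continue
            with open(os.path.join(root, name), encoding="utf-8") as fp:
                rows.append(json.load(fp))
    return pd.DataFrame(rows)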