Nathan Habib committed on
Commit
df0b79f
1 Parent(s): d1e81be
app.py CHANGED
@@ -50,21 +50,21 @@ def init_space(full_init: bool = True):
50
  try:
51
  print(EVAL_REQUESTS_PATH)
52
  snapshot_download(
53
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
54
  )
55
  except Exception:
56
  restart_space()
57
  try:
58
  print(DYNAMIC_INFO_PATH)
59
  snapshot_download(
60
- repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
61
  )
62
  except Exception:
63
  restart_space()
64
  try:
65
  print(EVAL_RESULTS_PATH)
66
  snapshot_download(
67
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
68
  )
69
  except Exception:
70
  restart_space()
 
50
  try:
51
  print(EVAL_REQUESTS_PATH)
52
  snapshot_download(
53
+ repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, max_workers=8
54
  )
55
  except Exception:
56
  restart_space()
57
  try:
58
  print(DYNAMIC_INFO_PATH)
59
  snapshot_download(
60
+ repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, max_workers=8
61
  )
62
  except Exception:
63
  restart_space()
64
  try:
65
  print(EVAL_RESULTS_PATH)
66
  snapshot_download(
67
+ repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, max_workers=8
68
  )
69
  except Exception:
70
  restart_space()
src/leaderboard/read_evals.py CHANGED
@@ -202,6 +202,8 @@ def get_raw_eval_results(results_path: str, requests_path: str, dynamic_path: st
202
  # Creation of result
203
  eval_result = EvalResult.init_from_json_file(model_result_filepath)
204
  eval_result.update_with_request_file(requests_path)
 
 
205
  if eval_result.full_model in dynamic_data:
206
  eval_result.update_with_dynamic_file_dict(dynamic_data[eval_result.full_model])
207
  # Hardcoding because of gating problem
 
202
  # Creation of result
203
  eval_result = EvalResult.init_from_json_file(model_result_filepath)
204
  eval_result.update_with_request_file(requests_path)
205
+ if eval_result.full_model == "databricks/dbrx-base":
206
+ print("WE HERE")
207
  if eval_result.full_model in dynamic_data:
208
  eval_result.update_with_dynamic_file_dict(dynamic_data[eval_result.full_model])
209
  # Hardcoding because of gating problem
src/populate.py CHANGED
@@ -13,9 +13,12 @@ def get_leaderboard_df(results_path: str, requests_path: str, dynamic_path: str,
13
  raw_data = get_raw_eval_results(results_path=results_path, requests_path=requests_path, dynamic_path=dynamic_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
15
  all_data_json.append(baseline_row)
 
16
  filter_models_flags(all_data_json)
17
 
18
  df = pd.DataFrame.from_records(all_data_json)
 
 
19
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
20
  df = df[cols].round(decimals=2)
21
 
@@ -44,7 +47,11 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
44
  for sub_entry in sub_entries:
45
  file_path = os.path.join(save_path, entry, sub_entry)
46
  with open(file_path) as fp:
47
- data = json.load(fp)
 
 
 
 
48
 
49
  data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
50
  data[EvalQueueColumn.revision.name] = data.get("revision", "main")
 
13
  raw_data = get_raw_eval_results(results_path=results_path, requests_path=requests_path, dynamic_path=dynamic_path)
14
  all_data_json = [v.to_dict() for v in raw_data]
15
  all_data_json.append(baseline_row)
16
+ print([data for data in all_data_json if data["model_name_for_query"] == "databricks/dbrx-base"])
17
  filter_models_flags(all_data_json)
18
 
19
  df = pd.DataFrame.from_records(all_data_json)
20
+ print(df.columns)
21
+ print(df[df["model_name_for_query"] == "databricks/dbrx-base"])
22
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
23
  df = df[cols].round(decimals=2)
24
 
 
47
  for sub_entry in sub_entries:
48
  file_path = os.path.join(save_path, entry, sub_entry)
49
  with open(file_path) as fp:
50
+ try:
51
+ data = json.load(fp)
52
+ except json.JSONDecodeError:
53
+ print(f"Error reading {file_path}")
54
+ continue
55
 
56
  data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
57
  data[EvalQueueColumn.revision.name] = data.get("revision", "main")
src/submission/check_validity.py CHANGED
@@ -150,6 +150,9 @@ def get_model_tags(model_card, model: str):
150
  if is_merge_from_model_card or is_merge_from_metadata:
151
  tags.append("merge")
152
  is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in ["moe", "mixtral"])
 
 
 
153
  is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
154
  if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
155
  tags.append("moe")
 
150
  if is_merge_from_model_card or is_merge_from_metadata:
151
  tags.append("merge")
152
  is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in ["moe", "mixtral"])
153
+ # Hardcoding because of gating problem
154
+ if model == "Qwen/Qwen1.5-32B":
155
+ is_moe_from_model_card = False
156
  is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-")
157
  if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata:
158
  tags.append("moe")