XufengDuan committed
Commit 4d9df48
Parent: caa4425

updated scripts

Files changed (1):
  1. app.py +26 -1
app.py CHANGED
@@ -12,9 +12,32 @@ import src.submission.submit as submit
 import os
 TOKEN = os.environ.get("H4_TOKEN", None)
 print("TOKEN", TOKEN)
+
+def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
+    try:
+        print(local_dir)
+        snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type, tqdm_class=tqdm_class, etag_timeout=etag_timeout)
+    except Exception as e:
+        restart_space()
+
 def restart_space():
     envs.API.restart_space(repo_id=envs.REPO_ID, token=TOKEN)
 
+def init_space():
+    dataset_df = get_dataset_summary_table(file_path='blog/Hallucination-Leaderboard-Summary.csv')
+
+    if socket.gethostname() not in {'neuromancer'}:
+        # sync model_type with open-llm-leaderboard
+        ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
+        ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
+    raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, "", COLS, BENCHMARK_COLS)
+
+    finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+    return dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df
+
+dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
+
+
 # try:
 #     print(envs.EVAL_REQUESTS_PATH)
 #     snapshot_download(
@@ -30,7 +53,9 @@ def restart_space():
 # except Exception:
 #     restart_space()
 
-raw_data, original_df = populate.get_leaderboard_df(envs.RESULTS_REPO, envs.QUEUE_REPO, utils.COLS, utils.BENCHMARK_COLS)
+# raw_data, original_df = populate.get_leaderboard_df(envs.RESULTS_REPO, envs.QUEUE_REPO, utils.COLS, utils.BENCHMARK_COLS)
+
+
 leaderboard_df = original_df.copy()
 
 (
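
For reference, the new ui_snapshot_download helper added by this commit is a thin wrapper around huggingface_hub.snapshot_download that restarts the Space whenever the download fails. Below is a minimal standalone sketch of that pattern; the repo IDs and local paths are placeholders (the app reads its real values from its envs module), while snapshot_download and HfApi.restart_space are the actual huggingface_hub APIs.

# Minimal sketch of the download-or-restart pattern, assuming only huggingface_hub.
import os

from huggingface_hub import HfApi, snapshot_download

TOKEN = os.environ.get("H4_TOKEN", None)
API = HfApi(token=TOKEN)

REPO_ID = "user/my-space"              # placeholder: the Space to restart on failure
QUEUE_REPO = "user/requests-dataset"   # placeholder: dataset repo to mirror
EVAL_REQUESTS_PATH = "./eval-queue"    # placeholder: local mirror directory


def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
    # Mirror a Hub repo into local_dir; on any failure (network, auth, rate
    # limit) restart the Space so startup is retried from a clean state.
    try:
        snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type,
                          tqdm_class=tqdm_class, etag_timeout=etag_timeout)
    except Exception:
        API.restart_space(repo_id=REPO_ID, token=TOKEN)


if __name__ == "__main__":
    ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH,
                         repo_type="dataset", tqdm_class=None, etag_timeout=30)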
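
The hostname check inside init_space is a dev-machine gate: on the developer's own box (here 'neuromancer') the Hub sync is skipped so local runs stay offline, while on the deployed Space the queue and results datasets are mirrored before the leaderboard tables are built. A tiny sketch of the gate, with a stand-in for the two download calls:

import socket

# Hosts where the Hub sync is skipped; 'neuromancer' comes from the diff above,
# any further entries would be your own dev hostnames.
DEV_HOSTS = {"neuromancer"}


def maybe_sync(sync):
    # Run the slow, networked sync only when not on a dev machine.
    if socket.gethostname() not in DEV_HOSTS:
        sync()


# Stand-in for the two ui_snapshot_download calls in init_space().
maybe_sync(lambda: print("mirroring eval queue and results from the Hub..."))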