djstrong committed
Commit 331e613 (parent: bddad3e)

disable unused code

Files changed (2)
  1. app.py +1 -1
  2. main_backend.py +56 -55
app.py CHANGED
@@ -34,7 +34,7 @@ from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 
 
-subprocess.run(["python", "scripts/fix_harness_import.py"])
+# subprocess.run(["python", "scripts/fix_harness_import.py"])
 
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
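The app.py change comments out a startup call that ran scripts/fix_harness_import.py in a subprocess. If that patch ever needs to come back without editing the file again, a minimal sketch is to gate it behind an environment variable; the RUN_HARNESS_FIX flag below is hypothetical and is not defined anywhere in this repository.

import os
import subprocess

# Hypothetical opt-in flag (assumption, not part of the repo): only run the
# harness import fix when explicitly requested at startup.
if os.environ.get("RUN_HARNESS_FIX") == "1":
    subprocess.run(["python", "scripts/fix_harness_import.py"], check=True)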
main_backend.py CHANGED
@@ -5,9 +5,9 @@ from huggingface_hub import snapshot_download
 
 logging.getLogger("openai").setLevel(logging.WARNING)
 
-from src.backend.run_eval_suite import run_evaluation
-from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
-from src.backend.sort_queue import sort_models_by_priority
+# from src.backend.run_eval_suite import run_evaluation
+# from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
+# from src.backend.sort_queue import sort_models_by_priority
 
 from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO, EVAL_RESULTS_PATH_BACKEND, DEVICE, API, LIMIT, TOKEN
 from src.about import Tasks, NUM_FEWSHOT
@@ -21,58 +21,59 @@ RUNNING_STATUS = "RUNNING"
 FINISHED_STATUS = "FINISHED"
 FAILED_STATUS = "FAILED"
 
+print('Downloading results and requests.')
 snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
 snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
 
-def run_auto_eval():
-    current_pending_status = [PENDING_STATUS]
-
-    # pull the eval dataset from the hub and parse any eval requests
-    # check completed evals and set them to finished
-    check_completed_evals(
-        api=API,
-        checked_status=RUNNING_STATUS,
-        completed_status=FINISHED_STATUS,
-        failed_status=FAILED_STATUS,
-        hf_repo=QUEUE_REPO,
-        local_dir=EVAL_REQUESTS_PATH_BACKEND,
-        hf_repo_results=RESULTS_REPO,
-        local_dir_results=EVAL_RESULTS_PATH_BACKEND
-    )
-
-    # Get all eval request that are PENDING, if you want to run other evals, change this parameter
-    eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
-    # Sort the evals by priority (first submitted first run)
-    eval_requests = sort_models_by_priority(api=API, models=eval_requests)
-
-    print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
-
-    if len(eval_requests) == 0:
-        return
-
-    eval_request = eval_requests[0]
-    pp.pprint(eval_request)
-
-    set_eval_request(
-        api=API,
-        eval_request=eval_request,
-        set_to_status=RUNNING_STATUS,
-        hf_repo=QUEUE_REPO,
-        local_dir=EVAL_REQUESTS_PATH_BACKEND,
-    )
-
-    run_evaluation(
-        eval_request=eval_request,
-        task_names=TASKS_HARNESS,
-        num_fewshot=NUM_FEWSHOT,
-        local_dir=EVAL_RESULTS_PATH_BACKEND,
-        results_repo=RESULTS_REPO,
-        batch_size=1,
-        device=DEVICE,
-        no_cache=True,
-        limit=LIMIT
-    )
-
-
-if __name__ == "__main__":
-    run_auto_eval()
+# def run_auto_eval():
+#     current_pending_status = [PENDING_STATUS]
+#
+#     # pull the eval dataset from the hub and parse any eval requests
+#     # check completed evals and set them to finished
+#     check_completed_evals(
+#         api=API,
+#         checked_status=RUNNING_STATUS,
+#         completed_status=FINISHED_STATUS,
+#         failed_status=FAILED_STATUS,
+#         hf_repo=QUEUE_REPO,
+#         local_dir=EVAL_REQUESTS_PATH_BACKEND,
+#         hf_repo_results=RESULTS_REPO,
+#         local_dir_results=EVAL_RESULTS_PATH_BACKEND
+#     )
+#
+#     # Get all eval request that are PENDING, if you want to run other evals, change this parameter
+#     eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
+#     # Sort the evals by priority (first submitted first run)
+#     eval_requests = sort_models_by_priority(api=API, models=eval_requests)
+#
+#     print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
+#
+#     if len(eval_requests) == 0:
+#         return
+#
+#     eval_request = eval_requests[0]
+#     pp.pprint(eval_request)
+#
+#     set_eval_request(
+#         api=API,
+#         eval_request=eval_request,
+#         set_to_status=RUNNING_STATUS,
+#         hf_repo=QUEUE_REPO,
+#         local_dir=EVAL_REQUESTS_PATH_BACKEND,
+#     )
+#
+#     run_evaluation(
+#         eval_request=eval_request,
+#         task_names=TASKS_HARNESS,
+#         num_fewshot=NUM_FEWSHOT,
+#         local_dir=EVAL_RESULTS_PATH_BACKEND,
+#         results_repo=RESULTS_REPO,
+#         batch_size=1,
+#         device=DEVICE,
+#         no_cache=True,
+#         limit=LIMIT
+#     )
+#
+#
+# if __name__ == "__main__":
+#     run_auto_eval()
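With run_auto_eval and its backend imports commented out, the only code that still executes in main_backend.py is the logging setup and the two snapshot downloads announced by the new print statement; the remaining imports (Tasks, NUM_FEWSHOT, DEVICE, API, LIMIT) stay in the file but appear unused after this change. A minimal sketch of the active path that is left, assuming the constants come from src.envs as in the diff:

import logging

from huggingface_hub import snapshot_download

from src.envs import (QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO,
                      EVAL_RESULTS_PATH_BACKEND, TOKEN)

# Silence verbose OpenAI client logging, as in the original file.
logging.getLogger("openai").setLevel(logging.WARNING)

# Mirror the evaluation results and request-queue datasets locally.
print('Downloading results and requests.')
snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND,
                  repo_type="dataset", max_workers=60, token=TOKEN)
snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND,
                  repo_type="dataset", max_workers=60, token=TOKEN)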