pminervini committed on
Commit
4c2b065
1 Parent(s): b1a5839
Files changed (2)
  1. completed-cli.py +80 -0
  2. src/backend/envs.py +2 -2
completed-cli.py ADDED
@@ -0,0 +1,80 @@
+ #!/usr/bin/env python
+
+ from huggingface_hub import snapshot_download
+
+ from src.backend.manage_requests import get_eval_requests
+ from src.backend.sort_queue import sort_models_by_priority
+ from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND
+
+ from src.backend.manage_requests import EvalRequest
+ from src.leaderboard.read_evals import EvalResult
+
+ from src.envs import QUEUE_REPO, RESULTS_REPO, API
+
+ import logging
+ import pprint
+
+ logging.getLogger("openai").setLevel(logging.WARNING)
+
+ logging.basicConfig(level=logging.ERROR)
+ pp = pprint.PrettyPrinter(width=80)
+
+ PENDING_STATUS = "PENDING"
+ RUNNING_STATUS = "RUNNING"
+ FINISHED_STATUS = "FINISHED"
+ FAILED_STATUS = "FAILED"
+
+ TASKS_HARNESS = [task.value for task in Tasks]
+
+ snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
+ snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
+
+
+ def request_to_result_name(request: EvalRequest) -> str:
+     org_and_model = request.model.split("/", 1)
+     if len(org_and_model) == 1:
+         model = org_and_model[0]
+         res = f"{model}_{request.precision}"
+     else:
+         org = org_and_model[0]
+         model = org_and_model[1]
+         res = f"{org}_{model}_{request.precision}"
+     return res
+
+
+ def process_finished_requests() -> bool:
+     current_finished_status = [FINISHED_STATUS]
+
+     # Get all eval requests that are FINISHED; if you want to run other evals, change this parameter
+     eval_requests: list[EvalRequest] = get_eval_requests(job_status=current_finished_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
+     # Sort the evals by priority (first submitted, first run)
+     eval_requests: list[EvalRequest] = sort_models_by_priority(api=API, models=eval_requests)
+
+     import random
+     random.shuffle(eval_requests)
+
+     from src.leaderboard.read_evals import get_raw_eval_results
+     eval_results: list[EvalResult] = get_raw_eval_results(EVAL_RESULTS_PATH_BACKEND, EVAL_REQUESTS_PATH_BACKEND)
+
+     result_name_to_request = {request_to_result_name(r): r for r in eval_requests}
+     result_name_to_result = {r.eval_name: r for r in eval_results}
+
+     for eval_request in eval_requests:
+         result_name: str = request_to_result_name(eval_request)
+
+         # Check the corresponding result
+         from typing import Optional
+         eval_result: Optional[EvalResult] = result_name_to_result[result_name] if result_name in result_name_to_result else None
+
+         # Iterate over tasks and, if we do not have results for a task, run the relevant evaluations
+         for task in TASKS_HARNESS:
+             task_name = task.benchmark
+
+             if eval_result is None or task_name not in eval_result.results:
+                 eval_request: EvalRequest = result_name_to_request[result_name]
+
+                 print(result_name, 'is incomplete -- missing task:', task_name)
+
+
+ if __name__ == "__main__":
+     res = process_finished_requests()
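
The completeness check above hinges on the result-name convention in `request_to_result_name`, which flattens `org/model` plus the precision into a single underscore-separated string matched against `EvalResult.eval_name`. A minimal sketch of that mapping, assuming only that an `EvalRequest` exposes `model` and `precision` attributes (here replaced by a hypothetical `DummyRequest`; the model names and precision values are illustrative):

```python
from dataclasses import dataclass


@dataclass
class DummyRequest:
    # Hypothetical stand-in for src.backend.manage_requests.EvalRequest;
    # only the two attributes read by request_to_result_name are modelled.
    model: str
    precision: str


def request_to_result_name(request) -> str:
    # Same logic as in completed-cli.py: "org/model" + precision -> "org_model_precision".
    org_and_model = request.model.split("/", 1)
    if len(org_and_model) == 1:
        return f"{org_and_model[0]}_{request.precision}"
    org, model = org_and_model
    return f"{org}_{model}_{request.precision}"


print(request_to_result_name(DummyRequest("EleutherAI/gpt-neo-125m", "float16")))  # EleutherAI_gpt-neo-125m_float16
print(request_to_result_name(DummyRequest("gpt2", "float16")))                     # gpt2_float16
```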
src/backend/envs.py CHANGED
@@ -22,8 +22,8 @@ class Tasks(Enum):
      # task1 = Task("logiqa", "acc_norm", "LogiQA")
      task0 = Task("nq_open", "em", "NQ Open", 64)  # 64, as in the ATLAS paper
      task1 = Task("triviaqa", "em", "TriviaQA", 64)  # 64, as in the ATLAS paper
-     task2 = Task("truthfulqa_mc1", "mc1", "TruthfulQA MC1", 0)
-     task3 = Task("truthfulqa_mc2", "mc2", "TruthfulQA MC2", 0)  # TruthfulQA is intended as a zero-shot benchmark [5, 47]. https://owainevans.github.io/pdfs/truthfulQA_lin_evans.pdf
+     task2 = Task("truthfulqa_mc1", "acc", "TruthfulQA MC1", 0)
+     task3 = Task("truthfulqa_mc2", "acc", "TruthfulQA MC2", 0)  # TruthfulQA is intended as a zero-shot benchmark [5, 47]. https://owainevans.github.io/pdfs/truthfulQA_lin_evans.pdf
 
      # NUM_FEWSHOT = 64  # Change with your few shot
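
These `Task` entries are what `completed-cli.py` consumes through `TASKS_HARNESS = [task.value for task in Tasks]`, reading `task.benchmark` as the harness task name. A minimal sketch of that pattern, assuming `Task` is a simple four-field record; the field names other than `benchmark` (the only one used in `completed-cli.py`) are illustrative guesses, while the metric strings match the diff above:

```python
from dataclasses import dataclass
from enum import Enum


@dataclass(frozen=True)
class Task:
    # Stand-in for src.backend.envs.Task; field names besides `benchmark`
    # are assumptions made for this sketch.
    benchmark: str
    metric: str
    col_name: str
    num_fewshot: int


class Tasks(Enum):
    task0 = Task("nq_open", "em", "NQ Open", 64)
    task2 = Task("truthfulqa_mc1", "acc", "TruthfulQA MC1", 0)
    task3 = Task("truthfulqa_mc2", "acc", "TruthfulQA MC2", 0)


TASKS_HARNESS = [task.value for task in Tasks]
for task in TASKS_HARNESS:
    print(task.benchmark, task.metric, task.num_fewshot)
# nq_open em 64
# truthfulqa_mc1 acc 0
# truthfulqa_mc2 acc 0
```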