Miaoran000 committed
Commit
2b9835a
1 Parent(s): 150bb15

minor updates in publishing and logging results

Files changed (2):
  1. main_backend.py +29 -12
  2. src/backend/run_eval_suite.py +1 -1
main_backend.py CHANGED
@@ -1,6 +1,7 @@
 import argparse
 import logging
 import pprint
+import os
 
 from huggingface_hub import snapshot_download
 
@@ -9,6 +10,8 @@ import src.backend.manage_requests as manage_requests
 import src.backend.sort_queue as sort_queue
 import src.envs as envs
 
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
+
 logging.basicConfig(level=logging.ERROR)
 pp = pprint.PrettyPrinter(width=80)
 
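A note on the allocator setting added above: PYTORCH_CUDA_ALLOC_CONF is read once, when PyTorch's CUDA caching allocator initializes, so the assignment only takes effect because it runs at import time, before anything allocates on the GPU. The expandable_segments:True option lets the allocator grow existing memory segments instead of reserving new fixed-size blocks, which helps with fragmentation-driven OOMs during long eval runs. A minimal sketch of the required ordering (the torch lines are illustrative, not part of this commit):

    import os
    # Must be set before the first CUDA allocation; the caching allocator
    # reads PYTORCH_CUDA_ALLOC_CONF once, at initialization.
    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

    import torch
    x = torch.zeros(1, device='cuda')  # first allocation picks up the setting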
 
 
@@ -46,22 +49,30 @@ def run_auto_eval(args):
         logging.info("Sorted eval requests")
 
         print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
-
+        print(eval_requests)
         if len(eval_requests) == 0:
             print("No eval requests found. Exiting.")
             return
 
-        eval_request = eval_requests[0]
-        pp.pprint(eval_request)
-
-        manage_requests.set_eval_request(
-            api=envs.API,
-            eval_request=eval_request,
-            new_status=RUNNING_STATUS,
-            hf_repo=envs.QUEUE_REPO,
-            local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
-        )
-        logging.info("Set eval request to running, now running eval")
+        if args.model is not None:
+            eval_request = manage_requests.EvalRequest(
+                model=args.model,
+                status=PENDING_STATUS,
+                precision=args.precision
+            )
+            pp.pprint(eval_request)
+        else:
+            eval_request = eval_requests[0]
+            pp.pprint(eval_request)
+
+        # manage_requests.set_eval_request(
+        #     api=envs.API,
+        #     eval_request=eval_request,
+        #     new_status=RUNNING_STATUS,
+        #     hf_repo=envs.QUEUE_REPO,
+        #     local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
+        # )
+        # logging.info("Set eval request to running, now running eval")
 
         run_eval_suite.run_evaluation(
             eval_request=eval_request,
@@ -70,6 +81,8 @@ def run_auto_eval(args):
             batch_size=1,
             device=envs.DEVICE,
             no_cache=True,
+            need_check=not args.publish,
+            write_results=args.update
         )
         logging.info("Eval finished, now setting status to finished")
     else:
@@ -87,6 +100,8 @@ def run_auto_eval(args):
             results_repo=envs.RESULTS_REPO,
             batch_size=1,
             device=envs.DEVICE,
+            need_check=not args.publish,
+            write_results=args.update
         )
         logging.info("Reproducibility eval finished")
 
@@ -98,6 +113,8 @@ def main():
     parser.add_argument("--reproduce", type=bool, default=False, help="Reproduce the evaluation results")
     parser.add_argument("--model", type=str, default=None, help="Your Model ID")
     parser.add_argument("--precision", type=str, default="float16", help="Precision of your model")
+    parser.add_argument("--publish", type=bool, default=False, help="whether directly publish the evaluation results on HF")
+    parser.add_argument("--update", type=bool, default=False, help="whether to update google drive files")
 
     args = parser.parse_args()
 
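One caveat with the new --publish and --update flags (and the pre-existing --reproduce): argparse's type=bool does not parse booleans; it calls bool() on the raw argument string, and bool() of any non-empty string is True. So --publish False still yields args.publish == True, and the flags behave as documented only when omitted entirely. A minimal sketch of the pitfall and the conventional store_true alternative (not part of this commit):

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("--publish", type=bool, default=False)
    print(p.parse_args(["--publish", "False"]).publish)  # True: bool("False") is True

    # Conventional fix: a presence flag.
    p2 = argparse.ArgumentParser()
    p2.add_argument("--publish", action="store_true")
    print(p2.parse_args([]).publish)             # False
    print(p2.parse_args(["--publish"]).publish)  # True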
 
src/backend/run_eval_suite.py CHANGED
@@ -15,7 +15,7 @@ logging.getLogger("openai").setLevel(logging.WARNING)
 
 def run_evaluation(eval_request: EvalRequest, batch_size, device,
                    local_dir: str, results_repo: str, no_cache=True, limit=None,
-                   need_check=True, write_results=True):
+                   need_check=True, write_results=False):
     """
     Run the evaluation for a given model and upload the results.
 
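With write_results now defaulting to False, run_evaluation no longer persists results unless the caller opts in; main_backend.py opts in by forwarding args.update, and need_check is relaxed only when --publish is set. A hypothetical direct call under the new signature (the model ID, status string, and local_dir value are illustrative, not taken from this diff):

    import src.backend.manage_requests as manage_requests
    import src.envs as envs
    from src.backend.run_eval_suite import run_evaluation

    eval_request = manage_requests.EvalRequest(
        model="org/model-name",      # illustrative model ID
        status="PENDING",            # illustrative; main_backend.py uses PENDING_STATUS
        precision="float16"
    )
    run_evaluation(
        eval_request=eval_request,
        local_dir="./eval-results",  # assumed path, not shown in this diff
        results_repo=envs.RESULTS_REPO,
        batch_size=1,
        device=envs.DEVICE,
        no_cache=True,
        need_check=True,             # keep manual checking on
        write_results=True           # opt in now that the default is False
    )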