Clémentine committed
Commit
18abd06
1 Parent(s): 6fdb0a4

added example for parameters

Files changed (2)
  1. main_backend_lighteval.py +3 -2
  2. src/envs.py +3 -3
main_backend_lighteval.py CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import snapshot_download
 
 logging.getLogger("openai").setLevel(logging.WARNING)
 
-from backend.run_eval_suite_lighteval import run_evaluation
+from src.backend.run_eval_suite_lighteval import run_evaluation
 from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
 from src.backend.sort_queue import sort_models_by_priority
 
@@ -61,7 +61,8 @@ def run_auto_eval():
     )
 
     # This needs to be done
-    instance_size, instance_type = get_instance_for_model(eval_request)
+    #instance_size, instance_type = get_instance_for_model(eval_request)
+    instance_size, instance_type = "small", "g4dn.xlarge"
 
     run_evaluation(
         eval_request=eval_request,
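The get_instance_for_model helper referenced above is commented out rather than implemented, so the instance is hardcoded to "small" / "g4dn.xlarge". A minimal sketch of what such a helper could look like, assuming the eval request exposes a parameter count in billions; the attribute name, thresholds, and instance choices are illustrative assumptions, not code from this repository:

# Hypothetical sketch of the missing get_instance_for_model helper.
# The `params` attribute, thresholds, and instance names are assumptions for illustration.
def get_instance_for_model(eval_request):
    """Pick an Inference Endpoints instance size/type from the model's parameter count."""
    params_b = getattr(eval_request, "params", None) or 0  # parameter count in billions
    if params_b <= 7:
        return "small", "g4dn.xlarge"   # single NVIDIA T4, same as the hardcoded default above
    if params_b <= 13:
        return "medium", "g5.2xlarge"   # single NVIDIA A10G
    return "large", "p4de.24xlarge"     # multi-GPU instance for larger models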
src/envs.py CHANGED
@@ -13,9 +13,9 @@ DEVICE = "cpu" # "cuda:0" if you add compute, for harness evaluations
 LIMIT = 20 # !!!! Should be None for actual evaluations!!!
 
 # For lighteval evaluations
-ACCELERATOR = ""
-REGION = ""
-VENDOR = ""
+ACCELERATOR = "gpu"
+REGION = "us-east-1"
+VENDOR = "aws"
 # ----------------------------------
 
 REPO_ID = f"{OWNER}/leaderboard-backend"
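For context, the accelerator/region/vendor triple together with an instance size and type is what an Inference Endpoints launch expects. A minimal sketch of one plausible way these values could be consumed, assuming huggingface_hub's create_inference_endpoint; the endpoint name and model repository below are hypothetical placeholders:

# Minimal sketch: launching an Inference Endpoint with the values configured above.
# The endpoint name and model repository are hypothetical placeholders.
from huggingface_hub import create_inference_endpoint

from src.envs import ACCELERATOR, REGION, VENDOR

endpoint = create_inference_endpoint(
    "leaderboard-eval-tmp",                     # placeholder endpoint name
    repository="HuggingFaceH4/zephyr-7b-beta",  # placeholder model to evaluate
    framework="pytorch",
    task="text-generation",
    accelerator=ACCELERATOR,                    # "gpu"
    vendor=VENDOR,                              # "aws"
    region=REGION,                              # "us-east-1"
    instance_size="small",                      # matches the hardcoded values in
    instance_type="g4dn.xlarge",                # main_backend_lighteval.py
)
endpoint.wait()  # block until the endpoint is running before evaluating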