xuanricheng committed
Commit be82687 · 1 parent: 5c07fb7

change repo

src/envs.py CHANGED
@@ -5,13 +5,13 @@ from huggingface_hub import HfApi
 # clone / pull the lmeh eval data
 H4_TOKEN = os.environ.get("H4_TOKEN", None)

-REPO_ID = "HuggingFaceH4/open_llm_leaderboard"
-QUEUE_REPO = "open-llm-leaderboard/requests"
-DYNAMIC_INFO_REPO = "open-llm-leaderboard/dynamic_model_information"
-RESULTS_REPO = "open-llm-leaderboard/results"
+REPO_ID = "BAAI/open_cn_llm_leaderboard"
+QUEUE_REPO = "open_cn_llm_leaderboard/requests"
+DYNAMIC_INFO_REPO = "open_cn_llm_leaderboard/dynamic_model_information"
+RESULTS_REPO = "open_cn_llm_leaderboard/results"

-PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
-PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
+PRIVATE_QUEUE_REPO = "open_cn_llm_leaderboard/private-requests"
+PRIVATE_RESULTS_REPO = "open_cn_llm_leaderboard/private-results"

 IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))

@@ -25,7 +25,7 @@ DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")
 EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
 EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"

-PATH_TO_COLLECTION = "open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03"
+PATH_TO_COLLECTION = "open_cn_llm_leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03"

 # Rate limit variables
 RATE_LIMIT_PERIOD = 7
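
For context, a minimal sketch (not part of this commit) of how the renamed repo constants in src/envs.py are typically consumed: the leaderboard app syncs the queue and results dataset repos locally with huggingface_hub. The repo_type, local directory names, and token handling below are illustrative assumptions, not taken from this repository's code.

import os
from huggingface_hub import snapshot_download

H4_TOKEN = os.environ.get("H4_TOKEN", None)

# New values introduced by this diff.
QUEUE_REPO = "open_cn_llm_leaderboard/requests"
RESULTS_REPO = "open_cn_llm_leaderboard/results"

# Pull the request queue and the results into local working directories.
# repo_type="dataset" and the local_dir names are assumptions for illustration.
snapshot_download(repo_id=QUEUE_REPO, repo_type="dataset", local_dir="eval-queue", token=H4_TOKEN)
snapshot_download(repo_id=RESULTS_REPO, repo_type="dataset", local_dir="eval-results", token=H4_TOKEN)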
src/scripts/create_request_file.py CHANGED
@@ -11,7 +11,7 @@ from src.submission.check_validity import get_model_size
 from src.display.utils import ModelType, WeightType

 EVAL_REQUESTS_PATH = "eval-queue"
-QUEUE_REPO = "open-llm-leaderboard/requests"
+QUEUE_REPO = "open_cn_llm_leaderboard/requests"

 precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
 model_types = [e.name for e in ModelType]
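
Similarly, a hedged sketch (an assumption, not the script's actual implementation) of how create_request_file.py might use the updated QUEUE_REPO: write an eval request as JSON and upload it to the queue dataset repo. The payload fields, the path_in_repo layout, and the helper name push_eval_request are hypothetical.

import json
import os
from typing import Optional

from huggingface_hub import HfApi

QUEUE_REPO = "open_cn_llm_leaderboard/requests"  # new value from this diff


def push_eval_request(model_id: str, precision: str, token: Optional[str] = None) -> None:
    # Build a minimal request payload; the real schema may differ.
    request = {"model": model_id, "precision": precision, "status": "PENDING"}
    local_path = f"{model_id.replace('/', '_')}_eval_request.json"
    with open(local_path, "w") as f:
        json.dump(request, f, indent=2)
    # Upload into the queue dataset repo; the destination path is illustrative.
    HfApi().upload_file(
        path_or_fileobj=local_path,
        path_in_repo=f"{model_id}/{os.path.basename(local_path)}",
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        token=token,
    )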