AppleSwing
committed on
Commit
•
ecaae4e
1
Parent(s):
9be75ab
change repo names
Browse files
- Dockerfile +1 -1
- cli/create_request_file.py +1 -1
- src/backend/run_eval_suite.py +1 -1
- src/envs.py +7 -7
Dockerfile
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
# Use specific python image
|
2 |
-
FROM registry.hf.space/
|
3 |
|
4 |
RUN pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ moe-infinity --no-cache-dir
|
5 |
# To fix pydantic version
|
|
|
1 |
# Use specific python image
|
2 |
+
FROM registry.hf.space/sparse-generative-ai-moe-llm-gpu-poor-leaderboard:latest
|
3 |
|
4 |
RUN pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ moe-infinity --no-cache-dir
|
5 |
# To fix pydantic version
|
cli/create_request_file.py
CHANGED
@@ -9,7 +9,7 @@ from colorama import Fore
|
|
9 |
from huggingface_hub import HfApi, snapshot_download
|
10 |
|
11 |
EVAL_REQUESTS_PATH = "eval-queue"
|
12 |
-
QUEUE_REPO = "
|
13 |
|
14 |
precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
|
15 |
model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
|
|
|
9 |
from huggingface_hub import HfApi, snapshot_download
|
10 |
|
11 |
EVAL_REQUESTS_PATH = "eval-queue"
|
12 |
+
QUEUE_REPO = "sparse-generative-ai/requests"
|
13 |
|
14 |
precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
|
15 |
model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
|
src/backend/run_eval_suite.py
CHANGED
@@ -12,7 +12,7 @@ from src.backend.tasks.cnndm.task_v2 import CNNDMv2
|
|
12 |
from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT
|
13 |
|
14 |
from src.backend.huggingface_generate_until import HFLMwithChatTemplate
|
15 |
-
from src.backend.moe_infinity import MoEHFLM
|
16 |
|
17 |
|
18 |
def run_evaluation(
|
|
|
12 |
from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT
|
13 |
|
14 |
from src.backend.huggingface_generate_until import HFLMwithChatTemplate
|
15 |
+
# from src.backend.moe_infinity import MoEHFLM
|
16 |
|
17 |
|
18 |
def run_evaluation(
|
src/envs.py
CHANGED
@@ -5,15 +5,15 @@ from huggingface_hub import HfApi
|
|
5 |
# clone / pull the lmeh eval data
|
6 |
H4_TOKEN = os.environ.get("H4_TOKEN", None)
|
7 |
|
8 |
-
# REPO_ID = "pminervini/
|
9 |
-
REPO_ID = "
|
10 |
|
11 |
-
QUEUE_REPO = "
|
12 |
QUEUE_REPO_OPEN_LLM = "open-llm-leaderboard/requests"
|
13 |
-
RESULTS_REPO = "
|
14 |
|
15 |
-
PRIVATE_QUEUE_REPO = "
|
16 |
-
PRIVATE_RESULTS_REPO = "
|
17 |
|
18 |
IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
|
19 |
|
@@ -26,7 +26,7 @@ EVAL_REQUESTS_PATH_OPEN_LLM = os.path.join(CACHE_PATH, "eval-queue-open-llm")
|
|
26 |
EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
|
27 |
EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
|
28 |
|
29 |
-
PATH_TO_COLLECTION = "
|
30 |
|
31 |
# Rate limit variables
|
32 |
RATE_LIMIT_PERIOD = 7
|
|
|
5 |
# clone / pull the lmeh eval data
|
6 |
H4_TOKEN = os.environ.get("H4_TOKEN", None)
|
7 |
|
8 |
+
# REPO_ID = "pminervini/sparse-generative-ai"
|
9 |
+
REPO_ID = "sparse-generative-ai/open-moe-llm-leaderboard"
|
10 |
|
11 |
+
QUEUE_REPO = "sparse-generative-ai/requests"
|
12 |
QUEUE_REPO_OPEN_LLM = "open-llm-leaderboard/requests"
|
13 |
+
RESULTS_REPO = "sparse-generative-ai/results"
|
14 |
|
15 |
+
PRIVATE_QUEUE_REPO = "sparse-generative-ai/private-requests"
|
16 |
+
PRIVATE_RESULTS_REPO = "sparse-generative-ai/private-results"
|
17 |
|
18 |
IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
|
19 |
|
|
|
26 |
EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
|
27 |
EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
|
28 |
|
29 |
+
PATH_TO_COLLECTION = "sparse-generative-ai/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03"
|
30 |
|
31 |
# Rate limit variables
|
32 |
RATE_LIMIT_PERIOD = 7
|