Commit 8b85b8d • Parent(s): 3f2e3c9

Fix style
Files changed:
- app.py +6 -5
- main_backend_harness.py +11 -10
- main_backend_lighteval.py +11 -12
- scripts/create_request_file.py +3 -1
- scripts/fix_harness_import.py +1 -0
- src/backend/manage_requests.py +2 -0
- src/backend/run_eval_suite_harness.py +3 -3
- src/backend/run_eval_suite_lighteval.py +2 -2
- src/envs.py +1 -0
- src/logging.py +1 -0
app.py CHANGED

@@ -5,13 +5,14 @@ import gradio as gr
 from apscheduler.schedulers.background import BackgroundScheduler
 
 # Choose ligtheval or harness backend
-from main_backend_lighteval import run_auto_eval
 # from main_backend_harness import run_auto_eval
+from main_backend_lighteval import run_auto_eval
 
-from src.display.log_visualizer import log_file_to_html_string
 from src.display.css_html_js import dark_mode_gradio_js
-from src.
-from src.
+from src.display.log_visualizer import log_file_to_html_string
+from src.envs import QUEUE_REPO, REFRESH_RATE, REPO_ID, RESULTS_REPO
+from src.logging import configure_root_logger, log_file, setup_logger
+
 
 logging.getLogger("httpx").setLevel(logging.WARNING)
 logging.getLogger("numexpr").setLevel(logging.WARNING)

@@ -24,7 +25,7 @@ logger = setup_logger(__name__)
 
 intro_md = """
 # Intro
-This is a visual for the auto evaluator.
+This is a visual for the auto evaluator.
 """
 
 links_md = f"""
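The two removed `from src.` lines above are truncated in the page capture; judging by the matching additions, they were most likely the same `src.envs` and `src.logging` imports before sorting. Every hunk in this commit is a style-only pass: imports are regrouped and alphabetized and blank lines are normalized, with no behavior change. The commit message says only "Fix style", so the exact tool is an assumption, but the resulting order matches isort's defaults. A minimal sketch, assuming isort 5.x is installed, reproducing the reshuffle of the `src.display` imports:

```python
# Hypothetical reproduction of the app.py import reshuffle above;
# assumes `pip install isort`.
import isort

messy = (
    "from src.display.log_visualizer import log_file_to_html_string\n"
    "from src.display.css_html_js import dark_mode_gradio_js\n"
)
print(isort.code(messy), end="")
# from src.display.css_html_js import dark_mode_gradio_js
# from src.display.log_visualizer import log_file_to_html_string
```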
main_backend_harness.py CHANGED

@@ -3,28 +3,29 @@ import pprint
 
 from huggingface_hub import snapshot_download
 
-from src.backend.run_eval_suite_harness import run_evaluation
 from src.backend.manage_requests import (
+    FAILED_STATUS,
+    FINISHED_STATUS,
+    PENDING_STATUS,
+    RUNNING_STATUS,
     check_completed_evals,
     get_eval_requests,
     set_eval_request,
-    PENDING_STATUS,
-    RUNNING_STATUS,
-    FINISHED_STATUS,
-    FAILED_STATUS,
 )
+from src.backend.run_eval_suite_harness import run_evaluation
 from src.backend.sort_queue import sort_models_by_priority
 from src.envs import (
-    QUEUE_REPO,
+    API,
+    DEVICE,
     EVAL_REQUESTS_PATH_BACKEND,
-    RESULTS_REPO,
     EVAL_RESULTS_PATH_BACKEND,
-    DEVICE,
-    API,
     LIMIT,
+    NUM_FEWSHOT,
+    QUEUE_REPO,
+    RESULTS_REPO,
+    TASKS_HARNESS,
     TOKEN,
 )
-from src.envs import TASKS_HARNESS, NUM_FEWSHOT
 from src.logging import setup_logger
 
 
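One detail worth noting in the `manage_requests` block: the uppercase status constants now sort ahead of the lowercase function names. That is not a semantic grouping; it falls out of a plain case-sensitive ASCII sort (the isort default), where uppercase letters precede lowercase ones:

```python
# Case-sensitive sort: ASCII uppercase (65-90) precedes lowercase (97-122),
# so FAILED_STATUS lands before check_completed_evals.
names = ["check_completed_evals", "FAILED_STATUS", "set_eval_request", "PENDING_STATUS"]
print(sorted(names))
# ['FAILED_STATUS', 'PENDING_STATUS', 'check_completed_evals', 'set_eval_request']
```

(In the `src.envs` blocks here and in the next file, two removed lines were blank in the capture; `QUEUE_REPO` and `TOKEN` are inferred from the matching additions.)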
main_backend_lighteval.py CHANGED

@@ -3,30 +3,29 @@ import pprint
 
 from huggingface_hub import snapshot_download
 
-
-from src.backend.run_eval_suite_lighteval import run_evaluation
 from src.backend.manage_requests import (
+    FAILED_STATUS,
+    FINISHED_STATUS,
+    PENDING_STATUS,
+    RUNNING_STATUS,
     check_completed_evals,
     get_eval_requests,
     set_eval_request,
-    PENDING_STATUS,
-    RUNNING_STATUS,
-    FINISHED_STATUS,
-    FAILED_STATUS,
 )
+from src.backend.run_eval_suite_lighteval import run_evaluation
 from src.backend.sort_queue import sort_models_by_priority
 from src.envs import (
-    QUEUE_REPO,
+    ACCELERATOR,
+    API,
     EVAL_REQUESTS_PATH_BACKEND,
-    RESULTS_REPO,
     EVAL_RESULTS_PATH_BACKEND,
-    API,
     LIMIT,
-    TOKEN,
-    ACCELERATOR,
-    VENDOR,
+    QUEUE_REPO,
     REGION,
+    RESULTS_REPO,
     TASKS_LIGHTEVAL,
+    TOKEN,
+    VENDOR,
 )
 from src.logging import setup_logger
 
scripts/create_request_file.py CHANGED

@@ -7,7 +7,9 @@ from datetime import datetime, timezone
 import click
 from colorama import Fore
 from huggingface_hub import HfApi, snapshot_download
-from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
+
+from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
+
 
 precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ", "float32")
 model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
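The only change in this file is a blank line separating the third-party `huggingface_hub` import from the first-party `src.envs` import (the removed line was blank in the capture and is inferred from the addition): isort-style formatting separates import sections with one blank line. A sketch of the same normalization, assuming isort 5.x; `known_first_party` here is a stand-in for whatever the project's real formatter config declares, which is not visible on this page:

```python
import isort

code = (
    "from huggingface_hub import HfApi, snapshot_download\n"
    "from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN\n"
)
# Tell isort that `src` is first-party so it starts a new section for it.
print(isort.code(code, known_first_party=["src"]), end="")
# from huggingface_hub import HfApi, snapshot_download
#
# from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
```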
scripts/fix_harness_import.py CHANGED

@@ -7,6 +7,7 @@ import os
 
 import lm_eval
 
+
 if __name__ == "__main__":
     lm_eval_path = lm_eval.__path__[0]
     os.makedirs(os.path.join(lm_eval_path, "datasets", "bigbench_resources"), exist_ok=True)
src/backend/manage_requests.py CHANGED

@@ -4,9 +4,11 @@ from dataclasses import dataclass
 from typing import Optional
 
 from huggingface_hub import HfApi, snapshot_download
+
 from src.envs import TOKEN
 from src.logging import setup_logger
 
+
 logger = setup_logger(__name__)
 
 PENDING_STATUS = "PENDING"
src/backend/run_eval_suite_harness.py CHANGED

@@ -1,16 +1,16 @@
 import json
-import os
 import logging
+import os
 from datetime import datetime
+from typing import Union
 
 from lm_eval import evaluator, utils
 from lm_eval.tasks import TaskManager
 
-from src.envs import API
 from src.backend.manage_requests import EvalRequest
+from src.envs import API
 from src.logging import setup_logger
 
-from typing import Union
 
 logging.getLogger("openai").setLevel(logging.WARNING)
 logger = setup_logger(__name__)
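This hunk shows the three-section layout most clearly: `import os` and `from typing import Union` move up into the standard-library block, and `from src.envs import API` slots alphabetically between the other first-party `src` imports. To check which section a given module falls into, isort exposes a small helper (again assuming isort 5.x):

```python
import isort

# place_module classifies a module name into an import section.
for mod in ("os", "typing", "lm_eval"):
    print(mod, "->", isort.place_module(mod))
# os -> STDLIB
# typing -> STDLIB
# lm_eval -> THIRDPARTY  (or the default section if lm_eval is not installed)
```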
src/backend/run_eval_suite_lighteval.py CHANGED

@@ -5,11 +5,11 @@ from lighteval.logging.evaluation_tracker import EvaluationTracker
 from lighteval.models.model_config import InferenceEndpointModelConfig
 from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters
 
-
-from src.envs import RESULTS_REPO
 from src.backend.manage_requests import EvalRequest
+from src.envs import RESULTS_REPO
 from src.logging import setup_logger
 
+
 logging.getLogger("openai").setLevel(logging.WARNING)
 logger = setup_logger(__name__)
 
src/envs.py CHANGED

@@ -2,6 +2,7 @@ import os
 
 from huggingface_hub import HfApi
 
+
 # Info to change for your repository
 # ----------------------------------
 TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
src/logging.py CHANGED

@@ -1,6 +1,7 @@
 import logging
 from pathlib import Path
 
+
 proj_dir = Path(__file__).parents[1]
 
 log_file = proj_dir / "output.log"
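The last two hunks (src/envs.py and src/logging.py), like the similar one-line additions earlier, only widen the gap between the import block and the first top-level statement to two blank lines. That is consistent with isort's `lines_after_imports = 2` option (an assumption; the project's formatter config is not shown), and PEP 8-style formatters commonly enforce the same spacing:

```python
import isort

code = "import logging\nfrom pathlib import Path\nproj_dir = 1\n"
# lines_after_imports=2 forces two blank lines between imports and code.
print(isort.code(code, lines_after_imports=2), end="")
# import logging
# from pathlib import Path
#
#
# proj_dir = 1
```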