# leaderboard/main_backend.py
import asyncio
import logging
import os
import pprint
import subprocess

from huggingface_hub import snapshot_download

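# scripts/fix_harness_import.py is assumed to patch the eval harness; it must
# run before the src.backend imports below pull the harness in.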
subprocess.run(["python", "scripts/fix_harness_import.py"])
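# Quiet the "openai" logger so only warnings and errors surface during runs.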
logging.getLogger("openai").setLevel(logging.WARNING)
from src.backend.run_eval_suite import run_evaluation
from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request, EvalRequest
from src.backend.sort_queue import sort_models_by_priority
from src.envs import (
    QUEUE_REPO,
    EVAL_REQUESTS_PATH_BACKEND,
    RESULTS_REPO,
    EVAL_RESULTS_PATH_BACKEND,
    DEVICE,
    API,
    LIMIT,
    TOKEN,
    RUN_MODE,
)
from src.about import NUM_FEWSHOT, HarnessTasks
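
# Benchmark identifiers the harness will run, one per task in the registry.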
TASKS_HARNESS = [task.value.benchmark for task in HarnessTasks]
logging.basicConfig(level=logging.ERROR)
pp = pprint.PrettyPrinter(width=80)
PENDING_STATUS = "PENDING"
RUNNING_STATUS = "RUNNING"
FINISHED_STATUS = "FINISHED"
FAILED_STATUS = "FAILED"
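
# Outside LOCAL mode, mirror the results and request-queue datasets from the
# Hub into the backend's local working directories.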
if RUN_MODE != "LOCAL":
    snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND,
                      repo_type="dataset", max_workers=60, token=TOKEN)
    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND,
                      repo_type="dataset", max_workers=60, token=TOKEN)

def run_auto_eval():
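    """Pick the next pending eval request, mark it as running, and launch the harness."""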
    current_pending_status = [PENDING_STATUS]

    # Pull the eval dataset from the Hub, parse pending requests, and flip
    # completed RUNNING evals to FINISHED (or FAILED).
    if RUN_MODE != "LOCAL":
        check_completed_evals(
            api=API,
            checked_status=RUNNING_STATUS,
            completed_status=FINISHED_STATUS,
            failed_status=FAILED_STATUS,
            hf_repo=QUEUE_REPO,
            local_dir=EVAL_REQUESTS_PATH_BACKEND,
            hf_repo_results=RESULTS_REPO,
            local_dir_results=EVAL_RESULTS_PATH_BACKEND,
        )
        # Get all eval requests that are PENDING; change `job_status` to run other evals.
        eval_requests = get_eval_requests(
            job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND
        )
        # Sort the evals by priority (first submitted, first run).
        eval_requests = sort_models_by_priority(api=API, models=eval_requests)
    else:
        # LOCAL mode: build a single synthetic request for a tiny test model.
        local_model_name = os.getenv("LOCAL_MODEL_NAME", "hf-internal-testing/tiny-random-gpt2")
        sample_request = {
            "model": local_model_name, "json_filepath": "", "base_model": "", "revision": "main",
            "private": False,
            "precision": "bfloat16", "weight_type": "Original", "status": "PENDING",
            "submitted_time": "2023-11-21T18:10:08Z", "model_type": "🟢 : pretrained", "likes": 0,
            "params": 0.1, "license": "custom",
        }
        eval_requests = [EvalRequest(**sample_request)]
print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
if len(eval_requests) == 0:
return
eval_request = eval_requests[0]
pp.pprint(eval_request)
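
    # Record the RUNNING status in the queue repo so the front end shows the
    # request as picked up.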
    if RUN_MODE != "LOCAL":
        set_eval_request(
            api=API,
            eval_request=eval_request,
            set_to_status=RUNNING_STATUS,
            hf_repo=QUEUE_REPO,
            local_dir=EVAL_REQUESTS_PATH_BACKEND,
        )
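
    # run_evaluation is a coroutine; drive it to completion with asyncio.run.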
    asyncio.run(
        run_evaluation(
            eval_request=eval_request,
            task_names=TASKS_HARNESS,
            num_fewshot=NUM_FEWSHOT,
            local_dir=EVAL_RESULTS_PATH_BACKEND,
            results_repo=RESULTS_REPO,
            batch_size=1,
            device=DEVICE,
            no_cache=True,
            limit=LIMIT,
        )
    )
logging.info("Shopping finished")
if __name__ == "__main__":
    run_auto_eval()
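
# Local smoke test (assumes src.envs reads RUN_MODE from the environment, the
# same way LOCAL_MODEL_NAME is read above):
#
#   RUN_MODE=LOCAL python main_backend.py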