import json
import os
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import (
    API,
    DEBUG_QUEUE_REPO,
    EVAL_REQUESTS_PATH,
    H4_TOKEN,
    QUEUE_REPO,
    RATE_LIMIT_PERIOD,
    RATE_LIMIT_QUOTA,
)
from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
    user_submission_permission,
)

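# Caches filled on the first call to add_new_eval: the set of requests already
# in the queue, and each user's recent submission dates (used for rate limiting).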
REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None

def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    private: bool,
    weight_type: str,
    model_type: str,
    inference_framework: str,
    debug: bool = False,
    gpu_type: str = "NVIDIA-A100-PCIe-80GB",
):
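    """Validate a leaderboard submission and add it to the evaluation queue.

    Runs the submission checks (rate limit, hub presence, model card and
    license, duplicates) and, if they all pass, writes a PENDING request
    file and uploads it to the queue repo. Returns a styled HTML message
    describing the outcome.
    """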
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    # Target the debug queue when debugging. Assigning to the imported
    # QUEUE_REPO inside this function would make the name function-local and
    # raise UnboundLocalError on the non-debug path, so bind a local instead.
    queue_repo = DEBUG_QUEUE_REPO if debug else QUEUE_REPO

    # Split a "user/model" repo id into its owner and model name.
    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    # Keep only the first token of the precision label.
    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Enforce the per-user submission rate limit.
    if user_name != "":
        user_can_submit, error_msg = user_submission_permission(
            user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
        )
        if not user_can_submit:
            return styled_error(error_msg)

    if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
        return styled_warning("Model authors have requested that their model not be submitted to the leaderboard.")

    if revision == "":
        revision = "main"

    # Is the model (and, for delta or adapter weights, its base model) on the hub?
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error, _ = is_model_on_hub(
            model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=False
        )
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    if weight_type != "Adapter":
        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=False)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    try:
        model_info = API.model_info(repo_id=model, revision=revision)
    except Exception:
        return styled_error("Could not get your model information. Please make sure it is filled in properly.")

    model_size = get_model_size(model_info=model_info, precision=precision)

    # The model card must declare a license. (model_license avoids shadowing
    # the built-in license.)
    try:
        model_license = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model.")

    modelcard_OK, error_msg = check_model_card(model)
    if not modelcard_OK:
        return styled_error(error_msg)

    print("Adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "private": private,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": model_license,
        "inference_framework": inference_framework,
        "gpu_type": gpu_type,
    }

    # Reject exact duplicates (same model, revision, precision, framework, and GPU).
    if f"{model}_{revision}_{precision}_{inference_framework}_{gpu_type}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)

    out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}_{inference_framework}_{gpu_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        # Assumes EVAL_REQUESTS_PATH contains "eval-queue/": only the
        # queue-relative part of the path is used inside the repo.
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=queue_repo,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # The local copy is no longer needed once the request is in the queue repo.
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
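

# Illustrative usage only (a sketch): in the leaderboard app this function is
# wired to the Gradio submission form. The repo id and the option strings below
# are hypothetical placeholders, not values confirmed by this module.
if __name__ == "__main__":
    result = add_new_eval(
        model="some-org/some-model-7b",  # hypothetical repo id
        base_model="",
        revision="main",
        precision="float16",
        private=False,
        weight_type="Original",
        model_type="pretrained",  # hypothetical model-type label
        inference_framework="hf",  # hypothetical framework tag
        debug=True,  # write to the debug queue rather than the production one
    )
    print(result)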