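"""Handle new RelBench leaderboard submissions: validate each entry, write it to a
local JSON request file, and upload it to the evaluation queue repo."""
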
import json
import os
from datetime import datetime, timezone
from ast import literal_eval
from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
)
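
# Caches of prior submissions, populated lazily on the first call to add_new_eval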
REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None


class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that falls back to str() for objects json cannot serialize."""

    def default(self, obj):
        try:
            return super().default(obj)
        except TypeError:
            return str(obj)  # Convert non-serializable objects to strings


def add_new_eval_json(eval_entry, out_path):
    """Serialize an eval entry to a JSON file at out_path."""
    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry, cls=CustomJSONEncoder))


def add_new_eval(
    author,
    email,
    relbench_version,
    model,
    official_or_not,
    test_performance,
    valid_performance,
    paper_url,
    github_url,
    # parameters,
    honor_code,
    task_track,
):
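    """Validate a new leaderboard submission and upload it to the eval queue repo."""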
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
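
    # Map the human-readable task track to the short code used in the request filename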
    if task_track in ["Node Classification", "Entity Classification"]:
        task_type = "nc"
    elif task_track in ["Node Regression", "Entity Regression"]:
        task_type = "nr"
    elif task_track in ["Link Prediction", "Recommendation"]:
        task_type = "lp"
    else:
        # Guard against an unrecognized track so task_type is never undefined below
        return styled_error(f"Unknown task track: {task_track}")
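
    # The request file is keyed by the model name plus the task-type suffix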
    model_path = model + "_" + task_type
    # precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    # model_size = parameters

    # Seems good, creating the eval
    print("Adding new eval")
    eval_entry = {
        "model": model,
        "author": author,
        "email": email,
        "relbench_version": relbench_version,
        "official_or_not": official_or_not,
        "test": test_performance,
        "valid": valid_performance,
        "paper_url": paper_url,
        "github_url": github_url,
        "honor_code": honor_code,
        "status": "PENDING",
        "submitted_time": current_time,
        # "params": model_size,
        "task": task_track,
        "private": False,
    }

    # Check that the submitted performance strings parse as Python literals
    try:
        literal_eval(eval_entry["test"])
        literal_eval(eval_entry["valid"])
    except (ValueError, SyntaxError, TypeError):
        return styled_error(
            "The submitted test/validation performance does not follow the correct format. "
            "Please check the format and resubmit."
        )

    # TODO: Check for duplicate submissions
    # if f"{model}" in REQUESTED_MODELS:
    #     return styled_error("This model has already been submitted.")
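
    # Write the request file locally before uploading it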
print("Creating eval file")
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{model}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_path}_eval_request_False.json"
print(eval_entry)
#with open(out_path, "w") as f:
# f.write(json.dumps(eval_entry))
add_new_eval_json(eval_entry, out_path)
print("Uploading eval file")
print(out_path)
print(QUEUE_REPO)
print(TOKEN)
print(API)
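
    # Push the request file to the eval queue dataset repo on the Hub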
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\n"
        "Please wait for up to an hour for the model to show in the PENDING list."
    )
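

# Illustrative call with hypothetical values (the leaderboard UI supplies these
# from its submission form; the task key below is just an example):
# add_new_eval(
#     author="Jane Doe",
#     email="jane@example.com",
#     relbench_version="1.0.0",
#     model="my-gnn",
#     official_or_not=False,
#     test_performance="{'rel-amazon/user-churn': 0.70}",
#     valid_performance="{'rel-amazon/user-churn': 0.71}",
#     paper_url="https://arxiv.org/abs/0000.00000",
#     github_url="https://github.com/example/my-gnn",
#     honor_code=True,
#     task_track="Node Classification",
# )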