import os
import gradio as gr
import pandas as pd
from huggingface_hub import (
    CommitOperationAdd,
    EvalResult,
    ModelCard,
    RepoUrl,
    create_commit,
)
from huggingface_hub.repocard_data import eval_results_to_model_index
from pytablewriter import MarkdownTableWriter
from openllm import get_datas, get_json_format_data
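# Bot account token, read from the environment (e.g. a Space secret).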
BOT_HF_TOKEN = os.getenv("BOT_HF_TOKEN")
def search(df, value):
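    """Return the leaderboard row whose "Model" column equals `value`, as a dict, or None if absent."""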
    result_df = df[df["Model"] == value]
    return result_df.iloc[0].to_dict() if not result_df.empty else None
def get_details_url(repo):
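    """Build the URL of the model's evaluation-details dataset on the open-llm-leaderboard org."""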
    author, model = repo.split("/")
    return f"https://huggingface.co/datasets/open-llm-leaderboard/{author}__{model}-details"
def get_query_url(repo):
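    """Build the leaderboard URL pre-filtered to the given repo."""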
return f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"
def get_task_summary(results):
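    """Map a leaderboard result row to per-benchmark dataset/metric metadata used to build EvalResult entries."""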
    return {
        "IFEval": {
            "dataset_type": "HuggingFaceH4/ifeval",
            "dataset_name": "IFEval (0-Shot)",
            "metric_type": "inst_level_strict_acc and prompt_level_strict_acc",
            "metric_value": results["IFEval"],
            "dataset_config": None,  # don't know
            "dataset_split": None,  # don't know
            "dataset_revision": None,
            "dataset_args": {"num_few_shot": 0},
            "metric_name": "strict accuracy",
        },
        "BBH": {
            "dataset_type": "BBH",
            "dataset_name": "BBH (3-Shot)",
            "metric_type": "acc_norm",
            "metric_value": results["BBH"],
            "dataset_config": None,  # don't know
            "dataset_split": None,  # don't know
            "dataset_revision": None,
            "dataset_args": {"num_few_shot": 3},
            "metric_name": "normalized accuracy",
        },
        "MATH Lvl 5": {
            "dataset_type": "hendrycks/competition_math",
            "dataset_name": "MATH Lvl 5 (4-Shot)",
            "metric_type": "exact_match",
            "metric_value": results["MATH Lvl 5"],
            "dataset_config": None,  # don't know
            "dataset_split": None,  # don't know
            "dataset_revision": None,
            "dataset_args": {"num_few_shot": 4},
            "metric_name": "exact match",
        },
        "GPQA": {
            "dataset_type": "Idavidrein/gpqa",
            "dataset_name": "GPQA (0-shot)",
            "metric_type": "acc_norm",
            "metric_value": results["GPQA"],
            "dataset_config": None,  # don't know
            "dataset_split": None,  # don't know
            "dataset_revision": None,
            "dataset_args": {"num_few_shot": 0},
            "metric_name": "acc_norm",
        },
        "MuSR": {
            "dataset_type": "TAUR-Lab/MuSR",
            "dataset_name": "MuSR (0-shot)",
            "metric_type": "acc_norm",
            "metric_value": results["MUSR"],
            "dataset_config": None,  # don't know
            "dataset_split": None,  # don't know
            "dataset_args": {"num_few_shot": 0},
            "metric_name": "acc_norm",
        },
        "MMLU-PRO": {
            "dataset_type": "TIGER-Lab/MMLU-Pro",
            "dataset_name": "MMLU-PRO (5-shot)",
            "metric_type": "acc",
            "metric_value": results["MMLU-PRO"],
            "dataset_config": "main",
            "dataset_split": "test",
            "dataset_args": {"num_few_shot": 5},
            "metric_name": "accuracy",
        },
    }
def get_eval_results(df, repo):
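    """Format the leaderboard scores for `repo` as a markdown section containing a metric/value table."""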
    results = search(df, repo)
    task_summary = get_task_summary(results)
    md_writer = MarkdownTableWriter()
    md_writer.headers = ["Metric", "Value"]
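    # First row is the leaderboard average ("Average ⬆️"), followed by one row per benchmark.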
    md_writer.value_matrix = [["Avg.", results["Average ⬆️"]]] + [
        [v["dataset_name"], v["metric_value"]] for v in task_summary.values()
    ]
    text = f"""
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
Detailed results can be found [here]({get_details_url(repo)})
{md_writer.dumps()}
"""
    return text
def get_edited_yaml_readme(df, repo, token: str | None):
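    """Load the model card for `repo` and return it as a string with the leaderboard scores added to its model-index metadata."""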
    card = ModelCard.load(repo, token=token)
    results = search(df, repo)
    common = {
        "task_type": "text-generation",
        "task_name": "Text Generation",
        "source_name": "Open LLM Leaderboard",
        "source_url": f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}",
    }
    tasks_results = get_task_summary(results)
    if not card.data[
        "eval_results"
    ]:  # No results reported yet, we initialize the metadata
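        # eval_results_to_model_index converts the EvalResult list into the model-index metadata block used by the Hub.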
card.data["model-index"] = eval_results_to_model_index(
repo.split("/")[1],
[EvalResult(**task, **common) for task in tasks_results.values()],
)
else: # We add the new evaluations
for task in tasks_results.values():
cur_result = EvalResult(**task, **common)
if any(
result.is_equal_except_value(cur_result)
for result in card.data["eval_results"]
):
continue
card.data["eval_results"].append(cur_result)
return str(card)
def commit(
    repo,
    pr_number=None,
    message="Adding Evaluation Results",
    oauth_token: gr.OAuthToken | None = None,
):  # pass pr_number to update an existing PR; omit it to create a new one
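    """Create or update a pull request on `repo` adding the Open LLM Leaderboard results to its README; returns the resulting PR URL or an error message."""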
    data = get_json_format_data()
    finished_models = get_datas(data)
    df = pd.DataFrame(finished_models)
    desc = """
This is an automated PR created with https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-results-pr
The purpose of this PR is to add evaluation results from the Open LLM Leaderboard to your model card.
Please report any issues here: https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-results-pr/discussions
"""
    if not oauth_token:
        raise gr.Warning(
            "You are not logged in. Click on 'Sign in with Huggingface' to log in."
        )
    else:
        token = oauth_token.token  # gr.OAuthToken wraps the raw token string
    if repo.startswith("https://huggingface.co/"):
        try:
            repo = RepoUrl(repo).repo_id
        except Exception:
            raise gr.Error(f"Not a valid repo id: {str(repo)}")
    edited = {"revision": f"refs/pr/{pr_number}"} if pr_number else {"create_pr": True}
    try:
        try:  # check if there is a readme already
            readme_text = get_edited_yaml_readme(
                df, repo, token=token
            ) + get_eval_results(df, repo)
        except Exception as e:
            if "Repo card metadata block was not found." in str(e):  # There is no readme
                readme_text = get_edited_yaml_readme(df, repo, token=token)
            else:
print(f"Something went wrong: {e}")
        liste = [
            CommitOperationAdd(
                path_in_repo="README.md", path_or_fileobj=readme_text.encode()
            )
        ]
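        # create_commit pushes README.md and, when create_pr=True, returns a CommitInfo whose pr_url points at the new PR.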
        commit = create_commit(
            repo_id=repo,
            token=token,
            operations=liste,
            commit_message=message,
            commit_description=desc,
            repo_type="model",
            **edited,
        ).pr_url
        return commit
    except Exception as e:
        if "Discussions are disabled for this repo" in str(e):
            return "Discussions disabled"
        elif "Cannot access gated repo" in str(e):
            return "Gated repo"
        elif "Repository Not Found" in str(e):
            return "Repository Not Found"
        else:
            return e