# functions.py — helpers for the "portuguese-leaderboard-results-to-modelcard" Space
# (upstream commit 8c44c94)
import os
from huggingface_hub import CommitOperationAdd, create_commit, RepoUrl
from huggingface_hub import EvalResult, ModelCard
from huggingface_hub.repocard_data import eval_results_to_model_index
import time
from pytablewriter import MarkdownTableWriter
import gradio as gr
from openllm import get_json_format_data, get_datas
import pandas as pd
import traceback
from huggingface_hub import HfApi
# Token for the bot account that opens PRs when the user is not logged in.
BOT_HF_TOKEN = os.getenv('BOT_HF_TOKEN')

# Snapshot of the leaderboard results, fetched once at import time.
data = get_json_format_data()
finished_models = get_datas(data)
df = pd.DataFrame(finished_models)

# Attribution used in EvalResult metadata and for duplicate-PR detection.
source_name = "Open Portuguese LLM Leaderboard"
default_pull_request_title = "Adding the Open Portuguese LLM Leaderboard Evaluation Results"

# PR description attached to every commit created by this Space.
desc = """
This is an automated PR created with https://huggingface.co/spaces/eduagarcia-temp/portuguese-leaderboard-results-to-modelcard
The purpose of this PR is to add evaluation results from the [🚀 Open Portuguese LLM Leaderboard](https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard) to your model card.
If you encounter any issues, please report them to https://huggingface.co/spaces/eduagarcia-temp/portuguese-leaderboard-results-to-modelcard/discussions
"""
def search(df, value):
    """Return the leaderboard row whose "Model Name" equals *value* as a dict.

    Returns None when no row matches; when several match, the first one wins.
    """
    matches = df[df["Model Name"] == value]
    if matches.empty:
        return None
    return matches.iloc[0].to_dict()
def get_details_url(repo):
    """Link to the raw per-task result files for *repo* in the results dataset."""
    base = "https://huggingface.co/datasets/eduagarcia-temp/llm_pt_leaderboard_raw_results/tree/main"
    return f"{base}/{repo}"
def get_query_url(repo):
    """Link that pre-fills the leaderboard search box with *repo*."""
    leaderboard = "https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard"
    return f"{leaderboard}?query={repo}"
def get_task_summary(results):
    """Map each leaderboard task to its EvalResult-shaped metadata dict.

    *results* is a leaderboard row (as returned by ``search``) keyed by task
    display name; only the metric value is read from it, everything else is
    static per-task configuration.
    """
    # (key, dataset_type, dataset_name, metric_type, metric_name, split, few_shot)
    task_specs = [
        ("ENEM", "eduagarcia/enem_challenge", "ENEM Challenge (No Images)", "acc", "accuracy", "train", 3),
        ("BLUEX", "eduagarcia-temp/BLUEX_without_images", "BLUEX (No Images)", "acc", "accuracy", "train", 3),
        ("OAB Exams", "eduagarcia/oab_exams", "OAB Exams", "acc", "accuracy", "train", 3),
        ("ASSIN2 RTE", "assin2", "Assin2 RTE", "f1_macro", "f1-macro", "test", 15),
        ("ASSIN2 STS", "eduagarcia/portuguese_benchmark", "Assin2 STS", "pearson", "pearson", "test", 15),
        ("FAQUAD NLI", "ruanchaves/faquad-nli", "FaQuAD NLI", "f1_macro", "f1-macro", "test", 15),
        ("HateBR", "ruanchaves/hatebr", "HateBR Binary", "f1_macro", "f1-macro", "test", 25),
        ("PT Hate Speech", "hate_speech_portuguese", "PT Hate Speech Binary", "f1_macro", "f1-macro", "test", 25),
        ("tweetSentBR", "eduagarcia/tweetsentbr_fewshot", "tweetSentBR", "f1_macro", "f1-macro", "test", 25),
    ]
    return {
        key: {
            "dataset_type": ds_type,
            "dataset_name": ds_name,
            "metric_type": metric_type,
            "metric_value": results[key],
            "dataset_config": None,
            "dataset_split": split,
            "dataset_revision": None,
            "dataset_args": {"num_few_shot": few_shot},
            "metric_name": metric_name,
        }
        for key, ds_type, ds_name, metric_type, metric_name, split, few_shot in task_specs
    }
def get_eval_results(repo):
    # Render a markdown summary of *repo*'s leaderboard scores: the average
    # plus a per-task table, with links to the raw results and the leaderboard.
    results = search(df, repo)  # NOTE(review): None if the model is absent — get_task_summary would then raise; confirm callers only pass evaluated models
    task_summary = get_task_summary(results)
    md_writer = MarkdownTableWriter()
    md_writer.headers = ["Metric", "Value"]
    # First row is the bolded overall average, then one row per task.
    md_writer.value_matrix = [["Average", f"**{results['Average ⬆️']}**"]] + [[v["dataset_name"], v["metric_value"]] for v in task_summary.values()]
    text = f"""
# Open Portuguese LLM Leaderboard Evaluation Results
Detailed results can be found [here]({get_details_url(repo)}) and on the [🚀 Open Portuguese LLM Leaderboard](https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard)
{md_writer.dumps()}
"""
    return text
def get_edited_yaml_readme(repo, token: str | None):
    # Return the full model-card text for *repo* with this leaderboard's
    # EvalResults merged into the card's YAML metadata.
    card = ModelCard.load(repo, token=token)
    results = search(df, repo)
    # Fields shared by every EvalResult emitted for this leaderboard.
    common = {"task_type": 'text-generation', "task_name": 'Text Generation', "source_name": source_name, "source_url": get_query_url(repo)}
    tasks_results = get_task_summary(results)
    if not card.data['eval_results']: # No results reported yet, we initialize the metadata
        card.data["model-index"] = eval_results_to_model_index(repo.split('/')[1], [EvalResult(**task, **common) for task in tasks_results.values()])
    else: # We add the new evaluations
        for task in tasks_results.values():
            cur_result = EvalResult(**task, **common)
            # Skip entries already present for the same task/dataset/metric
            # (regardless of the reported value) to avoid duplicates.
            if any(result.is_equal_except_value(cur_result) for result in card.data['eval_results']):
                continue
            card.data['eval_results'].append(cur_result)
    return str(card)
def pr_already_exists(repo, token: str | None = None):
    """Check whether this leaderboard's results were already added to *repo*.

    Looks at the card metadata, the card body text, and the repo's
    discussions/PRs; returns True on the first sign of a previous submission.
    """
    card = ModelCard.load(repo, token=token)
    # Metadata already carries eval results attributed to this leaderboard.
    if 'eval_results' in card.data and card.data['eval_results']:
        if any(result.source_name == source_name for result in card.data['eval_results']):
            return True
    # The card body already mentions the leaderboard (either name variant).
    for marker in ('Open Portuguese LLM Leaderboard', 'Open PT LLM Leaderboard'):
        if marker in card.content:
            return True
    # A discussion/PR from the bot (or the maintainer's PRs) is already open.
    api = HfApi(token=token)
    for discussion in api.get_repo_discussions(repo):
        if discussion.title == default_pull_request_title:
            return True
        if discussion.author == "leaderboard-pt-pr-bot":
            return True
        if discussion.author == "eduagarcia" and discussion.is_pull_request:
            return True
    return False
def commit(repo, pr_number=None, message=default_pull_request_title, oauth_token: gr.OAuthToken | None = None, check_if_pr_exists=False): # specify pr number if you want to edit it, don't if you don't want
    """Open (or update) a pull request adding leaderboard results to *repo*'s model card.

    Pass ``pr_number`` to push to an existing PR branch; leave it None to create
    a new PR. Returns the PR url on success, a short status string for known
    failure modes, or the raised exception object otherwise.
    """
    if oauth_token is None:
        # Fall back to the bot account when the user is not authenticated.
        gr.Warning("You are not logged in; therefore, the leaderboard-pr-bot will open the pull request instead of you. Click on 'Sign in with Huggingface' to log in.")
        token = BOT_HF_TOKEN
    elif oauth_token.expires_at < time.time():
        raise gr.Error("Token expired. Logout and try again.")
    else:
        token = oauth_token.token

    # Accept a full URL as well as a plain "owner/name" repo id.
    if repo.startswith("https://huggingface.co/"):
        try:
            repo = RepoUrl(repo).repo_id
        except Exception:
            raise gr.Error(f"Not a valid repo id: {str(repo)}")

    # The bot never duplicates an existing PR; logged-in users may opt in to the check.
    if check_if_pr_exists or token == BOT_HF_TOKEN:
        if pr_already_exists(repo, token):
            return "PR already exists, Login to make a duplicate PR"

    edited = {"revision": f"refs/pr/{pr_number}"} if pr_number else {"create_pr": True}
    try:
        try: # check if there is a readme already
            readme_text = get_edited_yaml_readme(repo, token=token).rstrip() + '\n\n' + get_eval_results(repo)
        except Exception as e:
            if "Repo card metadata block was not found." in str(e): # There is no readme
                readme_text = get_edited_yaml_readme(repo, token=token)
            else:
                traceback.print_exc()
                print(f"Something went wrong: {e}")
                # BUGFIX: re-raise so the outer handler reports the real error.
                # Previously execution fell through with `readme_text` unbound,
                # producing a confusing NameError instead of the actual failure.
                raise
        operations = [CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=readme_text.encode())]
        # Renamed from `commit` to avoid shadowing this function's own name.
        pr_url = create_commit(repo_id=repo, token=token, operations=operations, commit_message=message, commit_description=desc, repo_type="model", **edited).pr_url
        return pr_url
    except Exception as e:
        # Map common hub failure modes to short, user-readable statuses.
        if "Discussions are disabled for this repo" in str(e):
            return "Discussions disabled"
        elif "Cannot access gated repo" in str(e):
            return "Gated repo"
        elif "Repository Not Found" in str(e):
            return "Repository Not Found"
        else:
            return e
if __name__ == "__main__":
    # Smoke test: print the rendered results block for a known model.
    print(get_eval_results("Qwen/Qwen1.5-72B-Chat"))